code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 1. Enumerate sentence
# Create a function that prints words within a sentence along with their
# index in front of the word itself.
#
# For example if we give the function the argument "This is a sentence" it
# should print
#
# ```
# 1 This
# 2 is
# 3 a
# 4 sentence
# ```

def enumWords(sentence):
    """Print each word of *sentence* preceded by its 1-based position."""
    # enumerate(..., 1) yields (position, word) pairs starting at 1.
    for position, word in enumerate(sentence.split(), 1):
        print(position, word)


# # 2. Fibonacci
# Create a function `fibonacci()` which takes an integer `num` as an input
# and returns the first `num` fibonacci numbers.
#
# Eg. Input: `8`  ->  Output: `[1, 1, 2, 3, 5, 8, 13, 21]`
#
# *Hint: You might want to recall
# [fibonacci numbers](https://en.wikipedia.org/wiki/Fibonacci_number)*

# +
def fibonacci(num):
    """Return a list of the first *num* Fibonacci numbers (1, 1, 2, ...).

    Returns an empty list when num <= 0.
    """
    sequence = []
    a, b = 1, 1
    for _ in range(num):
        sequence.append(a)
        a, b = b, a + b
    return sequence

################ Checking code ########################
# Please don't edit this code
newList = fibonacci(10)
if newList == [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]:
    print("Success!")
else:
    print("Error! Your function returned")
    print(newList)
# -

# # 3. Guessing game 2
# Ask the user to input a number and then have the program guess it. After
# each guess, the user must input whether it was too high, too low or the
# correct number. In the end, the program must always guess the user's
# number and it must print out the number of guesses it needed.

# # 4. Find word
# Create a function that searches for a word within a provided list of
# words. Inputs to the function should be a list of words and a word to
# search for.
#
# The function should return `True` if the word is contained within the
# list and `False` otherwise.

# +
fruits = ["banana", "orange", "grapefruit", "lime", "lemon"]

def findWord(wordList, word):
    """Return True when *word* appears in *wordList*, else False."""
    # The `in` operator already performs the membership test we need.
    return word in wordList

################ Checking code ########################
# Please don't edit this code
if findWord(fruits, "lime"):
    print("Success!")
else:
    print("Try again!")
# -

# # 5. Powers of 2
# Use a while loop to find the largest power of 2 which is less than
# 30 million.

# # 6. Making a better school
# This exercise is on defining classes. This topic is covered in the
# optional notebook python-intro-3-extra-classes.
#
# Below is a copy of the `School`, `Student` and `Exam` classes, together
# with the code needed to populate an object of that class with students
# and exam results. Edit the `School` class to add in the following
# functions:
#
# * `.resits()` : the list of exams each student should resit if they get
#   an "F" or "U" grade.
# * `.prizeStudent()` : the name of the student who scored the highest
#   average percent across all of the exams.
# * `.reviseCourse(threshold)` : the name of the exam that gets the lowest
#   average score across all students, if that average is below `threshold`.
#
# Use these functions to find out which students need to resit which exams,
# which student should be awarded the annual school prize, and which
# courses should be revised as the average mark is less than 50%.
# +
class School:
    """A collection of Students and the Exams they sit."""

    def __init__(self):
        self._students = {}   # student name -> Student
        self._exams = []      # exam names, in the order they were added

    def addStudent(self, name):
        """Register a new student called *name*."""
        self._students[name] = Student(name)

    def addExam(self, exam, max_score):
        """Add an exam worth *max_score* and enrol every student in it."""
        self._exams.append(exam)
        for key in self._students.keys():
            self._students[key].addExam(exam, Exam(max_score))

    def addResult(self, name, exam, score):
        """Record *score* for student *name* in *exam*."""
        self._students[name].addResult(exam, score)

    def grades(self):
        """Return {student name: {exam name: grade letter}}."""
        grades = {}
        for name in self._students.keys():
            grades[name] = self._students[name].grades()
        return grades

    def resits(self):
        """Return {student name: [exams graded "F" or "U"]}."""
        resit = {}
        for name, student in self._students.items():
            resit[name] = [exam for exam, grade in student.grades().items()
                           if grade in ("F", "U")]
        return resit

    def prizeStudent(self):
        """Return the name of the student with the highest mean percent
        across all exams (None when the school is empty)."""
        best_name = None
        best_mean = None
        for name, student in self._students.items():
            percents = [student.result(exam) for exam in self._exams]
            mean = sum(percents) / len(percents) if percents else 0.0
            if best_mean is None or mean > best_mean:
                best_name, best_mean = name, mean
        return best_name

    def reviseCourse(self, threshold):
        """Return the exam with the lowest mean percent across students,
        provided that mean is below *threshold*; otherwise None."""
        worst_exam = None
        worst_mean = None
        for exam in self._exams:
            percents = [s.result(exam) for s in self._students.values()]
            mean = sum(percents) / len(percents) if percents else 0.0
            if worst_mean is None or mean < worst_mean:
                worst_exam, worst_mean = exam, mean
        if worst_exam is not None and worst_mean < threshold:
            return worst_exam
        return None


class Student:
    """One student's set of exams and results."""

    def __init__(self, name):
        self._exams = {}   # exam name -> Exam
        self._name = name

    def addExam(self, name, exam):
        self._exams[name] = exam

    def addResult(self, name, score):
        self._exams[name].setResult(score)

    def result(self, exam):
        """Percent scored in *exam*."""
        return self._exams[exam].percent()

    def grade(self, exam):
        """Letter grade for *exam*."""
        return self._exams[exam].grade()

    def grades(self):
        """Return {exam name: grade letter} for every enrolled exam."""
        g = {}
        for exam in self._exams.keys():
            g[exam] = self.grade(exam)
        return g


class Exam:
    """A single exam: a maximum score and the score actually achieved."""

    def __init__(self, max_score=100):
        self._max_score = max_score
        self._actual_score = 0

    def percent(self):
        return 100.0 * self._actual_score / self._max_score

    def setResult(self, score):
        # Clamp the score into [0, max_score].
        if score < 0:
            self._actual_score = 0
        elif score > self._max_score:
            self._actual_score = self._max_score
        else:
            self._actual_score = score

    def grade(self):
        # An unset (zero) score is "Unclassified".
        if self._actual_score == 0:
            return "U"
        elif self.percent() > 70.0:
            return "A"
        elif self.percent() > 60.0:
            return "B"
        elif self.percent() > 50.0:
            return "C"
        else:
            return "F"


# NOTE: This is not a class method.
# (The original cell defined this helper twice, word for word — once with a
# typo in this comment; a single definition is kept.)
def addResults(school, exam, results):
    """Record a {student: score} mapping for *exam* in *school*."""
    for student in results.keys():
        school.addResult(student, exam, results[student])


# +
school = School()
school.grades()

students = ["Andrew", "James", "Laura"]

exams = {"Maths": 20, "Physics": 50, "English": 30}

results = {"Maths": {"Andrew": 13, "James": 17, "Laura": 14},
           "Physics": {"Andrew": 34, "James": 44, "Laura": 27},
           "English": {"Andrew": 26, "James": 14, "Laura": 29}}

for student in students:
    school.addStudent(student)

for exam in exams.keys():
    school.addExam(exam, exams[exam])

for result in results.keys():
    addResults(school, result, results[result])

school.grades()
python-intro-exercises.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
# Code to generate sharp-LIME explanations on Camelyon images
# (without nuclei contours)
# -

# !pip uninstall lime -y
# !pip install git+https://github.com/palatos/lime@ColorExperiments

# +
import numpy as np
from matplotlib import pyplot as plt
import h5py
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications.inception_v3 import preprocess_input

from lime import lime_image
from skimage.segmentation import mark_boundaries
# -

model = tf.keras.models.load_model(
    '/mnt/nas2/results/IntermediateResults/Mara/MELBA/baseline_def_1/best_model.h5',
    compile=False)
# Replace the final activation with a sigmoid so predict() yields probabilities.
model.layers[-1].activation = tf.keras.activations.sigmoid
model.compile()

# +
# We don't directly use the base model because we want to be sure we are
# perturbing the images BEFORE preprocessing them; otherwise covering the
# superpixels in 0 might not mean covering them in black.  The best way to
# control this is a wrapper-like class around the model that includes the
# preprocessing.  The sigmoid output is also converted to a softmax-like
# two-column array, because that is generally easier to handle and debug in
# LIME.  GradCAM doesn't care about any of this, so it can use the model
# directly.


class MyModel():
    """Model wrapper: preprocess batches, emit 2-class probabilities."""

    def __init__(self, model):
        self.model = model
        self.input_shape = model.input_shape
        self.output_shape = model.output_shape

    def predict(self, batch_images):
        # Image must be preprocessed before being passed to LIME.
        batch_images = preprocess_input(batch_images.copy())
        sigm = self.model.predict(batch_images)
        # Stack (1 - p, p) so the output looks like a softmax over 2 classes.
        softm = np.hstack((1 - sigm, sigm))
        return softm


model_with_preprocessing = MyModel(model)

# +
# NOTE(review): the original notebook inspected
# camelyon_segmentations[...].keys() in a cell ABOVE the one that opened the
# HDF5 file, which raises NameError when the cells are run top to bottom.
# The probe is kept, moved after both files are opened.
filename = "/mnt/nas2/results/IntermediateResults/Camelyon/all500/patches.hdf5"

camelyon_segmentations = h5py.File(
    '/mnt/nas2/results/IntermediateResults/Camelyon/cam_nuclei.h5py', 'r')
f = h5py.File(filename, 'r')

camelyon_segmentations['all500/tumor/level7/centre1/patient020/node4/patches'].keys()

im = camelyon_segmentations[
    'all500/tumor/level7/centre1/patient020/node4/patches/352/patch'][:]
mask = camelyon_segmentations[
    'all500/tumor/level7/centre1/patient020/node4/patches/352/mask'][:, :, 0]
# -

plt.imshow(mask)


def connected_components(image):
    """Label 4-connected foreground components of *image* in place.

    Non-zero pixels are overwritten with component tags starting at 2
    (0 and 1 already appear in the input mask).  Returns the component
    count and the list of tags still in use.
    """
    tags = []    # tags currently in use
    tag = 2      # next fresh tag (1 and 0 are already in the image)
    cntr = 0     # live component count
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i, j] != 0:
                if (i != 0 and j != 0 and image[i, j - 1] != 0
                        and image[i - 1, j] != 0
                        and image[i - 1, j] != image[i, j - 1]):
                    # Two different components meet here: merge them.
                    image[i, j] = image[i, j - 1]
                    tags.remove(image[i - 1, j])
                    cntr -= 1
                    image[image == image[i - 1, j]] = image[i, j]
                elif i != 0 and image[i - 1, j] != 0:
                    image[i, j] = image[i - 1, j]
                elif j != 0 and image[i, j - 1] != 0:
                    image[i, j] = image[i, j - 1]
                else:
                    # Pixel starts a brand-new component.
                    image[i, j] = tag
                    tags.append(tag)
                    tag += 1
                    cntr += 1
    return cntr, tags


cntr, tags = connected_components(mask)

# +
# Generate a 3x3 grid of square background superpixels.
squares = np.zeros(im.shape[:2])
counter = 0
for hor in range(3):
    for ver in range(3):
        squares[75 * hor:75 * (hor + 1), 75 * ver:75 * (ver + 1)] = counter
        counter += 1

# Renumber segments, because they are numbered arbitrarily in the mask files.
# segments = np.sum(mask[:,:,0:5],axis=2).copy()
segments = mask[:, :].copy()
temp_renumber = [8 + item for item in list(range(len(np.unique(segments))))]
renumbering_map = dict(zip(np.unique(segments), temp_renumber))
segments = np.vectorize(renumbering_map.get)(segments)
# Background (renumbered to 8) is replaced by the square grid above.
segments = np.where(segments.copy() == 8, squares, segments)


# LIME expects a segmentation function, so define one that just returns the
# segments above as superpixels.
def segfunc(im):
    return segments


# Mark the nuclei boundaries on a normalised copy of the image.
mark_colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0], [1, 0, 1]]
mark = im.copy() / 255
for i in range(5):
    mark = mark_boundaries(mark, mask[:, :].astype('uint8'), mode='outer',
                           outline_color=mark_colors[i])
# -

# Generate the plain LIME explanation for class 1 (default segmentation).
explainer = lime_image.LimeImageExplainer()
class_idx = 1
explanation_genetic = explainer.explain_instance(
    im.copy(),
    model_with_preprocessing.predict,
    segmentation_fn=None,  # segfunc,
    top_labels=3,   # explanations for both classes; any number > 2 works
    hide_color=0,   # cover superpixels in black
    num_samples=200,
    progress_bar=False)
dict_genetic = dict(explanation_genetic.local_exp[class_idx])
heatmap = np.vectorize(dict_genetic.get)(explanation_genetic.segments)

# Generate the sharp-LIME explanation for class 1 (nuclei superpixels).
explainer = lime_image.LimeImageExplainer()
class_idx = 1
explanation_genetic = explainer.explain_instance(
    im.copy(),
    model_with_preprocessing.predict,
    segmentation_fn=segfunc,
    top_labels=3,   # explanations for both classes; any number > 2 works
    hide_color=0,   # cover superpixels in black
    num_samples=200,
    progress_bar=False)
dict_genetic = dict(explanation_genetic.local_exp[class_idx])
sharp_heatmap = np.vectorize(dict_genetic.get)(explanation_genetic.segments)

plt.rcParams['figure.figsize'] = (20, 10)
plt.subplot(1, 4, 1)
plt.imshow(mark)
plt.axis('off')
plt.subplot(1, 4, 2)
plt.imshow(heatmap, cmap='RdBu',
           vmin=-sharp_heatmap.max(), vmax=sharp_heatmap.max())
plt.axis('off')
plt.subplot(1, 4, 3)
# NOTE(review): renamed from `im` — the original shadowed the image array
# with the AxesImage handle returned by imshow.
im_handle = plt.imshow(sharp_heatmap, cmap='RdBu',
                       vmin=-sharp_heatmap.max(), vmax=sharp_heatmap.max())
plt.axis('off')
plt.colorbar(im_handle, fraction=0.045)  # ,cax=cax)
notebooks/sharp-LIME-on-Camelyon.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Importing the Packages

# NOTE(review): duplicate imports of ARIMA and seaborn removed.
import numpy as np
import pandas as pd
import scipy
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
import statsmodels.graphics.tsaplots as sgt
import statsmodels.tsa.stattools as sts
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from pmdarima.arima import auto_arima
from pmdarima.arima import OCSBTest
from arch import arch_model
import yfinance
import warnings
warnings.filterwarnings("ignore")
sns.set()

# ## Importing the Data

# NOTE(review): the original passed `treads=True`; yfinance's keyword is
# `threads`, so the typo silently disabled multi-threaded downloading.
raw_data = yfinance.download(tickers="VOW3.DE, PAH3.DE, BMW.DE",
                             interval="1d", group_by='ticker',
                             auto_adjust=True, threads=True)

df = raw_data.copy()

# ## Defining Key Dates

# +
start_date = "2009-04-05"   # Starting Date
ann_1 = "2009-12-09"        # First Official Announcement - 49.9%
ann_2 = "2012-07-05"        # Second Official Announcement - 51.1%
end_date = "2014-01-01"     # Ending Date
d_gate = '2015-09-20'       # Dieselgate
# -

# ## Pre-processing the Data

# +
# Extracting Closing Prices
df['vol'] = df['VOW3.DE'].Close
df['por'] = df['PAH3.DE'].Close
df['bmw'] = df['BMW.DE'].Close

# Creating Returns (daily percent changes)
df['ret_vol'] = df['vol'].pct_change(1).mul(100)
df['ret_por'] = df['por'].pct_change(1).mul(100)
df['ret_bmw'] = df['bmw'].pct_change(1).mul(100)

# Creating Squared Returns (volatility proxy)
df['sq_vol'] = df.ret_vol.mul(df.ret_vol)
df['sq_por'] = df.ret_por.mul(df.ret_por)
df['sq_bmw'] = df.ret_bmw.mul(df.ret_bmw)

# Extracting Volume
df['q_vol'] = df['VOW3.DE'].Volume
df['q_por'] = df['PAH3.DE'].Volume
df['q_bmw'] = df['BMW.DE'].Volume
# -

# Assigning the Frequency and Filling NA Values
df = df.asfreq('b')
df = df.fillna(method='bfill')

# Removing Surplus Data
del df['VOW3.DE']
del df['PAH3.DE']
del df['BMW.DE']

# ## Plotting the Prices

df['vol'][start_date:end_date].plot(figsize=(20, 8), color="blue")
df['por'][start_date:end_date].plot(color="green")
df['bmw'][start_date:end_date].plot(color="gold")
plt.show()

# +
# Same plot, darker shade for each successive announcement period.
df['vol'][start_date:ann_1].plot(figsize=(20, 8), color="#33B8FF")
df['por'][start_date:ann_1].plot(color="#49FF3A")
df['bmw'][start_date:ann_1].plot(color="#FEB628")

df['vol'][ann_1:ann_2].plot(color="#1E7EB2")
df['por'][ann_1:ann_2].plot(color="#2FAB25")
df['bmw'][ann_1:ann_2].plot(color="#BA861F")

df['vol'][ann_2:end_date].plot(color="#0E3A52")
df['por'][ann_2:end_date].plot(color="#225414")
df['bmw'][ann_2:end_date].plot(color="#7C5913")

plt.legend(['Volkswagen', 'Porsche', 'BMW'])
plt.show()
# -

# ## Correlation


def _print_correlations(frame, start, end):
    """Print pairwise VW/Porsche/BMW correlations over frame[start:end].

    `end=None` means "to the end of the sample" and is labelled with the
    last index value, matching the original notebook's output.
    """
    label_end = frame.index[-1] if end is None else end
    vol = frame['vol'][start:end]
    por = frame['por'][start:end]
    bmw = frame['bmw'][start:end]
    print('Correlation among manufacturers from ' + str(start) + ' to '
          + str(label_end) + '\n')
    print('Volkswagen and Porsche correlation: \t' + str(vol.corr(por)))
    print('Volkswagen and BMW correlation: \t' + str(vol.corr(bmw)))
    print('Porsche and BMW correlation: \t\t' + str(por.corr(bmw)))


# NOTE(review): the original repeated this print block five times with only
# the date window changing; the loop bodies are factored into the helper.
_print_correlations(df, start_date, end_date)
_print_correlations(df, start_date, ann_1)
_print_correlations(df, ann_1, ann_2)
_print_correlations(df, ann_2, end_date)
_print_correlations(df, end_date, None)

# ## Best Fitting Models

# ### For Volkswagen

mod_pr_pre_vol = auto_arima(df.vol[start_date:ann_1],
                            exogenous=df[['por', 'bmw']][start_date:ann_1],
                            m=5, max_p=5, max_q=5)
mod_pr_btn_vol = auto_arima(df.vol[ann_1:ann_2],
                            exogenous=df[['por', 'bmw']][ann_1:ann_2],
                            m=5, max_p=5, max_q=5)
mod_pr_post_vol = auto_arima(df.vol[ann_2:end_date],
                             exogenous=df[['por', 'bmw']][ann_2:end_date],
                             m=5, max_p=5, max_q=5)

mod_pr_pre_vol.summary()
mod_pr_btn_vol.summary()
mod_pr_post_vol.summary()

# ### For Porsche

mod_pr_pre_por = auto_arima(df.por[start_date:ann_1],
                            exogenous=df[['vol', 'bmw']][start_date:ann_1],
                            m=5, max_p=5, max_q=5)
mod_pr_btn_por = auto_arima(df.por[ann_1:ann_2],
                            exogenous=df[['vol', 'bmw']][ann_1:ann_2],
                            m=5, max_p=5, max_q=5)
mod_pr_post_por = auto_arima(df.por[ann_2:end_date],
                             exogenous=df[['vol', 'bmw']][ann_2:end_date],
                             m=5, max_p=5, max_q=5)

mod_pr_pre_por.summary()
mod_pr_btn_por.summary()
mod_pr_post_por.summary()

# ## Predictions for the Future

# ### VW, no exogenous variables

# +
model_auto_pred_pr = auto_arima(df.vol[start_date:ann_1],
                                m=5, max_p=5, max_q=5,
                                max_P=5, max_Q=5, trend="ct")

df_auto_pred_pr = pd.DataFrame(
    model_auto_pred_pr.predict(n_periods=len(df[ann_1:ann_2])),
    index=df[ann_1:ann_2].index)

df_auto_pred_pr[ann_1:ann_2].plot(figsize=(20, 5), color="red")
df.vol[ann_1:ann_2].plot(color="blue")
plt.title("VW Predictions (no Exog) vs Real Data", size=24)
plt.show()
# -

df_auto_pred_pr[ann_1:'2010-03-01'].plot(figsize=(20, 5), color="red")
df.vol[ann_1:'2010-03-01'].plot(color="blue")
plt.title("VW Predictions (no Exog) vs Real Data (short term)", size=24)
plt.show()

# ### VW, with exogenous variables

# +
model_auto_pred_pr = auto_arima(df.vol[start_date:ann_1],
                                exogenous=df[['por']][start_date:ann_1],
                                m=5, max_p=5, max_q=5,
                                max_P=5, max_Q=5, trend="ct")

df_auto_pred_pr = pd.DataFrame(
    model_auto_pred_pr.predict(n_periods=len(df[ann_1:ann_2]),
                               exogenous=df[['por']][ann_1:ann_2]),
    index=df[ann_1:ann_2].index)

df_auto_pred_pr[ann_1:ann_2].plot(figsize=(20, 5), color="red")
df.vol[ann_1:ann_2].plot(color="blue")
plt.title("VW Predictions (Porsche as Exog) vs Real Data", size=24)
plt.show()

# +
model_auto_pred_pr = auto_arima(df.vol[start_date:ann_1],
                                exogenous=df[['bmw']][start_date:ann_1],
                                m=5, max_p=5, max_q=5,
                                max_P=5, max_Q=5, trend="ct")

df_auto_pred_pr = pd.DataFrame(
    model_auto_pred_pr.predict(n_periods=len(df[ann_1:ann_2]),
                               exogenous=df[['bmw']][ann_1:ann_2]),
    index=df[ann_1:ann_2].index)

df_auto_pred_pr[ann_1:ann_2].plot(figsize=(20, 5), color="red")
df.vol[ann_1:ann_2].plot(color="blue")
plt.title("VW Predictions (Market Benchmark as Exog) vs Real Data", size=24)
plt.show()

# +
model_auto_pred_pr = auto_arima(df.vol[start_date:ann_1],
                                exogenous=df[['por', 'bmw']][start_date:ann_1],
                                m=5, max_p=5, max_q=5,
                                max_P=5, max_Q=5, trend="ct")

df_auto_pred_pr = pd.DataFrame(
    model_auto_pred_pr.predict(n_periods=len(df[ann_1:ann_2]),
                               exogenous=df[['por', 'bmw']][ann_1:ann_2]),
    index=df[ann_1:ann_2].index)

df_auto_pred_pr[ann_1:ann_2].plot(figsize=(20, 5), color="red")
df.vol[ann_1:ann_2].plot(color="blue")
plt.title("VW Predictions (Porsche and Market as Exog) vs Real Data", size=24)
plt.show()
# -

# ## Volatility

# ### Volatility of VW for Each Period

df['sq_vol'][start_date:ann_1].plot(figsize=(20, 5), color="#33B8FF")
df['sq_vol'][ann_1:ann_2].plot(color="#1E7EB2")
df['sq_vol'][ann_2:end_date].plot(color="#0E3A52")
plt.show()

# ### Volatility Trends for Each Period

# +
model_garch_pre = arch_model(df.ret_vol[start_date:ann_1], mean="Constant",
                             vol="GARCH", p=1, q=1)
results_garch_pre = model_garch_pre.fit(update_freq=5)

model_garch_btn = arch_model(df.ret_vol[ann_1:ann_2], mean="Constant",
                             vol="GARCH", p=1, q=1)
results_garch_btn = model_garch_btn.fit(update_freq=5)

model_garch_post = arch_model(df.ret_vol[ann_2:end_date], mean="Constant",
                              vol="GARCH", p=1, q=1)
results_garch_post = model_garch_post.fit(update_freq=5)
# -

results_garch_pre.summary()
results_garch_btn.summary()
results_garch_post.summary()
Section 15 - Business Case - Completed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Problem set 3. Why is the 20th-century world so unequal? # # ## Do resources play an important role on a global scale in relative prosperity? # # These problem set assignments are a required part of the course. # # Collaborating on the problem sets is more than okay—it is encouraged! Seek help from a classmate or an instructor or a roommate or a passerby when you get stuck! (Explaining things is beneficial, too—the best way to solidify your knowledge of a subject is to explain it.) # # But the work should be your own. # # No cutting-&-pasting from others' problem sets, please! We want you to learn this stuff, and your fingers typing every keystroke is an important way of building muscle memory here. # # In this problem set, you will... # # Let us get started! # # &nbsp; # # 1. Preliminaries # # ### A. Computing environment # # First, we set up the computing environment with the libraries we need: # + # 3.1.A.1. set up the computing environment: ensure that graphs # appear inline in the notebook & not in extra windows: # %matplotlib inline # + deletable=true # 3.1.A.2. set up the computing environment: get the ok system library... from client.api.notebook import Notebook # ok = Notebook('ps02.ok') # + # 3.1.A.3. set up the computing environment: import other libraries import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt # - # &nbsp; # # ### B. Reproduce the results of problem set 1 # # Recall our estimates of humanity's economy in the very long run from the last problem set. Let's repeat their construction: # # + # 3.1.B.1. 
repeating the work of problem set 1: long_run_growth_list = [ [-68000, 0.1, 1200, 379.47], [-8000, 2.5, 1200, 1897.37], [-6000, 7, 900, 2381.18], [-3000, 15, 900, 3485.68], [-1000, 50, 900, 6363.96], [1, 170, 900, 11734.56], [1500, 500, 900, 20124.61], [1770, 750, 1100, 30124.74], [1870, 1300, 1300, 46872.1], [2020, 7600, 11842, 1032370.8] ] long_run_growth_df = pd.DataFrame( data=np.array(long_run_growth_list), columns = ['year', 'population', 'income_level', 'human_ideas'] ) long_run_growth_df['year'] = long_run_growth_df['year'].apply(np.int64) initial_year = long_run_growth_df['year'][0:10] span = [] g = [] h = [] n = [] for t in range(9): span = span + [long_run_growth_df['year'][t+1]-long_run_growth_df['year'][t]] h = h + [np.log(long_run_growth_df['human_ideas'][t+1]/long_run_growth_df['human_ideas'][t])/span[t]] g = g + [np.log(long_run_growth_df['income_level'][t+1]/long_run_growth_df['income_level'][t])/span[t]] n = n + [np.log(long_run_growth_df['population'][t+1]/long_run_growth_df['population'][t])/span[t]] long_run_growth_df.set_index('year', inplace=True) # finally, add a note to the end of each observation, reminding # us of what was going on in human history back in each of the # eras into which we have divided it eras = ['at the dawn', 'agriculture & herding', 'proto-agrarian age', 'writing', 'axial age', 'dark & middle age slowdown', 'commercial revolution', 'industrial revolution', 'modern economic growth', 'whatever the 21st century brings'] long_run_growth_df['eras'] = eras format_dict = {'year': '{d}', 'human_ideas': '{0:,.0f}', 'income_level': '${0:,.0f}', 'population': '{0:,.1f}'} print('WORLD LEVELS') long_run_growth_df.style.format(format_dict) # + # 3.1.B.2. 
data_list = np.array([span, h, g, n]).transpose() long_run_growth_rates_df = pd.DataFrame( data=data_list, columns = ['span', 'n', 'g', 'h']) long_run_growth_rates_df['initial_year'] = initial_year eras2 = eras[0:9] long_run_growth_rates_df['era'] = eras2 format_dict = {'initial_year':'{0:.0f}', 'span': '{0:.0f}', 'h': '{0:,.3%}', 'g': '{0:,.2%}', 'n': '{0:,.2%}'} print('WORLD GROWTH RATES') long_run_growth_rates_df.style.format(format_dict) # - # &nbsp; # # # 2. Global North & Global South # # ### A. Global North # # Now let me provide you with another set of data analogous to those for the world as a whole that I had you examine in problem set 1. This set will be for the "global north" or "west"—that part of the world that dominated the Americas starting in the 1500s and then became much richer and more powerful than the rest since the start of the 1700s—consisting of northwest Europe, and then by 1770 of that plus the Atlantic seaboard of the Americas, adding on Australia and New Zealand by 1870, and now including those areas plus southwest and some of central Europe, plus Japan, South Korea, and Taiwan. # # The data are: # + # 3.2.A.1. for the "global north" or "west": long_run_growth_list_global_north = [ [-68000, 0.00001, 1200, 379.47, 0.0001], [-8000, 0.1, 1200, 1897.37, 0.0294], [-6000, 0.2, 900, 2012.5, 0.0294], [-3000, 0.5, 900, 3182, 0.0294], [-1000, 2, 900, 6364.1, 0.0294], [1, 5, 900, 10062.5, 0.0294], [1500, 25, 1000, 25000.4, 0.0294], [1770, 75, 1400, 42866.8, 0.0588], [1870, 175, 2800, 106928.6, 0.0882], [2020, 800, 50000, 3580637.4, 0.1147] ] # - # Note that there is an extra column: it will be "resources"—the share of the world's resources that is occupied/owned/conquered/exploited by the global north. For the world as a whole, it always had 100% of the world's resources. But as the global north expands, and as it engrosses ownership of resources beyond its borders, its share of the world's resources rises. 
# # Then, with this list-of-lists, repeat what was done for the world as a whole by stuffing them into a dataframe, and doing the calculations of growth rates by era for the growth-rates dataframe: # + # 3.2.A.2. create global-north levels dataframe long_run_growth_global_north_df = pd.DataFrame( data=np.array(long_run_growth_list_global_north), columns = ['year', 'population', 'income_level', 'human_ideas', 'resources'] ) long_run_growth_global_north_df['year'] = long_run_growth_global_north_df['year'].apply(np.int64) # + # 3.2.A.3. do calculations for the global-north growth-rates dataframe initial_year = long_run_growth_global_north_df['year'][0:10] span = [] g = [] h = [] n = [] rho = [] for t in range(9): span = span + [long_run_growth_global_north_df['year'][t+1]-long_run_growth_global_north_df['year'][t]] h = h + [np.log(long_run_growth_global_north_df['human_ideas'][t+1]/long_run_growth_global_north_df['human_ideas'][t])/span[t]] g = g + [np.log(long_run_growth_global_north_df['income_level'][t+1]/long_run_growth_global_north_df['income_level'][t])/span[t]] n = n + [np.log(long_run_growth_global_north_df['population'][t+1]/long_run_growth_global_north_df['population'][t])/span[t]] rho = rho + [np.log(long_run_growth_global_north_df['resources'][t+1]/long_run_growth_global_north_df['resources'][t])/span[t]] long_run_growth_global_north_df.set_index('year', inplace=True) # + # 3.2.A.4. 
finally, add a note to the end of each observation, reminding # us of what was going on in human history back in each of the # eras into which we have divided it eras = ['at the dawn', 'agriculture & herding', 'proto-agrarian age', 'writing', 'axial age', 'dark & middle age slowdown', 'commercial revolution', 'industrial revolution', 'modern economic growth', 'whatever the 21st century brings'] long_run_growth_global_north_df['eras'] = eras format_dict = {'year': '{d}', 'human_ideas': '{0:,.0f}', 'income_level': '${0:,.0f}', 'population': '{0:,.1f}','resources': '{0:,.3f}'} print('GLOBAL NORTH LEVELS') long_run_growth_global_north_df.style.format(format_dict) # - # Now construct the global-north growth-rates dataframe: # + # 3.2.A.5. create global-north growth-rates dataframe data_list = np.array([span, h, g, n, rho]).transpose() long_run_growth_rates_global_north_df = pd.DataFrame( data=data_list, columns = ['span', 'h', 'g', 'n', 'rho']) long_run_growth_rates_global_north_df['initial_year'] = initial_year eras2 = eras[0:9] long_run_growth_rates_global_north_df['era'] = eras2 format_dict = {'initial_year':'{0:.0f}', 'span': '{0:.0f}', 'h': '{0:,.3%}', 'g': '{0:,.2%}', 'n': '{0:,.2%}', 'n': '{0:,.2%}' , 'rho': '{0:,.3%}'} print('GLOBAL NORTH GROWTH RATES') long_run_growth_rates_global_north_df.style.format(format_dict) # - # &nbsp; # # ### B. Global South # # Now let me provide you with yet a third set of data, also analogous to those for the world as a whole that I had you examine in problem set 1. This set will be for the "global south" or "non-west"—that part of the world that was outside the charmed circle. It consists at the start of everything outside northwest Europe. As of 1770 we subtract the Atlantic seaboard of the Americas, we substract Australia and New Zealand by 1870, and by now we have subtraced those areas plus southwest and some of central Europe, plus Japan, South Korea, and Taiwan: # + # 3.2.B.1. 
for the "global south" or "not-west": long_run_growth_list_global_south = [ [-68000, 0.1, 1200, 379.47, 0.9999], [-8000, 2.4, 1200, 1897.37, 0.971], [-6000, 6.8, 900, 2395.3, 0.971], [-3000, 14.5, 900, 3497.9, 0.971], [-1000, 48, 900, 6364.1, 0.971], [1, 165, 900, 11799.4, 0.971], [1500, 475, 900, 20019.9, 0.971], [1770, 675, 1070, 29386.7, 0.9412], [1870, 1125, 1000, 36172.8, 0.9118], [2020, 6800, 7700, 693805.9, 0.8853] ] # - # Now let's have you write a code cell to duplicate the work done in code cell #3.2.A.2 above. Simply wherever you see the character string "north" replace it with "south", and then run the code cell: # + # 3.2.B.2. create global-south levels dataframe long_run_growth_global_south_df = pd.DataFrame( data=np.array(long_run_growth_list_global_south), columns = ['year', 'population', 'income_level', 'human_ideas', 'resources'] ) long_run_growth_global_south_df['year'] = long_run_growth_global_south_df['year'].apply(np.int64) # - # The cell you just wrote should then mesh perfectly with the next three cells to create and print the global-south levels dataframe: # + # 3.2.B.3. do calculations for the global-south growth-rates # dataframe initial_year = long_run_growth_global_south_df['year'][0:10] span = [] g = [] h = [] n = [] rho = [] for t in range(9): span = span + [long_run_growth_global_south_df['year'][t+1]-long_run_growth_global_south_df['year'][t]] h = h + [np.log(long_run_growth_global_south_df['human_ideas'][t+1]/long_run_growth_global_south_df['human_ideas'][t])/span[t]] g = g + [np.log(long_run_growth_global_south_df['income_level'][t+1]/long_run_growth_global_south_df['income_level'][t])/span[t]] n = n + [np.log(long_run_growth_global_south_df['population'][t+1]/long_run_growth_global_south_df['population'][t])/span[t]] rho = rho + [np.log(long_run_growth_global_south_df['resources'][t+1]/long_run_growth_global_south_df['resources'][t])/span[t]] long_run_growth_global_south_df.set_index('year', inplace=True) # + # 3.2.B.4. 
add legend notes & print the dataframe # # finally, add a note to the end of each observation, reminding # us of what was going on in human history back in each of the # eras into which we have divided it eras = ['at the dawn', 'agriculture & herding', 'proto-agrarian age', 'writing', 'axial age', 'dark & middle age slowdown', 'commercial revolution', 'industrial revolution', 'modern economic growth', 'whatever the 21st century brings'] long_run_growth_global_south_df['eras'] = eras format_dict = {'year': '{d}', 'human_ideas': '{0:,.0f}', 'income_level': '${0:,.0f}', 'population': '{0:,.1f}', 'resources': '{0:,.3f}'} print('GLOBAL SOUTH LEVELS') long_run_growth_global_south_df.style.format(format_dict) # - # Did it work? Everything should have run, and should have produced something like: # # <img src="https://delong.typepad.com/img/very-long-run-growth-global-south-levels-python-2020-09-23.png" width="500" /> # # If not, recheck your work. And if you are still stuck, call someone for help... # Now construct the global-south growth-rates dataframe, duplicating what was in the above code cell #3.2.A.5, once again simply by taking the code and replacing the character string "north" by "south every place that it appears: # + # 3.2.B.5. 
create global-south growth-rates dataframe data_list = np.array([span, n, g, h, rho]).transpose() long_run_growth_rates_global_south_df = pd.DataFrame( data=data_list, columns = ['span', 'n', 'g', 'h', 'rho']) long_run_growth_rates_global_south_df['initial_year'] = initial_year eras2 = eras[0:9] long_run_growth_rates_global_south_df['era'] = eras2 format_dict = {'initial_year':'{0:.0f}', 'span': '{0:.0f}', 'h': '{0:,.3%}', 'g': '{0:,.2%}', 'n': '{0:,.2%}', 'n': '{0:,.2%}' , 'rho': '{0:,.3%}'} print('GLOBAL SOUTH GROWTH RATES') long_run_growth_rates_global_south_df.style.format(format_dict) # - # And, once again, if things did not work and did not produce a table analogous to the "GLOBAL NORTH GROWTH RATES" table above, go back, try to figure out what went wrong. And correct your work. # &nbsp; # # ### C. North-South Comparisons # # # Now let us calculate the differences in growth rates in labor productivity, incomes, and living standards between the global north and the global south: # + # 3.2.C.1. "differences" dataframe g_north_south_diff = pd.DataFrame(long_run_growth_rates_global_north_df[['g', 'n', 'h', 'rho']] - long_run_growth_rates_global_south_df[['g', 'n', 'h', 'rho']]) g_north_south_diff['era'] = ['-68000 to -8000', '-8000 to -6000', '-8000 to -3000', '-3000 to -1000', '-1000 to 1', '1-1500', '1500-1770', '1770-1870', '1870-2020'] g_north_south_diff['span'] = long_run_growth_rates_global_north_df['span'] g_north_south_diff['initial_year'] = long_run_growth_rates_global_north_df['initial_year'] format_dict = {'initial_year':'{0:.0f}', 'span': '{0:.0f}', 'h': '{0:,.3%}', 'g': '{0:,.2%}', 'n': '{0:,.2%}', 'n': '{0:,.2%}' , 'rho': '{0:,.3%}'} print('GROWTH RATE NORTH-SOUTH DIFFERENCES') g_north_south_diff.style.format(format_dict) # - # #### 1. Population # # Note that the population of the global north grows for two reasons: (1) the populations of economies already in it expand, and (2) new economies join it. 
In 1500 the civilization we now call the "global north" was pretty much restricted to the countries that touched or were just across the sea from what is now Belgium and Holland—and of what are now France and Germany, only northern France and northwestern Germany counted. Now it encompasses all of western and most of central Europe, North America, and Asia's Pacific Rim plus Australia and New Zealand.
#
# &nbsp;
#
# #### 2. Resources
#
# Note that the natural resources controlled by the global north grew both because the global north expanded in area and because its citizens acquired—well, largely stole—resources outside of the global north, many of which global north citizens control to this day.
#
# &nbsp;
#
# #### 3. Productivity
#
# We first see the global north acquiring a (very small) edge in productivity, income per capita, and living standard growth over the period 1 to 1500. Northwest Europe in 1500 is in an up-phase of the Malthusian cycle: it lost 1/4 of its population to the Black Plague of 1346-8, and subsequent plagues kept its population from recovering, leaving it with a favorable land-labor ratio and a high level of labor productivity. It also had a small edge in technology: sails and guns and clocks, mostly.
#
# Then, after 1500, in the three subsequent Commercial Revolution, Industrial Revolution, and 20th-century Modern Economic Growth eras, the global north's productivity and income edge surges: useful ideas are invented and deployed in the global north faster than they diffuse across the global south, resources are engrossed by the global north through settlement, expansion, conquest, theft, purchase, and investment. And, until the demographic transition to something close to zero population growth in the global north becomes well established, its population share of the world grows as well.
#
# &nbsp;
#
# #### 4. 
Not in the Model
#
# The numbers in the "differences" table above understate the magnitude of the true historical differences between the global north and south for three reasons not in the model that seem to me to be obvious, and perhaps for other non-obvious reasons as well.
#
# First, the global north did not just gain growth advantage from the workings of the global economy and its imperialism after 1500. It gained a current consumption advantage as well, for a component of production and income earned in the global south was transferred to the global north.
#
# Second, the model above has no place in it for the people killed, enslaved, and enserfed.
#
# Third, the model has no place in it for differences and changes in the terms-of-trade between global north and global south. Put broadly, the terms of trade of market exchange favored the global north from 1500 to 1770, then favored the global south from 1770 to 1860, then favored the global south as far as manufactured products and the global north as far as resource products were concerned from 1860 to 1950, and, last, have favored the global north—with a very important exception of oil—since 1950.
#
# Plus imperialism and exploitation were profoundly uneven. The share of global south resources in Asia conquered by the global north was small. But if you happened to live on an island in the East Indies and the Portuguese or Dutch arrived, the likelihood was that they took everything that was not nailed down—and then exploited and diverted the income from a lot that was.

# &nbsp;
#
# #### 5. Questions:
#
# Now let me ask you some questions, that you can then do calculations to answer:

# **5.a.** What relative multiple of global-south average income and productivity do we guess global-north average income and productivity was in 1500? (To answer this question, reach back into your dataframes and do a calculation to pull out and then print the answer, like this:

# +
# 3.2.C.5.a. 
global-north income multiple in 1500 income_mult_1500 = (long_run_growth_global_north_df['income_level'][1500] / long_run_growth_global_south_df['income_level'][1500]) print("The global north's relative income multiple in 1500 =", income_mult_1500) # - # **5.b.** What relative multiple of global-south average income and productivity do we guess global-north average income and productivity is today? # + # 3.2.C.5.b. global-north income multiple today income_mult_2020 = (long_run_growth_global_north_df['income_level'][2020] / long_run_growth_global_south_df['income_level'][2020]) print("The global north's relative income multiple today =", income_mult_2020) # - # **5.c.** How much greater has been the average annual growth rate in income and productivity in the global north than the global south since 1500? # + # 3.2.C.5.c. income growth-rate difference since 1500? income_growth_rate_diff_1500_2020 = (g_north_south_diff['g'][6]*g_north_south_diff['span'][6] + g_north_south_diff['g'][7]*g_north_south_diff['span'][7] + g_north_south_diff['g'][8]*g_north_south_diff['span'][8] )/(g_north_south_diff['span'][6]+g_north_south_diff['span'][7]+g_north_south_diff['span'][8]) print("The difference in annual average income growth rates since 1500 =", income_growth_rate_diff_1500_2020) # - # **5.d.** How much greater has been the growth rate of the resources available to the global north than to the global south since 1500? # + # 3.2.C.5.d. resource growth-rate difference since 1500? 
resource_growth_rate_diff_1500_2020 = (g_north_south_diff['rho'][6]*g_north_south_diff['span'][6] + g_north_south_diff['rho'][7]*g_north_south_diff['span'][7] + g_north_south_diff['rho'][8]*g_north_south_diff['span'][8] )/(g_north_south_diff['span'][6]+g_north_south_diff['span'][7]+g_north_south_diff['span'][8]) print("The difference in annual average resource-availability growth rates since 1500 =", resource_growth_rate_diff_1500_2020) # - # **5.e.** Recall that our very crude growth framework assumes that ideas are twice as salient in boosting productivity and income than resources per capita are at retarding it—that while a 1% increase in the value of the ideas stock boosts income and productivity by 1%, other things being equal—_ceteris paribus_, if we drop into Latin, and _cet. par._ if we drop into Latin and abbreviate, as <NAME>'s teachers back around 1900 were wont to do—an increase in resources per capita by 1% increased income and productivity by only 0.5%. (And, of course, this runs in reverse as well for resource scarcity per capita or for ideas lack depressing income and productivity.) # # Suppose that the global north had not engrossed more of the world's resources since 1500—that the 850 million people in today's global north were still drawing on the 2.71% of the global resource base that northwest European civilization drew on back in 1500. # # What does our model then say would be the difference in income and productivity growth rates between the global north and south since 1500—the number that you calculated (or should have calculated) as 0.00339 (that is, 0.34%/year) in **5.c.**? # + # 3.2.C.5.e. 
counterfactual stable-resources income differential growth rate since 1500 resource_stability_counterfactual_income_growth_rate_diff_1500_2020 = ( income_growth_rate_diff_1500_2020 - resource_growth_rate_diff_1500_2020/2) print("RESOURCE STABILITY COUNTERFACTUAL") print("The difference in annual average income growth rates since 1500 would have been =", resource_stability_counterfactual_income_growth_rate_diff_1500_2020) # - # **5.f.** Under that resource-stability counterfactual, what relative multiple of global-south average income and productivity do we guess about the analogue to the answer to the 6.49 that is the answer to **5.b**—what global-north average income and productivity as a multiple of global south would have been today, holding all variable in our model other than resource access and availability constant? # + # 3.2.C.5.f. resource-stability counterfactual; current global-north income multiple resource_stability_counterfactual_income_diff_2020 = (income_mult_1500 * np.exp( resource_stability_counterfactual_income_growth_rate_diff_1500_2020 * (g_north_south_diff['span'][6]+g_north_south_diff['span'][7]+g_north_south_diff['span'][8]) )) print("RESOURCE STABILITY COUNTERFACTUAL") print("Global north average income today as a multiple of global south would have been =", resource_stability_counterfactual_income_diff_2020) # - # If all has gone well you got an answer of 6.49 for question **5.b**—the actual multiple of global north income today relative to global south—and an answer of 3.14 for question **5.f**—what that multiple would have been had the 850 million people today in the global north not owned and controlled immense proportions of the world's resources outside global-north civilization's original northwest European homelands clustered around what are now Belgium and Holland. 
# # Thus for question 5.g: # # **5.g.** Tell us, in the markdown cell immediately below, your thoughts as to the relevance or non-relevance of these two numbers—6.49 and 3.14—for what would be the "right" global political-economy order of resource ownership and control going forward into the 21st century. 500-1000 words, please. We are looking for you to set out what you think the best definition of "right" is here, why it is the best definition, whether these two numbers do or do not have a significant role to play in answering that question of the "right" order, and then how these two numbers play that role: # <span style="color:blue;">**ANSWER TO 5.g**: [500-1000 words of answer replace this text and go here...]</span> # ## 4. Done! # # Print your finished notebook to pdf, and upload it as an answer on the problem set 2 assignment page. URL: # ## 5. Appendix Programming Dos and Don'ts... # # ### A Running List... # # 1. **Do** restart your kernel and run cells up to your current working point every fifteen minutes or so. Yes, it takes a little time. But if you don't, sooner or later the machine's namespace will get confused, and then you will get confused about the state of the machine's namespace, and by assuming things about it that are false you will lose hours and hours... # &nbsp; # # 2. **Do** reload the page when restarting the kernel does not seem to do the job... # &nbsp; # # 3. **Do** edit code cells by copying them below your current version and then working on the copy: when you break everything in the current cell (as you will), you can then go back to the old cell and start fresh... # &nbsp; # # 4. **Do** exercise agile development practices: if there is a line of code that you have not tested, test it. The best way to test is to ask the machine to echo back to you the thing you have just created in its namespace to make sure that it is what you want it to be. 
Only after you are **certain** that your namespace contains what you think it does should you write the next line of code. And then you should immediately test it... # &nbsp; # # 5. **Do** take screenshots of your error messages... # &nbsp; # # 6. **Do** google your error messages: Ms. Google is your best friend here... # &nbsp; # # 7. **Do not** confuse assignment ("=") and test for equality ("=="). In general, if there is an "if" anywhere nearby, you should be testing for equality. If there is not, you should be assignment a variable in your namespace to a value. **Do** curse the mathematicians 500 years ago who did not realize that in the twenty-first century it would be very convenient if we had different and not confusable symbols for equals-as-assignment and equals-as-test... # &nbsp; # # ---- # # &nbsp; # # **Thanks to**: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>... # ---- # # &nbsp; # # ## <font color="880000"> Resources and Relative Prosperity on a Global Scale </font> # # <img src="https://tinyurl.com/20190119a-delong" width="300" style="float:right" /> # # ### <font color="000088">Catch Our Breath—Further Notes:</font> # # <br clear="all" /> # # ---- # # # # &nbsp; # # ----
.ipynb_checkpoints/ps03-answers-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: py39 # language: python # name: py39 # --- # + import json import numpy as np import torch from gpytorch.models import ExactGP from gpytorch.means import ConstantMean from gpytorch.kernels import ScaleKernel, RBFKernel from gpytorch.constraints import GreaterThan from gpytorch.distributions import MultivariateNormal from gpytorch.priors import GammaPrior, NormalPrior from gpytorch.likelihoods import GaussianLikelihood, FixedNoiseGaussianLikelihood from gpytorch.mlls import ExactMarginalLogLikelihood from utils.bo_utils import GPModel, plot_optimization, expected_improvement from bayesian_optimization import BayesianOptimization # - log_dir = "./experiments/bo_den" results = torch.load(f"{log_dir}/results.pt") params_space = results["p_space"].reshape(-1, 1) # + config = "./configs/bo_prior_sigma" with open(config + ".json") as f: config = json.load(f) lengthscale_prior = config["lengthscale_prior"] if "lengthscale_prior" in list(config.keys()) else dict(concentration=0.3, rate=1.) lengthscale_constraint = config["lengthscale_constraint"] if "lengthscale_constraint" in list(config.keys()) else 0.05 mean_prior = config["mean_prior"] if "mean_prior" in list(config.keys()) else dict(loc=25., scale=2.) noise_prior = config["noise_prior"] if "noise_prior" in list(config.keys()) else dict(concentration=1e-2, rate=100.) 
params = {p["name"]: p["bounds"] for p in config["parameter"]} bounds = np.array(list(params.values())) acq_kwargs = {"xi": 0.1} # + likelihood = GaussianLikelihood(noise_prior=GammaPrior(**noise_prior)) for i in range(len(results) - 1): vals = results[i] params_samples = vals["p_samples"] cost_samples = vals["c_samples"] eval_acq = lambda params, model, likelihood: expected_improvement( model, likelihood, params, params_samples, cost_samples, **acq_kwargs ) model = GPModel( params_samples, cost_samples, likelihood, lengthscale_prior, lengthscale_constraint, mean_prior ).double() model.load_state_dict(vals["state_dict"]) next_params = BayesianOptimization.propose_location( model=model, likelihood=likelihood, eval_acq=eval_acq, params_space=params_space, bounds=bounds, batch_size=3 ) acquisition = eval_acq(params_space.numpy(), model, likelihood) path = None # f"../bo_exps/fig_{i}.pdf" plot_optimization( model, likelihood, acquisition, next_params, params_space, params_samples, cost_samples, path ) # -
eval_bo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Time Series Visualization

# #### Importing Packages

# +
import statsmodels.api as sm
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
from pylab import rcParams
rcParams['figure.figsize'] = 11, 9
# -

# #### Load Time-Series Data

# +
# statsmodels comes with built-in datasets, so we can load a time-series dataset straight into memory
data = sm.datasets.co2.load_pandas()
co2 = data.data
co2.head()
# -

co2.index

# +
# weekly data can be tricky to work with, so let's use the monthly averages of our time series
y = co2['co2'].resample('MS').mean()
# here 'MS' means that we group the data in buckets by month and ensures that we are
# using the start of each month as the timestamp
y.head()
# -

# Retrieve data after year 1990
y['1990':]

# Retrieve data points between October 1995 and October 1996
y['1995-10-01':'1996-10-01']

# #### Handling Missing values in Time-Series Data

# +
print("Missing values:", y.isnull().sum())
y[y.isnull()]

# +
# the output tells us there are 5 months with missing values in our time series.
# FIX: the original `y.fillna(y.bfill())` was redundant -- `bfill()` already
# returns the back-filled series, so the outer fillna() was a no-op wrapper.
y = y.bfill()

# +
# check again if there is any missing value
y.isnull().sum()
# -

# #### Visualizing Time Series Data

y.plot(figsize=(15, 16))
plt.grid()
plt.show()

# Perform time-series seasonal decomposition.
# FIX: the model keyword was misspelled 'Addictive'; seasonal_decompose only treats
# models starting with 'm' as multiplicative, so the typo silently fell back to
# additive behavior -- spell the intended model out explicitly.
decomposition = sm.tsa.seasonal_decompose(y, model='additive')
fig = decomposition.plot()
plt.grid()
plt.show()
0 prediction-basics/time_series_visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Examining the effect of bias in deep network # I understand that bias are required in small networks, to shift the activation function. # But in the case of Deep network that has multiple layers of CNN, and other non -linear activations, is Bias making a difference? The answer this simple experiment is NO. # These are all the modules we'll be using later. Make sure you can import them # before proceeding further. from __future__ import print_function import numpy as np import tensorflow as tf from six.moves import cPickle as pickle from six.moves import range import matplotlib.pyplot as plt # + pickle_file = 'notMNIST.pickle' with open(pickle_file, 'rb') as f: save = pickle.load(f) train_dataset = save['train_dataset'] train_labels = save['train_labels'] valid_dataset = save['valid_dataset'] valid_labels = save['valid_labels'] test_dataset = save['test_dataset'] test_labels = save['test_labels'] del save # hint to help gc free up memory print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape) # - # Reformat into a TensorFlow-friendly shape: # # - convolutions need the image data formatted as a cube (width by height by #channels) # - labels as float 1-hot encodings. 
# + image_size = 28 num_labels = 10 num_channels = 1 # grayscale import numpy as np def reformat(dataset, labels): dataset = dataset.reshape( (-1, image_size, image_size, num_channels)).astype(np.float32) labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32) return dataset, labels train_dataset, train_labels = reformat(train_dataset, train_labels) valid_dataset, valid_labels = reformat(valid_dataset, valid_labels) test_dataset, test_labels = reformat(test_dataset, test_labels) print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape) # - def accuracy(predictions, labels): return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]) # + batch_size = 16 patch_size = 5 depth = 16 num_hidden = 64 graph = tf.Graph() with graph.as_default(): # Input data. tf_train_dataset = tf.placeholder( tf.float32, shape=(batch_size, image_size, image_size, num_channels)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. layer1_weights = tf.Variable(tf.truncated_normal( [patch_size, patch_size, num_channels, depth], stddev=0.1)) layer1_biases = tf.Variable(tf.zeros([depth])) layer2_weights = tf.Variable(tf.truncated_normal( [patch_size, patch_size, depth, depth], stddev=0.1)) layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth])) layer3_weights = tf.Variable(tf.truncated_normal( [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1)) layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden])) layer4_weights = tf.Variable(tf.truncated_normal( [num_hidden, num_labels], stddev=0.1)) layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels])) # define a Model with bias . 
def model_with_bias(data): conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME') hidden = tf.nn.relu(conv + layer1_biases) conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME') hidden = tf.nn.relu(conv + layer2_biases) shape = hidden.get_shape().as_list() reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]]) hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases) return tf.matmul(hidden, layer4_weights) + layer4_biases # define a Model without bias added in the convolutional layer. def model_without_bias(data): conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME') hidden = tf.nn.relu(conv ) # layer1_ bias is not added conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME') hidden = tf.nn.relu(conv) # + layer2_biases) shape = hidden.get_shape().as_list() reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]]) # bias are added only in Fully connected layer(layer 3 and layer 4) hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases) return tf.matmul(hidden, layer4_weights) + layer4_biases # Training computation. logits_with_bias = model_with_bias(tf_train_dataset) loss_with_bias = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits_with_bias)) logits_without_bias = model_without_bias(tf_train_dataset) loss_without_bias = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits_without_bias)) # Optimizer. optimizer_with_bias = tf.train.GradientDescentOptimizer(0.05).minimize(loss_with_bias) optimizer_without_bias = tf.train.GradientDescentOptimizer(0.05).minimize(loss_without_bias) # Predictions for the training, validation, and test data. 
train_prediction_with_bias = tf.nn.softmax(logits_with_bias) valid_prediction_with_bias = tf.nn.softmax(model_with_bias(tf_valid_dataset)) test_prediction_with_bias = tf.nn.softmax(model_with_bias(tf_test_dataset)) # Predictions for without train_prediction_without_bias = tf.nn.softmax(logits_without_bias) valid_prediction_without_bias = tf.nn.softmax(model_without_bias(tf_valid_dataset)) test_prediction_without_bias = tf.nn.softmax(model_without_bias(tf_test_dataset)) # + num_steps = 1001 with tf.Session(graph=graph) as session: tf.global_variables_initializer().run() print('Initialized') for step in range(num_steps): offset = (step * batch_size) % (train_labels.shape[0] - batch_size) batch_data = train_dataset[offset:(offset + batch_size), :, :, :] batch_labels = train_labels[offset:(offset + batch_size), :] feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} session.run(optimizer_with_bias, feed_dict=feed_dict) session.run(optimizer_without_bias, feed_dict = feed_dict) print('Test accuracy(with bias): %.1f%%' % accuracy(test_prediction_with_bias.eval(), test_labels)) print('Test accuracy(without bias): %.1f%%' % accuracy(test_prediction_without_bias.eval(), test_labels)) # -
bias_vs_nobias.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import numpy as np import scipy.stats as sps import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import random from tqdm import tqdm import sys, os sys.path += [os.path.abspath(os.pardir + '/src')] print(sys.path) from experiment import init_random_state, BanditLoopExperiment, get_ts_model sns.set(font_scale=1.2, palette='tab20') # + p = 0.9 Q = 1 ws = [1,4,10,20] b = 0.0 T = 2000 M = 10 l = 4 interests, responses = [], [] repeats = 30 res = {} for w in tqdm(ws): sum_interests = [] for i in tqdm(range(repeats)): init_random_state(i) bandit = lambda: get_ts_model(M=M, l=l) exp = BanditLoopExperiment(bandit, "TS bandit") exp.prepare(w=w, Q=Q, p=p, b=b) exp.run_experiment(T=T) results = exp.get_as_np() interests = results.interest sum_interests.append(np.linalg.norm(interests - interests[0], axis=1)**2) res[w] = sum_interests # - for w in ws: res[w] = np.concatenate(res[w]) # + ni = [] www = [] ts = [] for w in ws: ni.append(res[w]) www.append(np.ones(repeats*T)*w) ts.append(np.tile(np.arange(1, T+1), repeats)) ni = np.concatenate(ni).reshape(1,-1) www = np.concatenate(www).reshape(1,-1) ts = np.concatenate(ts).reshape(1,-1) # - data = pd.DataFrame(np.vstack([ni, www, ts]).T, columns=['Norm interests', 'w', 't']) data # + plt.figure(figsize=(12, 8)) sns.lineplot(data=data, x='t', y='Norm interests', hue='w', palette='tab20') plt.yscale('log') plt.ylabel(r'$\|\mu_t - \mu_0 \|^2$') plt.legend() plt.xlabel('Шаг') plt.savefig('var_norm_interest.pdf') # -
notebooks/error_analysis_var.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.12 64-bit (''chebroots-dev'': conda)' # name: python3 # --- # # Demonstration of chebroots # ## Defining the function # # First, we must define a function that has **many** roots that cannot easily be found. # # In this example, let us use (somewhat arbitrarily) # $$ # y(x) = \cos(\exp(\sin(x^2)))^2 - \cos(\exp(\sin(x^3)))^2 + \sin(\cos(\exp(x)))^2 # $$ # and plot this in the interval $[-5, +10]$. # + import numpy as np import matplotlib.pyplot as plt # definition fn = np.vectorize(lambda x: np.cos(np.exp(np.sin(x**2)))**2 - np.cos(np.exp(np.sin(x**3)))**2 + np.sin(np.cos(np.exp(x)))**2) x_interval = [-5, +10] # show x = np.linspace(*x_interval, 100000) plt.plot(x, fn(x)) plt.xlabel('x') plt.ylabel('y(x)') plt.show() # - # ## First attempt using chebpy # # [chebpy](https://github.com/chebpy/chebpy) is a Python implementation of the popular MATLAB package [Chebfun](https://chebfun.org), which enables numerical computing with functions based on interpolation with Chebyshev polynomials. # # This is, in principal, well-suited for such problems and can be done as follows: # + from chebpy import chebfun print("Constructing the Chebyshev representation ...") cheb = chebfun(fn, x_interval) print("Locating the roots ...") x0_chebfun = cheb.roots() print(f"Found {len(x0_chebfun):d} roots using chebpy:") print(x0_chebfun) # - # The warning in the output tells us that the automatic constructor failed to converge the function. This means that the subsequent call to the `.roots` method may not contain all roots in the interval. # ## Improved root finding with chebroots # # `chebroots` uses `chebpy` under the hood to interpolate the function efficiently and accurately, but employs recursion to refine the search where needed and thus ensure convergence. 
It follows a very similar workflow to what was done above: # + from chebroots import ChebRoots print("Initializing the ChebRoots object ...") rootfinder = ChebRoots(fn) # optionally, set custom tolerances now print("Locating the roots ...") x0_chebroots, _ = rootfinder.find_all_roots(x_interval) print(f"Found {len(x0_chebroots)} roots using chebroots:") print(np.array(x0_chebroots)) # use numpy for better formatting # - # Note that we also get a warning about failure to converge the function. However, `chebroots` safely ignores this and lets us know that in the given interval (in this case the initial fit over the entire domain) the function is approximated using 1000 points. This is sufficiently accurate but in any case should not affect the roots due to the further refinement later on in the root finding process.
docs/demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Set up # #### 1. Set up accounts and role # + import sagemaker import boto3 from datetime import datetime sagemaker_session = sagemaker.Session() account_id = boto3.client('sts').get_caller_identity().get('Account') region = boto3.session.Session().region_name #role = sagemaker.get_execution_role() role="arn:aws:iam::{}:role/service-role/AmazonSageMaker-ExecutionRole-20190118T115449".format(account_id) max_runs=1 # - # #### 2. Setup image and instance type # pytorch_custom_image_name="ppi-extractor:gpu-1.0.0-201910130520" instance_type = "ml.p3.2xlarge" # + # docker_repo = "{}.dkr.ecr.{}.amazonaws.com/{}".format(account_id, region, pytorch_custom_image_name) # - # #### 3. Configure train/ test and validation datasets bucket = "aegovan-data" # + train_unique = "s3://{}/aimed_dataset/kfold_unique".format(bucket) train_random ="s3://{}/aimed_dataset/kfold_random".format(bucket) pretrained_bert="s3://{}/embeddings/bert/".format(bucket) train_unique_single_fold="s3://{}/aimed_dataset/kfold_unique/fold_0/train".format(bucket) val_unique_single_fold="s3://{}/aimed_dataset/kfold_unique/fold_0/validation".format(bucket) train_random_single_fold="s3://{}/aimed_dataset/kfold_random/fold_0/train".format(bucket) val_random_single_fold="s3://{}/aimed_dataset/kfold_random/fold_0/validation".format(bucket) s3_output_path= "s3://{}/sagemakerresults/".format(bucket) s3_code_path= "s3://{}/aimed_bert_code".format(bucket) s3_checkpoint = "s3://{}/aimed_bert_checkpoint/{}".format(bucket, datetime.now().strftime("%m%d%Y%H%M%S")) # - # ### Start training commit_id = "6df30be45e08af56a0f10fbfc8a724737f7ca9e1" unique_inputs = { "train" : train_unique, "PRETRAINED_MODEL" : pretrained_bert } random_inputs = { "train" : train_random, "PRETRAINED_MODEL" : pretrained_bert 
} unique_single_fold = { "train" : train_unique_single_fold, "val" : val_unique_single_fold, "PRETRAINED_MODEL" : pretrained_bert } random_single_fold = { "train" : train_random_single_fold, "val" : val_random_single_fold, "PRETRAINED_MODEL" : pretrained_bert } sm_localcheckpoint_dir="/opt/ml/checkpoints/" # + BertNetworkFactoryhyperparameters_kfold = { "datasetfactory":"datasets.aimed_dataset_factory.AimedDatasetFactory", "modelfactory" :"models.bert_model_factory.BertModelFactory", "tokenisor_lower_case":0, "kfoldtrainprefix": "train", "batch": "8", "gradientaccumulationsteps" : "8", # "protein_name_replacer_random_seed":42, "epochs" : "100", "log-level" : "INFO", "learningrate":.00001, "earlystoppingpatience":9, "checkpointdir" : sm_localcheckpoint_dir, # Checkpoints once every n epochs "checkpointfreq": 2, "commit_id" : commit_id } # - BertNetworkFactoryhyperparameters_single_fold = BertNetworkFactoryhyperparameters_kfold.copy() BertNetworkFactoryhyperparameters_single_fold.pop("kfoldtrainprefix") BertNetworkFactoryhyperparameters_single_fold_loss = BertNetworkFactoryhyperparameters_single_fold.copy() BertNetworkFactoryhyperparameters_single_fold_loss["uselosseval"] =1 metric_definitions = [{"Name": "TrainLoss", "Regex": "###score: train_loss### (\d*[.]?\d*)"} ,{"Name": "ValidationLoss", "Regex": "###score: val_loss### (\d*[.]?\d*)"} ,{"Name": "TrainAucScore", "Regex": "###score: train_ResultScorerAucBinary_score### (\d*[.]?\d*)"} ,{"Name": "ValidationAucScore", "Regex": "###score: val_ResultScorerAucBinary_score### (\d*[.]?\d*)"} ,{"Name": "TrainPRScore", "Regex": "###score: train_ResultScorerPrBinary_score### (\d*[.]?\d*)"} ,{"Name": "ValidationPRScore", "Regex": "###score: val_ResultScorerPrBinary_score### (\d*[.]?\d*)"} ,{"Name": "TrainF1BinaryScore", "Regex": "###score: train_ResultScorerF1Binary_score### (\d*[.]?\d*)"} ,{"Name": "ValidationF1BinaryScore", "Regex": "###score: val_ResultScorerF1Binary_score### (\d*[.]?\d*)"} ] # !git log -1 | head -1 # !git 
log -1 | head -5 | tail -1 # + # set True if you need spot instance use_spot = True train_max_run_secs = 5 *24 * 60 * 60 spot_wait_sec = 5 * 60 max_wait_time_secs = train_max_run_secs + spot_wait_sec if not use_spot: max_wait_time_secs = None # During local mode, no spot.., use smaller dataset if instance_type == 'local': use_spot = False max_wait_time_secs = 0 wait = True # Use smaller dataset to run locally inputs = inputs_sample # - experiments = { "aimed-bert-unique-1fold-loss" : { "hp" :BertNetworkFactoryhyperparameters_single_fold_loss "inputs" : unique_single_fold }, "aimed-bert-unique-1fold" : { "hp" :BertNetworkFactoryhyperparameters_single_fold "inputs" : unique_single_fold }, "aimed-bert-unique-kfold" : { "hp" :BertNetworkFactoryhyperparameters_kfold "inputs" : unique_inputs } } # + base_name = "aimed-bert-unique-1fold-loss" hyperparameters = experiments[base_name]["hp"] inputs = experiments[base_name]["inputs"] # - hyperparameters git_config = {'repo': 'https://github.com/elangovana/ppi-aimed.git', 'branch': 'main', 'commit': hyperparameters["commit_id"] } hyperparameters inputs # + from sagemaker.pytorch import PyTorch estimator = PyTorch( entry_point='main_train_pipeline.py', source_dir = 'src', dependencies =['src/datasets', 'src/models','src/utils', 'src/scorers'], git_config= git_config, # image_name= docker_repo, role=role, framework_version ="1.4.0", py_version='py3', instance_count=1, instance_type=instance_type, hyperparameters = hyperparameters, output_path=s3_output_path, metric_definitions=metric_definitions, volume_size=30, code_location=s3_code_path, debugger_hook_config=False, base_job_name =base_name, use_spot_instances = use_spot, max_run = train_max_run_secs, max_wait = max_wait_time_secs, checkpoint_s3_uri=s3_checkpoint, checkpoint_local_path=sm_localcheckpoint_dir) estimator.fit(inputs, wait=False)
notebooks/aimed_sagemaker_bert.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Compare real-robot vs. simulated logs recorded for system identification.
# Each cell loads a pickled log dict (keys include 't', 'IMU', 'MotorAngle',
# 'action') and overlays matching channels from a real and a simulated run.
# Files are now opened with `with` so the handles are closed after loading
# (the original left every file handle open).

# +
import pickle
import matplotlib.pyplot as plt
import numpy as np
import os

data_folder = 'puppersim/data/system_id/'

# Body-frame ("bf") excitation: real robot first.
with open(os.path.join(data_folder, 'env_real_bf_log.txt'), 'rb') as f:
    env_log = pickle.load(f)
# print(env_log)
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 0])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 1])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 2])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 3])
plt.ylim(-2, 2)
plt.xlim(0, 4)
plt.figure()

# Matching simulated run, same channels and axis limits for easy comparison.
with open(os.path.join(data_folder, 'env_sim_bf_log.txt'), 'rb') as f1:
    env_sim_log = pickle.load(f1)
# print(env_log)
plt.plot(env_sim_log['t'], np.array(env_sim_log['IMU'])[:, 0])
plt.plot(env_sim_log['t'], np.array(env_sim_log['IMU'])[:, 1])
plt.plot(env_sim_log['t'], np.array(env_sim_log['IMU'])[:, 2])
plt.plot(env_sim_log['t'], np.array(env_sim_log['IMU'])[:, 3])
plt.ylim(-2, 2)
plt.xlim(0, 4)

# +
# Left-right ("lr") excitation: real robot vs. simulation.
with open(os.path.join(data_folder, 'env_real_lr_log.txt'), 'rb') as f:
    env_log = pickle.load(f)
# print(env_log)
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 0])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 1])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 2])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 3])
plt.ylim(-2, 2)
plt.xlim(0, 4)
plt.figure()

with open(os.path.join(data_folder, 'env_sim_lr_log.txt'), 'rb') as f1:
    env_sim_log = pickle.load(f1)
# print(env_log)
plt.plot(env_sim_log['t'], np.array(env_sim_log['IMU'])[:, 0])
plt.plot(env_sim_log['t'], np.array(env_sim_log['IMU'])[:, 1])
plt.plot(env_sim_log['t'], np.array(env_sim_log['IMU'])[:, 2])
plt.plot(env_sim_log['t'], np.array(env_sim_log['IMU'])[:, 3])
plt.ylim(-2, 2)
plt.xlim(0, 4)

# +
# Joint tracking (2021-01-16 logs): commanded action vs. measured motor angle
# for joint 1, first in simulation, then on the real robot.
with open(os.path.join(data_folder, 'env_log_sim_0116.txt'), 'rb') as f:
    env_log = pickle.load(f)
print(env_log.keys())
plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, 1], '--')
plt.plot(env_log['t'], np.array(env_log['action'])[:, 1])
# plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, 2])
# plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, 3])
plt.ylim(0, 1)
plt.xlim(0, 4)
# plt.figure()

with open(os.path.join(data_folder, 'env_log_real_0116.txt'), 'rb') as f:
    env_log = pickle.load(f)
plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, 1], '--')
plt.plot(env_log['t'], np.array(env_log['action'])[:, 1])
# plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, 2])
# plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, 3])
plt.ylim(0, 1)
plt.xlim(0, 4)
plt.figure()

# +
# Center-of-mass test (2021-01-16): roll/pitch (IMU channels 0 and 1),
# dashed = simulation, solid = real.
with open(os.path.join(data_folder, 'env_log_com_test_sim_0116.txt'), 'rb') as f:
    env_log = pickle.load(f)

MOTOR_ID = 2
print(env_log.keys())
# plt.plot(env_log['t'], np.array(env_log['action'])[:, 2])
# plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, MOTOR_ID])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 0], '--')
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 1], '--')
# plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 2])
# plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 3])
# plt.ylim(-2, 2)
# plt.xlim(0,4)
# plt.figure()

with open(os.path.join(data_folder, 'env_log_com_test_real_0116.txt'), 'rb') as f:
    env_log = pickle.load(f)
# plt.plot(env_log['t'], np.array(env_log['action'])[:, 2])
# plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, MOTOR_ID])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 0])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 1])
# plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 3])
# plt.ylim(-2, 2)
plt.xlim(0, 4)
plt.legend(['roll_sim', 'pitch_sim', 'roll_real', 'pitch_real'])
plt.figure()

# +
# ARS trot policy (2021-01-22): roll/pitch in simulation vs. on the robot.
with open(os.path.join(data_folder, 'ars_trot_01_22_sim.txt'), 'rb') as f:
    env_log = pickle.load(f)

MOTOR_ID = 2
print(env_log.keys())
# plt.plot(env_log['t'], np.array(env_log['action'])[:, 2])
# plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, MOTOR_ID])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 0], '--')
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 1], '--')
# plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 2])
# plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 3])
plt.ylim(-2, 2)
plt.xlim(0, 4)
# NOTE(review): this plt.figure() puts the real traces below on a NEW figure,
# so the 4-entry legend only labels the real curves — confirm this is intended
# (the com_test cell above overlays both runs on one figure instead).
plt.figure()

with open(os.path.join(data_folder, 'ars_trot_01_22_real.txt'), 'rb') as f:
    env_log = pickle.load(f)
# plt.plot(env_log['t'], np.array(env_log['action'])[:, 2])
# plt.plot(env_log['t'], np.array(env_log['MotorAngle'])[:, MOTOR_ID])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 0])
plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 1])
# plt.plot(env_log['t'], np.array(env_log['IMU'])[:, 3])
# plt.ylim(-2, 2)
plt.xlim(0, 4)
plt.legend(['roll_sim', 'pitch_sim', 'roll_real', 'pitch_real'])
plt.figure()
IMU identification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # OpenCV
#
# ## History of OpenCV
# OpenCV was started at Intel in 1999 by <NAME>, and the first release came out in 2000. <NAME> joined <NAME> to manage Intel’s Russian software OpenCV team. In 2005, OpenCV was used on Stanley, the vehicle that won the 2005 DARPA Grand Challenge. Later its active development continued under the support of <NAME>, with <NAME> and <NAME> leading the project. Today, OpenCV supports a large number of algorithms related to Computer Vision and Machine Learning, and it is expanding day by day.
#
# Currently OpenCV supports a wide variety of programming languages such as C++, Python, and Java, and is available on different platforms including Windows, Linux, OS X, Android, and iOS. Interfaces based on CUDA and OpenCL are also under active development for high-speed GPU operations.
#
# OpenCV-Python is the Python API of OpenCV. It combines the best qualities of the OpenCV C++ API and the Python language.
#
# ## OpenCV-Python
#
# Compared to languages like C/C++, Python is slower. But an important feature of Python is that it can be easily extended with C/C++. This feature helps us write computationally intensive code in C/C++ and create Python wrappers for it, so that we can use these wrappers as Python modules. This gives us two advantages: first, our code is as fast as the original C/C++ code (since it is the actual C++ code working in the background), and second, it is very easy to code in Python. This is how OpenCV-Python works: it is a Python wrapper around the original C++ implementation.
#
#
# ## Installing OpenCV
#
# `pip install opencv-contrib-python`
#
# `pip install opencv-python`
#
# <a href="https://pypi.org/project/opencv-contrib-python/" target="_blank">Pypi Link of opencv-contrib</a>
#
# <a href="https://pypi.org/project/opencv-python/" target="_blank">Pypi link of opencv-python </a>

# ## Reading an image
#
# The function cv2.imread() is used to read an image. The image should be in the working directory or a full path of the image.
#
# Arguments of the function : -
#
# * cv2.IMREAD_COLOR : Loads a color image. Any transparency of image will be neglected. It is the default flag.
# * cv2.IMREAD_GRAYSCALE : Loads image in grayscale mode
# * cv2.IMREAD_UNCHANGED : Loads image as such including alpha channel
#
#
# *Instead of these three flags, you can simply pass integers 1, 0 or -1 respectively.*
#

# +
import cv2
import numpy as np

# Load the image in grayscale (flag 0)
img1 = cv2.imread('img/actress.jpg',0)

# Load the image in color / BGR (flag 1, the default)
img2 = cv2.imread('img/actress.jpg',1)

# Load the image unchanged, keeping any alpha channel (flag -1)
img3 = cv2.imread('img/actress.jpg',-1)
# -

# > Even if the image path is wrong, it won’t throw any error, but print(img) will give you **None**

# ## Displaying an image
#
# The function **cv2.imshow()** is used to display an image in a window. The window automatically fits to the image size.
#
# * First argument is the window name which is a string.
# * Second argument is our image. You can create as many windows as you wish, but with different window names.

cv2.imshow('Grayscale image',img1)
cv2.imshow('RGB image',img2)
# NOTE(review): this reuses the 'RGB image' window name, so img3 replaces
# img2's window instead of opening a third one — use a distinct name to show
# both (the text above says window names should differ).
cv2.imshow('RGB image',img3)
cv2.waitKey(0)
cv2.destroyAllWindows()

# **cv2.waitKey()** is a keyboard binding function. Its argument is the time in milliseconds. The function waits for specified milliseconds for any keyboard event. If you press any key in that time, the program continues. If 0 is passed, it waits indefinitely for a key stroke.
#
# **cv2.destroyAllWindows()** simply destroys all the windows we created. If you want to destroy any specific window, use the function cv2.destroyWindow() where you pass the exact window name as the argument.

# Show the images one after another: each waitKey(0) blocks until a key press.
cv2.imshow('Grayscale image',img1)
cv2.waitKey(0)
cv2.imshow('RGB image',img2)
cv2.waitKey(0)
cv2.destroyAllWindows()

# ### Special Case
#
# >We can create a window and load image to it later. Then we can specify whether window is resizable or not. It is done with the function **cv2.namedWindow()**.
#
# By default, the flag is *cv2.WINDOW_AUTOSIZE*. But if you specify flag to be *cv2.WINDOW_NORMAL*, you can resize window. It will be helpful when image is too large in dimension and adding track bar to windows.

cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image',img1)
cv2.waitKey(0)
cv2.destroyAllWindows()

# ## Writing or Saving an image
# Use the function **cv2.imwrite()** to save an image.
#
# * First argument is the file name
# * second argument is the image you want to save.

cv2.imwrite('Sharvari-Wagh-gray.png',img1)

# ### Using Matplotlib
#
# Matplotlib is a plotting library for Python which gives you a wide variety of plotting methods. We can also display images, zoom images, and save images with Matplotlib.

# +
import numpy as np
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('img/actress.jpg',1)
plt.imshow(img, cmap = 'gray', interpolation = 'bicubic')
plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
plt.show()
# -

# *****Note*****
# -------------------
# ### Color image loaded by OpenCV is in BGR mode. But Matplotlib displays in RGB mode. So color images will not be displayed correctly in Matplotlib if image is read with OpenCV.

# # Drawing Functions

# ### Drawing Rectangle
#
# To draw a rectangle, you need top-left corner and bottom-right corner of rectangle. This time we will draw a green rectangle at the top-right corner of image.
import numpy as np
import cv2
from matplotlib import pyplot as plt

# cv2.rectangle(image, starting vertex, opposite vertex, color, thickness)
# Color is BGR, so (0,255,0) is green; thickness 3 px. Note: draws on `img`
# in place and returns the same array.
img_2 = cv2.rectangle(img,(384,0),(510,128),(0,255,0),3)
plt.imshow( img_2, cmap = 'gray', interpolation = 'bicubic')
#cv2.imwrite('Line.png',img_2)
plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
plt.show()

# ### Drawing Circle
#
# To draw a circle, you need its center coordinates and radius. We will draw a circle inside the rectangle drawn above.

import numpy as np
import cv2
from matplotlib import pyplot as plt

# cv2.circle(image, center, radius, color, thickness) — thickness -1 fills the circle
img = cv2.circle(img,(447,63), 63, (0,0,255), -1)
plt.imshow( img,cmap = 'gray', interpolation = 'bicubic')
plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
plt.show()

# ### Drawing Ellipse
#
# To draw the ellipse, we need to pass several arguments. One argument is the center location (x,y). Next argument is axes lengths (major axis length, minor axis length). angle is the angle of rotation of ellipse in anti-clockwise direction. startAngle and endAngle denotes the starting and ending of ellipse arc measured in clockwise direction from major axis. i.e. giving values 0 and 360 gives the full ellipse. For more details, check the documentation of cv2.ellipse(). Below example draws a half ellipse at the center of the image.

# Half ellipse (0..180 degrees), color 255, filled (-1).
img = cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)
cv2.imshow("Line", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# ### Drawing Polygon
#
# To draw a polygon, first you need coordinates of vertices. Make those points into an array of shape ROWSx1x2 where ROWS are number of vertices and it should be of type int32. Here we draw a small polygon with four vertices in yellow color.

# Vertex list reshaped to the ROWSx1x2 layout polylines() expects; True closes
# the polygon; (0,255,255) is yellow in BGR.
pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)
pts = pts.reshape((-1,1,2))
img = cv2.polylines(img,[pts],True,(0,255,255))
cv2.imshow("Line", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# ### Adding Text to Images
#
# To put texts in images, you need to specify following things.
# * Text data that you want to write
# * Position coordinates of where you want put it (i.e. bottom-left corner where data starts).
# * Font type (Check cv2.putText() docs for supported fonts)
# * Font Scale (specifies the size of font)
# regular things like color, thickness, lineType etc. For better look, lineType = cv2.LINE_AA is recommended.
#
# We will write OpenCV on our image in white color.

# cv2.putText(image, 'Text to Display', bottom left starting point, Font, Font Size, Color, Thickness)
import numpy as np
import cv2
from matplotlib import pyplot as plt

font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
cv2.imshow("Line", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Fonts Available
#
# - FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN
# - FONT_HERSHEY_DUPLEX,FONT_HERSHEY_COMPLEX
# - FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL
# - FONT_HERSHEY_SCRIPT_SIMPLEX
# - FONT_HERSHEY_SCRIPT_COMPLEX

# ### Properties of image

# ### Shape
#
# Shape of image is accessed by *img.shape*. It returns a tuple of number of rows, columns and channels (if image is color):

# +
import cv2
import numpy as np

# Load a color image
image = cv2.imread('img/actress.jpg')
print(image.shape)
# -

# ### Total Pixels
# Total number of pixels is accessed by *img.size*:

print(image.size)

# ### Datatype
# Image datatype is obtained by *img.dtype*:

print(image.dtype)

# ## Arithmetic Operations

# ### Image Addition
#
# You can add two images by OpenCV function, cv2.add() or simply by numpy operation, res = img1 + img2. Both images should be of same depth and type, or second image can just be a scalar value.
# # Note : - *There is a difference between OpenCV addition and Numpy addition. OpenCV addition is a saturated operation while Numpy addition is a modulo operation.* # + import cv2 import numpy as np image = cv2.imread('img/actress.jpg') # Create a matrix of ones, then multiply it by a scaler of 150 # This gives a matrix with same dimesions of our image with all values being 150 M = np.ones(image.shape, dtype = "uint8") * 150 print(M) # We use this to add this matrix M, to our image # Notice the increase in brightness added = cv2.add(image, M) cv2.imshow("Added", added) cv2.waitKey(0) # Likewise we can also subtract # Notice the decrease in brightness subtracted = cv2.subtract(image, M) cv2.imshow("Subtracted", subtracted) cv2.waitKey(0) cv2.destroyAllWindows() # - # ### Image Blending # # This is also image addition, but different weights are given to images so that it gives a feeling of blending or transparency. # # Here I took two images to blend them together. First image is given a weight of 0.7 and second image is given 0.3. cv2.addWeighted() is applied. # + img1 = cv2.imread('img/hd5.jpg') img2 = cv2.imread('img/hd2.jpg') blend = cv2.addWeighted(img1,0.7,img2,0.3,0) cv2.imshow('Blended',blend) cv2.waitKey(0) cv2.destroyAllWindows() # - # ### Bitwise Operations # This includes bitwise AND, OR, NOT and XOR operations. They will be highly useful while extracting any part of the image , defining and working with non-rectangular ROI etc. 
# + # Load two images import numpy as np import cv2 from matplotlib import pyplot as plt img1 = cv2.imread('img/hd5.jpg') img2 = cv2.imread('img/hot.png') # I want to put logo on top-left corner, So I create a ROI rows,cols,channels = img2.shape roi = img1[0:rows, 0:cols ] # Now create a mask of logo and create its inverse mask also img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY) ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY) mask_inv = cv2.bitwise_not(mask) # Now black-out the area of logo in ROI img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) # Take only region of logo from logo image. img2_fg = cv2.bitwise_and(img2,img2,mask = mask) # Put logo in ROI and modify the main image dst = cv2.add(img1_bg,img2_fg) img1[0:rows, 0:cols ] = dst #cv2.imshow('res',img1) plt.imshow(img1,cmap = 'gray', interpolation = 'bicubic') cv2.waitKey(0) cv2.destroyAllWindows() # -
OpenCV-Basics/OpenCV-Basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Objectives" data-toc-modified-id="Objectives-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Objectives</a></span></li><li><span><a href="#Spark:-Getting-Started" data-toc-modified-id="Spark:-Getting-Started-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Spark: Getting Started</a></span><ul class="toc-item"><li><span><a href="#Optional-Step-0:-Prerequisites-&amp;-Installation-for-Databricks-or-Local-Run" data-toc-modified-id="Optional-Step-0:-Prerequisites-&amp;-Installation-for-Databricks-or-Local-Run-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Optional Step 0: Prerequisites &amp; Installation for Databricks or Local Run</a></span><ul class="toc-item"><li><span><a href="#Databricks-Setup" data-toc-modified-id="Databricks-Setup-2.1.1"><span class="toc-item-num">2.1.1&nbsp;&nbsp;</span>Databricks Setup</a></span></li><li><span><a href="#Local-Setup" data-toc-modified-id="Local-Setup-2.1.2"><span class="toc-item-num">2.1.2&nbsp;&nbsp;</span>Local Setup</a></span></li></ul></li><li><span><a href="#Step-1:-Create-a-SparkSession-with-a-SparkContext" data-toc-modified-id="Step-1:-Create-a-SparkSession-with-a-SparkContext-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Step 1: Create a SparkSession with a SparkContext</a></span></li><li><span><a href="#Step-2:-Download-some-Amazon-reviews-(Toys-&amp;-Games)" data-toc-modified-id="Step-2:-Download-some-Amazon-reviews-(Toys-&amp;-Games)-2.3"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>Step 2: Download some Amazon reviews (Toys &amp; Games)</a></span><ul class="toc-item"><li><span><a href="#Optional:-For-Databricks-Setup" 
data-toc-modified-id="Optional:-For-Databricks-Setup-2.3.1"><span class="toc-item-num">2.3.1&nbsp;&nbsp;</span>Optional: For Databricks Setup</a></span></li></ul></li><li><span><a href="#Step-3:-Create-a-Spark-DataFrame" data-toc-modified-id="Step-3:-Create-a-Spark-DataFrame-2.4"><span class="toc-item-num">2.4&nbsp;&nbsp;</span>Step 3: Create a Spark DataFrame</a></span></li><li><span><a href="#Exploring-the-DataFrame" data-toc-modified-id="Exploring-the-DataFrame-2.5"><span class="toc-item-num">2.5&nbsp;&nbsp;</span>Exploring the DataFrame</a></span><ul class="toc-item"><li><span><a href="#Count-the-Words-in-the-First-Row" data-toc-modified-id="Count-the-Words-in-the-First-Row-2.5.1"><span class="toc-item-num">2.5.1&nbsp;&nbsp;</span>Count the Words in the First Row</a></span></li><li><span><a href="#A-Few-More-Basic-Commands" data-toc-modified-id="A-Few-More-Basic-Commands-2.5.2"><span class="toc-item-num">2.5.2&nbsp;&nbsp;</span>A Few More Basic Commands</a></span></li></ul></li><li><span><a href="#Reading-files" data-toc-modified-id="Reading-files-2.6"><span class="toc-item-num">2.6&nbsp;&nbsp;</span>Reading files</a></span></li></ul></li></ul></div> # - # <a href="https://colab.research.google.com/github/flatiron-school/ds-spark/blob/main/spark-programming.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Run for Google Colab environment # !pip install pyspark # !apt install openjdk-8-jdk-headless -qq import pyspark import pyspark.sql.functions as F from pyspark.sql.types import ArrayType, IntegerType # + [markdown] heading_collapsed=true # # Objectives # + [markdown] hidden=true # - Use `pyspark` to manipulate data # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "aff0ce01-e486-4e8c-be60-9022526bc6fe", "showTitle": false, "title": ""} heading_collapsed=true # # Spark: Getting Started # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": 
"2601af14-bfc3-47bb-a24c-2544bcb9e674", "showTitle": false, "title": ""} heading_collapsed=true hidden=true # ## Optional Step 0: Prerequisites & Installation for Databricks or Local Run # + [markdown] hidden=true # > If you run this notebook in Google Colab (clicking the button at the beginning of this notebook that says "_Open in Colab_") you can skip to [Step 1](#Step-1:-Create-a-SparkSession-with-a-SparkContext) # + [markdown] heading_collapsed=true hidden=true # ### Databricks Setup # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2601af14-bfc3-47bb-a24c-2544bcb9e674", "showTitle": false, "title": ""} hidden=true # Follow [these instructions](https://docs.databricks.com/notebooks/notebooks-manage.html#import-a-notebook) to import this notebook into Databricks # + [markdown] heading_collapsed=true hidden=true # ### Local Setup # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2601af14-bfc3-47bb-a24c-2544bcb9e674", "showTitle": false, "title": ""} hidden=true # Run these commands in your terminal (just once) if you want to run Spark locally. # # * These instructions require a Mac with [Anaconda3](https://anaconda.com/) and [Homebrew](https://brew.sh/) installed. # * Useful for small data only. For larger data, try [Databricks](https://databricks.com/). 
#
# ```bash
# # Make Homebrew aware of old versions of casks
# brew tap homebrew/cask-versions
# (OLD VERSION:) brew tap caskroom/versions
#
# # Install Java 1.8 (OpenJDK 8)
# brew cask install homebrew/cask-versions/adoptopenjdk8
# OR brew cask install caskroom/versions/adoptopenjdk8
# (OLD VERSION:) brew cask install adoptopenjdk8
#
# # Install the current version of Spark
# brew install apache-spark
#
# # Install Py4J (connects PySpark to the Java Virtual Machine)
# pip install py4j
#
# # Add JAVA_HOME to .bash_profile (makes Java 1.8 your default JVM)
#
# echo "export JAVA_HOME=$(/usr/libexec/java_home -v 1.8)" >> ~/.bash_profile
#
# # Add SPARK_HOME to .bash_profile
#
# echo "export SPARK_HOME=/usr/local/Cellar/apache-spark/3.0.1/libexec" >> ~/.bash_profile
#
# # Add PySpark to PYTHONPATH in .bash_profile
#
# echo "export PYTHONPATH=$SPARK_HOME/python:$PYTHONPATH" >> ~/.bash_profile
#
# # Update current environment
# source ~/.bash_profile
#
# ```

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2b1411e7-bd55-4320-b501-5d1c3f2b5e7b", "showTitle": false, "title": ""} heading_collapsed=true hidden=true
# ## Step 1: Create a SparkSession with a SparkContext

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "23d28547-c14e-4a76-b955-12473fb60322", "showTitle": false, "title": ""} hidden=true
# Sanity check: PySpark needs a JVM, so JAVA_HOME should be set.
# !echo $JAVA_HOME

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "789329b9-b2d0-4dfc-af31-8e320bbc1887", "showTitle": false, "title": ""} hidden=true
# getOrCreate() reuses an existing session if one is already running.
spark = pyspark.sql.SparkSession.builder.getOrCreate()
sc = spark.sparkContext

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "23667ae8-70f9-4770-bb46-6e7772a5b868", "showTitle": false, "title": ""} hidden=true
spark

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "44cc35f2-73d3-4d31-940f-c876710b737c", "showTitle": false, "title": ""} hidden=true
sc

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "646e3ec5-bb21-4df6-88dc-c7135d0b2455", "showTitle": false, "title": ""} heading_collapsed=true hidden=true
# ## Step 2: Download some Amazon reviews (Toys & Games)

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f61a7c8c-e7db-4981-8354-c65b8f8ae326", "showTitle": false, "title": ""} hidden=true
# Get data directly from repo
# !wget https://github.com/flatiron-school/ds-spark/releases/download/v1.0/reviews_Toys_and_Games_5.json.gz

# + [markdown] heading_collapsed=true hidden=true
# ### Optional: For Databricks Setup

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "654b4a58-ef41-4d5a-81cf-47461551087b", "showTitle": false, "title": ""} hidden=true
# Follow [these instructions](https://docs.databricks.com/data/data.html#import-data-1) to import `reviews_Toys_and_Games_5.json` into Databricks

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "fdac9228-261d-4c0c-b4ff-9fcfcd972830", "showTitle": false, "title": ""} heading_collapsed=true hidden=true
# ## Step 3: Create a Spark DataFrame

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3225c644-ef23-4ee9-9d2b-46fc578811ae", "showTitle": false, "title": ""} hidden=true
# this file path will be different if you are running Spark locally
# (Spark reads the gzipped JSON-lines file directly.)
df = spark.read.json('reviews_Toys_and_Games_5.json.gz')

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "49e85347-dccb-4b1d-ae5b-858984be845d", "showTitle": false, "title": ""} hidden=true
df.persist()

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "840ef494-2da1-4f21-822e-35f50946d1b2", "showTitle": false, "title": ""} hidden=true
# This last command, `.persist()`, simply stores the DataFrame in memory. See [this page](https://unraveldata.com/to-cache-or-not-to-cache/). It is similar to `.cache()`, but actually more flexible than the latter since you can specify which storage level you want. See [here](https://stackoverflow.com/questions/26870537/what-is-the-difference-between-cache-and-persist).

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3f4073b4-97af-43df-8301-92c3d603292e", "showTitle": false, "title": ""} hidden=true
type(df)

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1abb1b5b-b641-423e-9118-6af670e96912", "showTitle": false, "title": ""} hidden=true
df.show(5) # default of 20 lines

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2ac4f27c-75e6-4800-bbae-8635e46c61f7", "showTitle": false, "title": ""} hidden=true
# limit() before toPandas() keeps the driver from pulling the whole dataset.
pdf = df.limit(5).toPandas()
pdf

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3c69f161-1cd2-48b6-9d06-49443006a84f", "showTitle": false, "title": ""} hidden=true
type(pdf)

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "32d19406-c61e-4704-9469-2751732065cc", "showTitle": false, "title": ""} hidden=true
df.count()

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "7182558c-0a2d-4f69-8dbe-07a82d5ca5e6", "showTitle": false, "title": ""} hidden=true
df.columns

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "05d96345-c5e6-44d7-9312-3c69df479183", "showTitle": false, "title": ""} hidden=true
df.printSchema()

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3755a345-94e2-43f7-8f68-89eb7b17f672", "showTitle": false, "title": ""} hidden=true
# The 'nullable = true' bit means that the relevant column tolerates null values.

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d89f5c27-ce27-4868-89d1-e5bf92244945", "showTitle": false, "title": ""} hidden=true
df.describe().show()

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8ace905b-4de6-4607-a4d5-f675fb408f8b", "showTitle": false, "title": ""} hidden=true
df.describe('overall').show()

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4e3d146c-e3c1-4d19-ac82-22db78214fdc", "showTitle": false, "title": ""} hidden=true
# Project down to product id + star rating.
reviews_df = df[['asin', 'overall']]

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "bbfe080b-a70c-48d0-b6c7-14c30b18b6e3", "showTitle": false, "title": ""} hidden=true
reviews_df.show()

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ab7b77e1-ef41-439c-b9b7-f4ee2bc939d2", "showTitle": false, "title": ""} hidden=true
def show(df, n=5):
    """Return the first *n* rows of a Spark DataFrame as a pandas DataFrame."""
    return df.limit(n).toPandas()

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ea38b850-817b-4042-bf20-5d825bb8c371", "showTitle": false, "title": ""} hidden=true
show(reviews_df)

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1b84bbb5-35bc-4dd8-a2ce-2d382ab710be", "showTitle": false, "title": ""} hidden=true
reviews_df.count()

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3eb4b79a-a098-4acf-a5b0-80fcff1ff1b1", "showTitle": false, "title": ""} hidden=true
sorted_review_df = reviews_df.sort('overall')

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0517bcf0-c7ee-47ae-9655-9900f7858382", "showTitle": false, "title": ""} hidden=true
show(sorted_review_df)

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "67cc3b28-b436-4695-9317-f3997b60e748", "showTitle": false, "title": ""} hidden=true
counts = reviews_df.agg(F.countDistinct('overall'))

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "67c47823-ae78-4bd8-b8f3-a1c9145913b9", "showTitle": false, "title": ""} hidden=true
counts.show()

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ca103060-bd12-40e3-bda4-68e235a62cd1", "showTitle": false, "title": ""} hidden=true
# Histogram of star ratings via Spark SQL (runs against the temp view below).
query = """
SELECT overall, COUNT(*)
FROM reviews
GROUP BY overall
ORDER BY overall
"""

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e702ccad-1e1d-426e-b036-e0517d3e13a7", "showTitle": false, "title": ""} hidden=true
reviews_df.createOrReplaceTempView('reviews')

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "800fc51a-80db-46a0-9cbb-fb663ba61244", "showTitle": false, "title": ""} hidden=true
output = spark.sql(query)

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "23e8947c-4e29-434d-9fd7-9f70fc2bd2c0", "showTitle": false, "title": ""} hidden=true
show(output)

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "827e2336-0625-4a3a-ad01-a796868e72d3", "showTitle": false, "title": ""} hidden=true
output.collect()

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1fdf1d36-0ada-420d-be70-39535ca5199b", "showTitle": false, "title": ""} hidden=true
# Total reviews minus the per-rating counts: should be 0 if every row has one
# of the five ratings.
reviews_df.count() - sum(output.collect()[i][1] for i in range(5))

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "30014127-2067-4519-afdc-a06313566e17", "showTitle": false, "title": ""} hidden=true
type(reviews_df)

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "720ba532-483c-41ac-b31a-d6c022b31ed3", "showTitle": false, "title": ""} hidden=true
# Convert to RDD!

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d7dcf271-fbd2-48e6-9ff4-7df92f7fd4f4", "showTitle": false, "title": ""} hidden=true
reviews_df.rdd

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "39c964ae-7ce0-45a7-8cd8-b131450e01ae", "showTitle": false, "title": ""} hidden=true
type(reviews_df.rdd)

# + [markdown] heading_collapsed=true hidden=true
# ## Exploring the DataFrame

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "749b839f-72f3-4f7a-b568-c3f1961f128c", "showTitle": false, "title": ""} heading_collapsed=true hidden=true
# ### Count the Words in the First Row

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0222dcf1-a224-4c32-9896-d1a6d287e49c", "showTitle": false, "title": ""} hidden=true
row_one = df.first()

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "939d3997-8d51-4594-9b5a-737bb7a0458d", "showTitle": false, "title": ""} hidden=true
row_one

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f72dec72-0ea2-4fbc-ac3b-df83fb71095c", "showTitle": false, "title": ""} hidden=true
def word_count(text):
    """Return the number of whitespace-separated words in *text*."""
    return len(text.split())

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "20efd1cc-64ea-4ceb-9119-1ab79bd1446b", "showTitle": false, "title": ""} hidden=true
word_count(row_one['reviewText'])

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4ba88cbc-649b-44d2-be0a-25e85e9366d9", "showTitle": false, "title": ""} hidden=true
# 'udf' is for User Defined Function! Wrapping word_count lets Spark apply it
# column-wise; the return type must be declared explicitly.
word_count_udf = F.udf(word_count, returnType=IntegerType())

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "857f6592-b913-4d4d-8917-66b67fa8b4e7", "showTitle": false, "title": ""} hidden=true
review_text_col = df['reviewText']

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c18e3b60-06de-4d94-bd94-9863d1a70db7", "showTitle": false, "title": ""} hidden=true
counts_df = df.withColumn('wordCount', word_count_udf(review_text_col))

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "5b7f69d2-917b-4b15-ba47-4fe656debf4d", "showTitle": false, "title": ""} hidden=true
# Remember that we set the default number of lines to show at 5.
show(counts_df).T

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4ae2b8dc-c83d-4dd9-9cc3-371d474f8d4a", "showTitle": false, "title": ""} hidden=true
word_count_udf = F.udf(word_count, IntegerType())

# Registering our word_count() function so that we
# can use it with SQL! See documentation here:
# https://jaceklaskowski.gitbooks.io/mastering-spark-sql/spark-sql-UDFRegistration.html
df.createOrReplaceTempView('reviews')
spark.udf.register('word_count', word_count_udf)

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2d2875cb-375b-4ba5-b089-00a0302dc549", "showTitle": false, "title": ""} hidden=true
# Now we can use our function in a SQL query!
query = """ SELECT asin, overall, reviewText, word_count(reviewText) AS wordCount FROM reviews """ # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4b5569c1-e96f-4ddd-9ac6-aa85d190a9d4", "showTitle": false, "title": ""} hidden=true counts_df = spark.sql(query) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "deb39966-8067-4ed1-83c5-f8f4d223d36c", "showTitle": false, "title": ""} hidden=true show(counts_df) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f6697610-c546-458b-b19f-06d834c4a948", "showTitle": false, "title": ""} hidden=true def count_all_the_things(text): return [len(text), len(text.split())] # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "06dda3a0-bbbd-4f55-9f41-fea2d215f28f", "showTitle": false, "title": ""} hidden=true count_udf = F.udf(count_all_the_things, returnType=ArrayType(IntegerType())) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "b3b6d194-aaf5-4981-8f7b-d52ba6542983", "showTitle": false, "title": ""} hidden=true counts_df = df.withColumn('counts', count_udf(df['reviewText'])) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1bdd4dcb-61fa-4bb9-b693-1623db4eca09", "showTitle": false, "title": ""} hidden=true show(counts_df, 1) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8f6e8d2e-0259-4f00-ae12-aff4acba4f82", "showTitle": false, "title": ""} hidden=true slim_counts_df = ( df.drop('reviewTime', 'helpful') # .drop('helpful') .withColumn('counts', count_udf(df['reviewText'])) .drop('reviewText') ) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3ec3545c-e50c-4d4f-a400-0339dc9d3503", "showTitle": false, "title": ""} hidden=true show(slim_counts_df, n=1) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "20e34a20-d0b0-413d-b728-0cad3d222d5d", "showTitle": false, "title": ""} hidden=true aggs = counts_df.groupBy('reviewerID').agg({'overall': 'mean'}) 
aggs.collect() # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "eccd843f-e332-45f7-b792-51469fab6fda", "showTitle": false, "title": ""} heading_collapsed=true hidden=true # ### A Few More Basic Commands # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "eccd843f-e332-45f7-b792-51469fab6fda", "showTitle": false, "title": ""} hidden=true # Please refer also to the [official programming guide](http://spark.apache.org/docs/latest/rdd-programming-guide.html). # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4975bd10-bb96-4015-bad0-e98f123de1b0", "showTitle": false, "title": ""} hidden=true data = [1, 2, 3, 4, 5] distData = sc.parallelize(data) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ba3b72c4-ef42-4e70-83eb-a4864f672370", "showTitle": false, "title": ""} hidden=true def multiply(a, b): return a * b # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d02938d0-4c35-444c-a093-50061bc49888", "showTitle": false, "title": ""} hidden=true distData.reduce(multiply) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "614077f3-bf9b-4038-bf3f-c640b0ad7e1e", "showTitle": false, "title": ""} hidden=true distData.filter(lambda x: x < 4).collect() # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d5cbbe3a-84c6-4b80-ada1-47d0d1623d6a", "showTitle": false, "title": ""} heading_collapsed=true hidden=true # ## Reading files # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d5cbbe3a-84c6-4b80-ada1-47d0d1623d6a", "showTitle": false, "title": ""} hidden=true # ```sc.textFile()``` for .txt files # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8a3c50c7-94ed-4d3c-9649-c3e7db168b35", "showTitle": false, "title": ""} hidden=true # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c047102e-d814-41a9-a2c2-e48419456197", "showTitle": false, "title": ""} 
hidden=true # `.toJSON()` for .json files # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1c17c36f-9dfe-4d93-8eb4-322e2b2f40c7", "showTitle": false, "title": ""} hidden=true dfjson = counts_df.toJSON() # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d4a791d2-8c63-449b-bdae-88f09e545253", "showTitle": false, "title": ""} hidden=true df2 = spark.read.json(dfjson) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "64eeb1ef-01b2-460e-9a1e-d1e71cd72482", "showTitle": false, "title": ""} hidden=true df2.printSchema() # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a4c21a29-4752-47d6-9140-d87e1e999433", "showTitle": false, "title": ""} hidden=true counts_df # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ba1cc630-305a-40d7-89a5-56a020dfe473", "showTitle": false, "title": ""} hidden=true type(df.toPandas())
Phase_4/ds-spark-kvm32-main/spark-programming.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Wine Recommendation by point
# Analyzing the database of wine reviews: [Wine Reviews](https://www.kaggle.com/zynicide/wine-reviews)
#
#
# inspired by [wine-recommender](https://www.kaggle.com/sudhirnl7/wine-recommender/notebook)

# ### Import and read csv

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# You have to install pycountry-convert
import pycountry_convert as pc

# %matplotlib inline
plt.style.use('fivethirtyeight')
plt.rcParams.update({'font.size': 12})

# +
path = '2-Data/wines_geo.csv'
wines = pd.read_csv(path, low_memory=False)
# -

wines.shape

missing_val_count_by_column = (wines.isnull().sum())
print(missing_val_count_by_column[missing_val_count_by_column > 0].sort_values(ascending=False))

# NA was understood as NaN
wines['continent'] = wines['continent'].fillna('NA')

missing_val_count_by_column = (wines.isnull().sum())
print(missing_val_count_by_column[missing_val_count_by_column > 0].sort_values(ascending=False))

# ### Distribution of Wine Reviews by Top 10 Countries

#print('Number of country list in data:',wine['country'].nunique())
plt.figure(figsize=(14, 5))
cnt = wines['country'].value_counts().to_frame()[0:10]
sns.barplot(x=cnt['country'], y=cnt.index, data=cnt, palette='Set2', orient='h')
plt.title('Distribution of Wine Reviews by Top 10 Countries');

# ### Distribution of wine price

sns.displot(data=wines, color='r', x="price", kind="ecdf", height=8)
plt.title('Distribution of price')

plt.figure(figsize=(14, 6))
sns.boxplot(data=wines, x='price')
plt.title('Boxplot of price')

# ### Looking for a good price

# Compute the price statistics from the data instead of hard-coding values
# copied from a previous describe() run (34.179725 / 38.615971) -- hard-coded
# copies go stale as soon as the CSV changes.  `Series.std()` uses the same
# sample standard deviation (ddof=1) that `describe()` reports.
mean_price = wines['price'].mean()
std_price = wines['price'].std()

wines['price'].describe()

# "Good" price cutoff: two standard deviations above the mean price.
good_price = mean_price + 2 * std_price
good_price

# +
wines_good_price = wines[wines['price'] < good_price].copy()
print('{}%'.format(round(wines_good_price.shape[0] / wines.shape[0] * 100, 2)))
# -

sns.displot(data=wines[wines['price'] < good_price], color='b', x="price", kde=True, height=8)
plt.title('Distribution of price with the good price')

sns.displot(data=wines[wines['price'] < good_price], color='b', x="points", height=5)
plt.title('Distribution of points with the good price')

plt.figure(figsize=(14, 6))
# Use the computed cutoff rather than the magic number 111 (a rounded, stale
# copy of good_price) so the plot stays consistent with the data above.
sns.boxplot(data=wines[wines['price'] < good_price], x='price')
plt.title('Boxplot of price with the good price')

# ### Top 5 Wines???
# #### Ordered by price with a point equal to or greater than 99 and with good price

top_10 = wines_good_price[wines_good_price['points'] >= 99].sort_values(by=['price'], ascending=True)
top_10[:5]

cnt = wines_good_price.groupby(['province', 'country', 'points'])['price'].agg(['count', 'min', 'max', 'mean']).sort_values(by='points', ascending=False)[:15]
cnt.reset_index(inplace=True)
cnt.style.background_gradient(cmap='Blues', high=0.5)

# ### Relation between Price and Points

sns.jointplot(x=wines['points'], y=wines['price'], color='g', height=8);

# ### More expensive wine and cheaper wine by country

# +
fig, ax = plt.subplots(1, 2, figsize=(16, 5))
ax1, ax2 = ax.flatten()

cnt = wines.groupby(['country'])['price'].max().sort_values(ascending=False).to_frame()[:15]
sns.barplot(x=cnt['price'], y=cnt.index, palette='Set2', ax=ax1)
ax1.set_title('Most expensive wine in country')
ax1.set_ylabel('Country')
ax1.set_xlabel('Price')

cnt = wines.groupby(['country'])['price'].min().sort_values(ascending=True).to_frame()[:15]
sns.barplot(x=cnt['price'], y=cnt.index, palette='Set2', ax=ax2)
ax2.set_title('Cheaper price wine by country')
ax2.set_ylabel('Country')
ax2.set_xlabel('Price')
plt.subplots_adjust(wspace=0.2);
# -

# ### Lowest and highest points by country

# +
fig, ax = plt.subplots(1, 2, figsize=(16, 5))
ax1, ax2 = ax.flatten()

cnt = wines.groupby(['country'])['points'].max().sort_values(ascending=False).to_frame()[:15]
sns.barplot(x = cnt['points'], y = cnt.index, palette= 'Set2',ax=ax1) ax1.set_title('Highest point wine by country') ax1.set_ylabel('Country') ax1.set_xlabel('Point') cnt = wines.groupby(['country'])['points'].min().sort_values(ascending=True).to_frame()[:15] sns.barplot(x = cnt['points'], y = cnt.index, palette = 'Set2',ax=ax2) ax2.set_title('Lowest point wine by country') ax2.set_xlim([0, 100]) ax2.set_ylabel('Country') ax2.set_xlabel('Point') plt.subplots_adjust(wspace=0.2); # - # ### More expensive wine and cheaper wine by continent # + fig,ax = plt.subplots(1,2,figsize=(16,5)) ax1,ax2 = ax.flatten() cnt = wines.groupby(['continent'])['price'].max().sort_values(ascending=False).to_frame() sns.barplot(x = cnt['price'], y = cnt.index, palette= 'Set2',ax=ax1) ax1.set_title('Most expensive wine in continent') ax1.set_ylabel('Country') ax1.set_xlabel('Price') cnt = wines.groupby(['continent'])['price'].min().sort_values(ascending=True).to_frame() sns.barplot(x = cnt['price'], y = cnt.index, palette = 'Set2',ax=ax2) ax2.set_title('Cheaper price wine by continent') ax2.set_ylabel('Country') ax2.set_xlabel('Price') plt.subplots_adjust(wspace=0.2); # + fig,ax = plt.subplots(1,2,figsize=(16,8)) ax1,ax2 = ax.flatten() cnt = wines.groupby(['variety'])['price'].max().sort_values(ascending=False).to_frame()[:15] sns.barplot(x = cnt['price'], y = cnt.index, palette= 'Set2',ax=ax1) ax1.set_title('The grapes used for most expensive wine') ax1.set_ylabel('Variety') ax1.set_xlabel('') cnt = wines.groupby(['variety'])['points'].max().sort_values(ascending=False).to_frame()[:15] sns.barplot(x = cnt['points'], y = cnt.index, palette = 'Set2',ax=ax2) ax2.set_title('The grapes used for most rated wine') ax2.set_ylabel('') ax2.set_xlabel('') plt.subplots_adjust(wspace=0.3); # - plt.figure(figsize=(14,5)) cnt = wines['variety'].value_counts().to_frame()[0:10] sns.barplot(x= cnt['variety'], y =cnt.index, data=cnt, palette='Set2',orient='h') plt.title('Grapes most used'); # # A score that 
considers price and points # ### With good prices # ### Top 10 Wines??? # + wines_good_price['score'] = wines_good_price['points'] - wines_good_price['price']/4 analysis_table = ['country','province','description','variety', 'winery','points','price','score'] wines_good_price.sort_values(by=['score'],ascending=False, inplace=True) wines_good_price[:10][analysis_table] # - plt.figure(figsize=(14,10)) sns.displot(data = wines_good_price,color='b', x="score", kde=True, height=8) plt.title('Distribution of score') # ### Lowest and highest scores by country # + fig,ax = plt.subplots(1,2,figsize=(16,5)) ax1,ax2 = ax.flatten() cnt = wines_good_price.groupby(['country'])['score'].max().sort_values(ascending=False).to_frame()[:15] sns.barplot(x = cnt['score'], y = cnt.index, palette= 'Set2',ax=ax1) ax1.set_title('Highest score wine by country') ax1.set_ylabel('Country') ax1.set_xlabel('Score') cnt = wines_good_price.groupby(['country'])['score'].min().sort_values(ascending=True).to_frame()[:15] sns.barplot(x = cnt['score'], y = cnt.index, palette = 'Set2',ax=ax2) ax2.set_title('Lowest score wine by country') ax2.set_ylabel('Country') ax2.set_xlabel('Score') plt.subplots_adjust(wspace=0.2); # - # ### My country is Brazil what will be the best wines in my country? wines_good_price[wines_good_price['country'] == 'Brazil'][:5][analysis_table] # ### and your country? 
wines_good_price[wines_good_price['country'] == '?????'][:5][analysis_table]

# # Analyzing wines by continents

# ### Best wines in south america

wines_good_price[wines_good_price['continent'] == 'SA'][:5][analysis_table]

wines_good_price[wines_good_price['continent'] == '????'][:5][analysis_table]

# # Analyzing wines by variety

wines['variety'].value_counts().to_frame()[0:10]

wines_good_price[wines_good_price['variety'] == 'Merlot'][:5][analysis_table]

wines_good_price[wines_good_price['variety'] == '????'][:5][analysis_table]

# ### Choose the variety, country or continent to get indications

def get_indications(variety, country, continent):
    """Return up to ``num_indications`` rows of ``wines_good_price`` that match
    the given filters.

    Each argument is a string; an empty string means "do not filter on this
    field".  All non-empty filters are combined with logical AND.  Because
    ``wines_good_price`` was sorted by 'score' descending in an earlier cell,
    the head of the filtered frame is the best-scoring selection.

    Reads the module-level ``wines_good_price`` DataFrame and the
    ``num_indications`` count (set in the cell below).

    Bug fixed: the original if-cascade fell through and returned ``None``
    when only ``variety`` was provided, and silently ignored ``continent``
    once ``country`` matched; the mask form handles every combination
    uniformly.
    """
    # Start from an all-True mask, then AND in one condition per filter
    # that was actually provided.
    mask = pd.Series(True, index=wines_good_price.index)
    if variety:
        mask &= wines_good_price['variety'] == variety
    if country:
        mask &= wines_good_price['country'] == country
    if continent:
        mask &= wines_good_price['continent'] == continent
    return wines_good_price[mask][:num_indications]

# +
variety = ''
country = 'Brazil'
continent = ''
num_indications = 5

get_indications(variety, country, continent)
# -
.ipynb_checkpoints/Wine recommendation by point-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Opacus - Syft Duet - Data Owner 🎸 # ## PART 1: Launch a Duet Server and Connect # # As a Data Owner, you want to allow someone else to perform data science on data that you own and likely want to protect. # # In order to do this, we must load our data into a locally running server within this notebook. We call this server a "Duet". # # To begin, you must launch Duet and help your Duet "partner" (a Data Scientist) connect to this server. # # You do this by running the code below and sending the code snippit containing your unqiue Server ID to your partner and following the instructions it gives! import syft as sy sy.load_lib("opacus") duet = sy.launch_duet(loopback=True) sy.logger.add(sink="./syft_do.log") # If you are in Jupyter Notebook (not Colab) the ☝🏾DUET LIVE STATUS above will animate duet.requests.add_handler( action="accept" ) duet.requests.handlers # + # duet.requests[0].accept() # + # duet.requests.add_handler( # name="cuda_is_available", # action="accept" # ) # duet.requests.add_handler( # name="loss", # action="deny", # timeout_secs=-1, # no timeout # print_local=True # print the result in your notebook # ) # duet.requests.add_handler( # name="train_size", # action="accept" # ) # duet.requests.add_handler( # name="inference", # action="accept" # ) # duet.requests.add_handler( # name="model_download", # action="accept" # ) # + # duet.requests.handlers # + # duet.requests.clear_handlers()
examples/differential-privacy/opacus/Opacus_Syft_Data_Owner.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (scidev) # language: python # name: scidev # --- # # Exploring & Testing FFT/Cross Correlation Algorithms & Associated Interopolation # ## <NAME> # ## Current: 2/28/19 # ## Path and Imports # bootstrap $PATH import sys import os sys.path.extend(['C:\\Users\\potterst1\\Desktop\Repositories\BitBucket\dic', 'C:/Users/potterst1/Desktop/Repositories/BitBucket/dic']) sys.path.extend(['/workspace/stpotter/git/bitbucket/dic']) import warnings from dic import nurbs from dic import fileIO from dic import numerics from dic import analysis from dic import image_processing from dic import utilities import cv2 from matplotlib import pyplot as plt import numpy as np from geomdl import BSpline as bs from geomdl import utilities as gutil import scipy.optimize as sciopt import scipy.signal as sig from scipy.ndimage import sobel import numba from numba import jit # ## Magics # %matplotlib inline # ### Read in ref image file # Read # Hard code absolute paths for now. 
# Fix later.
dic_name = 'C:\\Users\\potterst1\\Desktop\\Repositories\\BitBucket\\dic\\data\\DIC_S_cropped_gray_pad_0.tiff'
psfdi_name = 'C:\\Users\\potterst1\\Desktop\\Repositories\\BitBucket\\dic\\data\\DOA_cropped_gray_pad_0.tiff'
dic_name = '/workspace/stpotter/git/bitbucket/dic/data/DIC_S_cropped_gray_pad_0.tiff'
psfdi_name = '/workspace/stpotter/git/bitbucket/dic/data/DOSA_cropped_gray_pad_0.tiff'
ref_image = cv2.imread(dic_name, -1)  # Read in image 'as is'
#ref_image = ref_image.astype(np.float)
#ref_image = ref_image.astype('uint8')

# Display
plt.imshow(ref_image, cmap='gray')

# ## Deform Ref Image

# Translate image in x
transx = np.array([[1.0, 0.0, 25.0], [0.0, 1.0, 0.0]])
def_image = image_processing.im_warp(ref_image, transx)

# Display
plt.imshow(def_image, cmap='gray')

# ## Manually Specify Region of Interest

# Format: [column index for start of X, column index for end of X, row index for start of Y, row index for end of Y]
subregion_indices = np.array([225, 275, 225, 275])

# Extract these regions from ref image and plot
ref_subimage = np.copy(ref_image[subregion_indices[2]:subregion_indices[3], subregion_indices[0]:subregion_indices[1]])

# ## Compute Cross Correlation

# normalize data
sub_normed = ref_subimage - ref_subimage.mean()
def_normed = def_image - def_image.mean()
foo = sig.correlate2d(def_normed, sub_normed, boundary='symm', mode='same')

plt.imshow(foo, cmap='gray')

bar = np.unravel_index(np.argmax(foo), foo.shape)
print(bar)

# ## Vectorization of ZNSSD computation

# +
np.random.seed(0)
f = np.random.randint(0, 255, size=((100, 100)))
np.random.seed(1)
g = np.random.randint(0, 255, size=((100, 100)))
f_mean = np.mean(f)
f_stddev = np.std(f)
g_mean = np.mean(g)
g_stddev = np.std(g)

# +
# %%timeit
znssd_loop = 0.0
for k in range(0, 100):
    for l in range(0, 100):
        znssd_loop += np.square((f[k, l] - f_mean) / f_stddev - (g[k, l] - g_mean) / g_stddev)

#print('For loops: {}'.format(znssd_loop))

# +
# %%timeit
fval = 1 / f_stddev * (f - np.ones(f.shape) * f_mean)
gval = 1 / g_stddev * (g - np.ones(g.shape) * g_mean)
znssd_vec = np.sum(np.square(fval - gval))

#print('Matrix: {}'.format(znssd_vec))
# -

# ## Bicubic Interpolation

sx = sobel(ref_image, axis=0, mode='constant')
sy = sobel(ref_image, axis=1, mode='constant')

plt.imshow(sx, cmap='gray')

plt.imshow(sy, cmap='gray')

sxy = sobel(sy, axis=0, mode='constant')

plt.imshow(sxy, cmap='gray')

print('sx: {}'.format(sx[100, 100]))
print('sy: {}'.format(sy[100, 100]))
print('sxy: {}'.format(sxy[100, 100]))

@jit(nopython=True, cache=True)
def testim(image, sx, sy, sxy):
    """Precompute a 4x4 bicubic coefficient matrix for every pixel cell.

    image : 2D grayscale array (float64).
    sx, sy, sxy : first derivatives and cross derivative of `image`, same shape.

    Returns an array of shape ((rows-1)*(cols-1), 4, 4) holding one
    coefficient matrix per interpolation cell, stored column-major
    (all cells of image column 0 first, then column 1, ...).
    """
    row, col = image.shape
    shape = ((row - 1) * (col - 1), 4, 4)
    coeffs = np.zeros(shape, dtype=np.float64)
    C = np.array([[1., 0., 0., 0.],
                  [0., 0., 1., 0.],
                  [-3., 3., -2., -1.],
                  [2., -2., 1., 1.]])
    D = np.array([[1., 0., -3., 2.],
                  [0., 0., 3., -2.],
                  [0., 1., -2., 1.],
                  [0., 0., -1., 1.]])
    k = 0
    for j in range(0, col - 1):  # Move through x first, which is columns
        for i in range(0, row - 1):  # Move through y next, which is rows
            # Transpose sub-matrices because equation expects top row to be (0, 0), (0, 1) bot row (1, 0), (1, 1)
            # BUG FIX: use the `image` argument here, not the module-level
            # `ref_image` -- the original silently ignored its first parameter,
            # so calling it on any other image still interpolated ref_image.
            F = np.vstack((np.hstack((image[i:i+2, j:j+2].T, sy[i:i+2, j:j+2].T)),
                           np.hstack((sx[i:i+2, j:j+2].T, sxy[i:i+2, j:j+2].T))))
            A = C @ F @ D
            coeffs[k, :, :] = A
            k += 1
    return coeffs

testinterp = testim(ref_image.astype(np.float64), sx.astype(np.float64), sy.astype(np.float64), sxy.astype(np.float64))

# %timeit testinterp = testim(ref_image.astype(float), sx.astype(float), sy.astype(float), sxy.astype(float))

def evalinterp(coeffs, x, y, shape):
    """Evaluate the bicubic surface stored in `coeffs` at point (x, y).

    coeffs : output of testim(), shape ((rows-1)*(cols-1), 4, 4).
    x, y   : image coordinates (x = column, y = row); the fractional parts
             select the position inside the containing cell.
    shape  : shape of the original image the coefficients were built from.

    Returns the interpolated intensity as a float.
    """
    row = int(np.floor(y))
    col = int(np.floor(x))
    # Cells per image column; coeffs is stored column-major (see testim).
    rows = shape[0] - 1
    xval = x % 1.0
    yval = y % 1.0
    A = coeffs[col * rows + row, :, :]
    # Switch x and y because of the image coord sys
    xar = np.array([1.0, xval, xval ** 2, xval ** 3])
    yar = np.array([1.0, yval, yval ** 2, yval ** 3])
    p = yar @ A @ xar
    return p

# %timeit testeval = evalinterp(testinterp, 100, 100, ref_image.shape)

print(ref_image[100:102, 100:102])

print(evalinterp(testinterp, 100, 100, ref_image.shape))
print(evalinterp(testinterp, 101, 100, ref_image.shape))
print(evalinterp(testinterp, 100, 101, ref_image.shape))
print(evalinterp(testinterp, 101, 101, ref_image.shape))
notebooks/exploratory/stp_explore_image_convolution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Azure Cognitive Services # # Just as you created a web service that could consume data and return predictions, so there are many AI software-as-a-service (SaaS) offerings on the web that will return predictions or classifications based on data you supply to them. One family of these is Microsoft Azure Cognitive Services. # # The advantage of using cloud-based services is that they provide cutting-edge models that you can access without having to train them. This can help accelerate both your exploration and use of ML. # # Azure provides Cognitive Services APIs that can be consumed using Python to conduct image recognition, speech recognition, and text recognition, just to name a few. For the purposes of this notebook, we're going to look at using the Computer Vision API and the Text Analytics API. # # First, we’ll start by obtaining a Cognitive Services API key. Note that you can get a free key for seven days, and then you'll be required to pay. # # To learn more about pricing for Cognitive Services, see https://azure.microsoft.com/pricing/details/cognitive-services/ # # Browse to **Try Azure Cognitive Services** at https://azure.microsoft.com/try/cognitive-services/ # # 1. Select **Vision API**. # 2. Select **Computer Vision**. # 3. Click **Get API key**. # 4. If prompted for credentials, select **Free 7-day trial**. # # Complete the above steps to also retrieve a Text Analytics API key from the Language APIs category. (You can also do this by scrolling down on the page with your API keys and clicking **Add** under the appropriate service.) # # Once you have your API keys in hand, you're ready to start. 
# # > **Learning goal:** By the end of this part, you should have a basic comfort with accessing cloud-based cognitive services by API from a Python environment. # ## Azure Cognitive Services Computer Vision # # Computer vision is a hot topic in academic AI research and in business, medical, government, and environmental applications. We will explore it here by seeing firsthand how computers can tag and identify images. # # The first step in using the Cognitive Services Computer Vision API is to create a client object using the ComputerVisionClient class. # # Replace **ACCOUNT_ENDPOINT** with the account endpoint provided from the free trial. Replace **ACCOUNT_KEY** with the account key provided from the free trial. # !pip install azure-cognitiveservices-vision-computervision # + from azure.cognitiveservices.vision.computervision import ComputerVisionClient from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes from msrest.authentication import CognitiveServicesCredentials # Get endpoint and key from environment variables endpoint = 'ACCOUNT_ENDPOINT' # Example: endpoint = 'https://westcentralus.api.cognitive.microsoft.com' key = 'ACCOUNT_KEY' # Example key = '1234567890abcdefghijklmnopqrstuv # Set credentials credentials = CognitiveServicesCredentials(key) # Create client client = ComputerVisionClient(endpoint, credentials) # - # Now that we have a client object to work with, let's see what we can do. # # Using analyze_image, we can see the properties of the image with VisualFeatureTypes.tags. # + url = 'https://cdn.pixabay.com/photo/2014/05/02/23/54/times-square-336508_960_720.jpg' image_analysis = client.analyze_image(url,visual_features=[VisualFeatureTypes.tags]) for tag in image_analysis.tags: print(tag) # - # ### Exercise: # + # How can you use the code above to also see the description using VisualFeatureTypes property? # - # Now let's look at the subject domain of the image. An example of a domain is celebrity. 
# As of now, the analyze_image_by_domain method only supports celebrities and landmarks domain-specific models. # + # This will list the available subject domains models = client.list_models() for x in models.models_property: print(x) # - # Let's analyze an image by domain: # + # Type of prediction domain = "landmarks" # Public-domain image of Seattle url = "https://images.pexels.com/photos/37350/space-needle-seattle-washington-cityscape.jpg" # English-language response language = "en" analysis = client.analyze_image_by_domain(domain, url, language) for landmark in analysis.result["landmarks"]: print(landmark["name"]) print(landmark["confidence"]) # - # ### Exercise: # + # How can you use the code above to predict an image of a celebrity? # Using this image, https://images.pexels.com/photos/270968/pexels-photo-270968.jpeg? # Remember that the domains were printed out earlier. # - # Let's see how we can get a text description of an image using the describe_image method. Use max_descriptions to retrieve how many descriptions of the image the API service can find. # + domain = "landmarks" url = "https://images.pexels.com/photos/726484/pexels-photo-726484.jpeg" language = "en" max_descriptions = 3 analysis = client.describe_image(url, max_descriptions, language) for caption in analysis.captions: print(caption.text) print(caption.confidence) # - # ### Exercise: # What other descriptions can be found with other images? # What happens if you change the count of descriptions to output? # Let's say that the images contain text. How do we retrieve that information? There are two methods that need to be used for this type of call. Batch_read_file and get_read_operation_result. TextOperationStatusCodes is used to ensure that the batch_read_file call is completed before the text is read from the image. 
# + # import models from azure.cognitiveservices.vision.computervision.models import TextRecognitionMode from azure.cognitiveservices.vision.computervision.models import TextOperationStatusCodes import time url = "https://images.pexels.com/photos/6375/quote-chalk-think-words.jpg" mode = TextRecognitionMode.handwritten raw = True custom_headers = None numberOfCharsInOperationId = 36 # Async SDK call rawHttpResponse = client.batch_read_file(url, mode, custom_headers, raw) # Get ID from returned headers operationLocation = rawHttpResponse.headers["Operation-Location"] idLocation = len(operationLocation) - numberOfCharsInOperationId operationId = operationLocation[idLocation:] # SDK call while True: result = client.get_read_operation_result(operationId) if result.status not in ['NotStarted', 'Running']: break time.sleep(1) # Get data if result.status == TextOperationStatusCodes.succeeded: for textResult in result.recognition_results: for line in textResult.lines: print(line.text) print(line.bounding_box) # - # ### Exercise: # + # What other images with words can be analyzed? # - # You can find addition Cognitive Services demonstrations at the following URLs: # - https://aidemos.microsoft.com/ # - https://github.com/microsoft/computerscience/blob/master/Events%20and%20Hacks/Student%20Hacks/hackmit/cogservices_demos/ # - https://azure.microsoft.com/services/cognitive-services/directory/ # Images come in varying sizes, and there might be cases where you want to create a thumbnail of the image. For this, we need to install the Pillow library, which you can learn about at https://python-pillow.org/. Pillow is the PIL fork, or Python Imaging Library, which allows for image processing. # Install Pillow # !pip install Pillow # Now that the Pillow library is installed, we will import the Image module and create a thumbnail from a provided image. (Once generated, you can find the thumbnail image in your project folder on Azure Notebooks.) 
# + # Pillow package from PIL import Image # IO package to create local image import io width = 50 height = 50 url = "https://images.pexels.com/photos/37350/space-needle-seattle-washington-cityscape.jpg" thumbnail = client.generate_thumbnail(width, height, url) for x in thumbnail: image = Image.open(io.BytesIO(x)) image.save('thumbnail.jpg') # - # > **Takeaway:** In this subsection, you explored how to access computer-vision cognitive services by API. Specifically, you used tools to analyze and describe images that you submitted to these services. # ## Azure Cognitive Services Text Analytics # # Another area where cloud-based AI shines is text analytics. Like computer vision, identifying and pulling meaning from natural human languages is really the intersection of a lot of specialized disciplines, so using cloud services for it provides an economical means of tapping a lot of cognitive horsepower. # # To prepare to use the Cognitive Services Text Analytics API, the requests library must be imported, along with the ability to print out JSON formats. import requests # pprint is pretty print (formats the JSON) from pprint import pprint from IPython.display import HTML # Replace 'ACCOUNT_KEY' with the API key that was created during the creation of the seven-day free trial account. # + subscription_key = 'ACCOUNT_KEY' assert subscription_key # If using a Free Trial account, this URL does not need to be udpated. # If using a paid account, verify that it matches the region where the # Text Analytics Service was setup. text_analytics_base_url = "https://westcentralus.api.cognitive.microsoft.com/text/analytics/v2.1/" # - # ### Text Analytics API # Now it's time to start processing some text languages. 
# # To verify the URL endpoint for text_analytics_base_url, run the following: language_api_url = text_analytics_base_url + "languages" print(language_api_url) # The API requires that the payload be formatted in the form of documents containing `id` and `text` attributes: documents = { 'documents': [ { 'id': '1', 'text': 'This is a document written in English.' }, { 'id': '2', 'text': 'Este es un documento escrito en Español.' }, { 'id': '3', 'text': '这是一个用中文写的文件' }, { 'id': '4', 'text': 'Ez egy magyar nyelvű dokumentum.' }, { 'id': '5', 'text': 'Dette er et dokument skrevet på dansk.' }, { 'id': '6', 'text': 'これは日本語で書かれた文書です。' } ]} # The next lines of code call the API service using the requests library to determine the languages that were passed in from the documents: headers = {"Ocp-Apim-Subscription-Key": subscription_key} response = requests.post(language_api_url, headers=headers, json=documents) languages = response.json() pprint(languages) # The next line of code outputs the documents in a table format with the language information for each document: table = [] for document in languages["documents"]: text = next(filter(lambda d: d["id"] == document["id"], documents["documents"]))["text"] langs = ", ".join(["{0}({1})".format(lang["name"], lang["score"]) for lang in document["detectedLanguages"]]) table.append("<tr><td>{0}</td><td>{1}</td>".format(text, langs)) HTML("<table><tr><th>Text</th><th>Detected languages(scores)</th></tr>{0}</table>".format("\n".join(table))) # The service did a pretty good job of identifying the languages. It did confidently identify the Danish phrase as being Norwegian, but in fairness, even linguists argue as to whether Danish and Norwegian constitute distinct languages or are dialects of the same language. (**Note:** Danes and Norwegians have no doubts on the subject.) # # ### Exercise: # + # Create another document set of text and use the text analytics API to detect the language for the text. 
# - # ### Sentiment Analysis API # Now that we know how to use the Text Analytics API to detect the language, let's use it for sentiment analysis. Basically, the computers at the other end of the API connection will judge the sentiments of written phrases (anywhere on the spectrum of positive to negative) based solely on the context clues provided by the text. # Verify the API URl source for the Sentiment Analysis API sentiment_api_url = text_analytics_base_url + "sentiment" print(sentiment_api_url) # As above, the Sentiment Analysis API requires the language to be passed in as documents with `id` and `text` attributes. documents = {'documents' : [ {'id': '1', 'language': 'en', 'text': 'I had a wonderful experience! The rooms were wonderful and the staff was helpful.'}, {'id': '2', 'language': 'en', 'text': 'I had a terrible time at the hotel. The staff was rude and the food was awful.'}, {'id': '3', 'language': 'es', 'text': 'Los caminos que llevan hasta Monte Rainier son espectaculares y hermosos.'}, {'id': '4', 'language': 'es', 'text': 'La carretera estaba atascada. Había mucho tráfico el día de ayer.'} ]} # Let's analyze the text using the Sentiment Analysis API to output a sentiment analysis score: headers = {"Ocp-Apim-Subscription-Key": subscription_key} response = requests.post(sentiment_api_url, headers=headers, json=documents) sentiments = response.json() pprint(sentiments) # ### Exercise: # + # Create another document set with varying degree of sentiment and use the Sentiment Analysis API to detect what # the sentiment is # - # ### Key Phrases API # We've detected the language type using the Text Analytics API and the sentiment using the Sentiment Analysis API. What if we want to detect key phrases in the text? We can use the Key Phrase API. 
# As with the other services, setup the Key Phrases API with the following parameters key_phrase_api_url = text_analytics_base_url + "keyPhrases" print(key_phrase_api_url) # Create the documents needed to pass to the Key Phrases API with the `id` and `text` attributes. documents = {'documents' : [ {'id': '1', 'language': 'en', 'text': 'I had a wonderful experience! The rooms were wonderful and the staff was helpful.'}, {'id': '2', 'language': 'en', 'text': 'I had a terrible time at the hotel. The staff was rude and the food was awful.'}, {'id': '3', 'language': 'es', 'text': 'Los caminos que llevan hasta Monte Rainier son espectaculares y hermosos.'}, {'id': '4', 'language': 'es', 'text': 'La carretera estaba atascada. Había mucho tráfico el día de ayer.'} ]} # Now, call the Key Phrases API with the formatted documents to retrieve the key phrases. headers = {'Ocp-Apim-Subscription-Key': subscription_key} response = requests.post(key_phrase_api_url, headers=headers, json=documents) key_phrases = response.json() pprint(key_phrases) # We can make this easier to read by outputing the documents in an HTML table format. table = [] for document in key_phrases["documents"]: text = next(filter(lambda d: d["id"] == document["id"], documents["documents"]))["text"] phrases = ",".join(document["keyPhrases"]) table.append("<tr><td>{0}</td><td>{1}</td>".format(text, phrases)) HTML("<table><tr><th>Text</th><th>Key phrases</th></tr>{0}</table>".format("\n".join(table))) # Now call the Key Phrases API with the formatted documents to retrive the key phrases. # ### Exercise: # + # What other key phrases can you come up with for analysis? # - # ### Entities API # The final API we will use in the Text Analytics API service is the Entities API. This will retrieve attributes for documents provided to the API service. 
# Configure the Entities URI entity_linking_api_url = text_analytics_base_url + "entities" print(entity_linking_api_url) # The next step is creating a document with id and text attributes to pass on to the Entities API. documents = {'documents' : [ {'id': '1', 'text': 'Microsoft is an It company.'} ]} # Finally, call the service using the rest call below to retrieve the data listed in the text attribute. headers = {"Ocp-Apim-Subscription-Key": subscription_key} response = requests.post(entity_linking_api_url, headers=headers, json=documents) entities = response.json() entities # ### Exercise: # What other entities can be retrieved with the API? # Create a document setup and use the Text Analytics, Sentiment Analysis, # Key Phrase, and Entities API services to retrieve the data. # > **Takeaway:** In this subsection, you explored text analytics in the cloud. Specifically, you used a variety of different APIs to extract different information from text: language, sentiment, key phrases, and entities. # # That's it the instructional portion of this course. In these eight sections, you've now seen the range of tools that go into preparing data for analysis and performing ML and AI analysis on data. In the next, concluding section, you will bring these skills together in a final project.
workshop-resources/data-science-and-machine-learning/Data_Science_2/workshop-materials/4-AzureCognitiveServices-Reference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fraud detection with Neural Networks # We are going to solve this problem using NN #importing the libraries # %matplotlib inline import pandas as pd import matplotlib.pyplot as plt import numpy as np # ## Loading data and preprocessing df_data = pd.read_csv('creditcard.csv',delimiter=',') # We will not use the attribute time in this model, although we are dropping relevant information. This shouldn't be done in a real context without justified it df = df_data.drop(['Time'], axis=1) df.head() # We have to normalize amount from sklearn.preprocessing import MinMaxScaler # Let's extract the amounts values and scaled them using sklearn MinMaxScaler. (It exists other ways but we decide this for comodity) # + amount = df['Amount'].values amount = amount.reshape(-1,1) mms = MinMaxScaler() amount_scaled = mms.fit_transform(amount) # - # Replacing amount column with the normalize version df['Amount']=amount_scaled df.head() # ## Proportions from collections import Counter # Let's see Class 0s and 1s proportions = Counter(df['Class']) print('Original dataset shape {}'.format(proportions)) # In a plot values = proportions.values() colors = ['g', 'r'] explode = [0, 0.2] labels = ['Non-fraudulent', 'Fraudulent'] plt.pie(values, colors= colors, labels=labels, explode = explode, autopct='%.2f%%') plt.title('Fraudulent and non-fraudulent transactions') plt.show() # Only 0,17% of the data correspond to fraudulent transactions! 
The dataset is clearly imbalanced, so it is needed a resampling technique # ## Train Test Split # Before using an resampling method let's separate the data in train and test from sklearn.model_selection import train_test_split # + features = list(df.columns[:-1]) #features names X_train, X_test, y_train, y_test = train_test_split(df[features], df['Class'], test_size=0.2) # - # Proportions train_proportions, test_proportions = Counter(y_train), Counter(y_test) print('Train {}'.format(train_proportions)) print('Test {}'.format(test_proportions)) # ## Train set before resampling values = train_proportions.values() colors = ['g', 'r'] explode = [0, 0.2] labels = ['Non-fraudulent', 'Fraudulent'] plt.pie(values, colors= colors, labels=labels, explode = explode, autopct='%.2f%%') plt.title('Training set before resampling') plt.show() # ## Resampling # We are going to use a technique that combines an oversampling technique with an undersampling technique. We select SMOTE with Edited Nearest Neighbouf from imblearn.combine import SMOTEENN from imblearn.under_sampling import EditedNearestNeighbours # + enn = EditedNearestNeighbours(sampling_strategy='majority',n_neighbors=25) smoteenn = SMOTEENN(enn=enn) X_rs,y_rs = smoteenn.fit_sample(X_train,y_train) print('Sampler results {}'.format(Counter(y_rs))) # - # ## Train set after resampling values = Counter(y_rs).values() colors = ['g', 'r'] explode = [0, 0] labels = ['Non-fraudulent', 'Fraudulent'] plt.pie(values, colors= colors, labels=labels, explode = explode, autopct='%.2f%%') plt.title('Training set after resampling') plt.show() # ## Training the model # from sklearn.linear_model import LogisticRegression # from sklearn.model_selection import KFold, GridSearchCV # # clf_lr = LogisticRegression(max_iter=100) # # cv = KFold(n_splits=5, shuffle=True) # # grid_lr = {'solver':['saga','liblinear','warn'], # #'penalty':['l1','l2'], # 'class_weight':[{0:1,1:1.3},'balanced']} # # clf_lr = GridSearchCV(clf_lr, grid_lr , cv=cv) # 
# NOTE(review): the whole logistic-regression baseline above is commented out,
# so `clf_lr` is never defined at this point and calling .fit() on it raised a
# NameError. Kept disabled here to stay consistent with the rest of the
# commented-out baseline.
# clf_lr = clf_lr.fit(X_rs, y_rs)

# clf_lr.estimator

# #### Neural Network

from keras import backend as K
from keras.models import Sequential
from keras import optimizers
from keras.layers import Activation, Dropout, Flatten, Dense

# +
# Fully-connected binary classifier: input features -> single sigmoid output
# interpreted as the probability of the fraudulent class.
model = Sequential()

n_cols = X_rs.shape[1]  # number of input features

model.add(Dense(256, activation='relu', input_shape=(n_cols,)))
model.add(Dropout(0.1))  # light regularization between the wide layers
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='sigmoid'))  # fraud probability

# SGD with momentum. BUG FIX: this optimizer used to be constructed but never
# used, because compile() was handed the string 'sgd' (plain SGD with default
# settings). Pass the configured instance so lr=0.01 / momentum=0.9 apply.
opt = optimizers.SGD(lr=0.01, momentum=0.9)

model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
# -

# class_weight makes errors on the (rarer) fraudulent class cost 3x more.
model.fit(X_rs, y_rs, validation_split=0.2, epochs=8, class_weight={0: 1, 1: 3})

# Hard 0/1 predictions on the untouched (non-resampled) test set.
y_pred_nn = model.predict_classes(X_test)

# ## Results

from sklearn.metrics import confusion_matrix, classification_report

print('\nNeural Network \n', confusion_matrix(y_test, y_pred_nn, labels=[0, 1]))

target_names = ['Non-fraudulent', 'Fraudulent']
print('\n\tNeural Network\n', classification_report(y_test, y_pred_nn, target_names=target_names))
Fraud-Detection/Logistic Regression.ipynb
# ##### Copyright 2021 Google LLC. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # flexible_job_shop_sat # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/examples/flexible_job_shop_sat.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a> # </td> # <td> # <a href="https://github.com/google/or-tools/blob/master/examples/python/flexible_job_shop_sat.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a> # </td> # </table> # First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab. # !pip install ortools # + # Copyright 2010-2021 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Solves a flexible jobshop problems with the CP-SAT solver. 
A jobshop is a standard scheduling problem when you must sequence a series of task_types on a set of machines. Each job contains one task_type per machine. The order of execution and the length of each job on each machine is task_type dependent. The objective is to minimize the maximum completion time of all jobs. This is called the makespan. """ # overloaded sum() clashes with pytype. # pytype: disable=wrong-arg-types import collections from ortools.sat.python import cp_model class SolutionPrinter(cp_model.CpSolverSolutionCallback): """Print intermediate solutions.""" def __init__(self): cp_model.CpSolverSolutionCallback.__init__(self) self.__solution_count = 0 def on_solution_callback(self): """Called at each new solution.""" print('Solution %i, time = %f s, objective = %i' % (self.__solution_count, self.WallTime(), self.ObjectiveValue())) self.__solution_count += 1 def flexible_jobshop(): """Solve a small flexible jobshop problem.""" # Data part. jobs = [ # task = (processing_time, machine_id) [ # Job 0 [(3, 0), (1, 1), (5, 2)], # task 0 with 3 alternatives [(2, 0), (4, 1), (6, 2)], # task 1 with 3 alternatives [(2, 0), (3, 1), (1, 2)], # task 2 with 3 alternatives ], [ # Job 1 [(2, 0), (3, 1), (4, 2)], [(1, 0), (5, 1), (4, 2)], [(2, 0), (1, 1), (4, 2)], ], [ # Job 2 [(2, 0), (1, 1), (4, 2)], [(2, 0), (3, 1), (4, 2)], [(3, 0), (1, 1), (5, 2)], ], ] num_jobs = len(jobs) all_jobs = range(num_jobs) num_machines = 3 all_machines = range(num_machines) # Model the flexible jobshop problem. model = cp_model.CpModel() horizon = 0 for job in jobs: for task in job: max_task_duration = 0 for alternative in task: max_task_duration = max(max_task_duration, alternative[0]) horizon += max_task_duration print('Horizon = %i' % horizon) # Global storage of variables. intervals_per_resources = collections.defaultdict(list) starts = {} # indexed by (job_id, task_id). presences = {} # indexed by (job_id, task_id, alt_id). 
job_ends = [] # Scan the jobs and create the relevant variables and intervals. for job_id in all_jobs: job = jobs[job_id] num_tasks = len(job) previous_end = None for task_id in range(num_tasks): task = job[task_id] min_duration = task[0][0] max_duration = task[0][0] num_alternatives = len(task) all_alternatives = range(num_alternatives) for alt_id in range(1, num_alternatives): alt_duration = task[alt_id][0] min_duration = min(min_duration, alt_duration) max_duration = max(max_duration, alt_duration) # Create main interval for the task. suffix_name = '_j%i_t%i' % (job_id, task_id) start = model.NewIntVar(0, horizon, 'start' + suffix_name) duration = model.NewIntVar(min_duration, max_duration, 'duration' + suffix_name) end = model.NewIntVar(0, horizon, 'end' + suffix_name) interval = model.NewIntervalVar(start, duration, end, 'interval' + suffix_name) # Store the start for the solution. starts[(job_id, task_id)] = start # Add precedence with previous task in the same job. if previous_end is not None: model.Add(start >= previous_end) previous_end = end # Create alternative intervals. if num_alternatives > 1: l_presences = [] for alt_id in all_alternatives: alt_suffix = '_j%i_t%i_a%i' % (job_id, task_id, alt_id) l_presence = model.NewBoolVar('presence' + alt_suffix) l_start = model.NewIntVar(0, horizon, 'start' + alt_suffix) l_duration = task[alt_id][0] l_end = model.NewIntVar(0, horizon, 'end' + alt_suffix) l_interval = model.NewOptionalIntervalVar( l_start, l_duration, l_end, l_presence, 'interval' + alt_suffix) l_presences.append(l_presence) # Link the master variables with the local ones. model.Add(start == l_start).OnlyEnforceIf(l_presence) model.Add(duration == l_duration).OnlyEnforceIf(l_presence) model.Add(end == l_end).OnlyEnforceIf(l_presence) # Add the local interval to the right machine. intervals_per_resources[task[alt_id][1]].append(l_interval) # Store the presences for the solution. 
presences[(job_id, task_id, alt_id)] = l_presence # Select exactly one presence variable. model.Add(sum(l_presences) == 1) else: intervals_per_resources[task[0][1]].append(interval) presences[(job_id, task_id, 0)] = model.NewConstant(1) job_ends.append(previous_end) # Create machines constraints. for machine_id in all_machines: intervals = intervals_per_resources[machine_id] if len(intervals) > 1: model.AddNoOverlap(intervals) # Makespan objective makespan = model.NewIntVar(0, horizon, 'makespan') model.AddMaxEquality(makespan, job_ends) model.Minimize(makespan) # Solve model. solver = cp_model.CpSolver() solution_printer = SolutionPrinter() status = solver.SolveWithSolutionCallback(model, solution_printer) # Print final solution. for job_id in all_jobs: print('Job %i:' % job_id) for task_id in range(len(jobs[job_id])): start_value = solver.Value(starts[(job_id, task_id)]) machine = -1 duration = -1 selected = -1 for alt_id in range(len(jobs[job_id][task_id])): if solver.Value(presences[(job_id, task_id, alt_id)]): duration = jobs[job_id][task_id][alt_id][0] machine = jobs[job_id][task_id][alt_id][1] selected = alt_id print( ' task_%i_%i starts at %i (alt %i, machine %i, duration %i)' % (job_id, task_id, start_value, selected, machine, duration)) print('Solve status: %s' % solver.StatusName(status)) print('Optimal objective value: %i' % solver.ObjectiveValue()) print('Statistics') print(' - conflicts : %i' % solver.NumConflicts()) print(' - branches : %i' % solver.NumBranches()) print(' - wall time : %f s' % solver.WallTime()) flexible_jobshop()
examples/notebook/examples/flexible_job_shop_sat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Plugins # Import the element creator from pcg_gazebo.parsers.sdf import create_sdf_element plugin_args = dict( robotNamespace='robot', topic='/topic', args=dict( param1=1, param2=True, param3=dict( param31='option', param32=3.434534 ), param4=[3.1, 4.5, 8.5] ) ) plugin = create_sdf_element('plugin', plugin_args) plugin.name = 'some_plugin' plugin.filename = 'libplugin.so' print(plugin) plugin = create_sdf_element('plugin', plugin_args) plugin.value = plugin_args plugin.name = 'another_plugin' plugin.filename = 'libanother_plugin.so' print(plugin) # # Some plugin default constructors from pcg_gazebo.parsers.sdf import Plugin # ## `gazebo_ros_control` print(Plugin.gazebo_ros_control( name='gazebo_ros_control', robot_namespace='/my_robot', control_period=10, robot_param='/robot_description', robot_sim_type=None)) # ## `gazebo_ros_bumper` print(Plugin.gazebo_ros_bumper( name='gazebo_ros_bumper', robot_namespace='/my_robot', bumper_topic_name='bumper_states', frame_name='world')) # ## `gazebo_ros_ft_sensor` print(Plugin.gazebo_ros_ft_sensor( name='gazebo_ros_ft_sensor', robot_namespace='my_robot', joint_name='some_joint', topic_name='force_torque_sensor_output', gaussian_noise=0.05, update_rate=0))
examples/sdf_parser_plugins.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="hX4n9TsbGw-f" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="0nbI5DtDGw-i" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="9TnJztDZGw-n" # # Text classification with an RNN # + [markdown] colab_type="text" id="AfN3bMR5Gw-o" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/beta/tutorials/text/text_classification_rnn"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/text/text_classification_rnn.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/text/text_classification_rnn.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/text/text_classification_rnn.ipynb"><img 
src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] colab_type="text" id="lUWearf0Gw-p" # This text classification tutorial trains a [recurrent neural network](https://developers.google.com/machine-learning/glossary/#recurrent_neural_network) on the [IMDB large movie review dataset](http://ai.stanford.edu/~amaas/data/sentiment/) for sentiment analysis. # + colab={} colab_type="code" id="z682XYsrjkY9" from __future__ import absolute_import, division, print_function, unicode_literals # !pip install tensorflow-gpu==2.0.0-beta1 import tensorflow_datasets as tfds import tensorflow as tf # + [markdown] colab_type="text" id="1rXHa-w9JZhb" # Import `matplotlib` and create a helper function to plot graphs: # + colab={} colab_type="code" id="Mp1Z7P9pYRSK" import matplotlib.pyplot as plt def plot_graphs(history, string): plt.plot(history.history[string]) plt.plot(history.history['val_'+string]) plt.xlabel("Epochs") plt.ylabel(string) plt.legend([string, 'val_'+string]) plt.show() # + [markdown] colab_type="text" id="pRmMubr0jrE2" # ## Setup input pipeline # # # The IMDB large movie review dataset is a *binary classification* dataset—all the reviews have either a *positive* or *negative* sentiment. # # Download the dataset using [TFDS](https://www.tensorflow.org/datasets). The dataset comes with an inbuilt subword tokenizer. # # + colab={} colab_type="code" id="SHRwRoP2nVHX" dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True, as_supervised=True) train_dataset, test_dataset = dataset['train'], dataset['test'] # + [markdown] colab_type="text" id="MCorLciXSDJE" # As this is a subwords tokenizer, it can be passed any string and the tokenizer will tokenize it. 
# + colab={} colab_type="code" id="EplYp5pNnW1S" tokenizer = info.features['text'].encoder # + colab={} colab_type="code" id="e7ACuHM5hFp3" print ('Vocabulary size: {}'.format(tokenizer.vocab_size)) # + colab={} colab_type="code" id="Bq6xDmf2SAs-" sample_string = 'TensorFlow is cool.' tokenized_string = tokenizer.encode(sample_string) print ('Tokenized string is {}'.format(tokenized_string)) original_string = tokenizer.decode(tokenized_string) print ('The original string: {}'.format(original_string)) assert original_string == sample_string # + [markdown] colab_type="text" id="TbhM970AVA8w" # The tokenizer encodes the string by breaking it into subwords if the word is not in its dictionary. # + colab={} colab_type="code" id="GUIRWSO8yxT5" for ts in tokenized_string: print ('{} ----> {}'.format(ts, tokenizer.decode([ts]))) # + colab={} colab_type="code" id="dDsCaZCDYZgm" BUFFER_SIZE = 10000 BATCH_SIZE = 64 # + colab={} colab_type="code" id="VznrltNOnUc5" train_dataset = train_dataset.shuffle(BUFFER_SIZE) train_dataset = train_dataset.padded_batch(BATCH_SIZE, train_dataset.output_shapes) test_dataset = test_dataset.padded_batch(BATCH_SIZE, test_dataset.output_shapes) # + [markdown] colab_type="text" id="bjUqGVBxGw-t" # ## Create the model # + [markdown] colab_type="text" id="bgs6nnSTGw-t" # Build a `tf.keras.Sequential` model and start with an embedding layer. An embedding layer stores one vector per word. When called, it converts the sequences of word indices to sequences of vectors. These vectors are trainable. After training (on enough data), words with similar meanings often have similar vectors. # # This index-lookup is much more efficient than the equivalent operation of passing a one-hot encoded vector through a `tf.keras.layers.Dense` layer. # # A recurrent neural network (RNN) processes sequence input by iterating through the elements. RNNs pass the outputs from one timestep to their input—and then to the next. 
# # The `tf.keras.layers.Bidirectional` wrapper can also be used with an RNN layer. This propagates the input forward and backwards through the RNN layer and then concatenates the output. This helps the RNN to learn long range dependencies. # + colab={} colab_type="code" id="LwfoBkmRYcP3" model = tf.keras.Sequential([ tf.keras.layers.Embedding(tokenizer.vocab_size, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) # + [markdown] colab_type="text" id="sRI776ZcH3Tf" # Compile the Keras model to configure the training process: # + colab={} colab_type="code" id="kj2xei41YZjC" model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # + [markdown] colab_type="text" id="zIwH3nto596k" # ## Train the model # + colab={} colab_type="code" id="hw86wWS4YgR2" history = model.fit(train_dataset, epochs=10, validation_data=test_dataset) # + colab={} colab_type="code" id="BaNbXi43YgUT" test_loss, test_acc = model.evaluate(test_dataset) print('Test Loss: {}'.format(test_loss)) print('Test Accuracy: {}'.format(test_acc)) # + [markdown] colab_type="text" id="DwSE_386uhxD" # The above model does not mask the padding applied to the sequences. This can lead to skewness if we train on padded sequences and test on un-padded sequences. Ideally the model would learn to ignore the padding, but as you can see below it does have a small effect on the output. # # If the prediction is >= 0.5, it is positive else it is negative. 
# + colab={} colab_type="code" id="8w0dseJMiEUh"
def pad_to_size(vec, size):
    """Right-pad `vec` with zeros until it has length `size`.

    NOTE: mutates `vec` in place and also returns it.
    """
    zeros = [0] * (size - len(vec))
    vec.extend(zeros)
    return vec


# + colab={} colab_type="code" id="Y-E4cgkIvmVu"
def sample_predict(sentence, pad):
    """Return the model's sentiment prediction for `sentence`.

    If `pad` is True, the token sequence is zero-padded to length 64 so it
    resembles the padded batches the model was trained on.
    """
    # BUG FIX: the original encoded the global `sample_pred_text` instead of
    # the `sentence` argument, so the parameter was silently ignored and the
    # function always scored whatever the global happened to hold.
    tokenized_sample_pred_text = tokenizer.encode(sentence)

    if pad:
        tokenized_sample_pred_text = pad_to_size(tokenized_sample_pred_text, 64)

    # Add a leading batch dimension of 1 before calling the model.
    predictions = model.predict(tf.expand_dims(tokenized_sample_pred_text, 0))

    return (predictions)


# + colab={} colab_type="code" id="O41gw3KfWHus"
# predict on a sample text without padding.

sample_pred_text = ('The movie was cool. The animation and the graphics '
                    'were out of this world. I would recommend this movie.')
predictions = sample_predict(sample_pred_text, pad=False)
print (predictions)

# + colab={} colab_type="code" id="kFh4xLARucTy"
# predict on a sample text with padding

sample_pred_text = ('The movie was cool. The animation and the graphics '
                    'were out of this world. I would recommend this movie.')
predictions = sample_predict(sample_pred_text, pad=True)
print (predictions)

# + colab={} colab_type="code" id="ZfIVoxiNmKBF"
plot_graphs(history, 'accuracy')

# + colab={} colab_type="code" id="IUzgkqnhmKD2"
plot_graphs(history, 'loss')

# + [markdown] colab_type="text" id="7g1evcaRpTKm"
# ## Stack two or more LSTM layers
#
# Keras recurrent layers have two available modes that are controlled by the `return_sequences` constructor argument:
#
# * Return either the full sequences of successive outputs for each timestep (a 3D tensor of shape `(batch_size, timesteps, output_features)`).
# * Return only the last output for each input sequence (a 2D tensor of shape (batch_size, output_features)).
# + colab={} colab_type="code" id="jo1jjO3vn0jo" model = tf.keras.Sequential([ tf.keras.layers.Embedding(tokenizer.vocab_size, 64), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM( 64, return_sequences=True)), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) # + colab={} colab_type="code" id="hEPV5jVGp-is" model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # + colab={} colab_type="code" id="LeSE-YjdqAeN" history = model.fit(train_dataset, epochs=10, validation_data=test_dataset) # + colab={} colab_type="code" id="_LdwilM1qPM3" test_loss, test_acc = model.evaluate(test_dataset) print('Test Loss: {}'.format(test_loss)) print('Test Accuracy: {}'.format(test_acc)) # + colab={} colab_type="code" id="ykUKnAoqbycW" # predict on a sample text without padding. sample_pred_text = ('The movie was not good. The animation and the graphics ' 'were terrible. I would not recommend this movie.') predictions = sample_predict(sample_pred_text, pad=False) print (predictions) # + colab={} colab_type="code" id="2RiC-94zvdZO" # predict on a sample text with padding sample_pred_text = ('The movie was not good. The animation and the graphics ' 'were terrible. I would not recommend this movie.') predictions = sample_predict(sample_pred_text, pad=True) print (predictions) # + colab={} colab_type="code" id="_YYub0EDtwCu" plot_graphs(history, 'accuracy') # + colab={} colab_type="code" id="DPV3Nn9xtwFM" plot_graphs(history, 'loss') # + [markdown] colab_type="text" id="9xvpE3BaGw_V" # Check out other existing recurrent layers such as [GRU layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers/GRU).
site/en/r2/tutorials/text/text_classification_rnn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Daily Coding Problem 36

# + active=""
# This problem was asked by Dropbox.
#
# Given the root to a binary search tree, find the second largest node in the tree.
# -

# # Solution
#
# An in-order traversal of the binary search tree would give us all the nodes of the tree in sorted order. So the naive solution here might be do an in-order traversal of the tree, store it in an array, and return the second-to-last element in the array.
#
# This takes O(N) time and space since we have to go through and store every node in the tree.
#
# We can do better. Notice that the in-order traversal explores always the left node first before the current node. We could do something similar to that by exploring the right node first.
#
# Let's do a reverse in-order traversal, where we first call ourselves recursively on the right node. Because it's reversed, that should give us the binary tree in reverse sorted order.
#
# So we can keep a counter, and once we start processing the current node we can increment the counter. Once it hits 2, that means the current node we're looking at is the second largest, so we can stuff it in a variable and eventually return that.

def second_largest(root):
    """Return the value of the second largest node in the BST rooted at `root`."""
    visits = 0   # how many nodes the reverse in-order walk has processed
    found = []   # receives the answer once the second node is reached

    def reverse_inorder(node):
        nonlocal visits
        # Stop descending once two nodes have been processed or we fall off the tree.
        if node is None or visits == 2:
            return
        reverse_inorder(node.right)   # largest values come first
        visits += 1
        if visits == 2:
            found.append(node.val)
            return
        reverse_inorder(node.left)

    reverse_inorder(root)
    return found[0]

# `nonlocal` lets the inner function rebind the counter directly; the result
# still lives in a list so the closure can fill it in for the outer scope.
Daily Coding Problem 36.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # Third-party import astropy.coordinates as coord from astropy.table import Table import astropy.units as u import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np # %matplotlib inline rave = Table.read('/Users/adrian/data/RAVE/RAVE-on-v1.0.fits.gz') dr5 = Table.read('/Users/adrian/data/RAVE/RAVE_DR5.fits.gz') dr5
Rave-on.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Multi-label clothing-attribute classification: fine-tune the final layer of
# a pretrained ResNet-18 to predict several binary garment attributes at once.

# +
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import copy
import os

# %matplotlib inline
# -

# Data Augmentation and Normalization (ImageNet mean/std, matching the
# pretrained ResNet weights).
data_transforms = {
    "train": transforms.Compose([
        transforms.Scale(266),
        transforms.RandomSizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    "val": transforms.Compose([
        transforms.Scale(266),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

from data_loaders import ClothingAttributesDataset, combine_labels

IMAGES_FOLDER = "data/ClothingAttributeDataset/images/"
LABEL_DIR = "data/ClothingAttributeDataset/labels/"

# na_value=0.5: unknown labels are encoded as 0.5 (see accuracy caveat below).
labels_df = combine_labels(LABEL_DIR, na_value=0.5)
# labels_df.to_csv("data/labels.csv", index=False)

# Map each target column to (column index, number of output classes).
TARGET_COLUMNS = dict([(column, (idx, 1)) for idx, column in enumerate(labels_df.columns)])
TARGET_COLUMNS["category_GT"] = (TARGET_COLUMNS["category_GT"][0], 7)
TARGET_COLUMNS["sleevelength_GT"] = (TARGET_COLUMNS["sleevelength_GT"][0], 3)
TARGET_COLUMNS["neckline_GT"] = (TARGET_COLUMNS["neckline_GT"][0], 3)

# Default: all binary attributes (multi-class columns excluded).
target_columns = [column for column in labels_df.columns if column not in ["category_GT", "sleevelength_GT", "neckline_GT"]]

labels_df.columns

# + Attribute groups present in the Clothing Attributes dataset.
binary_columns = ['skin_exposure_GT', 'collar_GT', 'gender_GT', 'scarf_GT', 'necktie_GT', 'placket_GT']
multi_columns = ['category_GT', 'neckline_GT', 'sleevelength_GT']
color_columns = ['white_GT', 'yellow_GT', 'gray_GT', 'green_GT', 'blue_GT',
                 'brown_GT', 'red_GT', 'cyan_GT', 'black_GT', 'purple_GT', 'many_colors_GT']
pattern_columns = ['pattern_spot_GT', 'pattern_solid_GT', 'pattern_graphics_GT',
                   'pattern_plaid_GT', 'pattern_stripe_GT', 'pattern_floral_GT']
# -

# This experiment trains on the binary attributes only.
target_columns = binary_columns

len(binary_columns + multi_columns + color_columns + pattern_columns)

len(target_columns)

# + NOTE: both splits currently read the same folder/labels -- there is no
# real train/val separation here; "val" is unused during training below.
dsets = {
    "train": ClothingAttributesDataset(IMAGES_FOLDER, labels_df,
                                       target_columns=target_columns,
                                       transform=data_transforms["train"]),
    "val": ClothingAttributesDataset(IMAGES_FOLDER, labels_df,
                                     target_columns=target_columns,
                                     transform=data_transforms["val"])
}
# -

dset_sizes = {
    "train": len(dsets["train"]),
    "val": len(dsets["val"])
}
dset_sizes

batch_size = 256
num_workers = 4
dset_loaders = {
    "train": torch.utils.data.DataLoader(dsets["train"], batch_size=batch_size,
                                         shuffle=True, num_workers=num_workers),
    "val": torch.utils.data.DataLoader(dsets["val"], batch_size=batch_size,
                                       shuffle=True, num_workers=num_workers)
}

len(dset_loaders["train"].dataset)

print(dir(dset_loaders["train"]))

# +
# Freeze the pretrained backbone; only the replaced head is trained.
model = torchvision.models.resnet18(pretrained=True)
for param in model.parameters():
    param.requires_grad = False

# Parameters of newly constructed modules have requires_grad=True by default.
num_features = model.fc.in_features
# One sigmoid output per binary clothing attribute.
model.fc = nn.Sequential(*[nn.Linear(num_features, len(target_columns)), nn.Sigmoid()])

use_gpu = torch.cuda.is_available()
if use_gpu:
    model = model.cuda()

# BCELoss pairs with the Sigmoid head for multi-label binary targets.
criterion = nn.BCELoss()
# -


def optim_scheduler_ft(model, epoch, init_lr=0.001, lr_decay_epoch=7):
    """Return an SGD optimizer over the head (model.fc) only, with the
    learning rate decayed by 10x every `lr_decay_epoch` epochs."""
    lr = init_lr * (0.1**(epoch//lr_decay_epoch))
    if epoch % lr_decay_epoch == 0:
        print("LR is set to {}".format(lr))
    optimizer = optim.SGD(model.fc.parameters(), lr=lr, momentum=0.9)
    return optimizer


def train_model(model, criterion, optim_scheduler, num_epochs=25):
    """Train `model` with per-attribute BCE loss.

    `optim_scheduler(model, epoch)` must return a fresh optimizer for the
    epoch.  Only the "train" phase runs (the "val" phase is commented out),
    so `best_model`/`best_acc` are never updated and the initial model is
    returned after the final epoch -- kept as-is to preserve behavior.
    """
    since = time.time()
    best_model = model
    best_acc = 0.0
    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs - 1))
        # Each epoch has a train and validation Phase
        for phase in ["train"]:  # "val"
            if phase == "train":
                optimizer = optim_scheduler(model, epoch)
            running_loss = 0.0
            running_corrects = 0.0
            # Iterate over data
            for data in dset_loaders[phase]:
                inputs, labels = data
                labels = labels.float()
                num_targets = labels.size()[1]
                # Wrap them in Variable (legacy pre-0.4 PyTorch API)
                if use_gpu:
                    inputs, labels = Variable(inputs.cuda()), \
                        Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)
                optimizer.zero_grad()
                # Forward: sum BCE over each attribute column.
                outputs = model(inputs)
                preds = torch.round(outputs)
                total_loss = sum(criterion(outputs[:, col], labels[:, col])
                                 for col in range(num_targets))
                # Backward + optimize only in training phase
                if phase == "train":
                    total_loss.backward()
                    optimizer.step()
                # Statistics
                running_loss += total_loss.data[0]
                # Caveat: unknown labels encoded as 0.5 are rounded to ones.
                running_corrects += torch.sum(preds.data == labels.data) / num_targets
            epoch_loss = running_loss / dset_sizes[phase]
            epoch_acc = running_corrects / dset_sizes[phase]
            print("{} Loss: {:.4f} Acc: {:.4f}".format(phase, epoch_loss, epoch_acc))
            # Deep copy the model (only reachable if the "val" phase is enabled)
            if phase == "val" and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model = copy.deepcopy(model)
    time_elapsed = time.time() - since
    print("Training completed in {:0f}m {:0f}s".format(
        time_elapsed // 60, time_elapsed % 60))
    print("Best val Acc: {:4f}".format(best_acc))
    return best_model


dset_sizes["train"]

model = train_model(model, criterion, optim_scheduler_ft, num_epochs=10)

# (Removed stray debug cells that referenced undefined names:
#  `np.isnull`, `tot_loss`, `losses`, `loss1.data[0]` -- all raised
#  NameError/AttributeError when executed.)

# ## Validate the Results

# Grab one training batch and run it through the trained model.
for data in dset_loaders["train"]:
    inputs, labels = data
    if use_gpu:
        X = Variable(inputs.cuda())
    else:
        X = Variable(inputs)
    results = model(X)
    if use_gpu:
        results = results.cpu()
    break

labels_arr = labels.numpy()
labels_arr.size
results_arr = results.data.numpy()

from sklearn.metrics import confusion_matrix
# Flatten all attributes together; rounding thresholds the sigmoid outputs.
confusion_matrix(np.round(results_arr.flatten()).astype(int),
                 labels_arr.flatten().astype(int))

# + Exploratory: use a frozen ResNet as a fixed feature extractor.
# (Fixed typo: was `reset = ...` while the loop below iterated `resnet`,
# which raised NameError.)
resnet = torchvision.models.resnet18(pretrained=True)
for param in resnet.parameters():
    param.requires_grad = False
# -

# Everything except the final fully-connected layer.
pretrained_model = nn.Sequential(*list(resnet.children())[:-1])

f = resnet(Variable(inputs))
f = f.view(f.size(0), -1)

# (Removed `outputs = outputs.view(0, -1)` -- `outputs` was undefined at this
#  point and `view(0, -1)` is not a valid reshape.)

model = nn.Sequential(nn.Linear(512, 3))

if use_gpu:
    resnet.cuda()

pretrained_model = nn.Sequential(*list(resnet.children())[:-1])

# Run every training batch through the frozen backbone.
for data in dset_loaders["train"]:
    inputs, labels = data
    labels = labels.float()
    num_targets = labels.size()[1]
    if use_gpu:
        inputs, labels = Variable(inputs.cuda()), \
            Variable(labels.cuda())
    else:
        inputs, labels = Variable(inputs), Variable(labels)
    # Forward
    outputs = resnet(inputs)
ml_src/old_code/clothing-attributes-binary-pytorch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #### Putting It All Together
#
# As you might have guessed from the last notebook, using all of the variables was allowing you to drastically overfit the training data. This was great for looking good in terms of your Rsquared on these points. However, this was not great in terms of how well you were able to predict on the test data.
#
# We will start where we left off in the last notebook. First read in the dataset.

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import AllTogether as t  # course-provided helper/checker module
import seaborn as sns
# %matplotlib inline

# Stack Overflow developer survey responses (one row per respondent).
df = pd.read_csv('./survey_results_public.csv')
df.head()
# -

# #### Question 1
#
# **1.** To begin fill in the format function below with the correct variable. Notice each **{ }** holds a space where one of your variables will be added to the string. This will give you something to do while the the function does all the steps you did throughout this lesson.

# +
# Candidate answers; the chosen letters are passed to .format() below.
a = 'test_score'
b = 'train_score'
c = 'linear model (lm_model)'
d = 'X_train and y_train'
e = 'X_test'
f = 'y_test'
g = 'train and test data sets'
h = 'overfitting'

q1_piat = '''In order to understand how well our {} fit the dataset, we first needed to split our data into {}. Then we were able to fit our {} on the {}. We could then predict using our {} by providing the linear model the {} for it to make predictions. These predictions were for {}. By looking at the {}, it looked like we were doing awesome because it was 1! However, looking at the {} suggested our model was not extending well. The purpose of this notebook will be to see how well we can get our model to extend to new data. This problem where our data fits the training data well, but does not perform well on test data is commonly known as {}.'''.format(c, g, c, d, c, e, f, b, a, h)

print(q1_piat)
# -

# Print the solution order of the letters in the format
t.q1_piat_answer()

# #### Question 2
#
# **2.** Now, we need to improve the model . Use the dictionary below to provide the true statements about improving **this model**. **Also consider each statement as a stand alone**. Though, it might be a good idea after other steps, which would you consider a useful **next step**?

# +
a = 'yes'
b = 'no'

# Answers: which improvement strategies apply to this model.
q2_piat = {'add interactions, quadratics, cubics, and other higher order terms': b,
           'fit the model many times with different rows, then average the responses': a,
           'subset the features used for fitting the model each time': a,
           'this model is hopeless, we should start over': b}
# -

#Check your solution
t.q2_piat_check(q2_piat)

# ##### Question 3
#
# **3.** Before we get too far along, follow the steps in the function below to create the X (explanatory matrix) and y (response vector) to be used in the model. If your solution is correct, you should see a plot similar to the one shown in the Screencast.

# +
def clean_data(df):
    '''
    INPUT
    df - pandas dataframe

    OUTPUT
    X - A matrix holding all of the variables you want to consider when predicting the response
    y - the corresponding response vector

    This function cleans df using the following steps to produce X and y:
    1. Drop all the rows with no salaries
    2. Create X as all the columns that are not the Salary column
    3. Create y as the Salary column
    4. Drop the Salary, Respondent, and the ExpectedSalary columns from X
    5. For each numeric variable in X, fill the column with the mean value of the column.
    6. Create dummy columns for all the categorical variables in X, drop the original columns
    '''
    # 1. Drop all the rows with no salaries
    df = df.dropna(subset=['Salary'], axis=0)

    # 3. Create y as the Salary column
    # (done before dropping it from the feature frame in the next step)
    y = df['Salary']

    # 2. Create X as all the columns that are not the Salary column
    # 4. Drop the Salary, Respondent, and the ExpectedSalary columns from X
    df = df.drop(['Respondent', 'ExpectedSalary', 'Salary'], axis=1)

    # 5. For each numeric variable in X, fill the column with the mean value of the column.
    num_vars = df.select_dtypes(include=['float', 'int']).columns
    for col in num_vars:
        df[col].fillna((df[col].mean()), inplace=True)

    # 6. Create dummy columns for all the categorical variables in X, drop the original columns
    cat_vars = df.select_dtypes(include=['object']).copy().columns
    for var in cat_vars:
        # for each cat add dummy var, drop original column
        # drop_first=True avoids perfectly collinear dummy sets
        df = pd.concat([df.drop(var, axis=1),
                        pd.get_dummies(df[var], prefix=var, prefix_sep='_', drop_first=True)],
                       axis=1)

    X = df
    return X, y


#Use the function to create X and y
X, y = clean_data(df)

# +
#cutoffs here pertains to the number of missing values allowed in the used columns.
#Therefore, lower values for the cutoff provides more predictors in the model.
cutoffs = [5000, 3500, 2500, 1000, 100, 50, 30, 25]

# Fits one linear model per cutoff and returns train/test r2 plus the best fit.
r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test = t.find_optimal_lm_mod(X, y, cutoffs)
# -

# #### Question 4
#
# **4.** Use the output and above plot to correctly fill in the keys of the **q4_piat** dictionary with the correct variable. Notice that only the optimal model results are given back in the above - they are stored in **lm_model**, **X_train**, **X_test**, **y_train**, and **y_test**. If more than one answer holds, provide a tuple holding all the correct variables in the order of first variable alphabetically to last variable alphabetically.
print(X_train.shape[1]) #Number of columns
print(r2_scores_test[np.argmax(r2_scores_test)]) # The model we should implement test_r2
print(r2_scores_train[np.argmax(r2_scores_test)]) # The model we should implement train_r2

# +
# Candidate answers for Question 4.
a = 'we would likely have a better rsquared for the test data.'
b = 1000
c = 872
d = 0.69
e = 0.82
f = 0.88
g = 0.72
h = 'we would likely have a better rsquared for the training data.'

q4_piat = {'The optimal number of features based on the results is': c,
           'The model we should implement in practice has a train rsquared of': e,
           'The model we should implement in practice has a test rsquared of': d,
           'If we were to allow the number of features to continue to increase': h
}
# -

#Check against your solution
t.q4_piat_check(q4_piat)

# #### Question 5
#
# **5.** The default penalty on coefficients using linear regression in sklearn is a ridge (also known as an L2) penalty. Because of this penalty, and that all the variables were normalized, we can look at the size of the coefficients in the model as an indication of the impact of each variable on the salary. The larger the coefficient, the larger the expected impact on salary.
#
# Use the space below to take a look at the coefficients. Then use the results to provide the **True** or **False** statements based on the data.

# +
def coef_weights(coefficients, X_train):
    '''
    INPUT:
    coefficients - the coefficients of the linear model
    X_train - the training data, so the column names can be used
    OUTPUT:
    coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate)

    Provides a dataframe that can be used to understand the most influential coefficients
    in a linear model by providing the coefficient estimates along with the name of the
    variable attached to the coefficient, sorted by absolute magnitude (descending).
    '''
    coefs_df = pd.DataFrame()
    coefs_df['est_int'] = X_train.columns
    # Bug fix: use the `coefficients` argument instead of reaching out to the
    # global `lm_model.coef_` (the parameter was previously ignored, so the
    # function silently depended on module state).
    coefs_df['coefs'] = coefficients
    coefs_df['abs_coefs'] = np.abs(coefficients)
    coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
    return coefs_df

#Use the function
coef_df = coef_weights(lm_model.coef_, X_train)

#A quick look at the top results
coef_df.head(20)

# +
a = True
b = False

#According to the data...
q5_piat = {'Country appears to be one of the top indicators for salary': a,
           'Gender appears to be one of the indicators for salary': b,
           'How long an individual has been programming appears to be one of the top indicators for salary': a,
           'The longer an individual has been programming the more they are likely to earn': b}
# -

t.q5_piat_check(q5_piat)

# #### Congrats of some kind
#
# Congrats! Hopefully this was a great review, or an eye opening experience about how to put the steps together for an analysis. List the steps. In the next lesson, you will look at how take this and show it off to others so they can act on it.
L1_CRISP-DM/notebooks/Putting It All Together - Solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 第二十二讲 对角化和A的幂
#
# ## 对角化矩阵
#
# 将矩阵 $A$ 的所有特征向量作为矩阵 $S$ 的列,那么 $$A * S = A \begin{bmatrix}x_1 & x_2 & \dots & x_n\end{bmatrix} = \begin{bmatrix}\lambda_1x_1 & \lambda_2x_2 & \dots & \lambda_nx_n\end{bmatrix} = \begin{bmatrix}x_1 & x_2 & \dots & x_n \end{bmatrix}\begin{bmatrix} \lambda_1 & 0 & \dots & 0 \\ 0 & \lambda_2 & \dots & 0\\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \dots & \lambda_n\end{bmatrix} = S\Lambda$$
#
# 其中 $\Lambda$ 矩阵为对角线均为特征值,其它位置均为零的对角矩阵。因为 $AS = S\Lambda$,所以有 $A = S\Lambda S^{-1}$,这种分解称为**对角化**(Diagonalization)。若矩阵的特征值均不相同,则该矩阵一定可以对角化(特征值互不相同是可对角化的充分条件,而非必要条件)。
#
# 结合之前的章节,现在一共有 $3$ 种矩阵分解:
# * 消元,$A = LU$
# * 正交,$A = QR$
# * 对角化,$A = S\Lambda S^{-1}$

# ## A的幂
#
# 现在来看矩阵的平方 $A^2$,如果 $Ax = \lambda x$,那么 $A^2x = \lambda Ax = \lambda^2x$;如果将矩阵进行对角化, $A = S\Lambda S^{-1}$,那么 $A^2 = S\Lambda S^{-1}S\Lambda S^{-1} = S \Lambda^2S^{-1}$;推广到矩阵的 $k$ 次幂,有 $A^k = S\Lambda^{k}S^{-1}$。
#
# 定理:如果矩阵所有特征值都满足 $|\lambda| < 1$,那么当 $k \rightarrow \infty$ 时,$A^k \rightarrow 0$
#
# 注意:以上所有的结论都建立在矩阵可对角化(例如特征值互不相同)的前提下。

# ## 求解$u_{k+1} = Au_k$
#
# 假设有序列,首项为 $u_0$,序列每项满足 $u_{k+1} = Au_{k}$。首先将 $u_0$ 展开为矩阵 $A$ 特征向量的线性组合,即 $u_0 = c_1x_1 + c_2x_2 + \dots + c_nx_n = Sc$,其中 $x_i$ 为矩阵 $A$ 的特征向量。那么 $Au_0 = c_1\lambda_1x_1 + \dots + c_n\lambda_nx_n = S\Lambda c$,推广到 $k$ 次幂有 $A^ku_0 = c_1\lambda_1^kx_1 + \dots + c_n\lambda_n^kx_n = S\Lambda^k c$。
#
# 例题:以斐波那契数列为例,数列首项 $F_0 = 0$,第二项 $F_1 = 1$,而第 $k$ 项满足 $F_k = F_{k-2} + F_{k-1}$,那么现在设 $$u_k = \begin{bmatrix}F_{k+1} \\ F_k \end{bmatrix}, u_{k+1} = \begin{bmatrix} F_{k+2} \\ F_{k+1} \end{bmatrix} = \begin{bmatrix} F_{k+1} + F_{k} \\ F_{k+1}\end{bmatrix}$$
#
# 因此有 $A = \begin{bmatrix} 1 & 1 \\ 1 & 0\end{bmatrix}$,其中特征值有 $\lambda_1 = \frac{1 + \sqrt{5}}{2}, \lambda_2 = \frac{1 - \sqrt{5}}{2}$,特征向量 $x_1 = \begin{bmatrix} \frac{1 + \sqrt{5}}{2} \\ 1\end{bmatrix}, x_2 = \begin{bmatrix} \frac{1 - \sqrt{5}}{2} \\ 1\end{bmatrix}$。然后,我们需要将 $u_0 = \begin{bmatrix}1 \\ 0\end{bmatrix}$ 分解为 $c_1x_1 + c_2x_2$,解得 $c_1 = \frac{\sqrt{5}}{5}, c_2 = -\frac{\sqrt{5}}{5}$。通解形式有 $u_k=c_1\lambda_1^kx_1+c_2\lambda_2^kx_2$。
src/Linear Algebra/Lecture22.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# source: https://www.kaggle.com/c/house-prices-advanced-regression-techniques

import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn;
# this notebook targets the legacy API (Python 2 kernel).
from sklearn.cross_validation import cross_val_score

train_df = pd.read_csv("./input/train.csv")
test_df = pd.read_csv("./input/test.csv")


# +
def scale_features(X):
    '''Scale the features columnwise to 0 mean and normalize by range.

    Operates in place on `X` and also returns it.  Constant columns are
    only centered (they would previously trigger a divide-by-zero and
    fill the column with NaN/inf).
    '''
    for i in range(X.shape[1]):
        col_range = X[:, i].max() - X[:, i].min()
        centered = X[:, i] - X[:, i].mean()
        if col_range == 0:
            # Constant column: nothing to normalize by.
            X[:, i] = centered
        else:
            X[:, i] = centered / col_range
    return X


def fill_nan(X):
    '''Replace NaNs with the (NaN-ignoring) mean of each column, in place.'''
    for i in range(X.shape[1]):
        mean = np.nanmean(X[:, i])
        mask = np.isnan(X[:, i])
        X[mask, i] = mean
    return X


def strings_to_num(df):
    '''Input: df (Pandas dataframe)
    Output: numpy array with categorical (string) columns transformed into
    numerical codes (pd.factorize; missing values become -1).
    '''
    return df.apply(lambda x: pd.factorize(x)[0]).values


def df_to_numpy_array(df):
    '''Separate categorical and numerical columns of a dataframe and return
    a single numeric feature matrix.

    Numerical columns are NaN-filled with their mean and scaled; categorical
    columns are factorized.  Output layout: [categorical | numerical].
    '''
    # get names of numerical columns
    num_columns = list(df.select_dtypes(include=['float64', 'int64']).columns.values)
    # get numerical values into NumPy array
    num_values = df[num_columns].values
    # fill NaN in numerical features
    num_values = fill_nan(num_values)
    # scale numerical features
    num_values = scale_features(num_values)
    # get categorical columns
    cat_columns = list(df.select_dtypes(include=['object']).columns.values)
    # transform categorical columns into numpy array
    cat_values = strings_to_num(df[cat_columns])
    return np.concatenate((cat_values, num_values), axis=1)


feature_names = train_df.drop(["SalePrice", "Id"], axis=1).columns.tolist()
num_features = len(feature_names)

X = df_to_numpy_array(train_df.drop(["SalePrice", "Id"], axis=1))  # training data
Xtest = df_to_numpy_array(test_df.drop(["Id"], axis=1))  # test data
y = train_df['SalePrice'].values  # target
test_ids = test_df["Id"].values

print("train_df:\n%r" % (train_df[:10]))
# -


# define base model
def model():
    """Build a small fully-connected regression network (single output)."""
    # create model
    model = Sequential()
    model.add(Dense(60, input_dim=num_features, kernel_initializer='normal', activation='relu'))
    model.add(Dense(20, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model; msle is robust to the wide range of sale prices.
    model.compile(loss='msle', optimizer='adam')
    return model


# +
# evaluate model with standardized dataset
kr = KerasRegressor(build_fn=model, nb_epoch=100, batch_size=5, verbose=0)
scores = cross_val_score(kr, X, y, cv=4)
print("msle = %4.2f std = %4.2f" % (scores.mean(), scores.std()))
# -

kr.fit(X, y, epochs=100, batch_size=5)

result = kr.predict(X)
print("result.shape:%r" % (result.shape))
print("%r" % (result[:10]))
print("%r" % (y[:10]))
todo/[Kaggle]House Prices Advanced Regression Techniques/House Prices Advanced Regression Techniques.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Produce table-entry-page-retrieval-effectiveness
#
# Evaluates a set of retrieval runs (anchor text, content, ORCAS, DeepCT,
# neural re-rankers, LambdaMART) on navigational entry-page topics for
# MS MARCO v1 and v2, and emits a complete LaTeX results table.

# +
# #!pip install trectools
from trectools import TrecQrel, TrecRun, TrecEval
# -

# +
# Qrel/run locations. The run directories are absolute cluster paths, so this
# notebook only reproduces the table on the original infrastructure.
QREL_DIR='../../Data/navigational-topics-and-qrels-ms-marco-v1/'
QREL_DIR_MARCO_V2='../../Data/navigational-topics-and-qrels-ms-marco-v2/'
RUN_DIR='/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/retrievalExperiments/runs-ecir22/'
RUN_DIR_MARCO_V2='/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/retrievalExperiments/runs-marco-v2-ecir22/'

# Topic set name -> display name (V2 names are derived in the LaTeX template).
TOPIC_TO_NAME={'entrypage-random': 'Random@V1', 'entrypage-popular': 'Popular@V1'}

QRELS={i: TrecQrel(QREL_DIR + 'qrels.msmarco-' + i + '.txt') for i in TOPIC_TO_NAME.keys()}
QRELS_MARCO_V2={i: TrecQrel(QREL_DIR_MARCO_V2 + 'qrels.msmarco-v2-' + i + '.txt') for i in TOPIC_TO_NAME.keys()}
# -

# +
# Display name of each approach -> run file name, per MS MARCO version.
APPROACH_TO_MARCO_V1_RUN_FILE={
    'BM25@2016-07': 'run.cc-16-07-anchortext.bm25-default.txt',
    'BM25@2017-04': 'run.cc-17-04-anchortext.bm25-default.txt',
    'BM25@2018-13': 'run.cc-18-13-anchortext.bm25-default.txt',
    'BM25@2019-47': 'run.cc-19-47-anchortext.bm25-default.txt',
    'BM25@2020-05': 'run.cc-20-05-anchortext.bm25-default.txt',
    'BM25@2021-04': 'run.cc-21-04-anchortext.bm25-default.txt',
    'BM25@16--21': 'run.cc-combined-anchortext.bm25-default.txt',
    'BM25@Content': 'run.ms-marco-content.bm25-default.txt',
    'BM25@Title': 'run.msmarco-document-v1-title-only.pos+docvectors+raw.bm25-default.txt',
    'BM25@Orcas': 'run.orcas.bm25-default.txt',
    'DeepCT@Anchor': 'run.ms-marco-deepct-v1-anserini-docs-cc-2019-47-sampled-test-overlap-removed-389979.bm25-default.txt',
    'DeepCT@Orcas': 'run.ms-marco-deepct-v1-anserini-docs-orcas-sampled-test-overlap-removed-390009.bm25-default.txt',
    'DeepCT@Train':'run.ms-marco-deepct-v1-anserini-docs-ms-marco-training-set-test-overlap-removed-389973.bm25-default.txt',
    'MonoT5': 'run.ms-marco-content.bm25-mono-t5-maxp.txt',
    'MonoBERT': 'run.ms-marco-content.bm25-mono-bert-maxp.txt',
    'LambdaMART@CTA':'run.ms-marco.lambda-mart-cta-trees-1000.txt',
    'LambdaMART@CTOA':'run.ms-marco.lambda-mart-ctoa-trees-1000.txt',
    'LambdaMART@CTO':'run.ms-marco.lambda-mart-cto-trees-1000.txt',
    'LambdaMART@CT':'run.ms-marco.lambda-mart-ct-trees-1000.txt',
}

APPROACH_TO_MARCO_V2_RUN_FILE={
    'BM25@Content': 'run.msmarco-doc-v2.bm25-default.txt',
    'BM25@Orcas': 'run.orcas-ms-marco-v2.bm25-default.txt',
    'BM25@2016-07': 'run.cc-16-07-anchortext.bm25-default.txt',
    'BM25@2017-04': 'run.cc-17-04-anchortext.bm25-default.txt',
    'BM25@2018-13': 'run.cc-18-13-anchortext.bm25-default.txt',
    'BM25@2019-47': 'run.cc-19-47-anchortext-v2.bm25-default.txt',
    'BM25@2020-05': 'run.cc-20-05-anchortext.bm25-default.txt',
    'BM25@2021-04': 'run.cc-21-04-anchortext.bm25-default.txt',
    'BM25@16--21': 'run.cc-union-16-to-21-anchortext-1000.bm25-default.txt',
    'DeepCT@Anchor': 'run.ms-marco-deepct-v2-anserini-docs-cc-2019-47-sampled-test-overlap-removed-389979.bm25-default.txt',
    'DeepCT@Orcas': 'run.ms-marco-deepct-v2-anserini-docs-orcas-sampled-test-overlap-removed-390009.bm25-default.txt',
    'DeepCT@Train':'run.ms-marco-deepct-v2-anserini-docs-ms-marco-training-set-test-overlap-removed-389973.bm25-default.txt',
    'MonoT5': 'run.ms-marco-content.bm25-mono-t5-maxp.txt',
    'MonoBERT': 'run.ms-marco-content.bm25-mono-bert-maxp.txt',
    'LambdaMART@CTA':'run.ms-marco.lambda-mart-cta-trees-1000.txt',
    'LambdaMART@CTOA':'run.ms-marco.lambda-mart-ctoa-trees-1000.txt',
    'LambdaMART@CTO':'run.ms-marco.lambda-mart-cto-trees-1000.txt',
    'LambdaMART@CT':'run.ms-marco.lambda-mart-ct-trees-1000.txt',
}
# -

# +
def recall(trec_eval, depth):
    # Counts relevant docs retrieved in the top `depth` per query, averaged
    # over all queries.  NOTE(review): divides by the number of queries, so
    # this equals recall@depth only when each topic has (at most) one
    # relevant document -- true for these navigational qrels; verify before
    # reusing elsewhere.
    import pandas as pd
    trecformat = trec_eval.run.run_data.sort_values(["query", "score", "docid"], ascending=[True,False,False]).reset_index()
    topX = trecformat.groupby("query")[["query","docid"]].head(depth)
    merged = pd.merge(topX[["query","docid"]], trec_eval.qrels.qrels_data[["query","docid","rel"]])
    nqueries = len(trec_eval.qrels.topics())
    result = merged[merged["rel"]>0].groupby("query")["rel"].count()
    return result.sum()/nqueries

def is_anchor_text(run_file):
    # Anchor-text runs are bolded against a separate per-group maximum.
    return '-anchortext' in run_file

def format_score(score, run_file, topics, position):
    # Format a score to two decimals, bolding it when it reaches the known
    # per-column maximum for its group (anchor-text vs. everything else).
    # `position` indexes the six table columns: V1 MRR/R@3/R@10, then V2.
    # Manually maintained
    if is_anchor_text(run_file):
        max_scores = {
            'entrypage-random': [0.74, 0.82, 0.88, 0.66, 0.69, 0.8],
            'entrypage-popular': [0.62, 0.71, 0.85, 0.56, 0.63, 0.79],
        }
    else:
        max_scores = {
            'entrypage-random': [0.59, 0.63, 0.69, 0.55, 0.58, 0.65],
            'entrypage-popular': [0.28, 0.32, 0.43, 0.27, 0.32, 0.43],
        }
    ret = '{:.2f}'.format(score)
    if max_scores[topics][position] <= score:
        return '{\\textbf{' + ret + '}} '
    else:
        return ret

def eval_on_marco_v1(run_file):
    # LaTeX cells for one run on MS MARCO v1: MRR, R@3, R@10 for both topic
    # sets; trailing '&' joins onto the v2 half of the row.
    ret = ' '
    for topics in ['entrypage-random', 'entrypage-popular']:
        run = TrecRun(RUN_DIR + topics + '/' + run_file)
        trec_eval=TrecEval(run, QRELS[topics])
        ret += '& ' + format_score(trec_eval.get_reciprocal_rank(), run_file, topics, 0)
        ret += '& ' + format_score(recall(trec_eval, 3), run_file, topics, 1)
        ret += '& ' + format_score(recall(trec_eval, 10), run_file, topics, 2)
    return ret + '&'

def eval_on_marco_v2(run_file):
    # Same as eval_on_marco_v1 but for the v2 runs/qrels (positions 3-5).
    ret = ' '
    for topics in ['entrypage-random', 'entrypage-popular']:
        run = TrecRun(RUN_DIR_MARCO_V2 + topics + '/' + run_file)
        trec_eval=TrecEval(run, QRELS_MARCO_V2[topics])
        ret += (' ' if 'random' in topics else '& ') + format_score(trec_eval.get_reciprocal_rank(), run_file, topics, 3)
        ret += '& ' + format_score(recall(trec_eval, 3), run_file, topics, 4)
        ret += '& ' + format_score(recall(trec_eval, 10), run_file, topics, 5)
    return ret

def table_row(approach):
    # One LaTeX table row; missing runs render as '---' placeholders.
    v1_eval = ' & --- & --- & --- & --- & --- & --- &'
    if approach in APPROACH_TO_MARCO_V1_RUN_FILE:
        v1_eval = eval_on_marco_v1(APPROACH_TO_MARCO_V1_RUN_FILE[approach])
    v2_eval = ' --- & --- & --- & --- & --- & ---'
    if approach in APPROACH_TO_MARCO_V2_RUN_FILE:
        v2_eval = eval_on_marco_v2(APPROACH_TO_MARCO_V2_RUN_FILE[approach])
    return '& ' + approach + v1_eval + v2_eval + ' \\\\'

def table_entry_page_retrieval_effectiveness():
    # Assemble the full LaTeX table*, evaluating every approach.
    return '''\\begin{table*}[bt]
\\setlength{\\tabcolsep}{0.3em}
\\caption{Overview of the retrieval effectiveness on 100~random entry page topics and 100~entry page topics for popular pages on version~1 of MS~Marco (V1) and version~2 of MS~Marco (V2). We Report the mean reciprocal rank (MRR) and the recall at~3 (R@3) and at~10 (R@10).}
\\label{table-entry-page-retrieval-effectiveness}
\\scriptsize
\\begin{tabular*}{\\textwidth}{@{\\extracolsep{\\fill}}ll@{\\qquad}ccc@{\\quad}ccc@{\\quad}ccc@{\\quad}ccc@{}}
\\toprule
 & & \\multicolumn{3}{@{}c@{\\quad}}{Random@V1} & \\multicolumn{3}{@{}c@{\\quad}}{Popular@V1} & \\multicolumn{3}{@{}c@{\\qquad}}{Random@V2} & \\multicolumn{3}{@{}c@{}}{Popular@V2} \\\\
 \\cmidrule(r{1em}){3-5} \\cmidrule(r{1em}){6-8} \\cmidrule(r{1em}){9-11} \\cmidrule{12-14}
 & & MRR & R@3 & R@10 & MRR & R@3 & R@10 & MRR & R@3 & R@10 & MRR & R@3 & R@10 \\\\
\\midrule
\\multirow{7}{*}{\\rotatebox[origin=c]{90}{\\parbox[c]{4em}{\\centering \\textbf{Anchor}}}}
''' + table_row('BM25@2016-07') + '''
''' + table_row('BM25@2017-04') + '''
''' + table_row('BM25@2018-13') + '''
''' + table_row('BM25@2019-47') + '''
''' + table_row('BM25@2020-05') + '''
''' + table_row('BM25@2021-04') + '''
''' + table_row('BM25@16--21') + '''
\\midrule
\\multirow{7}{*}{\\rotatebox[origin=c]{90}{\\parbox[c]{4em}{\\centering \\textbf{Baselines}}}}
''' + table_row('BM25@Content') + '''
''' + table_row('BM25@Orcas') + '''
''' + table_row('DeepCT@Anchor') + '''
''' + table_row('DeepCT@Orcas') + '''
''' + table_row('DeepCT@Train') + '''
''' + table_row('MonoT5') + '''
''' + table_row('MonoBERT') + '''
''' + table_row('LambdaMART@CTOA') + '''
''' + table_row('LambdaMART@CTO') + '''
''' + table_row('LambdaMART@CTA') + '''
''' + table_row('LambdaMART@CT') + '''
\\bottomrule
\\end{tabular*}
\\vspace*{-2ex}
\\end{table*}
'''

print(table_entry_page_retrieval_effectiveness())
src/jupyter/table-entry-page-retrieval-effectiveness.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# ## Fig. 2 Circulation around idealized Astoria and Barkley canyons
#

import cmocean as cmo
import matplotlib.pyplot as plt
import matplotlib.gridspec as gspec
import matplotlib as mpl
import matplotlib.patches as patch
# %matplotlib inline
from netCDF4 import Dataset
import numpy as np
import seaborn as sns
import xarray as xr

import canyon_tools.readout_tools as rout
import canyon_tools.savitzky_golay as sg


# +
def calc_rho(RhoRef, T, S, alpha=2.0E-4, beta=7.4E-4):
    """Compute density from a linear equation of state.

    Parameters
    ----------
    RhoRef : float or array
        Reference density at the same z as the T and S slices. Can be a
        scalar or a vector, depending on the size of T and S.
    T, S : array
        Temperature and salinity, at least 2D, coordinate order (..., Y, X).
    alpha : float
        Thermal expansion coefficient (1/degC).
    beta : float
        Haline expansion coefficient.

    Returns
    -------
    rho : array
        Density, shape (..., ny, nx).
    """
    # Linear eq. of state: rho = RhoRef * (1 - alpha*T + beta*S)
    rho = RhoRef * (np.ones(np.shape(T)) - alpha * (T[..., :, :]) + beta * (S[..., :, :]))
    return rho


def call_unstag(t):
    # NOTE(review): references a module-level `state` that is never defined in
    # this notebook (only stateA/stateB exist) — appears unused; confirm before
    # calling.
    UU, VV = rout.unstagger(state.U.isel(T=t, Xp1=slice(0, 361)),
                            state.V.isel(T=t, X=slice(0, 360)))
    return (UU, VV)


def call_rho(state, t, yslice, xslice):
    """Slice T and S from `state` and return linear-EOS density (uses global RhoRef)."""
    T = state.Temp.isel(T=t, Y=yslice, X=xslice)
    S = state.S.isel(T=t, Y=yslice, X=xslice)
    rho = calc_rho(RhoRef, T, S, alpha=2.0E-4, beta=7.4E-4)
    return (rho)


def mask2DCanyon(bathy, sbdepth=-152.5):
    '''Mask out the canyon from the shelf.

    bathy  : depths 2D array from the grid file
    sbdepth: shelf depth, always negative float
    Returns mask (True over the canyon, i.e. where -bathy < sbdepth).
    '''
    # FIX: previously the sbdepth argument was ignored and -152.5 was
    # hard-coded, so callers with a different shelf-break depth got the
    # wrong mask.
    bathyMasked = np.ma.masked_less(-bathy, sbdepth)
    return (bathyMasked.mask)


def ConcAreaFunc(Tr, hfac, ra, bathy, sbdepth=-152.5):
    '''Near-bottom tracer concentration and cell areas, masked to the shelf.

    Tr     : Tracer field (nt, nz, ny, nx)
    hfac   : fraction of open cell at center (nz, ny, nx)
    ra     : array of cell horizontal areas (ny, nx)
    bathy  : depths 2D array from the grid file (ny, nx)
    sbdepth: shelf break depth (negative value)

    RETURNS:
    ConcArea     = concentration at cell closest to bottom times its area (nt, ny, nx)
    ConcFiltered = near-bottom concentration, Savitzky-Golay filtered along y (nt, ny, nx)
    Area         = area of the bottom cell where the tracer is positive (nt, ny, nx)
    '''
    shape = np.shape(Tr)
    ConcArea = np.empty((shape[0], shape[2], shape[3]))
    Conc = np.empty((shape[0], shape[2], shape[3]))
    ConcFiltered = np.empty((shape[0], shape[2], shape[3]))
    Area = np.empty((shape[0], shape[2], shape[3]))

    # Start looking for the first non-land cell from the bottom up.
    BottomInd = np.argmax(hfac[::-1, :, :] > 0.0, axis=0)
    # FIX: index of the unreversed z axis must be an integer array;
    # the old `np.ones(...)*89 - BottomInd` produced floats, which raise
    # IndexError when used as indices on modern NumPy.
    BottomInd = (89 - BottomInd).astype(int)
    print(np.shape(BottomInd))

    for tt in range(shape[0]):
        for j in range(shape[3]):
            for i in range(shape[2]):
                TrBottom = Tr[tt, BottomInd[i, j], i, j]
                if TrBottom > 0.0:
                    ConcArea[tt, i, j] = TrBottom * ra[i, j]
                    Conc[tt, i, j] = TrBottom
                    Area[tt, i, j] = ra[i, j]
                else:
                    # np.nan (np.NaN was removed in NumPy 2.0)
                    ConcArea[tt, i, j] = np.nan
                    Conc[tt, i, j] = np.nan
                    Area[tt, i, j] = np.nan
            # Filter step noise along the cross-shore direction.
            ConcFiltered[tt, :, j] = sg.savitzky_golay(Conc[tt, :, j], 7, 3)
    print(np.shape(ConcArea))

    maskShelf = mask2DCanyon(bathy, sbdepth)
    maskShelf = np.expand_dims(maskShelf, 0)          # expand along time dimension
    maskShelf = maskShelf + np.zeros(Conc.shape)      # broadcast to (nt, ny, nx)

    return (np.ma.masked_array(ConcArea, mask=maskShelf),
            np.ma.masked_array(ConcFiltered, mask=maskShelf),
            np.ma.masked_array(Area, mask=maskShelf),
            )


# +
def U_vel(ax, UU, yslice, xind, grid, MaskC):
    '''Contour the mean cross-shore section of u (cm/s).

    UU is the mean u field, unstaggered, size (nz, ny, nx);
    ax is the corresponding handle for the axis.
    Uses the module-level figure `f` for the colorbar axes.
    '''
    umin = -0.6
    umax = 0.6
    csU = np.linspace(umin, umax, num=20)
    Uplot = np.ma.array(UU.isel(Xp1=xind, Y=yslice).data, mask=MaskC[:, yslice, xind])
    mesh = ax.contourf(grid.Y[yslice] / 1000, grid.Z[:], Uplot[:, :] * 100.0,
                       csU * 100, cmap=cmo.cm.balance)
    cbar_ax = f.add_axes([0.66, 0.37, 0.015, 0.13])
    cb = f.colorbar(mesh, cax=cbar_ax, ticks=[-60, -40, -20, -0, 20, 40, 60], format='%d')
    cb.ax.yaxis.set_tick_params(pad=2)
    ax.set_facecolor('#a99582')   # land/topography fill color
    #ax.set_xlabel('Alongshore distance (km)',labelpad=0.0)
    ax.tick_params(axis='x', pad=1.2)
    ax.tick_params(axis='y', pad=1.2)
    ax.yaxis.tick_right()


def V_vel(ax, VV, xslice, yind, grid, MaskC, state):
    '''Contour the mean alongshore section of v (cm/s) with density contours.

    VV is the mean v field, unstaggered, size (nz, ny, nx);
    ax is the corresponding handle for the axis.
    '''
    umin = -0.4
    umax = 0.4
    csU = np.linspace(umin, umax, num=20)
    # Density averaged over output records 6-9 for the overlaid isopycnals.
    density = call_rho(state, slice(6, 10), yind, xslice)
    cs_dens = [20.7, 20.8, 20.9, 21.0, 21.1, 21.2, 21.3, 21.4,
               21.5, 21.6, 21.7, 21.8, 21.9, 22.0, 22.1, 22.2]
    Uplot = np.ma.array(VV.isel(X=xslice, Yp1=yind).data, mask=MaskC[:, yind, xslice])
    mesh = ax.contourf(grid.X[xslice] / 1000, grid.Z[:75], Uplot[:75, :] * 100.0,
                       csU * 100, cmap=cmo.cm.balance)
    CS = ax.contour(grid.X[xslice] / 1000, grid.Z[:75],
                    np.ma.array(np.nanmean(density[:, :75, :].data - 1000, axis=0),
                                mask=MaskC[:75, yind, xslice]),
                    cs_dens, colors='0.3', linewidths=[0.75])
    plt.clabel(CS, [cs_dens[1], cs_dens[3]], inline=True, fmt='%.1f', inline_spacing=1)
    cbar_ax = f.add_axes([0.46, 0.37, 0.015, 0.13])
    cb = f.colorbar(mesh, cax=cbar_ax, ticks=[-40, -20, 0, 20, 40], format='%d')
    cb.ax.yaxis.set_tick_params(pad=2)
    ax.set_facecolor('#a99582')
    ax.tick_params(axis='x', pad=1.2)
    ax.tick_params(axis='y', pad=1.2)
    ax.yaxis.tick_right()


def W_vel(ax, WW, UU, VV, yslice, xslice, zind, grid, MaskC, sbdepth=-150):
    '''Plan view of w (mm/s) at level `zind` with (u, v) quivers.

    WW is the mean w field, unstaggered, size (nz, ny, nx);
    ax is the corresponding handle for the axis.
    NOTE(review): callers pass sbdepth as a positive value to match
    grid.Depth; the negative default looks inconsistent — confirm.
    '''
    umin = -0.005
    umax = 0.005
    csU = np.linspace(umin, umax, num=20)
    Uplot = np.ma.array(WW.isel(X=xslice, Y=yslice, Zl=zind).data,
                        mask=MaskC[zind, yslice, xslice])
    Uquiv = np.ma.array(UU.isel(Xp1=xslice, Y=yslice, Z=zind).data,
                        mask=MaskC[zind, yslice, xslice])
    Vquiv = np.ma.array(VV.isel(X=xslice, Yp1=yslice, Z=zind).data,
                        mask=MaskC[zind, yslice, xslice])
    mesh = ax.contourf(grid.X[xslice] / 1000, grid.Y[yslice] / 1000,
                       Uplot[:, :] * 1000.0, csU * 1000, cmap=cmo.cm.balance)
    # Shelf-break isobath for reference.
    ax.contour(grid.X[xslice] / 1000, grid.Y[yslice] / 1000,
               grid.Depth[yslice, xslice], [sbdepth], colors='0.5')
    Q = ax.quiver(grid.X[xslice][::7] / 1000, grid.Y[yslice][::6] / 1000,
                  Uquiv[::6, ::7], Vquiv[::6, ::7], color='k',
                  angles='xy', scale_units='xy', scale=1 / 7, width=0.005, headwidth=4)
    qk = ax.quiverkey(Q, 0.1, 0.85, 1, r'$1 \frac{m}{s}$', labelpos='S',
                      coordinates='axes', fontproperties={'weight': 'bold'})
    cbar_ax = f.add_axes([0.25, 0.445, 0.015, 0.13])
    cb = f.colorbar(mesh, cax=cbar_ax, ticks=[-5, -2.5, 0, 2.5, 5], format='%1.1f')
    cb.ax.yaxis.set_tick_params(pad=2)
    ax.set_facecolor('#a99582')
    ax.tick_params(axis='x', pad=1.2)
    ax.tick_params(axis='y', pad=1.2)
    ax.set_aspect(1)


def density_cs(ax, tslice, yslice, xslice, state, grid, ptracers, MaskC):
    '''Cross-shore section of tracer Tr01 with overlaid density contours.'''
    rho_min = 1020.7 - 1000
    rho_max = 1021.9 - 1000
    density = call_rho(state, tslice, yslice, xslice)
    csU2 = [20.7, 20.8, 20.9, 21.0, 21.1, 21.2, 21.3, 21.4, 21.5, 21.6, 21.7]
    tr_min = 0
    tr_max = 22
    csU = np.linspace(tr_min, tr_max, num=20)
    mesh = ax.contourf(grid.Y[yslice] / 1000, grid.Z[:56],
                       np.ma.array(np.nanmean(ptracers.Tr01[tslice, :56, yslice, xslice].data, axis=0),
                                   mask=MaskC[:56, yslice, xslice]),
                       csU, cmap=cmo.cm.dense)
    cbar_ax = f.add_axes([0.86, 0.37, 0.015, 0.13])
    cb = f.colorbar(mesh, cax=cbar_ax, ticks=[0, 3, 6, 9, 12, 15, 18, 21], format='%d')
    cb.ax.yaxis.set_tick_params(pad=2)
    CS = ax.contour(grid.Y[yslice] / 1000, grid.Z[:56],
                    np.ma.array(np.nanmean(density[:, :56, :].data - 1000, axis=0),
                                mask=MaskC[:56, yslice, xslice]),
                    csU2, colors='k', linewidths=[0.75])
    ax.yaxis.tick_right()
    #manual_locations = [(45, -50), (45, -150), (45, -250),(45,-350)]
    plt.clabel(CS, [csU2[1], csU2[3]], inline=True, fmt='%.1f', inline_spacing=1)
    ax.set_facecolor('#a99582')
    ax.tick_params(axis='x', pad=1.2)
    ax.tick_params(axis='y', pad=1.2)
    #ax.text(0.95,0.1,r'(a)',transform=ax.transAxes)


# Incoming U
def inc_U(ax, UU, yslice, color='k', label='Astoria'):
    '''Time series of the incoming (upstream) U, averaged over yslice and z.'''
    ax.axvline(4, color='0.8', linestyle=':', linewidth=2)
    Uplot = np.nanmean(np.nanmean(UU.isel(Y=yslice).data, axis=1), axis=1)
    ax.plot(np.arange(19) / 2.0, Uplot, color=color, label=label, linewidth=2)
    # FIX: tick positions and labels must have the same length
    # (4 ticks were paired with 5 labels, which raises on modern matplotlib).
    plt.yticks([0.00, 0.10, 0.20, 0.30], ('0.0', '0.1', '0.2', '0.3'))
    ax.set_ylabel(r'$U$ / ms$^{-1}$', labelpad=0.3)
    ax.set_xlabel('Days', labelpad=0.3)
    ax.tick_params(axis='x', pad=1)
    ax.tick_params(axis='y', pad=1)
    ax.set_xlim([0, 9])


def water_transports(ax, dfcan):
    '''Plot water transport through each cross-section plus their total.'''
    ax.axhline(0, color='gold')
    ax.plot(np.arange(19) / 2.0, (dfcan.Vert_water_trans_sb) / 1E4, ':', color='k', label='LID', linewidth=2)
    ax.plot(np.arange(19) / 2.0, (dfcan.CS1_water_trans) / 1E4, color='0.4', label='CS1', linewidth=2)
    ax.plot(np.arange(19) / 2.0, (dfcan.CS2_water_trans) / 1E4, color='0.6', label='CS2', linewidth=2)
    ax.plot(np.arange(19) / 2.0, (dfcan.CS3_water_trans) / 1E4, color='0.8', label='CS3', linewidth=2)
    ax.plot(np.arange(19) / 2.0, (dfcan.CS4_water_trans) / 1E4, ':', color='0.5', label='CS4', linewidth=2)
    ax.plot(np.arange(19) / 2.0, (dfcan.CS5_water_trans) / 1E4, color='k', label='CS5', linewidth=2)
    ax.plot(np.arange(19) / 2.0, (dfcan.CS6_water_trans) / 1E4, '--', color='k', label='CS6', linewidth=2)
    total = (dfcan.CS1_water_trans + dfcan.CS2_water_trans + dfcan.CS3_water_trans +
             dfcan.CS4_water_trans + dfcan.CS5_water_trans + dfcan.CS6_water_trans +
             dfcan.Vert_water_trans_sb)
    ax.plot(np.arange(19) / 2.0, total / 1E4, '--', color='mediumturquoise', label='Total', linewidth=2)
    ax.set_xlabel('Days', labelpad=0.5)
    ax.tick_params(axis='x', pad=1)
    ax.tick_params(axis='y', pad=1)
    #ax.set_ylim(-25,15)


# +
# Grid, state and datasets for Astoria
grid_file = '/data/kramosmu/results/TracerExperiments/UPW_10TR_BF2_AST/01_Ast03/gridGlob.nc'
gridA = xr.open_dataset(grid_file)
state_file = '/data/kramosmu/results/TracerExperiments/UPW_10TR_BF2_AST/01_Ast03/stateGlob.nc'
stateA = xr.open_dataset(state_file)
ptrac_file = '/data/kramosmu/results/TracerExperiments/UPW_10TR_BF2_AST/01_Ast03/ptracersGlob.nc'
ptracersA = xr.open_dataset(ptrac_file)

# Grid, state and tracers for Barkley
grid_file_b = '/data/kramosmu/results/TracerExperiments/UPW_10TR_BF4_BAR/01_Bar03/gridGlob.nc'
gridB = xr.open_dataset(grid_file_b)
state_file_b = '/data/kramosmu/results/TracerExperiments/UPW_10TR_BF4_BAR/01_Bar03/stateGlob.nc'
stateB = xr.open_dataset(state_file_b)
ptrac_file_b = '/data/kramosmu/results/TracerExperiments/UPW_10TR_BF4_BAR/01_Bar03/ptracersGlob.nc'
ptracersB = xr.open_dataset(ptrac_file_b)

fileWatA = ('/data/kramosmu/results/TracerExperiments/UPW_10TR_BF2_AST/01_Ast03/water_CS_transports.nc')
fileWatB = ('/data/kramosmu/results/TracerExperiments/UPW_10TR_BF4_BAR/01_Bar03/water_CS_transports.nc')
dfwatA = xr.open_dataset(fileWatA)
dfwatB = xr.open_dataset(fileWatB)

#RhoRef = np.squeeze(rdmds('/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/RhoRef'))
RhoRef = 999.79998779  # It is constant in all my runs, can't run rdmds

# +
# General input
nx = 616
ny = 360
nz = 104
nt = 19  # t dimension size

xslice = slice(0, 360)
xp1_slice = slice(0, 361)
yslice = slice(100, 300)

# u contourf
xind = 180
yslice_u = slice(50, 359)

# v contourf
yindA = 131
yindB = 131
xslice_v = slice(120, 240)

# w contour
xslice_w = slice(120, 240)
yslice_w = slice(120, 240)
x_qslice = slice(60, 300, 10)
y_qslice = slice(150, 280, 10)
tslice = slice(8, 16)

# incoming U
xind_U = 80
ysliceU_A = slice(130, 230)
zsliceU_A = slice(14, 19)
ysliceU_B = slice(130, 160)
zsliceU_B = slice(29, 34)

#
yslice_umean = slice(150, 267)
zslice_umean = slice(25, 50)
tslice_umean = slice(0, 19)

# plot 6
xslice_bac_calc = slice(0, 463)
yslice_bac_2d = slice(225, 350)
yslice_bac = slice(227, 315)
xslice_bac = slice(118, 463)

# Land mask: True where HFacC == 0 (closed cell).
hFacmaskedA = np.ma.masked_values(gridA.HFacC.isel(X=xslice).data, 0)
MaskCA = np.ma.getmask(hFacmaskedA)
hFacmaskedB = np.ma.masked_values(gridB.HFacC.isel(X=xslice).data, 0)
MaskCB = np.ma.getmask(hFacmaskedB)

# +
incUA, incVA = rout.unstagger(stateA.U.isel(Xp1=xind_U, Z=zsliceU_A),
                              stateA.V.isel(X=xind_U, Z=zsliceU_A))
UUA, VVA = rout.unstagger(stateA.U.isel(T=tslice, Xp1=xp1_slice),
                          stateA.V.isel(T=tslice, X=xslice))
UU_incA, VV_incA = rout.unstagger(stateA.U.isel(T=tslice_umean, Xp1=xp1_slice, Z=zslice_umean),
                                  stateA.V.isel(T=tslice_umean, X=xslice, Z=zslice_umean))

# +
incUB, incVB = rout.unstagger(stateB.U.isel(Xp1=xind_U, Z=zsliceU_B),
                              stateB.V.isel(X=xind_U, Z=zsliceU_B))
UUB, VVB = rout.unstagger(stateB.U.isel(T=tslice, Xp1=xp1_slice),
                          stateB.V.isel(T=tslice, X=xslice))
UU_incB, VV_incB = rout.unstagger(stateB.U.isel(T=tslice_umean, Xp1=xp1_slice, Z=zslice_umean),
                                  stateB.V.isel(T=tslice_umean, X=xslice, Z=zslice_umean))
# -

WWA = stateA.W.isel(T=tslice)
WWB = stateB.W.isel(T=tslice)

# +
sns.set_context('paper')
sns.set_style('white')
plt.rcParams['font.size'] = 9.0

f = plt.figure(figsize=(9.5, 8.7))  # 19.0cm = 7.4in, 115cm = 4.52in
gs = gspec.GridSpec(3, 1, hspace=0.17, height_ratios=[1, 1, 0.75])
gs0 = gspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=gs[0], wspace=0.2)
gs1 = gspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=gs[1], wspace=0.2)
gs2 = gspec.GridSpecFromSubplotSpec(1, 3, subplot_spec=gs[2], width_ratios=[1, 1.5, 1.5])

# Row 0: Astoria, row 1: Barkley, row 2: time series.
ax1 = plt.subplot(gs0[0])
W_vel(ax1, WWA.mean(dim='T'), UUA.mean(dim='T'), VVA.mean(dim='T'),
      yslice_w, xslice_w, 25, gridA, MaskCA, sbdepth=150)
ax1.set_ylabel('CS-distance / km', labelpad=1)
ax1.text(0.85, 0.9, '(a)', transform=ax1.transAxes, weight='semibold')

ax5 = plt.subplot(gs1[0])
W_vel(ax5, WWB.mean(dim='T'), UUB.mean(dim='T'), VVB.mean(dim='T'),
      yslice_w, xslice_w, 37, gridB, MaskCB, sbdepth=200)
ax5.set_xlabel('Alongshore distance / km', labelpad=1)
ax5.set_ylabel('CS-distance / km', labelpad=1)
ax5.text(0.3, 0.9, 'w / mm s$^{-1}$', transform=ax5.transAxes, weight='semibold')
ax5.text(0.05, 0.9, '(e)', transform=ax5.transAxes, weight='semibold')

ax2 = plt.subplot(gs0[1])
V_vel(ax2, VVA.mean(dim='T'), xslice_v, yindA, gridA, MaskCA, stateA)
ax2.text(0.85, 0.05, '(b)', transform=ax2.transAxes, weight='semibold')

ax6 = plt.subplot(gs1[1])
V_vel(ax6, VVB.mean(dim='T'), xslice_v, yindB, gridB, MaskCB, stateB)
ax6.set_xlabel('Alongshore distance / km', labelpad=1)
ax6.text(0.3, 0.1, 'v / cm s$^{-1}$', transform=ax6.transAxes, weight='semibold')
ax6.text(0.05, 0.05, '(f)', transform=ax6.transAxes, weight='semibold')

ax3 = plt.subplot(gs0[2])
U_vel(ax3, UUA.mean(dim='T'), yslice_u, xind, gridA, MaskCA)
ax3.text(0.85, 0.05, '(c)', transform=ax3.transAxes, weight='semibold')
r1 = patch.Rectangle((gridA.Y[130] / 1000, gridA.Z[19]),
                     (gridA.Y[230] - gridA.Y[130]) / 1000,
                     gridA.Z[14] - gridA.Z[19], fc='none', ec='gold', linewidth=2)
ax3.add_patch(r1)

ax7 = plt.subplot(gs1[2])
U_vel(ax7, UUB.mean(dim='T'), yslice_u, xind, gridB, MaskCB)
# NOTE(review): this rectangle mixes gridB.Y with gridA.Z/gridA.Y — presumably
# both grids share the same coordinates; confirm, otherwise use gridB throughout.
r2 = patch.Rectangle((gridB.Y[130] / 1000, gridA.Z[34]),
                     (gridA.Y[160] - gridA.Y[130]) / 1000,
                     gridA.Z[29] - gridA.Z[34], fc='none', ec='gold', linewidth=2)
ax7.add_patch(r2)
ax7.set_xlabel('CS-distance / km', labelpad=1)
ax7.text(0.3, 0.1, 'u / cm s$^{-1}$', transform=ax7.transAxes, weight='semibold')
ax7.text(0.05, 0.05, '(g)', transform=ax7.transAxes, weight='semibold')

ax4 = plt.subplot(gs0[3])
density_cs(ax4, tslice, yslice_u, xind, stateA, gridA, ptracersA, MaskCA)
ax4.yaxis.set_label_position("right")
ax4.set_ylabel('Depth / m', labelpad=1)
ax4.text(0.85, 0.05, '(d)', transform=ax4.transAxes, weight='semibold')

ax8 = plt.subplot(gs1[3])
density_cs(ax8, tslice, yslice_u, xind, stateB, gridB, ptracersB, MaskCB)
ax8.set_xlabel('CS-distance / km', labelpad=1)
ax8.yaxis.set_label_position("right")
ax8.set_ylabel('Depth / m', labelpad=1)
ax8.text(0.5, 0.1, r'C / $\mu$M', transform=ax8.transAxes, weight='semibold')
ax8.text(0.05, 0.05, '(h)', transform=ax8.transAxes, weight='semibold')

ax9 = plt.subplot(gs2[0])
inc_U(ax9, incUA, ysliceU_A, color='k', label='Astoria')
inc_U(ax9, incUB, ysliceU_B, color='0.5', label='Barkley')
ax9.legend(loc=0)
ax9.text(0.05, 0.9, '(i)', transform=ax9.transAxes, weight='semibold')

ax10 = plt.subplot(gs2[1])
water_transports(ax10, dfwatA)
ax10.set_ylabel('Water transport / 10$^{4}$ m$^3$s$^{-1}$', labelpad=-4)
ax10.text(0.05, 0.05, '(j)', transform=ax10.transAxes, weight='semibold')

ax11 = plt.subplot(gs2[2])
water_transports(ax11, dfwatB)
ax11.legend(loc=0, ncol=2, handletextpad=0, labelspacing=0.1,
            handlelength=1.5, columnspacing=0.1)
ax11.text(0.05, 0.05, '(k)', transform=ax11.transAxes, weight='semibold')

#plt.tight_layout()
plt.savefig('circulation.eps', format='eps', bbox_inches='tight')
# -
forPaper2/paperFigures/circulation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="USSV_OlCFKOD" # # Training a neural network on MNIST with Keras # # This simple example demonstrate how to plug TFDS into a Keras model. # # + [markdown] id="J8y9ZkLXmAZc" # Copyright 2020 The TensorFlow Datasets Authors, Licensed under the Apache License, Version 2.0 # + [markdown] id="OGw9EgE0tC0C" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/datasets/keras_example"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/datasets/blob/master/docs/keras_example.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/datasets/blob/master/docs/keras_example.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + id="TTBSvHcSLBzc" import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds tf.enable_v2_behavior() # + [markdown] id="VjI6VgOBf0v0" # ## Step 1: Create your input pipeline # # Build efficient input pipeline using advices from: # * [TFDS performance guide](https://www.tensorflow.org/datasets/performances) # * [tf.data performance guide](https://www.tensorflow.org/guide/data_performance#optimize_performance) # # + [markdown] id="c3aH3vP_XLI8" # ### Load MNIST # # Load with the following arguments: # # * `shuffle_files`: The MNIST data is only stored in a single file, but for larger datasets with multiple files on disk, it's good practice to shuffle them when training. 
# * `as_supervised`: Returns tuple `(img, label)` instead of dict `{'image': img, 'label': label}` # + id="ZUMhCXhFXdHQ" (ds_train, ds_test), ds_info = tfds.load( 'mnist', split=['train', 'test'], shuffle_files=True, as_supervised=True, with_info=True, ) # + [markdown] id="rgwCFAcWXQTx" # ### Build training pipeline # # Apply the following transormations: # # * `ds.map`: TFDS provide the images as tf.uint8, while the model expect tf.float32, so normalize images # * `ds.cache` As the dataset fit in memory, cache before shuffling for better performance.<br/> # __Note:__ Random transformations should be applied after caching # * `ds.shuffle`: For true randomness, set the shuffle buffer to the full dataset size.<br/> # __Note:__ For bigger datasets which do not fit in memory, a standard value is 1000 if your system allows it. # * `ds.batch`: Batch after shuffling to get unique batches at each epoch. # * `ds.prefetch`: Good practice to end the pipeline by prefetching [for performances](https://www.tensorflow.org/guide/data_performance#prefetching). 
# + id="haykx2K9XgiI" def normalize_img(image, label): """Normalizes images: `uint8` -> `float32`.""" return tf.cast(image, tf.float32) / 255., label ds_train = ds_train.map( normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE) ds_train = ds_train.cache() ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples) ds_train = ds_train.batch(128) ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE) # + [markdown] id="RbsMy4X1XVFv" # ### Build evaluation pipeline # # Testing pipeline is similar to the training pipeline, with small differences: # # * No `ds.shuffle()` call # * Caching is done after batching (as batches can be the same between epoch) # + id="A0KjuDf7XiqY" ds_test = ds_test.map( normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE) ds_test = ds_test.batch(128) ds_test = ds_test.cache() ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE) # + [markdown] id="nTFoji3INMEM" # ## Step 2: Create and train the model # # Plug the input pipeline into Keras. # + id="XWqxdmS1NLKA" model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128,activation='relu'), tf.keras.layers.Dense(10) ]) model.compile( optimizer=tf.keras.optimizers.Adam(0.001), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[tf.keras.metrics.SparseCategoricalAccuracy()], ) model.fit( ds_train, epochs=6, validation_data=ds_test, )
site/en-snapshot/datasets/keras_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
## used when using google colab for running the code.
from google.colab import drive
drive.mount('/content/drive')

# +
import os
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# + [markdown]
# ## Loading the dataset

# +
### google colab
print("Working Directory: ", os.getcwd())
print("List of files in working directory: ", os.listdir())
print("List of files in new directory: ", os.listdir('./drive/MyDrive/Machine_Learning/HW2'))
input_path = './drive/MyDrive/Machine_Learning/HW2/all_emails.csv'
data_set = pd.read_csv(input_path)
train_data = pd.read_csv(input_path)
print("data read complete!!")

# + [markdown]
# ## Inspecting the dataset

# +
### 1. printing data from 10 rows on the top.
print("shape of data: ", data_set.shape)
data_set.head(10)

# +
n = 10
row, col = data_set.shape
col_names = data_set.columns
not_spam = data_set.loc[data_set['spam'] == 0]
print("below are data few line from dataset which are not spam.")
not_spam[0:10]

# +
n_notSpam, _ = not_spam.shape
n_total, _ = data_set.shape
print("Not Spam: ", n_notSpam, "Spam:", n_total - n_notSpam)
# Integer percentage of spam emails; reused later for class weights.
ratio = int((n_total - n_notSpam) / n_total * 100)
print("Ratio spam to total: %d" % (ratio))

# +
# 3. Print the emails between lines 5000 and 5010 in the data set
## there are total 4260 mails in the dataset. so, considering Id 4000 to 4010 to print the data.
a = [4000 + x for x in range(11)]
data_set.loc[data_set.index.isin(a)]

# +
import nltk
import sklearn
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem.snowball import SnowballStemmer
from string import punctuation

nltk.download()

# +
## for reference tokenization and stopword implementation
temp1 = data_set.loc[data_set.index[0]]['text']
sen1 = word_tokenize(temp1)
print("Before: ", len(sen1))
sen1 = [word for word in sen1 if word not in stopwords.words('english')]
print("After: ", len(sen1))

# + [markdown]
# ## Tokenize the data

# +
#### trail room 1: check number/whitespace stripping on a toy string
import re

text = "sdttdfdf ffd. 11 fkk"
print("before: ", text)
text_nonum = re.sub(r'\d+', '', text)
# raw string: '\s' is an invalid escape in a plain string literal
text_nonum = re.sub(r'\s+', ' ', text_nonum)
print("after: ", text_nonum)

# +
# (removed: commented-out hyperlink-removal experiment)

# +
### tokenizing all the rows in dataset
thelist = []
for idx in data_set.index:
    temp = data_set.loc[data_set.index[idx]]['text']
    temp = re.sub(r'\d+', '', temp)   ## remove numbers from the string
    temp = re.sub(r'\s+', ' ', temp)  ## replace multiple spaces with single space
    thelist.append(word_tokenize(temp))

# + [markdown]
# ## Remove Punctuation and other stopwords

# +
## removing stopwords from all the rows of dataset.
thelist1 = []
the_puncs = list(punctuation)
the_puncs.append('``')

### get list after removing punctuation and stopwords.
### IMP: converts words to lower case. helpful in next steps.
for array in thelist:
    temp = [word.lower() for word in array if word not in stopwords.words('english')]
    temp1 = [word for word in temp if word not in the_puncs]
    thelist1.append(temp1)

# +
print(stopwords.words('english'))

# +
## check if it actually happen.
"""before:""", len(thelist[0]), """after:""", len(thelist1[0])

# +
from collections import Counter


def countUniqueWord(docs):
    """Return (number of unique tokens, {token: frequency}) over a list of token lists."""
    word_count = Counter()
    for doc in docs:
        word_count.update(doc)
    return len(word_count), dict(word_count)


word_count1, word_list1 = countUniqueWord(thelist1)
print("Count of unique words before : ", word_count1)

# + [markdown]
# ## Stem data

# +
from nltk.stem import WordNetLemmatizer
import copy

# +
### convert each list to root words.
stemmer = SnowballStemmer("english")
lemmatizer = WordNetLemmatizer()
thelist2 = copy.deepcopy(thelist1)  ### make a copy, just for comparison purposes.

######################
## global control department
step = 0  # 0-stemmer , 1 = lemmatizer
####################


def getRoots(thelist2, step=1):
    """Take a 2D token list and convert each token to its root, IN PLACE.

    step=0 -> Snowball stemmer; step=1 -> WordNet lemmatizer.
    Note: mutates and returns the same list object that was passed in.
    """
    ### stemmer
    if (step == 0):
        for i in range(len(thelist2)):
            for j in range(len(thelist2[i])):
                thelist2[i][j] = stemmer.stem(thelist2[i][j])
    ## lemmatizer
    if (step == 1):
        for i in range(len(thelist2)):
            for j in range(len(thelist2[i])):
                thelist2[i][j] = lemmatizer.lemmatize(thelist2[i][j])
    return thelist2


### call the function to get the root words for each subsequent tokens.
thelist2 = getRoots(thelist2, step)
word_count2, word_list2 = countUniqueWord(thelist2)
print("Count of unique words after changing to root word: ", word_count2)

# +
### just for fun: how many tokens occur exactly n times?
def countKeys(_dict, n):
    count = 0
    for x in _dict:
        if _dict[x] == n:
            count = count + 1
    return count


count1 = countKeys(word_list1, 1)
count2 = countKeys(word_list2, 1)
count1, count2

# +
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import copy

# +
# Re-join token lists into space-separated documents for the vectorizers.
thelist = copy.deepcopy(thelist2)
for i in range(len(thelist)):
    thelist[i] = " ".join(thelist[i])

# +
######################
## global control department
max_feature = 2500
####################

vectorizer = TfidfVectorizer(max_features=max_feature)
vectorizer.fit_transform(thelist)
tfidf = vectorizer.transform(thelist)
tfidf_tokens = vectorizer.get_feature_names_out()

# +
print(tfidf.shape, type(tfidf))
id_list = data_set['id'].tolist()
TFIDF_final = pd.DataFrame.sparse.from_spmatrix(data=tfidf)
TFIDF_final_ref = copy.deepcopy(TFIDF_final)
TFIDF_final_ref.columns = tfidf_tokens
TFIDF_final_ref.index = id_list
TFIDF_final_ref

# +
countvectorizer = CountVectorizer(max_features=max_feature)
countvectorizer.fit_transform(thelist)
count_vec_df = countvectorizer.transform(thelist)
count_vec_df = pd.DataFrame.sparse.from_spmatrix(data=count_vec_df)
count_tokens = countvectorizer.get_feature_names_out()
count_vec_df.columns = count_tokens
count_vec_df.index = id_list

# +
print("Count Vectorizer\n")
count_vec_df

# +
Bag_of_words = copy.deepcopy(TFIDF_final_ref)

### Split Test and Validation set
### random state = 43, accuracy = 76.995
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

y = data_set['spam'].to_list()
X_train, X_val, y_train, y_val = train_test_split(Bag_of_words, y, test_size=0.2, random_state=43)

# +
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import GridSearchCV

modelGrid = LogisticRegression()
# Class weights centred on the observed spam ratio, swept +/- 10 points.
d = {0: ratio, 1: (100 - ratio)}
parameters = []
therange = np.arange(-10, 10, 1)
for a in therange:
    dtemp = {0: 0, 1: 0}
    temp = d[0] + a
    dtemp[0] = temp
    dtemp[1] = 100 - temp
    parameters.append(dtemp)

hyperparameters = {"class_weight": parameters,
                   'C': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
                   'penalty': ['l1', 'l2']}

### grid search for hyper parameter tuning
model = GridSearchCV(modelGrid, hyperparameters, scoring='f1', cv=5)
# FIX: previously this was `model.fit(x_val, y_val)`, which (a) raised a
# NameError when the notebook ran top-to-bottom (x_val is only defined in a
# later cell) and (b) tuned hyperparameters on the held-out validation set.
# The search must be fit on the training split; X_val stays untouched for
# evaluation below.
model.fit(X_train, y_train)

# +
print("best parameters: ", model.best_params_)
print("best score: ", model.best_score_)

# +
import sklearn.metrics as sm

x_train = X_train
print("Training Data")
prediction_train = model.predict(x_train)
tn, fp, fn, tp = sm.confusion_matrix(y_train, prediction_train).ravel()
print("TN: ", tn, "FP:", fp, "FN:", fn, "TP:", tp)
recall = tp / (tp + fn)
precision = tp / (tp + fp)
# NOTE(review): fbeta_score with beta=0.5 is the F0.5 score, while manual_Z
# below is the plain F1 — the two are not expected to match.
Zscore = sm.fbeta_score(y_train, prediction_train, beta=0.5)
accuracy = (tp + tn) / (tn + tp + fn + fp)
manual_Z = (2 * recall * precision) / (recall + precision)
print("Recall: ", recall, "\nPrecision: ", precision, "\nF1score: ", Zscore,
      "\nManual F1:", manual_Z, "\nAccuracy: ", accuracy)

# +
x_val = X_val
prediction_val = model.predict(x_val)

# +
import sklearn.metrics as sm

tn, fp, fn, tp = sm.confusion_matrix(y_val, prediction_val).ravel()
print("TN: ", tn, "FP:", fp, "FN:", fn, "TP:", tp)
recall = tp / (tp + fn)
precision = tp / (tp + fp)
Zscore = sm.fbeta_score(y_val, prediction_val, beta=0.5)
accuracy = (tp + tn) / (tn + tp + fn + fp)
manual_Z = (2 * recall * precision) / (recall + precision)
print("Recall: ", recall, "\nPrecision: ", precision, "\nF1score: ", Zscore,
      "\nManual F1:", manual_Z, "\nAccuracy: ", accuracy)

# +
fpr, tpr, thresholds = sm.roc_curve(y_val, prediction_val, pos_label=1)

# +
# Getting Area under curve value
sm.auc(fpr, tpr)

# + [markdown]
# ## Evaluation

# +
print("List of files in new directory: ", os.listdir('./drive/MyDrive/Machine_Learning/HW2'))
input_path = './drive/MyDrive/Machine_Learning/HW2/eval_students_2.csv'
test_data = pd.read_csv(input_path)
print("data read complete!!")

# +
print("shape of data: ", test_data.shape)
test_data.head(10)

# +
### tokenizing all the rows in dataset
testlist = []
for idx in test_data.index:
    temp = test_data.loc[test_data.index[idx]]['text']
    temp = re.sub(r'\d+', '', temp)   ## remove numbers from the string
    temp = re.sub(r'\s+', ' ', temp)  ## replace multiple spaces with single space
    testlist.append(word_tokenize(temp))

# +
## removing stopwords from all the rows of dataset.
testlist1 = []
the_puncs_test = list(punctuation)
the_puncs_test.append('``')

### get list after removing punctuation and stopwords.
### IMP: converts words to lower case. helpful in next steps.
for array in testlist:
    temp = [word.lower() for word in array if word not in stopwords.words('english')]
    temp1 = [word for word in temp if word not in the_puncs_test]
    testlist1.append(temp1)

# +
# NOTE: getRoots mutates its argument, so testlist1 and testlist2 alias the
# same (stemmed/lemmatized) lists after this call.
testlist2 = getRoots(testlist1, step)

# +
len(testlist), len(testlist1), len(testlist2)

# +
### Not useless: join the processed tokens back into documents.
testlist3 = copy.deepcopy(testlist2)
for i in range(len(testlist3)):
    testlist3[i] = " ".join(testlist3[i])

# +
# Reuse the vectorizer fitted on the training corpus (do NOT refit here).
test_tfidf = vectorizer.transform(testlist3)

# +
print(test_tfidf.shape, type(test_tfidf))
test_id_list = test_data['id'].tolist()
test_tfidf_tokens = vectorizer.get_feature_names_out()
test_TFIDF_final = pd.DataFrame.sparse.from_spmatrix(data=test_tfidf)
test_TFIDF_final_ref = copy.deepcopy(test_TFIDF_final)
test_TFIDF_final_ref.columns = test_tfidf_tokens
test_TFIDF_final_ref.index = test_id_list
test_TFIDF_final_ref

# +
test_bag = copy.deepcopy(test_TFIDF_final_ref)
test_col_names = test_bag.columns.to_list()
x_test = test_bag
test_prediction = model.predict(x_test)

# +
# Test
a = [["generalize"]]
getRoots(a, step=0)

# +
output_path = '/content/drive/MyDrive/UW Assignments/EE P 596 : Adv Intro to ML/HW2/output_eval6.csv'
f = open(output_path, 'w')
f.write("id,spam\n")
for x in range(len(test_id_list)):
    f.write(f"{test_id_list[x]}, {test_prediction[x]}\n")
f.close()

# +
count_test = 0
for x in range(len(test_prediction)):
    if test_prediction[x] == 0:
        count_test += 1
print("count of zero: ", count_test)
Spam Detection using Logistic Regression/SpamDetection_LogisticsRegression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # Simple notebook to generate a variety of graphs and store them in the file spec. # # + pycharm={"name": "#%%\n"} import networkx as nx import random from graph import GraphIO # + [markdown] pycharm={"name": "#%% md\n"} # Generate an unattributed ten node star graph and write it. This will result in an archive called `basic_unattributed.mgraph` conatining one directory `graph_0` which in turn has as single `edgelist.csv` file # + pycharm={"name": "#%%\n"} #load default networkx graph g = nx.generators.star_graph(10) # + pycharm={"name": "#%%\n"} # create metagraph object with a single graph, no node attributes and no edge attributes #attempt dump GraphIO.dump(g, './ExampleGraphs/basic_unattributed.json') # + [markdown] pycharm={"name": "#%% md\n"} # We can also store node attributes for each graph. Next we load the graph we just created, add betweenness as a node attribute and dump it to disk again. # This will result in an archive called `basic_node_att.mgraph` with a file call `nodeatt_betweenness.json` being added to the `graph_0 directory` along with the same edgelist as above. # # + pycharm={"name": "#%%\n"} # load the star graph g, _, _, _ = GraphIO.load("./ExampleGraphs/basic_unattributed.json") # + pycharm={"name": "#%%\n"} # add betweeness attribute to nodes bb = nx.betweenness_centrality(g) nx.set_node_attributes(g, bb, "betweenness") # + pycharm={"name": "#%%\n"} # dump GraphIO.dump(g, './ExampleGraphs/basic_node_att.json') # + [markdown] pycharm={"name": "#%% md\n"} # Similarly, we can add arbitrary edge attributes to the graph. We will add a 'color' attribute to a few of the edges in the star graph. 
# This will result in an archive called `basic_attributed.mgraph` with the same structure as `basic_node_att.mgraph`, but with and new # file called `edgeatt_color.json` in the `graph_0` directory. # + pycharm={"name": "#%%\n"} # load g, edgeatts, nodeatts, graphatts = GraphIO.load('./ExampleGraphs/basic_node_att.json') att = {(0, 1): "red", (0, 2): "blue", (0, 5): "red", (0, 8): "blue"} nx.set_edge_attributes(g, att, "color") # + pycharm={"name": "#%%\n"} # dump GraphIO.dump(g, './ExampleGraphs/basic_attributed.json') # + [markdown] pycharm={"name": "#%% md\n"} # An arbitrary number of different node and edge attribute types can be added to each graph, each will result in a new `.json` file in that graph's directory. # - # We can also add weights to the graph. Weights are stored in networkx as a special edge attribute called 'weight'. The file spec writes weights, # if they exist, as a third column in `edgelist.csv`. # + pycharm={"name": "#%%\n"} for edge in g.edges: g[edge[0]][edge[1]]['weight'] =random.randint(0, 5) # + pycharm={"name": "#%%\n"} GraphIO.dump(g, './ExampleGraphs/weighted_attributed.json') # + [markdown] pycharm={"name": "#%% md\n"} # We can have multiple attributes per node or edge # + pycharm={"name": "#%%\n"} # load g, edgeatts, nodeatts, graphatts = GraphIO.load('./ExampleGraphs/weighted_attributed.json') att = {(0, 5): (2018, 2010, 2005), (0, 6): (2000,), (0, 2): (2020, 1999), } nx.set_edge_attributes(g, att, "years") GraphIO.dump(g, './ExampleGraphs/weighted_attributed_2.json') # + [markdown] pycharm={"name": "#%% md\n"} # We can also store multigraphs. 
# + pycharm={"name": "#%%\n"} g, _, _, _ = GraphIO.load('./ExampleGraphs/weighted_attributed_2.json') gm = nx.MultiGraph(g) # add new colored edges gm.add_edge(0, 1, color='green', weight=1) gm.add_edge(5, 6, key=1, color='red', weight=3) gm.add_edge(1, 5, key=1, color='blue', weight=8) gm.add_edge(6, 2, key=1, color='yellow', weight=1) gm.add_edge(0, 2, color='green', weight=2) # + [markdown] pycharm={"name": "#%% md\n"} # Now when we dump the multigraph, we will create an archive with both a `graph_0` and `graph_1` directory. Each will have an edgelist and attribute `.json` files. # # + pycharm={"name": "#%%\n"} GraphIO.dump(gm, './ExampleGraphs/weighted_attributed_multigraph.json') # + [markdown] pycharm={"name": "#%% md\n"} # when we load the multigraph again, we can convert it to a list of graphs: # + pycharm={"name": "#%%\n"} g, _, _, _ = GraphIO.load('./ExampleGraphs/weighted_attributed_multigraph.json') import matplotlib.pyplot as plt graphs = GraphIO.multigraph_to_graphs(g) fig = plt.figure() plt.subplot(1, 2, 1) nx.draw(graphs[0]) plt.subplot(1, 2, 2) nx.draw(graphs[1]) plt.show() # + pycharm={"name": "#%%\n"} g_dict = {'one': graphs[0], 'two': graphs[1]} # try converting back to multigraph mg = GraphIO.graphs_to_multigraph(g_dict) print("done")
graph_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# +
# -*- coding: utf-8 -*-
""" Convolutional Neural Network for MNIST dataset classification task.

References:
    Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
    learning applied to document recognition." Proceedings of the IEEE,
    86(11):2278-2324, November 1998.

Links:
    [MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
# -

# Data loading and preprocessing: MNIST arrives as flat 784-element vectors;
# reshape to 28x28 single-channel images so the conv layers can consume them.
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(data_dir='/tmp/mnist-data', one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])

# Building convolutional network: two conv -> max-pool -> LRN stages,
# then two tanh fully-connected layers each followed by dropout
# (0.8 is tflearn's keep probability), and a 10-way softmax output.
network = input_data(shape=[None, 28, 28, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='target')

# Training: checkpoints go to /tmp; TensorBoard summaries at verbosity 3.
model = tflearn.DNN(network, max_checkpoints=3,
                    checkpoint_path='/tmp/tflearn_mnist',
                    best_checkpoint_path='/tmp/tflearn_mnist_best',
                    tensorboard_verbose=3,
                    tensorboard_dir='/tmp/tflearn_mnist_logs/')
# NOTE(review): this resumes from a specific checkpoint and will fail if
# /tmp/tflearn_mnist-4200 does not exist -- presumably produced by an earlier
# run of this notebook; confirm before re-running from scratch.
model.load('/tmp/tflearn_mnist-4200')
model.fit({'input': X}, {'target': Y}, n_epoch=1,
          validation_set=({'input': testX}, {'target': testY}),
          snapshot_step=100, show_metric=True, run_id='convnet_mnist')
study/tflearn-mnist/.ipynb_checkpoints/tflearn-mnist-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Performing eigenvalue analysis/reconstruction in Python
# If you are new to Python, this jumps in a bit quickly. I'd suggest looking
# at Numpy for Matlab Users before reading this. The following is simply a
# brief demonstration of using Python 3.5 for eigenanalysis. A few things to
# note:
#
# 1. If you use an earlier version of Python (than 3.5), the @ operator isn't
#    defined yet. To perform matrix multiplication, you need to use
#    numpy.dot(A, B) in place of A@B. That requires `import numpy` first.
# 2. I demonstrate for a non-symmetric matrix. For a symmetric matrix, you
#    should use eigh. The H in eigh means Hermitian, and is a more general
#    definition than symmetric. Using eigh leverages the properties of
#    Hermitian matrices in the solution process, resulting in potentially
#    faster and more accurate results than the more general eig code. For a
#    small matrix this is irrelevant, but it matters for substantial problems.
# 3. I show a couple of tips later that matter for larger matrices (avoiding
#    the inverse). Be aware of them.

# Import the tools: numpy, and scipy's linear algebra package.
import numpy as np
import scipy.linalg as la

# We are going to solve
#     A r = r v
# where r holds the right eigenvectors (as columns) and v the eigenvalues.
# We also request the left eigenvectors for later use.
A = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]])  # plain ndarray -- deliberately avoiding np.matrix

# left=True is required: eig does not compute left eigenvectors by default.
(v, l, r) = la.eig(A, left=True)

# eig returns the eigenvalues as a 1-D array; the reconstruction identities
# below need them as a diagonal matrix.
v = np.diag(v)
print(l)
print(v)
print(r)
v

# These should be identical based on the eigen equation,
#     v == inv(r) @ A @ r,
# to the default precision.
print(v)
print(la.inv(r) @ A @ r)

# In reality, one should never use the inverse function unless the actual
# answer wanted is the inverse itself. What you typically want is
# inv(matrix) @ other, which is the solution of a linear system -- use
# `solve` for that. Arguably no better here, but illustrative; for a larger
# problem the benefit is easier to demonstrate.
la.solve(r, A) @ r

# "Rebuilding" the original matrix from the eigensolution:
#     A = r @ v @ inv(r)
# (This expression was garbled in the original file; restored from the
# standard eigendecomposition identity, matching the inverse-free form below.)
r @ v @ la.inv(r)

# Avoiding the inverse is a bit uncomfortable here, but a bit of doodling
# yields
#     A B^{-1} = ((A B^{-1})^T)^T = ((B^{-1})^T A^T)^T
# and noting that (B^{-1})^T = (B^T)^{-1}, we can use:
r @ la.solve(r.T, v.T).T

# The left eigenvectors are simply the right eigenvectors of the transpose,
#     A^T l = l v.
# Below I lazily verify that with the left eigenvectors, using an inverse.
la.inv(l) @ A.T @ l
Eigen analysis and reconstruction in Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns; sns.set() from matplotlib.ticker import NullFormatter from sklearn import manifold, datasets from time import time from openTSNE import TSNE as oTSNE from MulticoreTSNE import MulticoreTSNE as mTSNE # - data = pd.read_csv("features_all_labels.csv") data.head() data.columns x_cols = ['f_zero_crossings_sum', 'f_tempo', 'f_chroma_stft_mean', 'f_chroma_stft_var', 'f_chroma_cqt_mean', 'f_chroma_cqt_var', 'f_chroma_cens_mean', 'f_chroma_cens_var', 'f_melspec_mean', 'f_melspec_var', 'f_mfcc_3_mean', 'f_mfcc_3_var', 'f_mfcc_5_mean', 'f_mfcc_5_var', 'f_mfcc_8_mean', 'f_mfcc_8_var', 'f_mfcc_10_mean', 'f_mfcc_10_var', 'f_mfcc_11_mean', 'f_mfcc_11_var', 'f_mfcc_12_mean', 'f_mfcc_12_var', 'f_mfcc_13_mean', 'f_mfcc_13_var', 'f_mfcc_14_mean', 'f_mfcc_14_var', 'f_mfcc_15_mean', 'f_mfcc_15_var', 'f_mfcc_16_mean', 'f_mfcc_16_var', 'f_mfcc_17_mean', 'f_mfcc_17_var', 'f_mfcc_18_mean', 'f_mfcc_18_var', 'f_mfcc_19_mean', 'f_mfcc_19_var', 'f_mfcc_20_mean', 'f_mfcc_20_var', 'f_rms_mean', 'f_spec_centr_mean', 'f_spec_centr_var', 'f_spec_bandw_mean', 'f_spec_bandw_var', 'f_spec_contr_mean', 'f_spec_contr_var', 'f_spec_flat_mean', 'f_spec_flat_var', 'f_spec_roll_mean', 'f_spec_roll_var', 'f_tonnetz_mean', 'f_tonnetz_var', 'f_poly_0_mean', 'f_poly_0_var', 'f_poly_1_mean', 'f_poly_1_var', 'f_poly_2_mean', 'f_poly_2_var', 'f_tempogram_mean', 'f_tempogram_var', 'f_fourier_tempogram_mean_real', 'f_fourier_tempogram_mean_imag', 'f_fourier_tempogram_var', 'f_harm_mean', 'f_harm_var', 'f_perc_mean', 'f_perc_var'] x = data.loc[:,x_cols] x y_instrument = data.instrument.replace({'flute': 0, 'sarod': 1, 'sitar': 2}) y_instrument kw ={'flute': 0, 'sarod': 1, 'sitar': 2} 
kw_inv = {v: k for k, v in kw.items()} kw_inv y_emotion = data.emotion.replace({'anxiety': 0, 'calm': 1, 'sad': 2, 'happy': 3}) y_emotion data.emotion.value_counts() embeddings = manifold.TSNE(perplexity=100).fit_transform(x) vis_x = embeddings[:, 0] vis_y = embeddings[:, 1] (fig, subplots) = plt.subplots(1, 1, figsize=(15, 8)) scatter = plt.scatter(vis_x, vis_y, c=y_instrument, cmap=plt.cm.get_cmap("jet", 10), marker='.') plt.clim(-0.5, 2.5) handles, labels = scatter.legend_elements() labels = ['flute', 'sarod', 'sitar'] legend1 = subplots.legend( handles, labels, loc="lower left", title="Instruments", fontsize='x-large', title_fontsize='x-large' ) legend1.set_label("string") plt.axis('off') plt.savefig('TSNE-instrument.png') (fig, subplots) = plt.subplots(1, 1, figsize=(15, 8)) scatter = plt.scatter(vis_x, vis_y, c=y_emotion, cmap=plt.cm.get_cmap("jet", 10), marker='.') handles, labels = scatter.legend_elements() labels = ['anxiety', 'calm', 'sad', 'happy'] legend1 = subplots.legend( handles, labels, loc="lower left", title="Emotions", fontsize='x-large', title_fontsize='x-large' ) plt.clim(-0.5, 4.5) plt.axis('off') plt.savefig('TSNE-emotion.png')
notebooks/TSNE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scratch tests for the quadrant-photodiode (QPD) model: build a QPD, apply a
# Gaussian laser spot, then prototype the detector/gap mask arithmetic by hand.

# +
import os
import sys
sys.path.insert(0, os.path.abspath('../'))  # make the project packages importable

import numpy as np
import matplotlib.pyplot as plt

import phytools
from quadrantphotodiode import quadrantphotodiode
# -

# Circular detector, 5 mm across, with a 40 um dead gap between quadrants.
qpd = quadrantphotodiode.QPD(size=5e-3, gap=40e-6, shape='circular')

x, y = np.meshgrid(qpd.detector_x, qpd.detector_y)
# Elliptical Gaussian beam centred on the detector (FWHM 1 mm x 2 mm).
qpd.laser_intensity = phytools.functions.gaussian2d(x=x, y=y, a=1, x0=0, y0=0,
                                                    fwhm_x=1e-3, fwhm_y=2e-3,
                                                    offset=0)
plt.imshow(qpd.detector_intensity)
q1, q2, q3, q4 = qpd.quadrants
qpd.quadrants
qpd.x_pos
np.shape(np.zeros((1000,1000)))

# +
# Stand-alone re-derivation of the detector mask on an n x n grid.
diameter = 5e-3   # detector diameter [m]
gap = 40e-6       # dead-gap width between quadrants [m]
n = 1000          # grid resolution (cells per axis)
roundoff = 1e-14  # guard against floating-point equality at cell edges

# The maximum possible gap size is sqrt(2)*Radius of detector;
# raise an exception if this condition is violated:
if gap >= np.sqrt(2) * diameter/2:
    raise Exception('The gap is too large!')

# delta is the cell size; each grid point sits at the centre of its cell.
delta = diameter / n
y, x = np.mgrid[-diameter / 2 + delta / 2: diameter / 2 + delta / 2: delta,
                -diameter / 2 + delta / 2: diameter / 2 + delta / 2: delta]

# This computes the distance of each grid point from the origin
# and then we extract a masked array of points where r_sqr is less
# than the distance of each grid point from the origin:
r_sqr = x ** 2 + y ** 2
# `inside` is True for cells whose centre lies within the detector circle.
inside = np.ma.getmask(np.ma.masked_where(r_sqr <= (diameter / 2) ** 2, x))

# This portion takes care of masking out elements of the detector where
# the gap exists. It returns an array of light intensity over the detector.
#
# NOTE(review): despite the name, `all_dead` is True for cells that are *not*
# entirely inside the gap cross (both |x|+delta/2 and |y|+delta/2 exceed the
# gap half-width), and it is later used directly as gap_mask to *keep* live
# cells. The name reads inverted -- confirm intent.
all_dead = (np.abs(x) + delta / 2 - roundoff > gap / 2) \
           & (np.abs(y) + delta / 2 - roundoff > gap / 2)
# Cells straddling the x-gap edge only (fully outside the y strip): these are
# weighted below by the live fraction of their width.
partial_dead_x_only = (np.abs(x) + delta / 2 - roundoff > gap / 2) & \
                      (np.abs(x) - delta / 2 - roundoff < gap / 2) & \
                      (np.abs(y) - delta / 2 - roundoff > gap / 2)
# Same for cells straddling the y-gap edge only.
partial_dead_y_only = (np.abs(y) + delta / 2 - roundoff > gap / 2) & \
                      (np.abs(y) - delta / 2 - roundoff < gap / 2) & \
                      (np.abs(x) - delta / 2 - roundoff > gap / 2)
# Fractional live width of each edge-straddling cell (dimensionless, 0..1).
partial_dead_x_or_y = (1 / delta) * (np.abs(x) + delta / 2 - gap / 2) * partial_dead_x_only + \
                      (1 / delta) * (np.abs(y) + delta / 2 - gap / 2) * partial_dead_y_only
# Corner cells straddling both gap edges.
# NOTE(review): the weight squares the x overlap only; for a corner cell the
# live area fraction would presumably be the product of the x and y overlap
# fractions, (|x|+d/2-g/2)*(|y|+d/2-g/2)/delta^2 -- verify. Also note the
# boolean factor repeats two of its conditions (harmless, but looks like a
# copy-paste slip).
partial_dead_x_and_y = (1 / delta ** 2) * (np.abs(x) + delta / 2 - gap / 2) ** 2 * \
                       (
                           (np.abs(x) + delta / 2 - roundoff > gap / 2) &
                           (np.abs(x) - delta / 2 - roundoff < gap / 2) &
                           (np.abs(y) + delta / 2 - roundoff > gap / 2) &
                           (np.abs(y) + delta / 2 - roundoff > gap / 2) &
                           (np.abs(y) - delta / 2 - roundoff < gap / 2) &
                           (np.abs(x) + delta / 2 - roundoff > gap / 2)
                       )
gap_mask = all_dead  # not strictly needed, but
# Combine the two partial-coverage fields; cells that are not partial get
# weight 1 so multiplication below leaves them untouched.
partial_mask = partial_dead_x_or_y + partial_dead_x_and_y
partial_mask[partial_mask == 0] = 1
# Effective sensitive area per cell: circle mask x gap mask x fractional
# coverage x cell area.
detector_matrix = inside * gap_mask * partial_mask * delta ** 2
# -

plt.imshow(x)
x[0,:]
y[:,0]
plt.imshow((all_dead*inside).astype(int))
all_dead.astype(int)
tests/tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Scaling Analysis # # Author: <NAME> (<EMAIL>) # # # All this is using the taucmdr python libraries from paratools # http://taucommander.paratools.com/ # # ## Imports # This section imports necessary libraies, the metrics.py and utilities.py files and sets up the window. # + # A couple of scripts to set the environent and import data from a .tau set of results from utilities import * from metrics import * # Plotting, notebook settings: # %matplotlib inline #plt.rcParams.update({'font.size': 16}) import numbers from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) pd.set_option('display.float_format', lambda x: '%.2e' % x) pd.set_option('display.max_columns',100) pd.set_option('max_colwidth', 70) import copy # - # ## Getting Data # # TAU Commander uses TAU to run the application and measure it using runtime sampling techniques (similar to Intel VTune). Many customization options are available. For example, we may consider each function regardless of calling context, or we may decide to enable callpath profiling to see each context separately. # # From the talapas_scaling application the following experiments are available. These use Talapas (with 28 thread Broadwell processors) and the build-ce (realistic) option for mkFit. The first six experiments use the --num-thr option to set the thread count which is intended to perform threading within the events. the last two add the --num-ev-thr option to set the event threads, so that all threads are used to process events in parallel and each event is processed by a single thread. 
# * manual_scaling_Large_talapas # * manual_scaling_Large_talapas_fullnode # * manual_scaling_TTbar70_talapas # * manual_scaling_TTbar70_talapas_fullnode # * manual_scaling_TTbar35_talapas # * manual_scaling_TTbar35_talapas_fullnode # * ev_thr_scaling_Large_talapas # * ev_thr_scaling_Large_talapas_fullnode # # Additionally available in the cori_scaling application are the following. These were run on NERSC's Cori on the KNL with the default memory settings (quad - 1 NUMA domain, cache - MCDRAM as direct mapped cache). See http://www.nersc.gov/users/computational-systems/cori/running-jobs/advanced-running-jobs-options/ for more info on the KNL modes. Similar to the talapas scaling they use the build-ce option and threading within each event. # * manual_scaling_TTbar35 # * manual_scaling_TTbar70 # * manual_scaling_Large # * mixed_thr_scaling_Large # # ### Importing Scaling Data # Here we import the data. In this case we are using Cori data from the experiments with the threads working within each event using the TTbar35 file. Note that this box will take 10 or more minutes to run; please go enjoy a coffee while you wait. 
# + application = "talapas_scaling" # experiment = "manual_scaling_TTbar70_talapas" # experiment = "manual_scaling_Large_talapas" # experiment = "ev_thr_scaling_Large_talapas" # application = "talapas_no_throttle_scaling" # experiment = "manual_scaling_TTbar70_talapas_fullnode" experiment = "0" # application = "cori_scaling" # experiment = "manual_scaling_TTbar35" # experiment = "manual_scaling_TTbar70" # experiment = "manual_scaling_Large" # experiment = "mixed_thr_scaling_Large" path = ".tau/" + application + "/" + experiment + "/" # note that this function takes a long time to run, so only rerun if you must metric_data = get_pandas(path) # metric_data = get_pandas_scaling(path, callpaths=False) print(metric_data.keys()) # + metric_data['PAPI_TLB_DM'].sort_values(by='Inclusive',ascending=False).head(10) # + if (application == "talapas_scaling") or (application == "talapas_no_throttle_scaling"): metric_data = remove_erroneous_threads(metric_data, [1, 2, 8, 16, 32, 48, 56]) elif application == "cori_scaling": metric_data = remove_erroneous_threads(metric_data, [1, 4, 8, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256]) print metric_data.keys() for kt in metric_data: print_available_metrics(metric_data[kt]) print " " # - # #### A list of metrics print_available_metrics(metric_data[48]) # #### Metric metadata print_metadata(metric_data[8]) # ## Adding metrics # # metrics are available in metrics.py. At this time the following can be added: # * add_IPC(metrics) - Instructions per Cycle # * add_CPI(metrics) - Cycles per instruction # * add_VIPC(metrics) - vector instructions per cycle # * add_VIPI(metrics) - vector instructions per instruction (i.e. 
fraction of total) # * add_L1_missrate(metrics) - miss rate for L1 cache # # for scaling data please use the add_metric_to_scaling_data(data, metric_func) function to add a metric # # Here we add some predeefined metrics and print the top 10 functions with the best IPC # + add_metric_to_scaling_data(metric_data, add_CPI) add_metric_to_scaling_data(metric_data, add_IPC) add_metric_to_scaling_data(metric_data, add_L1_missrate) add_metric_to_scaling_data(metric_data, add_L2_missrate) if application == 'cori_scaling': llc = True else: llc = False add_metric_to_scaling_data(metric_data, add_L3_missrate, llc) print_available_metrics(metric_data, scaling=True) metric_data[2]['DERIVED_IPC'].sort_values(by='Inclusive',ascending=False).head(10) # - # ## Scaling Results # # In this section we demo some scaling results with several different metrics. # # We use the scaling plot function to plot the data vs thread count. # scaling_plot(data, inclusive=True, plot=True, function="\[SUMMARY\] .TAU application$", metric='PAPI_TOT_CYC', max=False) # * data = the full dictionary of scaling data # * inclusive = determines if the inclusive data or exclusive data will be used # * plot = true makes a plot false does not # * function = the string that will be searched for to plot. Default looks at the whole application # * metric = the metric choosen from the above list # * max = use the max value or average value across the threads # ### Scaling with total cycles vs the thread count # Here we plot the cycle count for each thread count as a proxy for execution time. We use the max cycle count rather than the average as this number will limit the time of execution. thread_list, tot_cyc_list = scaling_plot(metric_data, function="\[SUMMARY\] .TAU application$", max=True, metric='PAPI_TOT_CYC', inclusive=True) # ### Cycles per thread for each thread count # Here we show load balancing with a series of plots showing the cycle count per thread. 
We have one plot for each thread count used # + metric_data = remove_erroneous_threads(metric_data, [1, 2, 8, 16, 48, 56]) thread_list = sorted(metric_data.keys()) thread_cyc_data = get_thread_level_metric_scaling(metric_data, metric='PAPI_TOT_CYC', inclusive=False) for kt in thread_list: print kt data = list(thread_cyc_data[kt]) matplotlib.pyplot.bar(range(len(data)), data) matplotlib.pyplot.ylim(ymax=50000000000) matplotlib.pyplot.show() # - # ### L1 Missrate vs thread count # Similar to above these cells show the L1 missrates. In this case we want to get the plotting data for L1 acceses and misses but comupte the miss rate before plotting, so we set plot=False # + thread_list, L1A_data = scaling_plot(metric_data, plot=False, metric='PAPI_LST_INS') thread_list, L1M_data = scaling_plot(metric_data, plot=False, metric='PAPI_L1_TCM') L1_MR_list = [L1M_data[i] / L1A_data[i] for i in range(len(thread_list))] plt = matplotlib.pyplot.plot(thread_list, L1_MR_list) # - # ### L1 Miss rate by each thread of each thread count # + thread_L1A_data = get_thread_level_metric_scaling(metric_data, metric='PAPI_LST_INS') thread_L1M_data = get_thread_level_metric_scaling(metric_data, metric='PAPI_L1_TCM') MR_data = {} for kt in thread_list: # print(thread_L1M_data[kt]) # print(thread_L1A_data[kt]) MR_data[kt] = thread_L1M_data[kt] / thread_L1A_data[kt] for kt in thread_list: print kt data = list(MR_data[kt]) matplotlib.pyplot.bar(range(len(data)), data) matplotlib.pyplot.ylim(ymax=0.05) matplotlib.pyplot.show() # - # ### L1 Top 10 bad miss rates L1_data = select_metric_from_scaling(metric_data, 'DERIVED_L1_MISSRATE') L1_MR_dict = {} for n_thr in thread_list: L1_MR_dict[n_thr] = filter_libs_out(L1_data[n_thr]).sort_values(by='Exclusive',ascending=False)[["Exclusive"]] print thread_list # + THREAD_COUNT = 2 # L1_MR_dict[THREAD_COUNT].head(10) get_func_level_metric(L1_MR_dict[THREAD_COUNT], avg=True).head(20) # + THREAD_COUNT = 8 # L1_MR_dict[THREAD_COUNT].head(10) 
get_func_level_metric(L1_MR_dict[THREAD_COUNT], avg=True).head(20) # + THREAD_COUNT = 48 # L1_MR_dict[THREAD_COUNT].head(10) get_func_level_metric(L1_MR_dict[THREAD_COUNT], avg=True).head(20) # - # ### L2 Missrate vs thread count # Similar to above these cells show the L2 missrates. # + thread_list, L2A_data = scaling_plot(metric_data, plot=False, metric='PAPI_L2_TCA') thread_list, L2M_data = scaling_plot(metric_data, plot=False, metric='PAPI_L2_TCM') L2_MR_list = [L2M_data[i] / L2A_data[i] for i in range(len(thread_list))] plt = matplotlib.pyplot.plot(thread_list, L2_MR_list) # + thread_L2A_data = get_thread_level_metric_scaling(select_metric_from_scaling(metric_data, 'PAPI_L2_TCA')) thread_L2M_data = get_thread_level_metric_scaling(select_metric_from_scaling(metric_data, 'PAPI_L2_TCM')) L2_MR_data = {} for kt in thread_list: L2_MR_data[kt] = thread_L2M_data[kt] / thread_L2A_data[kt] for kt in thread_list: print kt data = list(L2_MR_data[kt]) matplotlib.pyplot.bar(range(len(data)), data) matplotlib.pyplot.ylim(ymax=1) matplotlib.pyplot.show() # - # ### L2 Top 10 bad miss rates # + L2_data = select_metric_from_scaling(metric_data, 'DERIVED_L2_MISSRATE') L2_MR_dict = {} for n_thr in thread_list: L2_MR_dict[n_thr] = filter_libs_out(L2_data[n_thr]).sort_values(by='Exclusive',ascending=False)[["Exclusive"]] print thread_list # - THREAD_COUNT = 2 get_func_level_metric(L2_MR_dict[THREAD_COUNT], avg=True).head(20) # ### L3 Missrate vs thread count # Similar to above these cells show the L3 missrates. 
# + if application == 'talapas_scaling' or application == 'talapas_no_throttle_scaling': thread_list, LLA_data = scaling_plot(metric_data, plot=False, metric='PAPI_L3_TCA') thread_list, LLM_data = scaling_plot(metric_data, plot=False, metric='PAPI_L3_TCM') else: thread_list, LLA_data = scaling_plot(metric_data, plot=False, metric='PAPI_NATIVE_LLC_REFERENCES') thread_list, LLM_data = scaling_plot(metric_data, plot=False, metric='PAPI_NATIVE_LLC_MISSES') LL_MR_list = [LLM_data[i] / LLA_data[i] for i in range(len(thread_list))] plt = matplotlib.pyplot.plot(thread_list, LL_MR_list) # + if application == 'talapas_scaling' or application == 'talapas_no_throttle_scaling': thread_LLA_data = get_thread_level_metric_scaling(metric_data, metric='PAPI_L3_TCA') thread_LLM_data = get_thread_level_metric_scaling(metric_data, metric='PAPI_L3_TCM') else: thread_LLA_data = get_thread_level_metric_scaling(metric_data, metric='PAPI_NATIVE_LLC_REFERENCES') thread_LLM_data = get_thread_level_metric_scaling(metric_data, metric='PAPI_NATIVE_LLC_MISSES') LL_MR_data = {} for kt in thread_list: LL_MR_data[kt] = thread_LLM_data[kt] / thread_LLA_data[kt] def thread_bar_plots(data_dict, t_list, y=-1): for kt in t_list: print "Thread Count: %d" % kt data = list(data_dict[kt]) matplotlib.pyplot.bar(range(len(data)), data) if y != -1: matplotlib.pyplot.ylim(ymax=y) matplotlib.pyplot.show() thread_bar_plots(LL_MR_data, thread_list, 1) # - # ### L3 Top 10 bad miss rates L3_data = select_metric_from_scaling(metric_data, 'DERIVED_L3_MISSRATE') L3_MR_dict = {} for n_thr in thread_list: L3_MR_dict[n_thr] = filter_libs_out(L3_data[n_thr]).sort_values(by='Inclusive',ascending=False)[["Inclusive"]] print thread_list THREAD_COUNT = 2 L3_MR_dict[THREAD_COUNT].head(10) # + def scaling_plot_2(data, inclusive=True, plot=True, function="SelectHitInd", metric='DERIVED_L3_MISSRATE', max=False): ''' data is the whole scaling data function is what to search in the call-path please use regular functions default looks 
at the whole application metric is the metric to plot returns lists of threads and metrics per thread (i.e. data to plot) ''' if inclusive: which='Inclusive' else: which='Exclusive' metric_data = select_metric_from_scaling(data, metric) thread_list = sorted(metric_data.keys()) if max: data_list = [metric_data[kt][metric_data[kt].index.get_level_values('region').str.contains(function)][which].max() for kt in thread_list] else: # cause TAU has 2 of everything average is half data_list = [metric_data[kt][metric_data[kt].index.get_level_values('region').str.contains(function)][which].sum()/(2*kt) for kt in thread_list] if plot: plt = matplotlib.pyplot.plot(thread_list, data_list) return thread_list, data_list t,d = scaling_plot_2(metric_data, inclusive=True, plot=True, function="MultHelixPropTransp", metric='DERIVED_L1_MISSRATE', max=False) print d[8] t,d = scaling_plot_2(metric_data, inclusive=True, plot=True, function="MultHelixPropTransp", metric='DERIVED_L1_MISSRATE', max=True) print d[8] # 0.0002159764720002364 # 0.024189364864026477 # - # ### Resource Stalls vs thread count # Similar to above these cells show the Resource Stalls. In this case we have nothing to compute, so we simply call the function. Future work includes exploring the different types of stalls. 
thread_list, res_stall_data = scaling_plot(metric_data, metric='PAPI_RES_STL')

thread_stall_data = get_thread_level_metric_scaling(metric_data, metric='PAPI_RES_STL')
thread_bar_plots(thread_stall_data, thread_list, 4000000000)

# Correlate every collected metric against every other one for the 56-thread run.
alldata = combine_metrics_2(copy.deepcopy(metric_data[56]), 'Inclusive')
cm = sns.light_palette("yellow", as_cmap=True)
correlations_pearson = alldata.corr('pearson').fillna(0)  # Other methods: 'kendall', 'spearman'
correlations_pearson.style.format("{:.2%}").background_gradient(cmap=cm)


# +
def combine_metrics_2(metric_dict, inc_exc='Inclusive'):
    """Flatten one run's per-metric DataFrames into a single wide DataFrame.

    :param metric_dict: mapping of metric name -> TAU profile DataFrame.
        Mutated in place: non-derived metrics get their outer index level
        dropped so their rows align — pass a deep copy.
    :param inc_exc: which column to keep, 'Inclusive' (default) or 'Exclusive'
    :return: DataFrame with one column per metric
    """
    todrop = 'Exclusive' if inc_exc == 'Inclusive' else 'Inclusive'
    # Align indices: raw PAPI metrics carry an extra index level that the
    # derived metrics (and METADATA) do not have.
    for m in metric_dict:
        if m != 'METADATA' and 'DERIVED' not in m:
            metric_dict[m].index = metric_dict[m].index.droplevel()
    alldata = metric_dict['PAPI_TOT_CYC'].copy().drop(['Calls', 'Subcalls', todrop, 'ProfileCalls'], axis=1)
    alldata['PAPI_TOT_CYC'] = alldata[inc_exc]
    alldata.drop([inc_exc], axis=1, inplace=True)
    for x in metric_dict:
        if x in ['PAPI_TOT_CYC', 'METADATA']:
            continue
        alldata[x] = metric_dict[x][inc_exc]
    return alldata


data = copy.deepcopy(metric_data)
THREAD_COUNT = 56
metric = 'DERIVED_L1_MISSRATE'
# data[8]['PAPI_TOT_CYC'].sort_values(by='Inclusive',ascending=False).head(10)
alldata = combine_metrics_2(data[THREAD_COUNT], inc_exc='Inclusive')
# Keep only total cycles and the metric under study.
for m in metric_data[THREAD_COUNT]:
    if m not in ('METADATA', 'PAPI_TOT_CYC', metric):
        alldata.drop([m], axis=1, inplace=True)
alldata.sort_values(by='PAPI_TOT_CYC', ascending=False).head(20)
# -
mictest/ScalingAnalysis-2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline phrases = ['We love you, Guido! You are the best ever!', 'Python is my favorite langage.', 'Mother Teresa cared for the poor.', 'Why is he evil? He lacks empathy.', 'The death of democracy is a traumatic nightmare.'] from sentiment_score import (SentimentScore, create_scores_df, plot_sentiment_scores) example = SentimentScore(phrases[0]) example example.score scores = create_scores_df(phrases) scores (fig, ax) = plot_sentiment_scores(scores)
VaderSentimentDemo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

# +
import sys
import copy
import numpy as np

sys.path.append('./../..')
from PointCloudUtils import op3, load_ply_as_pc, visualize_pc, measure_pc_centroid, adjust_pc_coords, radian2degree, m2mm

# +
import math


# Checks if a matrix is a valid rotation matrix.
def is_rotation_matrix(R: np.array) -> bool:
    """
    Check whether R is a valid rotation matrix, i.e. orthonormal: R^T.R == I
    :param R: a matrix of 3x3
    :return: a boolean, True when ||I - R^T.R|| < 1e-6
    """
    Rt = np.transpose(R)
    should_be_identity = np.dot(Rt, R)
    I = np.identity(3, dtype=R.dtype)
    n = np.linalg.norm(I - should_be_identity)
    return n < 1e-6


def rotation_matrix_to_euler_angles(R):
    """
    Measure rotations around x, y and z from transformation matrix
    :param R: a rotation matrix
    :return: an array of 3 values that describe rotations around x, y and z axis, unit is "radian"
    """
    assert (is_rotation_matrix(R))
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    # sy ~ 0 means cos(y) ~ 0: gimbal-lock case, fall back to the singular formulas
    singular = sy < 1e-6
    if not singular:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0
    return np.array([x, y, z])
# -


def execute_global_registration(source_down: op3.PointCloud, target_down: op3.PointCloud,
                                source_fpfh: op3.PointCloud, target_fpfh: op3.PointCloud,
                                voxel_size: float, verbose=False):
    """
    Do global matching, find gross transformation from source to target
    :param source_down, target_down: 2 objects of Open3D, that are point clouds of source and target after down-sampling
    :param source_fpfh, target_fpfh: 2 objects of Open3D, that are point cloud features of source and target
    :param voxel_size: a float value, that is how sparse you want the sample points is
    :param verbose: a boolean value, display notification and visualization when True and no notification when False
    :return: a transformation object
    """
    distance_threshold = voxel_size * 1.5
    if verbose:
        print(":: RANSAC registration on downsampled point clouds.")
        print("   Since the downsampling voxel size is %.3f," % voxel_size)
        print("   we use a liberal distance threshold %.3f." % distance_threshold)
    result = op3.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
        op3.TransformationEstimationPointToPoint(False), 4,
        [op3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
         op3.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
        op3.RANSACConvergenceCriteria(4000000, 500))
    return result


def execute_fast_global_registration(source_down: op3.PointCloud, target_down: op3.PointCloud,
                                     source_fpfh: op3.PointCloud, target_fpfh: op3.PointCloud,
                                     voxel_size: float, verbose=False):
    """
    Find registration to transform source point cloud to target point cloud
    :param source_down, target_down: 2 objects of Open3D, that are point clouds of source and target after down-sampling
    :param source_fpfh, target_fpfh: 2 objects of Open3D, that are point cloud features of source and target
    :param voxel_size: a float value, that is how sparse you want the sample points is
    :param verbose: a boolean value, display notification and visualization when True and no notification when False
    :return: a transformation object
    """
    distance_threshold = voxel_size * 0.5
    if verbose:
        print(":: Apply fast global registration with distance threshold %.3f" % distance_threshold)
    result = op3.registration_fast_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh,
        op3.FastGlobalRegistrationOption(maximum_correspondence_distance=distance_threshold))
    return result


from PointCloudUtils import sample_point_cloud_feature, refine_registration, visualize_registration


def global_icp(source: op3.PointCloud, target: op3.PointCloud, voxel_size=0.005, verbose=False):
    """
    Find registration to transform source point cloud to target point cloud
    :param source, target: 2 objects of Open3D, that are point clouds of source and target
    :param voxel_size: a float value, that is how sparse you want the sample points is
    :param verbose: a boolean value, display notification and visualization when True and no notification when False
    :return: a transformation object
    """
    if verbose:
        visualize_registration(source=source, target=target, transformation=np.identity(4))  # visualize point cloud
    # downsample data
    source_down, source_fpfh = sample_point_cloud_feature(point_cloud=source, voxel_size=voxel_size, verbose=verbose)
    target_down, target_fpfh = sample_point_cloud_feature(point_cloud=target, voxel_size=voxel_size, verbose=verbose)
    # 1st: gross matching(RANSAC)
    result_ransac = execute_global_registration(source_down=source_down, target_down=target_down,
                                                source_fpfh=source_fpfh, target_fpfh=target_fpfh,
                                                voxel_size=voxel_size, verbose=verbose)
    if verbose:
        visualize_registration(source=source_down, target=target_down, transformation=result_ransac.transformation)
    # 2nd: fine-tune matching(ICP)
    result_icp = refine_registration(source=source_down, target=target_down,
                                     voxel_size=voxel_size, gross_matching=result_ransac)
    if verbose:
        visualize_registration(source=source_down, target=target_down, transformation=result_icp.transformation)
    return result_icp


def fast_global_icp(source: op3.PointCloud, target: op3.PointCloud, voxel_size=0.005, verbose=False):
    """
    Find registration to transform source point cloud to target point cloud
    (same pipeline as global_icp, but uses fast global registration instead of RANSAC)
    :param source, target: 2 objects of Open3D, that are point clouds of source and target
    :param voxel_size: a float value, that is how sparse you want the sample points is
    :param verbose: a boolean value, display notification and visualization when True and no notification when False
    :return: a transformation object
    """
    if verbose:
        visualize_registration(source=source, target=target, transformation=np.identity(4))  # visualize point cloud
    # downsample data
    source_down, source_fpfh = sample_point_cloud_feature(point_cloud=source, voxel_size=voxel_size, verbose=verbose)
    target_down, target_fpfh = sample_point_cloud_feature(point_cloud=target, voxel_size=voxel_size, verbose=verbose)
    # 1st: gross matching(RANSAC)
    result_ransac = execute_fast_global_registration(source_down=source_down, target_down=target_down,
                                                     source_fpfh=source_fpfh, target_fpfh=target_fpfh,
                                                     voxel_size=voxel_size, verbose=verbose)
    if verbose:
        visualize_registration(source=source_down, target=target_down, transformation=result_ransac.transformation)
    # 2nd: fine-tune matching(ICP)
    result_icp = refine_registration(source=source_down, target=target_down,
                                     voxel_size=voxel_size, gross_matching=result_ransac)
    if verbose:
        visualize_registration(source=source_down, target=target_down, transformation=result_icp.transformation)
    return result_icp


# # Illustrate how to use surface matching

path_point_cloud1 = './../../data/72_scenes_of_pipe/pipe/1001.ply'
path_point_cloud2 = './../../data/72_scenes_of_pipe/pipe/1002.ply'
path_point_cloud3 = './../../data/72_scenes_of_pipe/pipe/1003.ply'

# Load data
point_cloud1 = load_ply_as_pc(file_path=path_point_cloud1)
point_cloud2 = load_ply_as_pc(file_path=path_point_cloud2)
point_cloud3 = load_ply_as_pc(file_path=path_point_cloud3)
# point_cloud.visualize()
visualize_pc(point_cloud1, point_cloud2, point_cloud3)

# ### Should adjust z axis to have better matching result

point_cloud1 = adjust_pc_coords(point_cloud=point_cloud1, coord=[0, 0, 0.850])
point_cloud2 = adjust_pc_coords(point_cloud=point_cloud2, coord=[0, 0, 0.850])

# ### Should adjust model to centroid to have better matching result

centroid1 = measure_pc_centroid(point_cloud=point_cloud1); print(centroid1)
point_cloud1 = adjust_pc_coords(point_cloud=point_cloud1, coord=centroid1[:3])

# ### Find matching transformation

xtrans = global_icp(source=point_cloud1, target=point_cloud2, voxel_size=0.002, verbose=True)
print(xtrans)
print('Theta x, Theta y, Theta z(in Degree): \n {}'.format(radian2degree(rotation_matrix_to_euler_angles(xtrans.transformation[:3, :3]))))
print('Translation(in miliMeters): \n {}'.format(m2mm(xtrans.transformation[:3, 3]) - m2mm(centroid1[:3])))

xtrans = fast_global_icp(source=point_cloud1, target=point_cloud2, voxel_size=0.005, verbose=True)
print(xtrans)
print('Theta x, Theta y, Theta z(in Degree): \n {}'.format(radian2degree(rotation_matrix_to_euler_angles(xtrans.transformation[:3, :3]))))
print('Translation(in miliMeters): \n {}'.format(m2mm(xtrans.transformation[:3, 3]) - m2mm(centroid1[:3])))

# %timeit xtrans = global_icp(source=point_cloud1, target=point_cloud2, voxel_size=0.005, verbose=False)
# %timeit xtrans = fast_global_icp(source=point_cloud1, target=point_cloud2, voxel_size=0.005, verbose=False)
Released2019June06/PointCloud/SurfaceMatching/SurfaceMatching-NewVer.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.4.2
#     language: julia
#     name: julia-1.4
# ---

# A lot of people are building modeling languages for their specific domains. However, while the syntax may vary greatly between these domain-specific languages (DSLs), the internals of modeling frameworks are surprisingly similar: building differential equations, calculating Jacobians, etc.
#
# #### ModelingToolkit.jl is metamodeling systematized
#
# After building our third modeling interface, we realized that this problem can be better approached by having a reusable internal structure which DSLs can target. This internal structure is ModelingToolkit.jl: an Intermediate Representation (IR) with a well-defined interface for defining system transformations and compiling to Julia functions for use in numerical libraries. Now a DSL can easily be written by simply defining the translation to ModelingToolkit.jl's primitives and querying for the mathematical quantities one needs.
#
# ### Basic usage: defining differential equation systems, with performance!
#
# Let's explore the IR itself. ModelingToolkit.jl is friendly to use, and can be used as a symbolic DSL in its own right.
Let's define and solve the Lorenz differential equation system using ModelingToolkit to generate the functions: # + using ModelingToolkit ### Define a differential equation system @parameters t σ ρ β @variables x(t) y(t) z(t) @derivatives D'~t eqs = [D(x) ~ σ*(y-x), D(y) ~ x*(ρ-z)-y, D(z) ~ x*y - β*z] de = ODESystem(eqs, t, [x,y,z], [σ,ρ,β]) ode_f = ODEFunction(de) ### Use in DifferentialEquations.jl using OrdinaryDiffEq u₀ = ones(3) tspan = (0.0,100.0) p = [10.0,28.0,10/3] prob = ODEProblem(ode_f,u₀,tspan,p) sol = solve(prob,Tsit5()) using Plots plot(sol,vars=(1,2,3)) # - # ### ModelingToolkit is a compiler for mathematical systems # # At its core, ModelingToolkit is a compiler. It's IR is its type system, and its output are Julia functions (it's a compiler for Julia code to Julia code, written in Julia). # # DifferentialEquations.jl wants a function `f(u,p,t)` or `f(du,u,p,t)` for defining an ODE system, # so ModelingToolkit.jl builds both. First the out of place version: generate_function(de)[1] # and the in-place: generate_function(de)[2] # ModelingToolkit.jl can be used to calculate the Jacobian of the differential equation system: jac = calculate_jacobian(de) # It will automatically generate functions for using this Jacobian within the stiff ODE solvers for faster solving: jac_expr = generate_jacobian(de) # It can even do fancy linear algebra. Stiff ODE solvers need to perform an LU-factorization which is their most expensive part. But ModelingToolkit.jl can skip this operation and instead generate the analytical solution to a matrix factorization, and build a Julia function for directly computing the factorization, which is then optimized in LLVM compiler passes. ModelingToolkit.generate_factorized_W(de)[1] # ### Solving Nonlinear systems # # ModelingToolkit.jl is not just for differential equations. It can be used for any mathematical target that is representable by its IR. For example, let's solve a rootfinding problem `F(x)=0`. 
What we do is define a nonlinear system and generate a function for use in NLsolve.jl # + @variables x y z @parameters σ ρ β # Define a nonlinear system eqs = [0 ~ σ*(y-x), 0 ~ x*(ρ-z)-y, 0 ~ x*y - β*z] ns = NonlinearSystem(eqs, [x,y,z], [σ,ρ,β]) nlsys_func = generate_function(ns) # - # We can then tell ModelingToolkit.jl to compile this function for use in NLsolve.jl, and then numerically solve the rootfinding problem: # + nl_f = @eval eval(nlsys_func[2]) # Make a closure over the parameters for for NLsolve.jl f2 = (du,u) -> nl_f(du,u,(10.0,26.0,2.33)) using NLsolve nlsolve(f2,ones(3)) # - # ### Library of transformations on mathematical systems # # The reason for using ModelingToolkit is not just for defining performant Julia functions for solving systems, but also for performing mathematical transformations which may be required in order to numerically solve the system. For example, let's solve a third order ODE. The way this is done is by transforming the third order ODE into a first order ODE, and then solving the resulting ODE. This transformation is given by the `ode_order_lowering` function. @derivatives D3'''~t @derivatives D2''~t @variables u(t), x(t) eqs = [D3(u) ~ 2(D2(u)) + D(u) + D(x) + 1 D2(x) ~ D(x) + 2] de = ODESystem(eqs, t, [u,x], []) de1 = ode_order_lowering(de) de1.eqs # This has generated a system of 5 first order ODE systems which can now be used in the ODE solvers. # # ### Linear Algebra... for free? # # Let's take a look at how to extend ModelingToolkit.jl in new directions. Let's define a Jacobian just by using the derivative primatives by hand: @parameters t σ ρ β @variables x(t) y(t) z(t) @derivatives D'~t Dx'~x Dy'~y Dz'~z eqs = [D(x) ~ σ*(y-x), D(y) ~ x*(ρ-z)-y, D(z) ~ x*y - β*z] J = [Dx(eqs[1].rhs) Dy(eqs[1].rhs) Dz(eqs[1].rhs) Dx(eqs[2].rhs) Dy(eqs[2].rhs) Dz(eqs[2].rhs) Dx(eqs[3].rhs) Dy(eqs[3].rhs) Dz(eqs[3].rhs)] # Notice that this writes the derivatives in a "lazy" manner. 
# If we want to actually compute the derivatives, we can expand out those expressions:

J = expand_derivatives.(J)

# Here's the magic of ModelingToolkit.jl: **Julia treats ModelingToolkit expressions like a Number, and so generic numerical functions are directly usable on ModelingToolkit expressions!** Let's compute the LU-factorization of this Jacobian we defined using Julia's Base linear algebra library.

using LinearAlgebra
luJ = lu(J,Val(false))
luJ.L

# and the inverse?

invJ = inv(luJ)

# #### Thus ModelingToolkit.jl can utilize existing numerical code on symbolic codes
#
# Let's follow this thread a little deeper.
#
# ### Automatically convert numerical codes to symbolic
#
# Let's take someone's code written to numerically solve the Lorenz equation:

# In-place Lorenz right-hand side: writes du/dt into `du` from state `u`,
# parameters `p = (σ, ρ, β)` and time `t`.
function lorenz(du,u,p,t)
    du[1] = p[1]*(u[2]-u[1])
    du[2] = u[1]*(p[2]-u[3]) - u[2]
    du[3] = u[1]*u[2] - p[3]*u[3]
end

# Since ModelingToolkit can trace generic numerical functions in Julia, let's trace it with Operations. When we do this, it'll spit out a symbolic representation of their numerical code:

# Calling the numeric function on symbolic inputs fills `du` with symbolic expressions.
u = [x,y,z]
du = similar(u)
p = [σ,ρ,β]
lorenz(du,u,p,t)
du

# We can then perform symbolic manipulations on their numerical code, and build a new numerical code that optimizes/fixes their original function!

J = [Dx(du[1]) Dy(du[1]) Dz(du[1])
     Dx(du[2]) Dy(du[2]) Dz(du[2])
     Dx(du[3]) Dy(du[3]) Dz(du[3])]
J = expand_derivatives.(J)

# ### Automated Sparsity Detection
#
# In many cases one has to speed up large modeling frameworks by taking into account sparsity. While ModelingToolkit.jl can be used to compute Jacobians, we can write a standard Julia function in order to get a sparse matrix of expressions which automatically detects and utilizes the sparsity of their function.
using SparseArrays

# Convert a dense matrix of symbolic expressions into a sparse one by keeping
# only the structurally non-zero entries.
function SparseArrays.SparseMatrixCSC(M::Matrix{T}) where {T<:ModelingToolkit.Expression}
    idxs = findall(!iszero, M)   # coordinates of the non-zero expressions
    I = [i[1] for i in idxs]     # row indices
    J = [i[2] for i in idxs]     # column indices
    V = [M[i] for i in idxs]     # the expressions themselves
    return SparseArrays.sparse(I, J, V, size(M)...)
end

sJ = SparseMatrixCSC(J)

# ### Dependent Variables, Functions, Chain Rule
#
# "Variables" are overloaded. When you are solving a differential equation, the variable `u(t)` is actually a function of time. In order to handle these kinds of variables in a mathematically correct and extensible manner, the ModelingToolkit IR actually treats variables as functions, and constant variables are simply 0-ary functions (`t()`).
#
# We can utilize this idea to have parameters that are also functions. For example, we can have a parameter σ which acts as a function of 1 argument, and then utilize this function within our differential equations:

@parameters σ(..)
eqs = [D(x) ~ σ(t-1)*(y-x),
       D(y) ~ x*(σ(t^2)-z)-y,
       D(z) ~ x*y - β*z]

# Notice that when we calculate the derivative with respect to `t`, the chain rule is automatically handled:

@derivatives Dₜ'~t
Dₜ(x*(σ(t^2)-z)-y)
expand_derivatives(Dₜ(x*(σ(t^2)-z)-y))

# ### Hackability: Extend directly from the language
#
# ModelingToolkit.jl is written in Julia, and thus it can be directly extended from Julia itself. Let's define a normal Julia function and call it with a variable:

_f(x) = 2x + x^2
_f(x)

# Recall that when we do that, it will automatically trace this function and then build a symbolic expression. But what if we wanted our function to be a primitive in the symbolic framework? This can be done by registering the function.

f(x) = 2x + x^2
@register f(x)

# Now this function is a new primitive:

f(x)

# and we can now define derivatives of our function:

# Derivative of `f` with respect to its first (only) argument: d/dx (2x + x^2) = 2 + 2x.
function ModelingToolkit.derivative(::typeof(f), args::NTuple{1,Any}, ::Val{1})
    2 + 2args[1]
end

expand_derivatives(Dx(f(x)))
notebook/ode_extras/01-ModelingToolkit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # **Visualization - How Does BERT Answer Questions?** # In this notebook, we will carry out the following badges: # # 1. Replicate the visualization for the bert architecture: # * Visualize hidden states of Bert finetuned on Squad and Biba # %load_ext autoreload # %autoreload 2 import json import torch import numpy as np from transformers import AutoModelForQuestionAnswering, AutoTokenizer, AutoConfig # ## **1. Load pretrained models (Squad, Biba (In second part)) and prepare the samples** # The models are from the paper, which are more or less equal to the ones one can find in the pretrained section of the transformers lib. # <img src="http://mccormickml.com/assets/BERT/SQuAD/input_formatting.png" alt="How QA works in Bert" width="500"/> # + tokenizer_bert_squad = AutoTokenizer.from_pretrained("bert-base-uncased") config = AutoConfig.from_pretrained( "bert-base-uncased", output_hidden_states=True, cache_dir="../cache" ) pretrained_weights = torch.load( "../models/squad.bin", map_location=torch.device("cpu") ) model_bert_squad = AutoModelForQuestionAnswering.from_pretrained( "../models/squad.bin", state_dict=pretrained_weights, config=config, cache_dir="../cache", ) # - # Prepare squad sample sample = json.load(open("./visualization/samples/sample_paper_squad.json"))# QASample.from_json_file("./visualization/samples/sample_paper_squad.json") features = tokenizer_bert_squad.encode_plus(sample['question'], sample['context'], return_tensors='pt') tokens = tokenizer_bert_squad.convert_ids_to_tokens(features['input_ids'][0]) print(features, tokens) # ## **2. 
Generate Answer and get Answer, Question and Support Facts Ranges for visualization** # + # Generate answer and get hidden states outs = model_bert_squad( features["input_ids"], token_type_ids=features["token_type_ids"], attention_mask=features["attention_mask"] ) answer_start = torch.argmax(outs.start_logits) answer_end = torch.argmax(outs.end_logits) answer = ' '.join(tokens[answer_start:answer_end+1]) answer_ids = tokenizer_bert_squad.encode(answer, add_special_tokens=False, return_tensors='pt') hstates = outs.hidden_states print(answer, answer_ids) # + from nltk import tokenize # get token ids for answer/question/supporting facts ids = features["input_ids"][0].int().numpy() answer_index = [np.where(ids==id.numpy())[0][0] for id in answer_ids[0]] # get only the first occurence of the answer. # question index range question_end_index = [i for i, e in enumerate(tokens) if '[SEP]' in e][0] question_index_range = [1, question_end_index] # skip cls token # support fact index range for squad sample sentences = tokenize.sent_tokenize(sample['context']) support_sentence = [s for s in sentences if answer in s][0] sup_sent_ids = tokenizer_bert_squad(support_sentence, add_special_tokens=False, return_tensors='pt')['input_ids'][0].numpy() # remove end token sup_start = [x for x in range(len(ids)) if np.all(ids[x:x+len(sup_sent_ids)] == sup_sent_ids)][0] support_facts_range = [sup_start, sup_start+len(sup_sent_ids)-1] print(tokens[question_index_range[0]:question_index_range[1]]) print(tokens[support_facts_range[0]:support_facts_range[1]]) print(tokens[answer_index[0]]) # - # ## **3. 
# Visualize the Hidden States using PCA

# +
# helper methods for plotting
from enum import Enum
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA


class TokenLabel(Enum):
    # (marker shape, color) per token role in the scatter plots
    PREDICTION = "d", "red"
    SUP = "o", "green"
    QUESTION = "o", "cyan"
    DEFAULT = "o", "grey"


class TokenInfo():
    """One plotted token: its 2-D PCA coordinates, text and role label."""
    def __init__(self, x, y, token, label):
        self.x = x
        self.y = y
        self.token = token
        self.label = label


def plot_hidden_state(token_vector, title):
    """Scatter-plot one layer's tokens at their PCA coordinates,
    colored/shaped by their TokenLabel, annotated with the token text."""
    for token in token_vector:
        plt.scatter(token.x, token.y, c=token.label.value[1], marker=token.label.value[0])
        plt.text(token.x + 0.1, token.y + 0.2, token.token, fontsize=6)
    plt.xlabel("PC 1")
    plt.ylabel("PC 2")
    plt.title(title)
    plt.show()


def visualize_states(hstates, answer, question, sup_facts):
    """Project each layer's hidden states to 2-D with PCA and plot them.

    :param hstates: per-layer hidden states; layer[0] indexes the single batch item
    :param answer: token indices predicted as the answer (highest-priority label)
    :param question: [start, end] token index range of the question
    :param sup_facts: [start, end] token index range of the supporting facts
    Reads the module-level ``tokens`` list for the token strings.
    """
    for layer_idx, layer in enumerate(hstates):
        hidden = layer[0][:len(tokens)]
        reduction = PCA(n_components=2)
        layer_reduced = reduction.fit_transform(hidden.detach().numpy()).transpose()
        token_infos = []
        for token_index, value in enumerate(layer_reduced[0]):
            # Label priority: answer > question > supporting fact > default.
            label = TokenLabel.DEFAULT
            if sup_facts[0] <= token_index <= sup_facts[1]:
                label = TokenLabel.SUP
            if token_index in answer:
                label = TokenLabel.PREDICTION
            elif question[0] <= token_index <= question[1]:
                label = TokenLabel.QUESTION
            token_infos.append(TokenInfo(
                x=value,
                y=layer_reduced[1][token_index],
                token=tokens[token_index],
                label=label
            ))
        plot_hidden_state(token_infos, "Layer {}".format(layer_idx))
# -

visualize_states(hstates, answer_index, question_index_range, support_facts_range)

# ## **4.
Do (basically) the same for the biba sample** # + tokenizer_bert_babi = AutoTokenizer.from_pretrained("bert-base-uncased") config = AutoConfig.from_pretrained( "bert-base-uncased", output_hidden_states=True, cache_dir="../cache" ) pretrained_weights = torch.load( "../models/babi.bin", map_location=torch.device("cpu") ) model_bert_babi = AutoModelForQuestionAnswering.from_pretrained( "../models/babi.bin", state_dict=pretrained_weights, config=config, cache_dir="../cache", ) # - # Prepare bib sample sample = json.load(open("./visualization/samples/sample_paper_babi.json")) features = tokenizer_bert_babi.encode_plus(sample['question'], sample['context'], return_tensors='pt') tokens = tokenizer_bert_babi.convert_ids_to_tokens(features['input_ids'][0]) print(features, tokens) # + # Generate answer and get hidden states outs = model_bert_babi( features["input_ids"], token_type_ids=features["token_type_ids"], attention_mask=features["attention_mask"] ) answer_start = torch.argmax(outs.start_logits) answer_end = torch.argmax(outs.end_logits) answer = ''.join(tokens[answer_start:answer_end+1]) answer_ids = tokenizer_bert_babi.encode(answer, add_special_tokens=False, return_tensors='pt') hstates = outs.hidden_states print(answer, answer_ids) # + from nltk import tokenize # get token ids for answer/question/supporting facts ids = features["input_ids"][0].int().numpy() answer_index = [np.where(ids==id.numpy())[0][0] for id in answer_ids[0]] # get only the first occurence of the answer. 
# question index range question_end_index = [i for i, e in enumerate(tokens) if '[SEP]' in e][0] question_index_range = [1, question_end_index] # skip cls token # support fact index range for squad sample sentences = tokenize.sent_tokenize(sample['context']) print(sentences) support_sentence = [s for s in sentences if answer in s][0] sup_sent_ids = tokenizer_bert_babi(support_sentence, add_special_tokens=False, return_tensors='pt')['input_ids'][0].numpy() # remove end token sup_start = [x for x in range(len(ids)) if np.all(ids[x:x+len(sup_sent_ids)] == sup_sent_ids)][0] support_facts_range = [sup_start, sup_start+len(sup_sent_ids)-1] print(tokens[question_index_range[0]:question_index_range[1]]) print(tokens[support_facts_range[0]:support_facts_range[1]]) print(tokens[answer_index[0]]) # - visualize_states(hstates, answer_index, question_index_range, support_facts_range)
src/visu_replication.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Navigating Rates # ### Summary # # Following the spike in USD swaption implied vol last summer, vols have come down materially. When we dissect the moves of the vol surface over the past 2 years, we find that: # # * Vols on 1y tails are back at lows while vols on 10y tails haven’t fully retraced from last summer's spike. # * Skew on 1y tails remains bid for receivers (and payers trade close to flat vs. ATM) while payer skew on 10y tails has richened translating into a flat skew but very high smile (both receiver and payer skews are high). # # Given these findings, we look at a hypothetical scenario analysis of a short 3m10y strangle over time. # # # The content of this notebook is split into: # * [1 - Let's get started with gs quant](#1---Let's-get-started-with-gs_quant) # * [2 - Implied volatility in historical context](#2---Implied-volatility-in-historical-context) # * [3 - Skew](#3---Skew) # * [4 - Potential structures](#4---Potential-structures) # ### 1 - Let's get started with gs_quant # Start every session with authenticating with your unique client id and secret. If you don't have a registered app, create one [here](https://marquee.gs.com/s/developer/myapps/register). `run_analytics` scope is required for the functionality covered in this example. Below produced using gs-quant version 0.8.95. from gs_quant.session import GsSession GsSession.use(client_id=None, client_secret=None, scopes=('run_analytics',)) # ### 2 - Implied volatility in historical context # Let's start by looking at the implied volatility of 1y and 10y payer and receiver swaptions expiring at 3m. # # To do that, let's first create the 8 swaptions (2 types x 2 expiries x 2 strikes) and put them in a portfolio. 
# + from gs_quant.markets.portfolio import Portfolio from gs_quant.common import Currency, PayReceive from gs_quant.instrument import IRSwaption currency = Currency.USD expiry = '3m' notional = 1e8 tenors = ('1y', '10y') r_strikes = ('ATMF', 'ATMF-50') p_strikes = ('ATMF', 'ATMF+50') receivers = [IRSwaption(PayReceive.Receive, tenor, currency, expiration_date=expiry, strike=strike, notional_amount=notional, name='{},{},{}'.format('R',tenor,strike)) for strike in r_strikes for tenor in tenors] payers = [IRSwaption(PayReceive.Pay, tenor, currency, expiration_date=expiry, strike=strike, notional_amount=notional, name='{},{},{}'.format('P',tenor,strike)) for strike in p_strikes for tenor in tenors] swaptions = payers + receivers port = Portfolio(swaptions) # - # Let's now calculate annualized implied volatility for the last two years looking at month ends as well as the most recent date. # + from gs_quant.markets import HistoricalPricingContext from gs_quant.risk import IRAnnualImpliedVol import pandas as pd import datetime as dt dates = pd.date_range('2018-01-01', '2020-01-01', freq='BM').date.tolist() dates.append(dt.date(2020,1,24)) with HistoricalPricingContext(dates=dates): vols = port.calc(IRAnnualImpliedVol) # - # We can now construct a dataframe with the results and plot it. # + import matplotlib.pyplot as plt d = pd.DataFrame({s.name: vols[s.name] for s in swaptions}) d.plot(figsize=(10,6)) # - # As we can see in the charts above, implied vol for both 1y and 10y tenors has come in since 2019 highs but significantly more so for 1y tenor than 10y. # ### 3 - Skew # # Let's now use the same data to take a look at skew. 
# + receiverSkew3m10y = d['R,10y,ATMF-50'] - d['R,10y,ATMF'] payerSkew3m10y = d['P,10y,ATMF+50'] - d['P,10y,ATMF'] receiverSkew3m1y = d['R,1y,ATMF-50'] - d['R,1y,ATMF'] payerSkew3m1y = d['P,1y,ATMF+50'] - d['P,1y,ATMF'] # + plt.figure(figsize=(10,6)) plt.plot(receiverSkew3m10y*10000,label='3m10y Receiver Skew') plt.plot(payerSkew3m10y*10000,label='3m10y Payer Skew') plt.legend(loc="upper left") plt.show() plt.figure(figsize=(10,6)) plt.plot(receiverSkew3m1y*10000, label='3m1y Receiver Skew') plt.plot(payerSkew3m1y*10000, label='3m1y Payer Skew') plt.legend(loc="upper left") plt.show() # - # In the charts above we can see that 3m10y payer and receiver skew remains high but the spread between them has tightened. For the 1y tenor, however, the payer skew has flattened while receiver skew remains rich in the context of the last 2 years. # ### 4 - Potential structures # # Let's now look at a 3m10y F+/-25 strangle to fade the steep vol smile. # # For this exercise, we will age it in a roll to spot scenario. # + strangles = {} labels = ('Inception', 'After 1m', 'After 2m', 'After 3m') expiries = ('3m', '2m', '1m', '1d') for label, expiry in zip(labels, expiries): R = IRSwaption('Receive', '10y', 'USD', expiration_date=expiry, strike='ATMF-25', notional_amount=1e8) P = IRSwaption('Pay', '10y', 'USD', expiration_date=expiry, strike='ATMF+25', notional_amount=1e8) strangle = Portfolio((R, P)) strangle.resolve() strangles[label] = strangle # - # Let's now construct a set of spot shocks and examine the impact on price accross the different expiries. 
# + from gs_quant.risk import MarketDataShockBasedScenario, MarketDataPattern, MarketDataShock, MarketDataShockType from gs_quant.markets import PricingContext from gs_quant.risk import IRVegaParallel, Price from collections import defaultdict shocks = [x for x in range(-50, 55, 5)] results = defaultdict(dict) with PricingContext(): for expiry, strangle in strangles.items(): for shock in shocks: ir_spot_scenario = MarketDataShockBasedScenario(shocks={ MarketDataPattern('IR', 'USD'): MarketDataShock(MarketDataShockType.Absolute, -shock / 10000), MarketDataPattern('IR Reset', 'USD'): MarketDataShock(MarketDataShockType.Absolute, shock / 10000)}) with ir_spot_scenario: results[expiry].update({shock:strangle.calc((Price, IRVegaParallel))}) # - # We can now roll up the visualize the results: p_res = {k:{shock:-x[Price].aggregate() for shock, x in v.items()} for k, v in results.items()} pd.DataFrame(p_res).plot(figsize=(10,6), title='Price by Shock') v_res = {k:{shock:-x[IRVegaParallel].aggregate() for shock, x in v.items()} for k, v in results.items()} pd.DataFrame(v_res).plot(figsize=(10,6), title='Vega by Shock') # Finally let's look at our breakevens as we move from inception to expiry. df = pd.DataFrame(p_res).reindex(range(-50,50)).interpolate() breakeven = df.loc[0]['Inception'] from gs_quant.risk import IRSpotRate spot = strangles['Inception'][0].calc(IRSpotRate) * 100 # + top = pd.Series() bottom = pd.Series() for column in df.columns: sl = df.iloc[(df[column]-breakeven).abs().argsort()[:2]] a, b = sl[column].index top.at[column] = a if a >= 0 else b bottom.at[column] = a if a < 0 else b # - (top/100 + spot).reindex(labels).plot(figsize=(10,6)) (bottom/100 + spot).reindex(labels).plot(figsize=(10,6)) # Please reach out to `<EMAIL>` with any questions. # ### Disclaimer # This website may contain links to websites and the content of third parties ("Third Party Content"). 
We do not monitor, review or update, and do not have any control over, any Third Party Content or third party websites. We make no representation, warranty or guarantee as to the accuracy, completeness, timeliness or reliability of any Third Party Content and are not responsible for any loss or damage of any sort resulting from the use of, or for any failure of, products or services provided at or from a third party resource. If you use these links and the Third Party Content, you acknowledge that you are doing so entirely at your own risk.
gs_quant/content/made_with_gs_quant/1-Navigating Rates.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Convert BioAnalyzer electropherogram traces from migration time to
# nucleotides, using the ladder lane as the calibration reference.
import math  # BUG FIX: math.ceil is used below but math was never imported
             # (%pylab does not inject the math module into the namespace).

import pandas as pd
import scipy as sp
from scipy.sparse import diags
import numpy as np
from numpy import linalg as LA
import sys
import matplotlib.pyplot as plt

# importing seaborn for plotting
import seaborn as sns  # for plotting purposes

# %pylab inline
sns.set_style('ticks')
sns.set_context('paper')

from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

import matplotlib as mpl
from scipy.signal import find_peaks

# Global plot styling.
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams['axes.titlesize'] = 16
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['legend.fontsize'] = 12
mpl.rcParams['figure.figsize'] = [8, 16/3]
# -

# ### converting ladder to nts

# ls

filename = 'Eukaryote Total RNA Nano_2020-10-22_11-27-32'

# +
# Read the exported ladder trace (17 header rows; the last row is a footer,
# hence the [:-1] slice).
ladder_df = pd.read_csv(filename+'_Ladder.csv', skiprows=17)[:-1]
ladder_time = np.array(ladder_df['Time'].astype(float))
ladder_value = np.array(ladder_df['Value'].astype(float))

# Detect the ladder peaks; keep the first six, which correspond to the known
# ladder fragment sizes defined in the next cell.  The height/distance values
# were tuned for this chip run — TODO confirm if reused on other runs.
peaks, _ = find_peaks(ladder_value, height=7.5, distance=15)
ladder_peak_idx = list(peaks[:6])

plot(ladder_time, ladder_value, label='ladder trace')
plot(ladder_time[peaks], ladder_value[peaks], 'x', label='peaks', markersize=15)
title('Ladder Peaks: '+filename, fontsize=16)
legend(fontsize=14)
xlabel('Time', fontsize=14)
ylabel('FU', fontsize=14)
xticks(fontsize=14)
yticks(fontsize=14)
tight_layout()
savefig(filename+'_ladder.png', dpi=300)
# -

# +
peak_times = ladder_time[ladder_peak_idx]

# Known sizes (nucleotides) of the six ladder peaks for this assay.
peak_fu = np.array([25, 200, 500, 1000, 2000, 4000])

# Fit a 4th-degree polynomial mapping migration time -> nucleotides and keep
# it as a callable; ladder_nts is reused below for the sample traces.
ladder_fit = np.polyfit(x=peak_times, y=peak_fu, deg=4)
lf = np.poly1d(ladder_fit)
ladder_nts = lf

plot(peak_fu, lf(peak_times), 'o', label='calculated nts vs. ladder nts')
plot(peak_fu, peak_fu, label='perfect correlation')
ylabel('Calculated nts (from time)', fontsize=14)
xlabel('Ladder nts', fontsize=14)
yticks(fontsize=12)
xticks(fontsize=12)
title('Ladder polynomial fit: '+filename, fontsize=16)
legend(fontsize=12)
tight_layout()
savefig(filename+'_ladder_fit.png', dpi=300)
# -

# ls

# ### converting samples to nucleotides as well

# +
samples = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
num_samples = len(samples)
ncolumns = 3
nrows = math.ceil(num_samples/ncolumns)
figure(figsize=(ncolumns*6, nrows*4+3))

# Apply the ladder calibration to every sample trace and write out a CSV
# with the added 'Nucleotides' column.
samples_dict = {}
for i, sample in enumerate(samples):
    sample_df = pd.read_csv(filename+'_Sample'+sample+'.csv', skiprows=17)[:-1]
    samples_dict[sample] = sample_df
    times = np.array(sample_df['Time'].astype(float))
    sample_df['Nucleotides'] = ladder_nts(times)
    sample_df.to_csv(filename+'_'+sample+'_nts.csv')
# -
data/102120_PSU_5mC_BioAnalyzer/Data/Run12_Plate3_F3-12/.ipynb_checkpoints/Convert_ladders_Run12-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: geo_env
#     language: python
#     name: geo_env
# ---

# ## Creating Households Before

# Source workbook: one row per building group in the simulation.
excelBuildingInformation = 'BuildingInSimulationAndStatsApril6.xlsx'

import pandas as pd
import numpy as np

pd.options.display.max_rows = 100
pd.options.display.max_columns = 100

buildingsDf = pd.read_excel(excelBuildingInformation)
buildingsDf.head()

mainColumns = buildingsDf.columns.tolist()
buildingsDf = buildingsDf.drop(columns=['plan_num'])
buildingsDf.head()

buildingsDf['Average_age_2020'].mean()
buildingsDf['percent_above_65'].mean()

len([20, 10, 10, 10, 10, 10, 10, 10, 10, 10])

# Target age distribution for synthetic household heads (probabilities sum to 1.0).
AgeDistribution = pd.DataFrame({'Age': [20, 30, 40, 50, 65, 70, 80, 90, 100],
                                'Distribution': [0.14, 0.18, 0.23, 0.14, 0.09, 0.08, 0.07, 0.06, 0.01]})
AgeDistribution[AgeDistribution['Age'] >= 65]['Distribution'].sum()
AgeDistribution[AgeDistribution['Age'] < 65]['Distribution'].sum()
AgeDistribution['Distribution'].sum()
AgeDistribution.plot(kind='bar', x='Age', y='Distribution')

# Columns consumed by createAgentAndApartment below.
mainColumns = ['BeforeBldgs', 'ProjNumber', 'min_living_till_2020', 'max_living_till_2020',
               'Average_age_2020', 'StdDev_age_2020', 'Before_app', 'Above_65',
               'Area_round_mode', 'High_discount_sum', 'Low_Discount_35_sum', 'Renter_sum']

# Schema of the synthetic agent/apartment table.
ApartmentColums = ['bldCode', 'doorIndex', 'bldCodeDoorIndex', 'aprtmentSize', 'yearsInBldg',
                   'age', 'lowDiscount', 'highDiscount', 'noDiscount', 'income', 'rent', 'own', 'agentID']

orginalAgentsDS = pd.DataFrame(columns=ApartmentColums)
orginalAgentsDS.info()

orginalAgentsDS['doorIndex'] = orginalAgentsDS['doorIndex'].astype('int')
orginalAgentsDS['aprtmentSize'] = orginalAgentsDS['aprtmentSize'].astype('int')
orginalAgentsDS['yearsInBldg'] = orginalAgentsDS['yearsInBldg'].astype('int')
orginalAgentsDS['age'] = orginalAgentsDS['age'].astype('int')
orginalAgentsDS['lowDiscount'] = orginalAgentsDS['lowDiscount'].astype('int')
orginalAgentsDS['highDiscount'] = orginalAgentsDS['highDiscount'].astype('int')
orginalAgentsDS['noDiscount'] = orginalAgentsDS['noDiscount'].astype('int')
orginalAgentsDS['rent'] = orginalAgentsDS['rent'].astype('int')
orginalAgentsDS['own'] = orginalAgentsDS['own'].astype('int')

orginalAgentsDSJustHead = orginalAgentsDS.copy()  # Just Leave Head (empty, typed template)


def createAgentAndApartment(bldgInfo, hedersonly):
    """Create one synthetic household row per apartment for every building
    code listed in bldgInfo['BeforeBldgs'].

    Parameters
    ----------
    bldgInfo : pd.Series
        One row of buildingsDf restricted to mainColumns.
    hedersonly : pd.DataFrame
        Empty, correctly-typed template frame (columns only, no rows).

    Returns
    -------
    pd.DataFrame
        One row per apartment across all building codes of this project,
        with age, tenure length, discount tier and rent/own flags assigned
        at random from the project-level totals.
    """
    bldgInfo['Above_65'] = bldgInfo['Above_65'].astype(int)
    bldgInfo.numApartments = bldgInfo['Before_app']
    bldgInfo.apartmentSize = bldgInfo['Area_round_mode'].astype('int')
    bldgInfo.rent = bldgInfo['Renter_sum'].astype('int')
    bldgInfo.High = bldgInfo['High_discount_sum'].astype('int')
    bldgInfo.Low = bldgInfo['Low_Discount_35_sum'].astype('int')
    bldgInfo.code = bldgInfo['BeforeBldgs'].split(',')
    currentBldgloop = hedersonly.copy()
    currentBldg = hedersonly.copy()
    for bldItem in bldgInfo.code:
        tempDataSet = pd.DataFrame({
            'doorIndex': [door for door in range(1, int(bldgInfo.numApartments) + 1)],
            'min_living_till_2020': bldgInfo['min_living_till_2020'],
            'max_living_till_2020': bldgInfo['max_living_till_2020'],
            'Average_age_2020': bldgInfo['Average_age_2020'],
            'StdDev_age_2020': bldgInfo['StdDev_age_2020']})
        # Age: draw from the global AgeDistribution, then jitter by +/-5 years.
        AgeDistRandom = np.random.choice(a=AgeDistribution['Age'].values,
                                         size=bldgInfo.numApartments.astype(int),
                                         p=AgeDistribution['Distribution'].values)
        AgeDistRandomNoise = [np.random.randint(age - 5, age + 5) for age in AgeDistRandom]  # add Some Noise
        tempDataSet['age_'] = AgeDistRandomNoise
        tempDataSet['yearsLivingInBldg'] = tempDataSet.apply(
            lambda x: np.random.randint(x['min_living_till_2020'], x['max_living_till_2020']), axis=1)
        tempDataSet = tempDataSet[['doorIndex', 'age_', 'yearsLivingInBldg']]

        # Start from a clean template for this building code.
        currentBldgloop['doorIndex'] = [door for door in range(1, int(bldgInfo.numApartments) + 1)]
        currentBldgloop['bldCode'] = bldItem
        currentBldgloop['bldCodeDoorIndex'] = currentBldgloop['bldCode'] + "_" + currentBldgloop['doorIndex'].astype(str)
        currentBldgloop['aprtmentSize'] = bldgInfo.apartmentSize
        currentBldgloop = pd.merge(currentBldgloop, tempDataSet, on='doorIndex')
        currentBldgloop['age'] = currentBldgloop['age_']
        currentBldgloop['yearsInBldg'] = currentBldgloop['yearsLivingInBldg']
        currentBldgloop.drop(columns=['age_', 'yearsLivingInBldg'], inplace=True)

        # Randomly distribute the project's quota of high/low discounts.
        currentBldgloop.loc[currentBldgloop.sample(bldgInfo.High).index, 'highDiscount'] = 1
        print('bld item', bldItem)
        print('high', bldgInfo.High)
        print('Low', bldgInfo.Low)
        lowdiscountIndex = currentBldgloop.loc[currentBldgloop['highDiscount'].isna()].sample(n=bldgInfo.Low).index
        currentBldgloop.loc[lowdiscountIndex, 'lowDiscount'] = 1
        currentBldgloop['lowDiscount'] = currentBldgloop['lowDiscount'].fillna(0)
        currentBldgloop['highDiscount'] = currentBldgloop['highDiscount'].fillna(0)
        currentBldgloop.loc[currentBldgloop.query('highDiscount==0 and lowDiscount==0').index, 'noDiscount'] = 1
        # BUG FIX: this assignment previously targeted currentBldg (the empty
        # template) instead of currentBldgloop, so the returned frame kept
        # NaNs in 'noDiscount' for rows with a discount.
        currentBldgloop['noDiscount'] = currentBldgloop['noDiscount'].fillna(0)

        # Rent / Own split: bldgInfo.rent random renters, the rest owners.
        currentBldgloop['rent'] = 0
        currentBldgloop['own'] = 0
        currentBldgloop['rent'] = currentBldgloop['rent'].astype('int')
        currentBldgloop['own'] = currentBldgloop['own'].astype('int')
        rentIndex = currentBldgloop.sample(n=bldgInfo.rent).index
        currentBldgloop.loc[rentIndex, 'rent'] = 1
        currentBldgloop.loc[currentBldgloop.query('rent==0').index, 'own'] = 1

        if bldItem == bldgInfo.code[0]:
            currentBldg = currentBldgloop
        else:
            # DataFrame.append was removed in pandas 2.0; concat is equivalent here.
            currentBldg = pd.concat([currentBldg, currentBldgloop])
        currentBldg.reset_index(inplace=True, drop=True)
        currentBldgloop = hedersonly.copy()
    return currentBldg.copy()


# Build the full "before" agent table, one project at a time.
for i in buildingsDf.index:
    bldgInfo = buildingsDf.loc[i, mainColumns]
    currentBldg = createAgentAndApartment(bldgInfo, orginalAgentsDSJustHead.copy())
    if (i == 0):
        orginalAgentsDS = currentBldg
    else:
        orginalAgentsDS = pd.concat([orginalAgentsDS, currentBldg])

orginalAgentsDS.reset_index(inplace=True)
orginalAgentsDS

# Income ranges per discount tier (min_/max_ rows).
discountTalbe = pd.DataFrame({'noDiscount': [9220, 15430],
                              'lowDiscount': [6514, 9219],
                              'highDiscount': [5011, 6513]},
                             index=['min_', 'max_'])
discountTalbe

# Draw a uniform income within each agent's discount-tier range.
for item in ['lowDiscount', 'highDiscount', 'noDiscount']:
    print(item)
    con = orginalAgentsDS[item] == 1
    orginalAgentsDS.loc[con, 'income'] = orginalAgentsDS.loc[con].apply(
        lambda x: np.random.randint(discountTalbe.loc['min_', item], discountTalbe.loc['max_', item]), axis=1)

orginalAgentsDS['income'].hist()
orginalAgentsDS['age'].hist()
orginalAgentsDS['rent'].sum()
orginalAgentsDS['own'].sum()
orginalAgentsDS['age'].mean()
orginalAgentsDS

import uuid
# NOTE(review): uuid1 embeds host MAC address and timestamp; uuid4 would be
# anonymous — left unchanged to preserve existing IDs' format.
orginalAgentsDS['agentID'] = orginalAgentsDS['agentID'].apply(lambda x: uuid.uuid1())
orginalAgentsDS
orginalAgentsDS.to_excel('OrigianlAgentsApril6_2021.xlsx')
orginalAgentsDS['bldCode'].unique()

buildingsDf
# Post-project apartment size: +12 m^2 for project type 1, else +25 m^2.
buildingsDf['newApartmentSize'] = buildingsDf['Area_round_mode'] + buildingsDf['ProjType'].apply(lambda x: 12 if x == 1 else 25)

# ## Need to add floors to the old and new buidlings

# ## Need to add floors to the old and new buidlings Rent and House Price

newApartmentColums = ['bldCode', 'doorIndex', 'bldCodeDoorIndex', 'aprtmentSize', 'prjectType', 'ProjNumber']
newApartmentColums

newApartmentsDS = pd.DataFrame(columns=newApartmentColums)
newApartmentsDS.info()
newApartmentsDS['aprtmentSize'] = newApartmentsDS['aprtmentSize'].astype('int')
newApartmentsDSDSJustHead = newApartmentsDS.copy()  # Just Leave Head (empty, typed template)
newApartmentsDSDSJustHead

mainNewBldColumns = ['AfterBldgs', 'ProjType', 'after_app', 'newApartmentSize', 'ProjNumber']
buildingsDf.head()


def creatNewApartments(bldgInfo, hedersonly):
    """Create the post-project apartment rows for every building code in
    bldgInfo.code; the caller pre-populates numApartments, apartmentSize,
    code, ProjType and ProjNumber attributes on bldgInfo.

    Returns a DataFrame with one row per new apartment.
    """
    currentBldgloop = hedersonly.copy()
    currentBldg = hedersonly.copy()
    for bldItem in bldgInfo.code:
        currentBldgloop['doorIndex'] = [door for door in range(1, int(bldgInfo.numApartments) + 1)]
        currentBldgloop['bldCode'] = bldItem
        currentBldgloop['bldCodeDoorIndex'] = currentBldgloop['bldCode'] + "_" + currentBldgloop['doorIndex'].astype(str)
        currentBldgloop['aprtmentSize'] = bldgInfo.apartmentSize
        currentBldgloop['prjectType'] = bldgInfo.ProjType
        currentBldgloop['ProjNumber'] = bldgInfo.ProjNumber
        if bldItem == bldgInfo.code[0]:
            currentBldg = currentBldgloop
        else:
            currentBldg = pd.concat([currentBldg, currentBldgloop])
        currentBldg.reset_index(inplace=True, drop=True)
        currentBldgloop = hedersonly.copy()
    return currentBldg.copy()


# Build the full "after" apartment table.
for i in buildingsDf.index:
    bldgInfo = buildingsDf.loc[i, mainNewBldColumns]
    bldgInfo.numApartments = bldgInfo['after_app'].astype('int')
    bldgInfo.apartmentSize = bldgInfo['newApartmentSize'].astype('int')
    bldgInfo.code = bldgInfo['AfterBldgs'].split(',')
    bldgInfo.ProjType = bldgInfo['ProjType']
    bldgInfo.ProjNumber = bldgInfo['ProjNumber']
    newBldg = creatNewApartments(bldgInfo, newApartmentsDSDSJustHead.copy())
    if (i == 0):
        newApartmentsDS = newBldg
    else:
        newApartmentsDS = pd.concat([newApartmentsDS, newBldg])

bldgInfo
newApartmentsDS
newApartmentsDS['prjectType'].hist()
newApartmentsDS['aprtmentSize'].hist()
newApartmentsDS.to_excel('newApartmentsDataSetApril_6.xlsx')
SyntheticAgentCreation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Solaris # language: python # name: solaris # --- # # Notebook 1: An introduction to __Solaris__ and your working environment # # This notebook is developed for the FOSS4G International 2019 `solaris` Workshop. If you're using it outside of that context, some of the working environment materials will be unavailable. Check the GitHub repo for instructions on how to alter the notebooks for usage outside of the workshop. # # This notebook provides five parts: # # 1. [__Checking your `solaris` Installation__](#Checking-your-solaris-installation) # 2. [__Listing the data provided__](#section2) # 3. [__Input tile to building footprint vectors with 7 Python commands__](#section3) # 3. [__Getting your pre-trained model ready__](#section4) # 4. [__Running inference with `Solaris` on SpaceNet MVOI data__](#section5) # 5. [__Visualizing outputs from the models__](#section6) # # Let's get started! # # ## Checking your solaris installation # # The working environment provided for this workshop has `solaris` and all its dependencies pre-installed in a conda environment. If you're using the notebook outside of the workshop and need installation instructions, [click here](https://solaris.readthedocs.io/en/latest/installation.html). # # Let's import `solaris` and check the package version to make sure it's available. import solaris as sol sol.__version__ # The above may throw a whole bunch of red text (tensorflow deprecation warnings) - those can be safely ignored for now. # <a id="section2"></a> # ## Listing the data provided # # We've provided a subset of the [SpaceNet](https://spacenet.ai) dataset for use in this workshop. 
If you're using the notebook outside of the FOSS4G International workshop, you'll need to collect the data yourself - see the GitHub repo containing this notebook for instructions on how to get the data you'll need. # # First, let's look at the data provided. Everything is stored in one directory, `/data` (unless you're viewing this outside of the workshop). # + import os data_path = '/data' # NON-WORKSHOP PARTICIPANTS: change this path to point to the directory where you've stored the data. print('{} directory contents:'.format(data_path)) print(os.listdir(data_path)) print() print('SpaceNet MVOI data stored in the directory "MVOI_data":') print(os.listdir(os.path.join(data_path, 'MVOI_data'))) print() print('SpaceNet 2 Khartoum imagery stored in the directory "Khartoum_data":') print(os.listdir(os.path.join(data_path, 'Khartoum_data'))) print() print('Configuration files stored in the directory "workshop_configs":') print(os.listdir(os.path.join(data_path, 'workshop_configs'))) # - # The configurations path also contains .csv files that specify data for inference. # # Here, you can see the different data that you have access to: # # - Test images for the SpaceNet Off-Nadir Dataset (AKA [SpaceNet MVOI](https://arxiv.org/abs/1903.12239)) # - Training images for SpaceNet Khartoum building footprint extraction # - Configuration files for a few different model training and inference processes # # <a id="section3"></a> # ## Running the full pipeline # # First, we'll run the entire inference process, just to show you the end result of what you get from `solaris`. Below, we break down each step to describe what's going on. # + import time import skimage from shapely.ops import cascaded_union # just for visualization purposes print('Loading config...') config = sol.utils.config.parse(os.path.join(data_path, 'workshop_configs/xdxd_workshop_infer.yml')) print('config loaded. Initializing model...') xdxd_inferer = sol.nets.infer.Inferer(config) print('model initialized. 
Loading dataset...') inf_df = sol.nets.infer.get_infer_df(config) print('dataset loaded. Running inference on the image.') start_time = time.time() xdxd_inferer(inf_df) end_time = time.time() print('running inference on one image took {} seconds'.format(end_time-start_time)) print('vectorizing output...') resulting_preds = skimage.io.imread(os.path.join('xdxd_inference_out', 'MVOI_nadir10_test_sample.tif')) predicted_footprints = sol.vector.mask.mask_to_poly_geojson( pred_arr=resulting_preds, reference_im=os.path.join(data_path, 'MVOI_data', inf_df.loc[0, 'image'])) print('output vectorized. A few of the vector-formatted building predictions:') predicted_footprints.head() # - # Excluding the printing and recording commands, __it only took 7 lines of code to run an entire inference pipeline, from input tile to output vectors!__ # # Let's visualize those labels alongside the source image and ground truth. # + import numpy as np import matplotlib.pyplot as plt src_im_path = os.path.join(data_path, 'MVOI_data/MVOI_nadir10_test_sample.tif') # read the image in im_arr = skimage.io.imread(os.path.join(data_path, 'MVOI_data/viz_version.tif')) # rescale to min/max in each channel # im_arr = im_arr.astype('float') - np.amin(im_arr, axis=(0,1)) # im_arr = im_arr/np.amax(im_arr, axis=(0,1)) # im_arr = (im_arr*255).astype('uint8') # switch B and R for viz # tmp = im_arr[:, :, 0].copy() # im_arr[:, :, 0] = im_arr[:, :, 2] # im_arr[:, :, 2] = tmp # generate mask from the predictions pred_arr = sol.vector.mask.footprint_mask(predicted_footprints, reference_im=src_im_path) ground_truth = sol.vector.mask.footprint_mask( os.path.join(data_path, 'MVOI_data/MVOI_nadir10_test_sample.geojson'), reference_im=src_im_path) f, axarr = plt.subplots(1, 3, figsize=(16, 12)) axarr[0].imshow(im_arr[:, :, 0:3]) axarr[0].set_title('Source image', size=14) axarr[0].axis('off') axarr[1].imshow(pred_arr, cmap='gray') axarr[1].set_title('Predictions', size=14) axarr[1].axis('off') 
axarr[2].imshow(ground_truth, cmap='gray') axarr[2].set_title('Ground Truth', size=14); axarr[2].axis('off'); # - # There you have it. With 7 lines of python code (followed by a few more to visualize the outputs), you were able to go from the input image (left) to the predictions (middle) and compare them to the ground truth labels (right). Great work! Take a close look at the predictions compared to the ground truth. How do they compare? Are there any common problems that could raise problems in a use case where you want the best building labels you can get? Discuss with your partner if you'd like. # # A step-by-step walkthrough of the above steps # # <a id="section4"></a> # ## Getting your pre-trained model ready # # For our first pass, we'll use a [standard configuration file for XD_XD's model](https://github.com/CosmiQ/solaris/blob/master/solaris/nets/configs/xdxd_spacenet4.yml). See [the YAML config tutorial](https://solaris.readthedocs.io/en/latest/tutorials/notebooks/creating_the_yaml_config_file.html) for a description of what each item means. We'll display the configuration below, but don't worry if you can't follow what each config parameter is - it's just in case you're curious. config = sol.utils.config.parse(os.path.join(data_path, 'workshop_configs/xdxd_workshop_infer.yml')) config # As you can see, `solaris` reads the config YAML file in as a dictionary. `solaris` uses this `config` dictionary to specify all of the parameters for model training and inference (as well as some pre-processing steps). Then, you just pass the `config` object to the inference object: xdxd_inferer = sol.nets.infer.Inferer(config) # You already have XD_XD's pretrained model stored on your EC2 instance, but if you hadn't, the above line would have downloaded the model weights for you. Note that this will happen automagically for any pre-trained SpaceNet model provided by `solaris` (if you haven't downloaded it already). 
If you wish to use your own model weights, you can modify the configuration YAML file to point the `"model_path"` parameter of the config YAML file to your weights file. # # Next, let's load in the .csv file that specifies the image we're going to run inference on. Below the next cell, you'll see the contents of the inference target `pandas.DataFrame`: a single row specifying the path to the image you ran inference on before. inf_df = sol.nets.infer.get_infer_df(config) inf_df # Now that we've loaded in the path to the image we want to analyze, we're ready to identify buildings in the image! # # <a id="section5"></a> # ## Running inference # # Running inference is as easy as calling your inferer (`xdxd_inferer`) with the inference target dataframe (here, `inf_df`) as an argument. This will run the entire inference process on that image and save the resulting mask as a TIFF file. _Non-workshop participants: this may take a couple of minutes if you're not using a GPU - be patient!)_ model_result_mask = xdxd_inferer(inf_df) # _The above cell won't generate any output. Watch for the asterisk to the left to turn into a number to know when it finishes._ # # And you're done! Simple as that. Let's check out what that mask looks like: # # <a id="section6"></a> # ## Visualizing inference outputs # # We saw a binary black-and-white image of building footprints in the full pipeline example earlier, but that's not actually what comes directly out of a deep learning model. They actually produce a continuous "probability mask", corresponding to the likelihood that the neural net thinks each pixel is part of a building. Run the cell below to see what that looks like. 
# + import matplotlib.pyplot as plt import numpy as np import skimage resulting_preds = skimage.io.imread(os.path.join('xdxd_inference_out', 'MVOI_nadir10_test_sample.tif')) plt.figure(figsize=(8, 8)) plt.imshow(resulting_preds[:, :, 0], cmap='gray') plt.axis('off') plt.title('Raw neural net output', size=16); # - # The above is a pixel mask where higher values indicate higher probability of a pixel corresponding to buildings. What `solaris` does internally and we'll do below is binarize this to convert to a building/no building image: binary_preds = resulting_preds > 0 plt.figure(figsize=(8,8)) plt.imshow(binary_preds.astype('uint8')[:, :, 0], cmap='gray') plt.axis('off') plt.title('Binarized predictions', size=16); # All that you can get directly from this, though, is "which pixels are part of buildings?" This isn't that useful, though, for identifying individual buildings; let's generate a more useful output, i.e. georegistered building footprints: # + from shapely.ops import cascaded_union # just for visualization predicted_footprints = sol.vector.mask.mask_to_poly_geojson( pred_arr=resulting_preds, reference_im=inf_df.loc[0, 'image'], do_transform=True) cascaded_union(predicted_footprints['geometry'].values) # - # (Note that the above doesn't necessarily display in some Jupyter notebook environments - but it's more or less identical to the raster-formatted version above, except each polygon is separated and outlined.) # # The building footprints are stored as WKT polygons in a `geopandas.GeoDataFrame`. The next cell will show you what those look like: print(predicted_footprints['geometry'].head()) # __Congratulations!__ You've run an _entire_ inference pipeline to predict where buildings are using `solaris` - it's as simple as using the commands above! # # _Coming up next:_ We'll talk about what's going on under the hood in the code you just ran, including a quick tutorial on how neural nets work. 
To start with, continue to `2_under_the_hood.ipynb` and go through the first part of the notebook until it tells you to stop.
Solaris_FOSS4G_2019/1_intro_to_solaris.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Splitting test and training data

# Training a data model usually requires splitting your data into separate
# test and training data sets.
#
# We will use the [scikit-learn](https://scikit-learn.org/stable/) library
# for this task.
#
# scikit-learn is an open-source, BSD-licensed data-science library that
# provides tools for preprocessing data and training models.

# Some data preparation is needed before we can perform the split.

import pandas as pd

# Load the csv file with information about flights and flight delays.
#
# **shape** tells us how many rows and columns the original DataFrame has.

delays_df = pd.read_csv('Data/Lots_of_flight_data.csv')
delays_df.shape

# ## Split data into features and labels

# X holds only the features we want to train the model on.
#
# **Note** Only numeric values can be used as features. Non-numeric values
# must first be converted with techniques such as Hot Encoding — see a Data
# Science course for details on those techniques!

X = delays_df[['DISTANCE', 'CRS_ELAPSED_TIME']]
X.head()

# y holds only the value we want the model to predict: the number of minutes
# a flight arrives late, found in the ARR_DELAY column.

y = delays_df[['ARR_DELAY']]
y.head()

# ## Split into test and training data

# **scikit-learn's train_test_split** moves 30% of the rows into the test
# DataFrames and leaves the remaining 70% for training.
#
# NOTE: fixing *random_state* guarantees the same rows land in the test set
# on every run, making the results repeatable.

# +
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X,
    y,
    test_size=0.3,
    random_state=42
)
# -

# **X_train** holds 70% of the rows — the features used to train the model.

X_train.shape

# **X_test** holds the remaining 30% — the features used to check the trained
# model's accuracy.

X_test.shape

# **X_train** and **X_test** contain our features: the columns we think can
# help predict a late arrival, **DISTANCE** and **CRS_ELAPSED_TIME**.

X_train.head()

# **y_train** holds the labels for the 70% of rows used for training.
# (If you don't need the original DataFrame, you could instead drop rows from
# it in place with **inplace=*True*** rather than creating new DataFrames.)

y_train.shape

# **y_test** holds the labels for the 30% of rows used to check accuracy.

y_test.shape

# **y_train** and **y_test** contain our label — the column the trained model
# should predict: **ARR_DELAY**.
#
# **NOTE:** a negative ARR_DELAY means the flight arrived early.

y_train.head()
even-more-python-for-beginners-data-tools/10 - Splitting test and training data with scikit-learn/10 - Train Test split.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scrape G7/G8 summit communiqués, keep the sentences mentioning "fiscal",
# and compare hand-coded fiscal-stance labels between two coders.

import os
import re

import requests
from bs4 import BeautifulSoup
import textract  # kept: may be used by later cells of this notebook

g8_link = "http://www.g8.utoronto.ca/summit/index.htm"
prefix_link = "http://www.g8.utoronto.ca/summit/"

os.chdir("..")
datadir = "data/g8"
checkdir = "data/hirose_san"

html = requests.get(g8_link)
soup = BeautifulSoup(html.content, "lxml")

# Communiqué URL per summit date (YYYYMMDD -> page).
link = {"20170526": "http://www.g8.utoronto.ca/summit/2017taormina/communique.html",
        "20160526": "http://www.g8.utoronto.ca/summit/2016shima/ise-shima-declaration-en.html",
        "20150607": "http://www.g8.utoronto.ca/summit/2015elmau/2015-G7-declaration-en.html",
        "20140604": "http://www.g8.utoronto.ca/summit/2014brussels/declaration.html",
        "20130617": "http://www.g8.utoronto.ca/summit/2013lougherne/lough-erne-communique.html",
        "20120518": "http://www.g8.utoronto.ca/summit/2012campdavid/g8-declaration.html",
        "20110526": "http://www.g8.utoronto.ca/summit/2011deauville/2011-declaration-en.html",
        "20100625": "http://www.g8.utoronto.ca/summit/2010muskoka/communique.html",
        "20090708": "http://www.g8.utoronto.ca/summit/2009laquila/2009-declaration.html"}

# Strips leading paragraph numbers such as "12. " from communiqué items.
# BUG FIX: the pattern was r"\d+. " — the unescaped "." matched ANY character,
# so e.g. "2009 fiscal..." would lose its leading "2009 f". The dot is now
# escaped, and the pattern is compiled once instead of per item.
item_number = re.compile(r"\d+\. ")

for key, value in link.items():
    html = requests.get(value)
    soup = BeautifulSoup(html.content, "lxml")
    # Match list items first, paragraphs only as a fallback.
    out = soup.findAll("li", text=re.compile("fiscal"))
    out1 = soup.findAll("p", text=re.compile("fiscal"))
    temp = []
    save_dir = os.path.join(datadir, key + ".txt")
    if len(out) > 0:
        for k in out:
            temp.append(item_number.sub("", k.contents[0]))
    elif len(out1) > 0:
        for k in out1:
            temp.append(item_number.sub("", k.contents[0]))
    with open(save_dir, 'w') as f:
        f.write(" ".join(temp))

import pandas as pd
import numpy as np

# Inter-coder agreement check on the hand-labelled fiscal sentences.
check_text = pd.read_csv(os.path.join(checkdir, "fiscal_sentence_score(hirosesan-check).csv"))
check_text.dropna(axis=0, how="any", inplace=True)
# NOTE: the column name really contains a newline ('fiscal\nstance') — it
# comes from a wrapped spreadsheet header.
check_text["check"] = check_text['fiscal\nstance'] == check_text["Hirose"]

# Share of sentences where the two coders agree.
check_text.sum(axis=0)["check"]/check_text.shape[0]
check_text.shape[0]

data_dir = "data"
os.path.join(data_dir, "hirose_san/fiscal_sentence_score(hirosesan-check).csv")

check_text["test"] = 77.5
check_text

import corenlp
parser = corenlp.StanfordCoreNLP("stanford-corenlp-full-2016-10-31")
code/.ipynb_checkpoints/scrape_g8_test-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Table 2. Household characteristics of the 20 study homes. # # This notebook recreates Table 2 of the paper 'Heating behaviour in English homes: An assessment of indirect calculation methods'. # # The paper is available here: https://www.sciencedirect.com/science/article/pii/S0378778817314342 # # # ## Setup from lxml import etree NS={'a':'http://www.refitsmarthomes.org'} from collections import Counter import pandas as pd # ## Step 1: Read in the data filename=r'C:\Users\cvskf\OneDrive - Loughborough University\_Data\REFIT_Smart_Home_dataset_PUBLIC_v1\Data\REFIT_BUILDING_SURVEY.xml' tree = etree.parse(filename) tree type(tree) # + #print(etree.tostring(tree, pretty_print=True).decode()) # - # ## Step 2: Clean and/or process the data # ## Step 3: Analyse the data # ### What is the distribution of house type in the 20 buildings? xpath_query_string='//a:Building' buildings=tree.getroot().xpath(xpath_query_string,namespaces=NS) buildings result=[] for building in buildings: result.append(building.get('builtFormType')) result result=[building.get('builtFormType') for building in buildings] # list comprehension result xpath_query_string='//a:Building/@builtFormType' result=tree.getroot().xpath(xpath_query_string,namespaces=NS) result house_type_distribution=Counter(result) house_type_distribution # ... in a single code cell... 
# Pull every Building's builtFormType attribute directly via XPath
# and tally the values in one pass.
xpath_query_string='//a:Building/@builtFormType'
result=tree.getroot().xpath(xpath_query_string,namespaces=NS)
house_type_distribution=Counter(result)
house_type_distribution

# Count for one specific house type.
house_type_distribution['Mid terrace house or bungalow']

# ## Step 4: Output the graphs and tables

# ### Creating Table 2

# Row labels of Table 2; the index is named "Characteristic".
index=pd.Index(['House type','Construction type'],name='Characteristic')

# Build the Description column from the tallies.
# NOTE(review): the 'Construction type' row is still the placeholder 'test'.
df=pd.DataFrame(index=index,
                data={'Description':[f'Detached ({house_type_distribution["Detached house or bungalow"]}), semi-detached ({house_type_distribution["Semi detached house or bungalow"]}), mid-terrace ({house_type_distribution["Mid terrace house or bungalow"]})',
                                     'test'
                                     ]})
df

# Write the finished table out for the paper.
df.to_csv('Table2.csv')
Table2/Creating_Table_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: .jitterbug # language: python # name: .jitterbug # --- # + from datetime import datetime import numpy as np import pandas as pd import matplotlib.pyplot as plt # - raw = pd.read_csv("data/congestion-measurements/raw.csv") raw.head() mins = pd.read_csv("data/congestion-measurements/mins.csv") mins.head() inferences = pd.read_csv("data/congestion-inferences/jd_inferences.csv") inferences.head() def epoch2datetime(t): return datetime.utcfromtimestamp(t) # + starts = inferences["starts"].map(epoch2datetime) ends = inferences["ends"].map(epoch2datetime) viz_congestion_dt = [None]*(len(starts) + len(ends)) viz_congestion_dt[::2] = list(starts) viz_congestion_dt[1::2] = list(ends) viz_congestion_inferences = np.repeat(inferences["congestion"].values, 2) # + N = 3 fig, ax = plt.subplots(N, figsize=(18, 6), sharex=True) for i in range(N): ax[i].yaxis.grid(True, linestyle='-', color='#bababa', alpha=0.5, which='both') ax[i].xaxis.grid(True, linestyle='-', color='#bababa', alpha=0.5, which='both') ax[0].plot_date( raw["epoch"].map(epoch2datetime), raw["values"], xdate=True, ydate=False, label="RTT", tz="UTC", alpha=0.75, lw=4, linestyle='solid', marker='None', color="C0" ) ax[1].plot_date( mins["epoch"].map(epoch2datetime), mins["values"], xdate=True, ydate=False, label="min(RTT)", tz="UTC", alpha=0.75, lw=4, linestyle='solid', marker='None', color="C1" ) ax[2].plot_date( viz_congestion_dt, viz_congestion_inferences, xdate=True, ydate=False, label="Cong. Inferences", tz="UTC", alpha=0.75, lw=4, linestyle='solid', marker='None', color="red" ) for i in range(N): ax[i].tick_params(labelsize=20) ax[i].legend(loc='upper right', ncol=1, frameon=True, fontsize=20, handletextpad=0.1, columnspacing=0.1, handlelength=0.25) # -
example/jitter-dispersion-example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Funciones (cont.) # # # ## Alcance de una variable # # Cuando escribimos funciones, es importante tener claro cuál es el **alcance** de las variables definidas dentro de las funciones. Tomando por ejemplo la función definida en la última clase: # # + a = 1 b = 2 def eleva_potencia(a,b): ''' Eleva número "a" a la "b" potencia. Insumo (input): a: número b: número Producto (output): resultado: un número ''' c = 10 resultado = a**b return resultado # - # Enfocándonos en la variable ``` a ```: eleva_potencia(1,1) a # Vemos que existe, y tiene un **_alcance global_**. Eso quiere decir que fue asignada _fuera_ de una función y su tiempo de vida se dará mientras corra el programa. # Sin embargo, si queremos acceder a la variable c, definida dentro de la función: c # Nos sale un error!!. Eso es porque ```c``` solo tiene un **_alcance local_**. Es decir, está definida dentro de una función y solo existe cuando esta es llamada. Hacer esta distinción es muy importante porque: # # - 1. El código que está en el alcance global (como nuestra variable a) no puede llamar a código de alcance local (como nuestra variable c). # - 2. Sin embargo, nuestra código de alcance local puede hacer operaciones con el código de alcance global. # # - 3. El código definido dentro de un alcance local no puede usarse en el alcance local de otra función. # # - 4. Se puede usar el mismo nombre para una variable si están en diferentes alcances. # + ### Ejemplo del punto 2 z_var = 5 def eleva_potencia_a(a,b): ''' Eleva número "a" a la "b" potencia. 
Insumo (input): a: número b: número Producto (output): resultado: un número ''' c = 10 + z_var ##c = 15 resultado = a**b + c return resultado # - eleva_potencia_a(1,2) # + ### Ejemplo del punto 4 z_var = 5 def eleva_potencia_b(a,b): ''' Eleva número "a" a la "b" potencia. Insumo (input): a: número b: número Producto (output): resultado: un número ''' z_var = 11 c = 10 + z_var resultado = a**b + c return resultado # - eleva_potencia_a(1,2) eleva_potencia_b(1,2) # # Cómo importar funciones # # Cuando importamos funciones de otros scripts, su nueva denominación son **módulos** (por ello cuando descargamos paquetes, a lo que llamamos sus módulos son en realidad funciones dentro de ese paquete). # + # %load_ext autoreload # %autoreload 2 # - import ejemplo2 as e2 #modo 1: alias, e2.eleva_potencia_b(1,2) # + from ejemplo2 import * #Modo 2, * es un wildcard para seleccionar todos los módulos # - eleva_potencia_cc(1,2) from ejemplo2 import eleva_potencia_dd ej.eleva_potencia_new(4,2) def clasifica_cunamas(rural, pobreza, num_ccpp_urbano = False, centros_rural = False, desnutricion_cronica = False, es_juntos = False): ''' verifica si distrito es cunamas insumos: retorna: booleano ''' if rural: UMBRAL_POBREZA = 50 UMBRAL_RURAL = 50 DESNUTRICION_CRONICA = 30 es_cunamas = ((pobreza >= UMBRAL_POBREZA) and (centros_rural >= UMBRAL_RURAL) and \ (desnutricion_cronica >= DESNUTRICION_CRONICA) \ and es_juntos) else: UMBRAL_POBREZA = 19.1 CCPP_URBANO = 1 es_cunamas =((pobreza >= UMBRAL_POBREZA) and (num_ccpp_urbano >= CCPP_URBANO)) return es_cunamas # + #Ejemplo urbano rural = False pobreza = 30 num_ccpp_urbano = 3 clasifica_cunamas(rural, pobreza, num_ccpp_urbano) # + #Ejemplo rural rural = True pobreza = 60 centros_rural = 51 desnutricion_cronica = 40 es_juntos = True clasifica(rural, pobreza, False ,centros_rural, desnutricion_cronica, es_juntos) # - ## Ejemplo del trio pitagórico set_ = range(1,25) for a in set_: for b in set_: for c in set_: if a**2 + b**2 == c**2: print(a,b,c) 
# + ### How this would look as a function.
def trio_pit(min_, max_):
    """Return all Pythagorean triples (a, b, c) with each component in range(min_, max_).

    Instead of the original third nested loop over candidate hypotenuses
    (O(n^3)), c is computed directly from a and b with math.isqrt (O(n^2)).
    The result list and its (a, b)-major ordering are identical to the
    triple-loop version.  Assumes min_ >= 0 (side lengths are non-negative).
    """
    from math import isqrt  # local import keeps the notebook cell self-contained

    set_values = range(min_, max_)  # what we chose to parameterize
    lst_trio = []
    for a in set_values:
        for b in set_values:
            sq = a**2 + b**2
            c = isqrt(sq)
            # c must be an exact integer root and lie in the same range the
            # original inner loop iterated over.
            if c*c == sq and min_ <= c < max_:
                lst_trio.append((a, b, c))
    return lst_trio
# +
#trio_pit(1, 50)
# +
ingresos = 100
juegos_por_dia = 3

def juegos_switch(ingresos, juegos_por_dia):
    """Print how many Switch games a budget buys.

    ingresos: total budget in soles; juegos_por_dia: games bought per day.
    Spends 7 soles per game until the budget is exhausted, then reports the
    number of games bought, the days elapsed, and the total spent.
    """
    gastos = 0
    precio_juegos_switch = 7  # price per game, in soles
    juegos_que_compre = 0
    dias = 0
    while gastos < ingresos:
        gastos += precio_juegos_switch * juegos_por_dia
        juegos_que_compre += juegos_por_dia
        dias += 1
    print(f'Me alcanzan para {juegos_que_compre} juegos, en {dias} días y me gasté {gastos} soles')
# -

juegos_switch(2000, 5)
Semana 5/Funciones (cont.).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tf12 # language: python # name: tf12 # --- # + import tensorflow as tf tf.enable_eager_execution() from tensorflow.keras.layers import Lambda, Input, Embedding, Dense, TimeDistributed, Dropout, BatchNormalization, Add, SpatialDropout1D from tensorflow.keras.layers import CuDNNLSTM as LSTM vocab_size=30000 inp=Input(batch_shape=(1,1,),dtype=tf.int64,name="input_sent") emb_layer=Embedding(input_dim=vocab_size,output_dim=1500,embeddings_initializer=tf.keras.initializers.Constant(0.01)) emb=emb_layer(inp) emb_do=SpatialDropout1D(0.5)(emb) emb_drop_n=BatchNormalization()(emb_do) rnn1=LSTM(1500,return_sequences=True,stateful=True)(emb_drop_n) rnn1_n=BatchNormalization()(rnn1) rnn2=LSTM(1500,return_sequences=True,stateful=True)(rnn1_n) rnn2_n=BatchNormalization()(rnn2) rnn3=LSTM(1500,return_sequences=True,stateful=True)(rnn2_n) rnn3_n=BatchNormalization()(rnn3) proj_weights=TimeDistributed(Dense(1500))(Add()([rnn1_n,rnn2_n,rnn3_n])) proj_weights_gelu=Lambda(lambda x: tf.multiply(x,tf.nn.sigmoid(tf.scalar_mul(1.702,x))))(proj_weights) dec=Dense(vocab_size,activation="softmax",name="decision") dec_td=TimeDistributed(dec)(proj_weights_gelu) mod=tf.keras.Model(inputs=[inp],outputs=[dec_td]) # - mod.load_weights("/home/ginter/rnnlm_models/epoch.2019-03-24-00-00.00020.last.rnnlm") import data dp=data.SubwordDataPipeline("spiece_vocab.sp") dp.subword_model.IdToPiece(2) def reset_model(m): lstms=[l for l in m.layers if isinstance(l,tf.keras.layers.CuDNNLSTM)] for l in lstms: l.reset_states() # + import numpy as np #winner=tf.argmax(res,axis=-1) #print(winner[0][0].numpy()) #dp.subword_model.IdToPiece(int(winner)) def sample(preds, temperature=1.0): # helper function to sample an index from a probability array preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = 
np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) def prewarm(txt,mod,dp): reset_model(mod) ids=dp.subword_model.EncodeAsIds(txt) for i in ids: inp=np.asarray([[i]],dtype=np.int64) inp_t=tf.convert_to_tensor(inp,tf.int64) res=mod(inp_t) winner=tf.argmax(res,axis=-1) spiece_id=winner[0][0].numpy() sw=dp.subword_model.IdToPiece(int(spiece_id)) print("Prewarm:",sw) winner=tf.argmax(res,axis=-1) spiece_id=winner[0][0].numpy() return int(spiece_id) #prewarm_text="Meillä on siis pieni ongelma. Tai no oikeastaan aika suuri sellainen. 13-vuotias poikani haluaa nimittäin viikonloppuna mennä kaverinsa ja tämän perheen kanssa Ruotsin risteilylle. Kaveri siis itse ehdotti tätä ja poika haluaa todellakin mennä. Kaikki olisi minullekin täysin OK, jos poikani ei sattuisi olemaan" #prewarm_text="Helsingin alla risteilee jopa yli satavuotisia vesiputkia. Ikä ei kuitenkaan yksin määritä putken kuntoa, ja siksi Helsingissäkään ei pystytä tekemään riskiarvioita vain sen perusteella" prewarm_text="Kaupunkien tarpeet ovat Suomessa jääneet lähes tyystin sivuun. Viime vuosina on kuitenkin nähty, että Helsingissä ja sen naapureissa Espoossa ja Vantaalla vuokrat nousevat ja asumisen väljyys" #prewarm_text="Vantaan terveysasemille hankittiin uusi puhelinpalvelu: soittajat jonottavat jopa 45 minuuttia" #prewarm_text="Olipa kerran pieni prinsessa" #prewarm_text="En mä vaan tiiä" start=prewarm(prewarm_text,mod,dp) lst=[] for _ in range(100): inp=np.asarray([[start]],dtype=np.int64) inp_t=tf.convert_to_tensor(inp,tf.int64) res=mod(inp_t) probs=tf.squeeze(res).numpy() winner=sample(probs,0.4) #winner=np.random.choice(a=np.arange(probs.shape[0]),p=probs) #print("argmax:",tf.argmax(res,axis=-1)) #print("winner:",winner) spiece_id=winner sw=dp.subword_model.IdToPiece(int(spiece_id)) lst.append(sw) start=spiece_id print("Final output:") dp.subword_model.DecodePieces(lst)
decoder_experiments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Auto-regression
# An auto-regressive, **AR(1)**, process $ X_{t} = \rho X_{t-1} + \epsilon_{t} $ is not always stationary and may contain a unit root.
#
# For an AR(1) process to be stationary we need $ |\rho| < 1 $. With a unit root, $ \rho = 1 $, the process is the random walk:
#
# \begin{align}
# X_{t} &= X_{t-1} + \epsilon_{t} \\
# &= X_{t-2} + \epsilon_{t-1} + \epsilon_{t} \\
# &= X_{0} + \sum_{i=0}^{t-1} \epsilon_{t-i}
# \end{align}
# for $ \epsilon_{t} \sim iid(0,1) $ where $ X_{0} $ is a constant.
#
# Further attributes are as follows:
#
# \begin{align}
# E(X_{t}) &= E(X_{0}) + E \sum_{i=0}^{t-1} \epsilon_{t-i} \\
# &= E(X_{0})
# \end{align}
#
# since $ \epsilon_{t} \sim iid(0,1) $ has zero mean, $ E(\epsilon_{t}) = 0 $ and $ Var(\epsilon_{t}) = \sigma^2 $,
# so the random noise contributes nothing to the mean.
# \begin{align}
# Var(X_{t}) &= \sum_{i=0}^{t-1} Var ( \epsilon_{t-i}) \\
# &= t \sigma^2 \\
# &= f(t)
# \end{align}
# since $ Var(\epsilon_{t}) = \sigma^2 $ and the $ \epsilon_{t} $ are independent. Therefore, the variance is a function of time, so it is non-stationary.
#
# Therefore a random walk is a non-stationary time series: its variance is a function of time.
# There is no covariance contribution from the $ \epsilon_{t} $ cross terms, since the $ \epsilon_{t} $ are
# independent of one another.
# \begin{align}
# Cov(X_{t}, X_{t+h}) &= Cov(X_{t}, X_{t} + \sum_{i=0}^{h-1} \epsilon_{t+h-i} ) \\
# &= Cov(X_{t}, X_{t}) \\
# &= Var(X_{t})
# \end{align}
# given $ X_{t+h} = X_{t} + \sum_{i=0}^{h-1} \epsilon_{t+h-i} $, and since the $ \epsilon_{t} $ are independent, $ Cov(X_{t}, \epsilon_{t_{k}}) = 0 $.
# Therefore, a random walk is non-stationary, since the variance
# increases with time, and the covariance also increases with time.
src/jupyter-notebooks/.ipynb_checkpoints/Auto-Regression-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Regexs # # Up until now, to search in text we have used string methods find, startswith, endswith, etc. But sometimes you need more power. # # Regular expressions are their own little language that allows you to search through text and find matches with incredibly complex patterns. # # A regular expression, also referred to as "regex" or "regexp", provides a concise and flexible means for matching strings of text, such as particular characters, words, or patterns of characters. # # To use regular you need to import python's regex library `re` # https://docs.python.org/2/library/re.html import re # + # To run the examples we are going to use some of the logs from the # django project, a web framework for python django_logs = '''commit 722344ee59fb89ea2cd5b906d61b35f76579de4e Author: <NAME> <<EMAIL>> Date: Thu May 19 09:31:49 2016 -0400 Refs #24067 -- Fixed contenttypes rename tests failures on Oracle. Broke the initial migration in two to work around #25530 and added 'django.contrib.auth' to the available_apps to make sure its tables are also flushed as Oracle doesn't implement cascade deletion in sql_flush(). Thanks Tim for the report. commit 9fed4ec418a4e391a3af8790137ab147efaf17c2 Author: <NAME> <<EMAIL>> Date: Sat May 21 13:18:22 2016 -0400 Removed an obsolete comment about a fixed ticket. commit 94486fb005e878d629595942679ba6d23401bc22 Author: <NAME> <<EMAIL>> Date: Sat May 21 13:20:40 2016 +0200 Revert "Disable patch coverage checks" Mistakenly pushed to django/django instead of another repo This reverts commit 6dde884c01156e36681aa51a5e0de4efa9575cfd. 
commit 6dde884c01156e36681aa51a5e0de4efa9575cfd Author: <NAME> <<EMAIL>> Date: Sat May 21 13:18:18 2016 +0200 Disable patch coverage checks commit 46a38307c245ab7ed0b4d5d5ebbaf523a81e3b75 Author: <NAME> <<EMAIL>> Date: Fri May 20 10:50:51 2016 -0400 Removed versionadded/changed annotations for 1.9. commit 1915a7e5c56d996b0e98decf8798c7f47ff04e76 Author: <NAME> <<EMAIL>> Date: Fri May 20 09:18:55 2016 -0400 Increased the default PBKDF2 iterations. commit 97c3dfe12e095005dad9e6750ad5c5a54eee8721 Author: <NAME> <<EMAIL>> Date: Thu May 19 22:28:24 2016 -0400 Added stub 1.11 release notes. commit 8df083a3ce21ca73ff77d3844a578f3da3ae78d7 Author: <NAME> <<EMAIL>> Date: Thu May 19 22:20:21 2016 -0400 Bumped version; master is now 1.11 pre-alpha.''' # - # ## Searching # # The simplest thing you can do with regexs in python is search through text to see if there is a match. To do this you use the methods `search` or `match`. `match` only checks if it matches at the beginning of the string and `search` check the whole string. # # re.match(pattern, string) # re.search(pattern, string) print(re.match('a', 'abcde')) print(re.match('c', 'abcde')) print(re.search('a', 'abcde')) print(re.search('c', 'abcde')) print(re.match('version', django_logs)) print(re.search('version', django_logs)) if re.search('commit', django_logs): print("Someone has been doing work.") # ### TRY IT # Search for the word May in the django logs # # Special Characters # So far we can't do anything that you couldn't do with find, but don't worry. Regexs have many special characters to allow you to look for thing like the beginning of a word, whitespace or classes of characters. # # You include the character in the pattern. # # * ^ Matches the beginning of a line # * $ Matches the end of the line # * . Matches any character # * \s Matches whitespace # * \S Matches any non-whitespace character # * \* Repeats a character zero or more times # * \*? 
Repeats a character zero or more times (non-greedy) # * \+ Repeats a character one or more times # * +? Repeats a character one or more times (non-greedy) # * ? Repeats a character 0 or one time # * [aeiou] Matches a single character in the listed set # * [^XYZ] Matches a single character not in the listed set # * [a-z0-9] The set of characters can include a range # * {10} Specifics a match the preceding character(s) {num} number or times # * \d Matches any digit # * \b Matches a word boundary # # # **Hint** if you want to match the literal character (like $) as opposed to its special meaning, you would escape it with a `\` # + # Start simple, match any character 2 times print(re.search('..', django_logs)) # just to prove it works print(re.search('..', 'aa')) print(re.search('..', 'a')) print(re.search('..', '^%')) # - # to match a commit hash (numbers and letters a-f repeated) we can use a regex commit_pattern = '[0-9a-f]+' print(re.search(commit_pattern, django_logs)) # Let's match the time syntax time_pattern = '\d\d:\d\d:\d\d' time_pattern = '\d{2}:\d{2}:\d{2}' print(re.search(time_pattern, django_logs)) # ### TRY IT # Match anything between angled brackets < > # # Ignoring case # match and search both take an optional third argument that allows you to include flags. The most common flag is ignore case. # # re.search(pattern, string, re.IGNORECASE) # re.match(pattern, string, re.IGNORECASE) print(re.search('<NAME>', django_logs)) print(re.search('<NAME>', django_logs, re.IGNORECASE)) # ### TRY IT # search for 'django' in 'Both Django and Flask are very useful python frameworks' ignoring case # # Extracting Matches # Finding is only half the battle. You can also extract what you match. 
# # To get the string that your regex matched you can store the match object in a variable and run the group method on that # # m = re.search(pattern, string) # print m.group(0) # Let's match the time syntax time_pattern = '\d\d:\d\d:\d\d' m = re.search(time_pattern, django_logs) print(m.group(0)) # If you want to find all the matches, not just the first, you can use the findall method. It returns a list of all the matches # # re.findall(pattern, string) time_pattern = '\d\d:\d\d:\d\d' print(re.findall(time_pattern, django_logs)) # If you want to have only part of the match returned to you in findall, you can use parenthesis to set a capture point # # pattern = 'sads (part to capture) asdjklajsd' # print re.findall(pattern, string) # prints part to capture time_pattern = '(\d\d):\d\d:\d\d' hours = re.findall(time_pattern, django_logs) print(sorted(hours)) # + # you can capture more than one match time_pattern = '(\d\d):(\d\d):\d\d' times = re.findall(time_pattern, django_logs) print(times) # Unpacking the tuple in the first line for hours, mins in times: print("{} hr {} min".format(hours, mins)) # - # ### TRY IT # Capture the host of the email address (alphanumerics between @ and .com) **Hint** remember to escape the . in .com # ## Practice # There is a lot more that you can do, but it can feel overwhelming. The best way to learn is with practice. A great way to experiment is this website http://www.regexr.com/ You can put a section of text and see what regexs match patterns in your text. The site also has a cheatsheet for special characters. # + # Lets try some now # - # # Project: Doc Clerk # # Let's imagine you are working in a law office. You have millions of e-mails and other documents to go through to see what is relevant to the case. You are going to write a program to go though a file, check for key words (client's name, phone number, defendant's name) and print out the whole paragraph. It should not print any paragraphs with no relevant info. 
Paragraphs will be separated by an empty line. # # Your program should match the following items: # Gold E. Locks (case insensitive, E. or E) # Three bears or 3 bears # 571 209-4000 (with parens, dashes, or no spaces) # # # 0. Import re # 1. Initialize a variable called paragraph to be an empty list and a variable called found_match to false. # 2. Create a list of patterns to match and store in variable called patterns # 3. Read in test file 'evidence.txt'. # 4. For line in evidence: # a. check if it matches any of the patterns, if so set found_match to true # b. append line to paragraph # c. if line is empty (just a newline character) # - print paragraph if found_match is true **Hint** use the join method to print a string instead of a list # - reset paragraph to empty list and found_match to false # #
Lesson10_Regexs/RegularExpressions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.10 64-bit (''hra'': conda)' # name: python3 # --- # # Exploratory Data Analysis - Multivariate Analysis # Load the required packages import json import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from ipywidgets import widgets, interact from IPython.display import display # Load the data df = pd.read_csv('./../../../data/cleaned_data.csv') # Load lists of numerical and categorical columns from the static file with open('./../../../data/statics.json') as f: statics = json.load(f) categorical_columns = statics['categorical_columns'] numerical_columns = statics['numerical_columns'] # Separate out the dataframe intro numerical and categorical dataframe num_df = df[numerical_columns] cat_df = df[categorical_columns] # ## Correlation # For multivariate, analysis we will begin with correlation. It should be noted that correlation coefficients can only be calculated for numerical variables. It is meaningless to use it for categorical variables. The correlation coefficients can only tell us whether 2 variables are moving together or in opposite direction but in no way it can covey us the about the cause-effect relationship between variables. # Compute the correlation coefficients pearson_corr = num_df.corr() spearman_corr = num_df.corr(method='spearman') fig, ax = plt.subplots(figsize=(14, 12)) sns.heatmap(pearson_corr, annot=True, ax=ax) # From the above chart it can be noted that there are no variables with extreme negative correlations. # Total working years is highly correlated with age, monthly income, and years at company. It would be convenient for us to remove this variable but I would like to take this decision while feature selection. # Years at current company, years in current role. 
years since last promotion and years with current manager are all highly correlated with each other. # Pearson correlation gives us linear correlation while Spearman correlation gives us non-linear correation. Repeating the above activity with Spearman correlation gives - fig, ax = plt.subplots(figsize=(14, 12)) sns.heatmap(spearman_corr, annot=True, ax=ax) # The picture doesn't change. Hence it can safely concluded that features are only linearly correlated. # To understand the correlated features more closely, let us plot the pair plots for them. sub_df1 = num_df[['Age', 'TotalWorkingYears', 'MonthlyIncome', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion', 'YearsWithCurrManager']] sns.pairplot(sub_df1) # Some people start their job at a later stage, like in their 40s and 50s. But as such age and total working years do show positive corelation. Years at a company, years with curernt manager and years in current role all have high positive corellation amongst them. # ## Relationship with target variable # Let us gauge the available variables with respect to the attrition one by one. # We begin our analysis with the one of the most important factor about how satisifed an employee is in the company that is money. df['Attrition'].value_counts() # Divide the data based on attrition attr = df[df['Attrition'] == 'Yes'] nattr = df[df['Attrition'] == 'No'] # ### Numerical # + # Create interactive plots # Create function to respond on change def function_numerical_attrition(column): fig, ax = plt.subplots(figsize=(16, 8)) sns.histplot(df, x=column, hue='Attrition', stat='probability', ax=ax, common_norm=False) plt.show() interact(function_numerical_attrition, column=numerical_columns) # - # The probability that people will shift is much more higher for the people between age 20 to 30 than other age groups. People above 30 are very less likely to shift. 
Distance from home, hourly rate, monthly rate, percentage salary hike, number of times employee was trainined in the last year and years since last promotion does not seem to be significant factors for the decision to leave the company. Employees who exist in the lower bands of monthly salary are more likely to leave the company than their higher bands couterparts. People who have worked in less than or equal to 4 companies are less likely to shift than the people who have worked in more than 4 companies. People with less number of years of working experience and less number of years in the current company are more likely to shift. If people stay in current role for more number of years then the probability that they will leave becomes less then the probability to not leave. If people work under one manager for longer period of time then people are less likely to leave. # ### Categorical # We will repeat the above exercise with categorical variables. # + # Create interactive plots def function_categorical_attrition(column): fig, ax = plt.subplots(figsize=(18, 8)) sns.histplot(df, x=column, hue='Attrition', stat='probability', ax=ax, common_norm=False) plt.show() interact(function_categorical_attrition, column=categorical_columns) # - # People who travel frequently seems to be more likely to leave the company. People from Sales and Human Resources are more likely to shift. It becomes difficult for people with Masters and Doctorate to make a shift. Interestingly, the same condition holds true for people who just passed their college. The attrition is more with respect to people who studied marketing, technical degree and human resource, while the trend reverses for life sciences, medical and others. People who are satisfied with the company environment are less likely to leave. Females are less likely to make a shift than males. People who find themselves more involved in jobs are less likely to put their papers. Only people who are at lower level i.e. 
on level 1 in both the job hierarchy and the stock-option levels are more likely to leave. Employees who are single and do overtime are more likely to leave the job. Performance rating and work-life balance do not appear to affect the decision of the people.
hrbook/files/eda_multivariate.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.7.1 # language: julia # name: julia # --- # + colab={"base_uri": "https://localhost:8080/"} id="axKIMsUiwbAy" outputId="043853c0-ef6c-49b4-cf3b-fdb97dac62c0" ## https://colab.research.google.com/github/ageron/julia_notebooks/blob/master/Julia_Colab_Notebook_Template.ipynb # %%shell set -e #---------------------------------------------------# JULIA_VERSION="1.7.1" # any version ≥ 0.7.0 JULIA_PACKAGES="IJulia BenchmarkTools Plots" JULIA_PACKAGES_IF_GPU="CUDA" # or CuArrays for older Julia versions JULIA_NUM_THREADS=2 #---------------------------------------------------# if [ -n "$COLAB_GPU" ] && [ -z `which julia` ]; then # Install Julia JULIA_VER=`cut -d '.' -f -2 <<< "$JULIA_VERSION"` echo "Installing Julia $JULIA_VERSION on the current Colab Runtime..." BASE_URL="https://julialang-s3.julialang.org/bin/linux/x64" URL="$BASE_URL/$JULIA_VER/julia-$JULIA_VERSION-linux-x86_64.tar.gz" wget -nv $URL -O /tmp/julia.tar.gz # -nv means "not verbose" tar -x -f /tmp/julia.tar.gz -C /usr/local --strip-components 1 rm /tmp/julia.tar.gz # Install Packages if [ "$COLAB_GPU" = "1" ]; then JULIA_PACKAGES="$JULIA_PACKAGES $JULIA_PACKAGES_IF_GPU" fi for PKG in `echo $JULIA_PACKAGES`; do echo "Installing Julia package $PKG..." julia -e 'using Pkg; pkg"add '$PKG'; precompile;"' &> /dev/null done # Install kernel and rename it to "julia" echo "Installing IJulia kernel..." julia -e 'using IJulia; IJulia.installkernel("julia", env=Dict( "JULIA_NUM_THREADS"=>"'"$JULIA_NUM_THREADS"'"))' KERNEL_DIR=`julia -e "using IJulia; print(IJulia.kerneldir())"` KERNEL_NAME=`ls -d "$KERNEL_DIR"/julia*` mv -f $KERNEL_NAME "$KERNEL_DIR"/julia echo '' echo "Successfully installed `julia -v`!" 
echo "Please reload this page (press Ctrl+R, ⌘+R, or the F5 key) then" echo "jump to the 'Checking the Installation' section." fi # + colab={"base_uri": "https://localhost:8080/"} id="b463ba94-e940-47ae-a726-83b2715b7572" outputId="0f55178c-e80e-45ea-a0f6-fb78de622a2d" versioninfo() # + id="42b07027-823e-41d2-bcee-683c229d2c9b" colab={"base_uri": "https://localhost:8080/"} outputId="dd83866d-0140-4d4e-9035-aa42fa0b50a0" import Pkg; Pkg.add("Arpack") import Pkg; Pkg.add("ArgParse") import Pkg; Pkg.add("LinearMaps") using LinearAlgebra using Arpack using SparseArrays using LinearMaps #using ArgParse # + id="27625538-c1a2-44bc-a9b4-99d598288822" colab={"base_uri": "https://localhost:8080/"} outputId="7dc6b89f-600e-4b46-b52b-f89afe85253b" function get_combination(NOS,NOD) combination = zeros(Int64,(NOS,NOD)) @inbounds for j in 1:NOD for i in 1:NOS combination[i,j] = f_combination(i,j) end end return combination end # + id="67ba02e4-9b08-4a5a-b41c-f873b192c702" colab={"base_uri": "https://localhost:8080/"} outputId="7fb7d256-3312-4960-a1ea-234f83b0fdda" function f_combination(n,k) if n<k return 0 end nCk = 1 for i in 1:k nCk = nCk * (n-k+i) nCk = div(nCk,i) end return nCk end # + id="837a9b10-83d9-45c3-b2b0-ea8f2a669208" colab={"base_uri": "https://localhost:8080/"} outputId="fea560c4-89ec-4a12-de9c-0353a41216d8" function insertion_sort!(a,NOD) for i in 2:NOD j = i - 1 @inbounds temp = a[i] @inbounds while a[j] > temp a[j+1] = a[j] j = j - 1 if j==0 break end end @inbounds a[j+1] = temp end return 0 end # + id="43db410a-d867-4075-983a-3ac247bb4a66" colab={"base_uri": "https://localhost:8080/"} outputId="833d52b5-01a0-4eae-d9e7-a0dc65b2feb6" function inv_list(ni,NOD,combination) @inbounds val_inv_list = ni[1] @inbounds for i in 2:NOD val_inv_list = val_inv_list + combination[ni[i]-1,i] end return val_inv_list end # + id="9837593f-7e38-465d-a244-c421c185125d" colab={"base_uri": "https://localhost:8080/"} outputId="3e33c61f-10fd-4350-c889-fc1354dac058" function 
qsort_w_order!(a,o,first,last)
    # Recursive quicksort of a[first:last] in ascending order; the companion
    # array `o` receives the identical permutation so that order information
    # follows the sorted keys. Pivot = middle element of the current range.
    @inbounds x = a[div(first+last,2)]
    i = first
    j = last
    while true
        # advance the two cursors until an out-of-place pair is found
        @inbounds while a[i] < x i = i + 1 end
        @inbounds while x < a[j] j = j - 1 end
        if i >= j break end
        # swap the keys and their paired order entries in lockstep
        @inbounds t8 = a[i]; @inbounds a[i] = a[j]; @inbounds a[j] = t8
        @inbounds t = o[i]; @inbounds o[i] = o[j]; @inbounds o[j] = t
        i = i + 1
        j = j - 1
    end
    # recurse into both partitions
    if first < i - 1 qsort_w_order!(a,o,first,i-1) end
    if j + 1 < last qsort_w_order!(a,o,j+1,last) end
    return 0
end

# + id="97d44728-19cb-4433-964d-72c019a70a14" colab={"base_uri": "https://localhost:8080/"} outputId="ee1d295a-6639-4ade-fc2b-0d55a2077482"
## output "ni" is returned
# Decode the Hilbert-space index `t` into the ascending list `ni` (length
# NOD) of down-spin site positions, using the precomputed binomial table
# `combination` (combinadic decoding). Allocates and returns `ni`.
function list_fly(t,NOD,NOS,combination)
    ni = zeros(Int64,NOD)
    s = t
    j = NOS - 1
    for i in NOD:-1:2
        b, j0 = binary_search(s,combination[:,i],i,j)
        j = j0 - 1
        ni[i] = j0
        s = s - combination[j,i]
    end
    ni[1] = s
    return ni
end

# + id="2d1639fe-2d68-4d56-a65c-c65d... " colab={"base_uri": "https://localhost:8080/"} outputId="6cb1ebda-0bdc-4115-a0f7-a7235d84588d"
## output "ni" is in arguments
# Allocation-free variant of `list_fly`: writes the decoded down-spin
# positions into the caller-supplied buffer `ni` (intended for hot loops).
function list_fly_2!(t,NOD,NOS,combination,ni)
    @inbounds fill!(ni,0)
    s = t
    j = NOS - 1
    @inbounds for i in NOD:-1:2
        b, j0 = binary_search(s,combination[:,i],i,j)
        j = j0 - 1
        ni[i] = j0
        s = s - combination[j,i]
    end
    @inbounds ni[1] = s
    return 0
end

# + id="296f0df9-a11c-4b55-a4d4-31fdfe44ba71" colab={"base_uri": "https://localhost:8080/"} outputId="d1a198a1-fc8f-4dc4-9d4f-4fad8bc0690f"
# Binary search in the (assumed sorted) slice list_s[ls:le].
# Returns (b, bmin): b is the index of an exact match of `s`, or -1 when
# absent; bmin is the final lower bound. NOTE(review): callers (list_fly)
# use bmin even when b == -1, i.e. as an insertion-point-like index —
# confirm that invariant if modifying.
function binary_search(s,list_s,ls,le)
    bmin = ls; bmax = le
    while true
        b = bmin + div(bmax-bmin,2)
        @inbounds if s < list_s[b]
            bmax = b - 1
        elseif list_s[b] < s
            bmin = b + 1
        else
            # exact match found
            bmin = b
            return b, bmin
        end
        if bmin > bmax
            # search window exhausted: signal "not found" with b = -1
            b = -1
            return b, bmin
        end
    end
    # NOTE(review): unreachable — the `while true` loop above always returns.
    return b, bmin
end

# + id="fc526de5-ef32-4c52-9fd3-8f8906a7ff95" colab={"base_uri": "https://localhost:8080/"} outputId="cf07e957-4aca-4ca9-a249-2930d64431dc"
# Render a spin configuration as a 0/1 string of length NOS: "1" at sites
# listed in `st_list` (down spins), "0" elsewhere (up spins). As the name
# says, `st_list` is assumed to contain no duplicate sites.
function list_to_state_no_duplication(st_list,NOS)
    string01 = ""
    for i in 1:NOS
        if i in st_list
            string01 = string01 * "1" # down
        else
            string01 = string01 * "0" # up
        end
    end
    return string01
end # + id="445f2a67-fbc1-4489-8a8b-a5a212b4b81b" colab={"base_uri": "https://localhost:8080/"} outputId="cfb20330-b398-43df-a344-17e8bbf6a3ff" function list_to_state(st_list,NOS) list01 = zeros(Int64,NOS) for i in st_list list01[i] += 1 end list01 = mod.(list01,2) string01 = join(list01) return string01 end # + id="c5a4a257-64ad-4eea-b996-efd20e4bf6e9" colab={"base_uri": "https://localhost:8080/"} outputId="ec89a712-3e9d-4f1a-ab3a-28c86477be0b" ## output "nd" is returned function j_flip_ni(i,j,n,NOD) nd = ones(Int64,NOD) kr = NOD for _kr in NOD:-1:1 if j < n[_kr] kr = _kr continue elseif j > n[_kr] kr = _kr break else fill!(nd,0) kr = _kr break end end if nd[NOD] == 1 # S+_i S-_j kl = 1 for _kl in 1:kr if i == n[_kl] kl = _kl break end kl = _kl+1 end nd[kl:kr-1] = n[kl+1:kr] nd[kr] = j else # S-_i S+_j kl = 1 for _kl in 1:kr if i < n[_kl] kl = _kl break end kl = _kl+1 end nd[kl] = i nd[kl+1:kr] = n[kl:kr-1] end nd[1:kl-1] = n[1:kl-1] nd[kr+1:NOD] = n[kr+1:NOD] return nd end # + id="5a12e2a9-0a1e-45e4-b9e8-3aff3f9ea120" colab={"base_uri": "https://localhost:8080/"} outputId="7bf4b648-317b-4ac9-e13c-16bf68cd475d" ## output "nd" is in arguments function j_flip_ni_2!(i,j,n,NOD,nd) @inbounds fill!(nd,1) kr = NOD @inbounds for _kr in NOD:-1:1 @inbounds if j < n[_kr] kr = _kr continue elseif j > n[_kr] kr = _kr break else @inbounds fill!(nd,0) kr = _kr break end end @inbounds if nd[NOD] == 1 # S+_i S-_j kl = 1 for _kl in 1:kr @inbounds if i == n[_kl] kl = _kl break end kl = _kl+1 end @inbounds nd[kl:kr-1] = n[kl+1:kr] @inbounds nd[kr] = j else # S-_i S+_j kl = 1 for _kl in 1:kr @inbounds if i < n[_kl] kl = _kl break end kl = _kl+1 end @inbounds nd[kl] = i @inbounds nd[kl+1:kr] = n[kl:kr-1] end @inbounds nd[1:kl-1] = n[1:kl-1] @inbounds nd[kr+1:NOD] = n[kr+1:NOD] return 0 end # + id="4c5116c9-8de9-4dc6-ac63-e574257fca7c" colab={"base_uri": "https://localhost:8080/"} outputId="95d0c796-12b7-4ec4-8cff-8e94a94a47aa" ## output "Ham" is returned function 
make_full_hamiltonian(lv,combination,NOD,NOxxz,p_xxz,sJint,NOS) Ham = zeros(Float64,(lv,lv)) for i in 1:lv st_list = list_fly(i,NOD,NOS,combination) for j in 1:NOxxz f1 = p_xxz[1,j] in st_list f2 = p_xxz[2,j] in st_list if xor(f1,f2) Ham[i,i] = Ham[i,i] - sJint[j,2] ni = j_flip_ni(p_xxz[1,j],p_xxz[2,j],st_list,NOD) id = inv_list(ni,NOD,combination) Ham[i,id] = Ham[i,id] + sJint[j,1] else Ham[i,i] = Ham[i,i] + sJint[j,2] end end end return Ham end # + id="22123812-4b61-48fe-b7d5-d0ebc20e0d7f" colab={"base_uri": "https://localhost:8080/"} outputId="304b7cf6-7266-4892-8f64-187939c725e8" ## output "Ham" is in arguments function make_full_hamiltonian_2!(lv,Ham,combination,NOD,NOxxz,p_xxz,sJint,NOS) st_list = zeros(Int64,NOD) ni = zeros(Int64,NOD) @inbounds for i in 1:lv list_fly_2!(i,NOD,NOS,combination,st_list) @inbounds for j in 1:NOxxz f1 = p_xxz[1,j] in st_list f2 = p_xxz[2,j] in st_list @inbounds if xor(f1,f2) Ham[i,i] = Ham[i,i] - sJint[j,2] j_flip_ni_2!(p_xxz[1,j],p_xxz[2,j],st_list,NOD,ni) id = inv_list(ni,NOD,combination) Ham[i,id] = Ham[i,id] + sJint[j,1] else Ham[i,i] = Ham[i,i] + sJint[j,2] end end end return Ham end # + id="bdc27f5d-2c57-48a2-92cb-2d0a5b1ef0ef" colab={"base_uri": "https://localhost:8080/"} outputId="c77554bb-826f-4111-e6a9-7f481ebb0d83" function make_parameters_1d(NOS,NOxxz) p_xxz = zeros(Int64,(2,NOxxz)) Jint = zeros(Float64,(NOxxz,2)) # Jint[NOxxz,1] --> Jint_x, Jint[NOxxz,2] --> Jint_z sJint = zeros(Float64,(NOxxz,2)) # sJint[NOxxz,1] --> sJint_x, sJint[NOxxz,2] --> sJint_z @inbounds for i in 1:NOS p_xxz[1,i] = mod(i-1,NOS)+1 p_xxz[2,i] = mod(i,NOS)+1 @inbounds if p_xxz[1,i] > p_xxz[2,i] # assume i<j for pair (i,j) tmp = p_xxz[1,i] p_xxz[1,i] = p_xxz[2,i] p_xxz[2,i] = tmp end end @inbounds fill!(Jint,1.0) @inbounds sJint[:,1] = 0.5 .* Jint[:,1] @inbounds sJint[:,2] = 0.25 .* Jint[:,2] return p_xxz, Jint, sJint end # + id="1bc2c9f1-7fca-4140-ad14-d0b15fb4e25d" colab={"base_uri": "https://localhost:8080/"} 
outputId="ed94dc46-a485-4e1f-b8a8-1066cf9b6d8b" ## memory allocation within get_vec function ham_to_vec_wave_vector!(lv,combination,NOD,NOxxz,p_xxz,sJint,NOS) function get_vec!(v0::AbstractVector,v1::AbstractVector) ## v0: new output, v1: old input length(v0) == lv || throw(DimensionMismatch()) length(v1) == lv || throw(DimensionMismatch()) for i = 1:lv v0[i] = 0.0 + 0.0im st_list = list_fly(i,NOD,NOS,combination) for j in 1:NOxxz f1 = p_xxz[1,j] in st_list f2 = p_xxz[2,j] in st_list if xor(f1,f2) v0[i] = v0[i] - sJint[j,2] * v1[i] ni = j_flip_ni(p_xxz[1,j],p_xxz[2,j],st_list,NOD) id = inv_list(ni,NOD,combination) v0[i] = v0[i] + sJint[j,1] * v1[id] else v0[i] = v0[i] + sJint[j,2] * v1[i] end end end return v0 end return (v0,v1) -> get_vec!(v0,v1) end # + id="84e86ed0-8d7f-4d00-8fc5-1ac097c37fb4" colab={"base_uri": "https://localhost:8080/"} outputId="a6fc0673-f99d-49af-bdd2-f61433c30ff2" ## memory allocation outside get_vec function ham_to_vec_wave_vector_2!(lv,combination,NOD,NOxxz,p_xxz,sJint,NOS) st_list = zeros(Int64,NOD) ni = zeros(Int64,NOD) function get_vec_2!(v0::AbstractVector,v1::AbstractVector) ## v0: new output, v1: old input length(v0) == lv || throw(DimensionMismatch()) length(v1) == lv || throw(DimensionMismatch()) @inbounds for i = 1:lv v0[i] = 0.0 + 0.0im list_fly_2!(i,NOD,NOS,combination,st_list) @inbounds for j in 1:NOxxz f1 = p_xxz[1,j] in st_list f2 = p_xxz[2,j] in st_list @inbounds if xor(f1,f2) v0[i] = v0[i] - sJint[j,2] * v1[i] j_flip_ni_2!(p_xxz[1,j],p_xxz[2,j],st_list,NOD,ni) id = inv_list(ni,NOD,combination) v0[i] = v0[i] + sJint[j,1] * v1[id] else v0[i] = v0[i] + sJint[j,2] * v1[i] end end end return v0 end return (v0,v1) -> get_vec_2!(v0,v1) end # + id="4bce2b43-ab52-46cc-b4e4-1eea8820e209" colab={"base_uri": "https://localhost:8080/"} outputId="0e211984-8e3a-461b-c643-61ba7df99d36" function calculate_1d(NOS,NOD) #NOS = 4 # number of sites #NOD = 2 # number of down spins NOxxz = NOS # number of XXZ interaction combination = 
get_combination(NOS,NOD) THS = combination[NOS,NOD] # total Hilbert space println("# NOS,NOD") println(NOS," ",NOD) #display(combination) println("# total Hilbert space") println(THS) p_xxz, Jint, sJint = make_parameters_1d(NOS,NOxxz) #println(p_xxz) #println(Jint) #println(sJint) #println() get_vec_LM_2! = ham_to_vec_wave_vector_2!(THS,combination,NOD,NOxxz,p_xxz,sJint,NOS) Ham = LinearMap(get_vec_LM_2!,THS;ismutating=true,issymmetric=true) #println(Ham) #println() ene,vec = eigs(Ham,nev=5,which=:SR) println("# energies") println(ene) println() #println("# vectors") #for i in 1:length(ene) # println(i,vec[:,i]) #end #println() end # + id="8285830a-ac19-4e04-9bb3-771f7a8c628d" colab={"base_uri": "https://localhost:8080/"} outputId="1e9b2015-bb7a-462f-be78-f259487c8f2c" #for NOS in [4,8,16,32,64,128] # number of sites #for NOS in [4,8,16,32,64] # number of sites for NOS in [4,8,16,32] # number of sites for NOD in [1,2,3] # number of down spins calculate_1d(NOS,NOD) end end # + id="d1153f57-339e-4461-8175-c75305218d42"
spin_half/colab_julia_1d_heisenberg_20211206.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # # Demonstration of PET reconstruction with SIRF # This demonstration shows how to use OSEM and implement a # (simplistic) gradient-ascent algorithm using SIRF. # # Authors: <NAME> and <NAME> # First version: 8th of September 2016 # Second Version: 17th of May 2018 # # CCP PETMR Synergistic Image Reconstruction Framework (SIRF) # Copyright 2015 - 2017 Rutherford Appleton Laboratory STFC # Copyright 2015 - 2018 University College London. # # This is software developed for the Collaborative Computational # Project in Positron Emission Tomography and Magnetic Resonance imaging # (http://www.ccppetmr.ac.uk/). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # Initial set-up #%% make sure figures appears inline and animations works # %matplotlib notebook #%% Initial imports etc import numpy from numpy.linalg import norm import matplotlib.pyplot as plt import matplotlib.animation as animation import os import sys import shutil #import scipy #from scipy import optimize import sirf.STIR as pet from sirf.Utilities import examples_data_path # + #%% some handy function definitions def imshow(image, limits, title=''): """Usage: imshow(image, [min,max], title)""" plt.title(title) bitmap = plt.imshow(image) if len(limits)==0: limits = [image.min(), image.max()] plt.clim(limits[0], limits[1]) plt.colorbar(shrink=.6) plt.axis('off') return bitmap def make_positive(image_array): """truncate any negatives to zero""" image_array[image_array<0] = 0 return image_array def make_cylindrical_FOV(image): """truncate to cylindrical FOV""" filter = pet.TruncateToCylinderProcessor() filter.apply(image) # - #%% go to directory with input files # adapt this path to your situation (or start everything in the relevant directory) os.chdir(examples_data_path('PET')) #%% copy files to working folder and change directory to where the output files are shutil.rmtree('working_folder/thorax_single_slice',True) shutil.copytree('thorax_single_slice','working_folder/thorax_single_slice') os.chdir('working_folder/thorax_single_slice') # ## We will first create some simulated data from ground-truth images # # #%% Read in images image = pet.ImageData('emission.hv') image_array = image.as_array()*.05 image.fill(image_array) mu_map = pet.ImageData('attenuation.hv') mu_map_array = mu_map.as_array() #%% bitmap display of images im_slice = image_array.shape[0]//2 plt.figure() #plt.subplot(1,2,1) imshow(image_array[im_slice,:,:,], [], 'emission image'); #plt.subplot(1,2,2) #imshow(mu_map_array[im_slice,:,:,], [], 'attenuation image'); #%% save max for future displays cmax = image_array.max()*.6 #%% create acquisition model am = 
pet.AcquisitionModelUsingRayTracingMatrix() # we will increate the number of rays used for every Line-of-Response (LOR) as an example # (it is not required for the exercise of course) am.set_num_tangential_LORs(5) templ = pet.AcquisitionData('template_sinogram.hs') am.set_up(templ,image); #%% simulate some data using forward projection acquired_data=am.forward(image) acquisition_array = acquired_data.as_array() #%% Display bitmaps of a middle sinogram plt.figure() imshow(acquisition_array[0,0,:,:,], [], 'Forward projection'); # # Reconstruction via a SIRF reconstruction class # While you can write your own reconstruction algorithm by using `AcquisitionModel` etc, we also # provide a few reconstruction clases. We will show how to use these here. # ## create the objective function # # In PET, the iterative algorithms in SIRF rely on an objective function (i.e. the function to maximise). # In this case, this is the Poisson log-likelihood (without any priors) #%% create objective function obj_fun = pet.make_Poisson_loglikelihood(acquired_data) # We could set acquisition model but the default (ray-tracing) is in this case ok # obj_fun.set_acquisition_model(am) # we could also add a prior, but we will not do that here (although the rest of the exercise would still work) #obj_fun.set_prior(prior) # ## create OSMAPOSL reconstructor # This implements the Ordered Subsets Maximum A-Posteriori One Step Late # Since we are not using a penalty, or prior in this example, it # defaults to using MLEM, but we will modify it to OSEM recon = pet.OSMAPOSLReconstructor() recon.set_objective_function(obj_fun) recon.set_num_subsets(4) num_iters=10 recon.set_num_subiterations(num_iters) # ## Use this reconstructor! 
#%% create initial image # we could just use a uniform image but here we will create a disk with a different # initial value (this will help the display later on) init_image=image.clone() init_image.fill(cmax / 4) make_cylindrical_FOV(init_image) # display idata = init_image.as_array() im_slice = idata.shape[0] // 2 plt.figure() imshow(idata[im_slice,:,:],[0,cmax], 'initial image'); #%% reconstruct the image reconstructed_image = init_image.clone() # set up the reconstructor recon.set_up(reconstructed_image) # do actual recon recon.reconstruct(reconstructed_image) # + #%% bitmap display of images reconstructed_array = reconstructed_image.as_array() plt.figure() plt.subplot(1,2,1) imshow(image_array[im_slice,:,:,], [0,cmax*1.2],'emission image') plt.subplot(1,2,2) imshow(reconstructed_array[im_slice,:,:,], [0,cmax*1.2], 'reconstructed image'); # - # # ## For illustration, we do the same with noise in the data #%% Generate a noisy realisation of the data noisy_array=numpy.random.poisson(acquisition_array).astype('float64') print(' Maximum counts in the data: %d' % noisy_array.max()) # stuff into a new AcquisitionData object noisy_data = acquired_data.clone() noisy_data.fill(noisy_array); #%% Display bitmaps of the middle sinogram plt.figure() plt.subplot(1,2,1) imshow(acquisition_array[0,im_slice,:,:,], [0,acquisition_array.max()], 'original') plt.subplot(1,2,2) imshow(noisy_array[0,im_slice,:,:,], [0,acquisition_array.max()], 'noisy'); # + #%% reconstruct the noisy data obj_fun.set_acquisition_data(noisy_data) # We could save the data to file if we wanted to, but currently we don't. 
# recon.set_output_filename_prefix('reconstructedImage_noisydata') noisy_reconstructed_image = init_image.clone() recon.reconstruct(noisy_reconstructed_image) # + #%% bitmap display of images noisy_reconstructed_array = noisy_reconstructed_image.as_array() plt.figure() plt.subplot(1,2,1) imshow(reconstructed_array[im_slice,:,:,], [0,cmax*1.2], 'no noise') plt.subplot(1,2,2) imshow(noisy_reconstructed_array[im_slice,:,:,], [0,cmax*1.2], 'with noise'); # - # # Taking control of the iteration process # We will now show how to run each sub-iteration from in Python, as opposed to # letting the reconstructor do all sub-iterations at once. #%% run same reconstruction but saving images and objective function values every sub-iteration num_subiters = 64 # create an image object that will be updated during the iterations current_image = init_image.clone() # create an array to store the values of the objective function at every # sub-iteration (and fill in the first) osem_objective_function_values = [obj_fun.value(current_image)] # create an ndarray to store the images at every sub-iteration all_osem_images = numpy.ndarray(shape=(num_subiters + 1,) + idata.shape) all_osem_images[0,:,:,:] = current_image.as_array() # do the loop for i in range(1, num_subiters+1): recon.update(current_image) # store results obj_fun_value = obj_fun.value(current_image) osem_objective_function_values.append(obj_fun_value) all_osem_images[i,:,:,:] = current_image.as_array(); # ## Make some plots with these results #%% define a function for plotting images and the updates def plot_progress(all_images, title, subiterations = []): if len(subiterations) == 0: num_subiters = all_images[0].shape[0] - 1 subiterations = range(1, num_subiters + 1) num_rows = len(all_images) #plt.close('all') for i in subiterations: plt.figure(i) for r in range(num_rows): plt.subplot(num_rows,2,2 * r + 1) imshow(all_images[r][i,im_slice,:,:], [0,cmax], '%s at %d' % (title[r], i)) plt.subplot(num_rows,2,2*r+2) 
imshow(all_images[r][i,im_slice,:,:]-all_images[r][i - 1,im_slice,:,:],[-cmax*.1,cmax*.1], 'update') #plt.pause(.05) plt.show() #%% now call this function to see how we went along # note that in the notebook interface, this might create a box with a vertical slider subiterations = (1,2,4,8,16,32,64) plot_progress([all_osem_images], ['OSEM'],subiterations) #%% plot objective function values plt.figure() #plt.plot(subiterations, [ osem_objective_function_values[i] for i in subiterations]) plt.plot(osem_objective_function_values) plt.title('Objective function values') plt.xlabel('sub-iterations'); # The above plot seems to indicate that (OS)EM converges to a stable value of the # log-likelihood very quickly. However, as we've seen, the images are still changing. # # Convince yourself that the likelihood is still increasing (either by zooming into the figure, or by using `plt.ylim`). # We can compute some simple ROI values as well. Let's plot those. # + #%% ROI ROI_lesion = all_osem_images[:,(im_slice,), 65:70, 40:45] ROI_lung = all_osem_images[:,(im_slice,), 75:80, 45:50] ROI_mean_lesion = ROI_lesion.mean(axis=(1,2,3)) ROI_std_lesion = ROI_lesion.std(axis=(1,2,3)) ROI_mean_lung = ROI_lung.mean(axis=(1,2,3)) ROI_std_lung = ROI_lung.std(axis=(1,2,3)) plt.figure() #plt.hold('on') plt.subplot(1,2,1) plt.plot(ROI_mean_lesion,'k',label='lesion') plt.plot(ROI_mean_lung,'r',label='lung') plt.legend() plt.title('ROI mean') plt.xlabel('sub-iterations') plt.subplot(1,2,2) plt.plot(ROI_std_lesion, 'k',label='lesion') plt.plot(ROI_std_lung, 'r',label='lung') plt.legend() plt.title('ROI standard deviation') plt.xlabel('sub-iterations'); # - # The above plots indicate that the log-likelihood is not a very sensitive # measure of changes in the image. This an illustration that image reconstruction # is an ill-conditioned inverse problem. 
# # Implement gradient ascent and compare with OSEM # Here we will implement a simple version of Gradient Ascent using SIRF functions.We will use # the SIRF capability to return the gradient of the objective function directly. # # Gradient ascent (GA) works by updating the image in the direction of the gradient # # new_image = current_image + step_size * gradient # # Here we will use a fixed step-size and use "truncation" to enforce # non-negativity of the image # + #%% Define some variables to perform gradient ascent for a few (sub)iterations num_subiters = 32 # relative step-size tau = .3 # set initial image and store it # also store the value of the objective function for plotting current_image = init_image.clone() GA_objective_function_values = [obj_fun.value(current_image)] # create an array with all reconstruct images for plotting idata = current_image.as_array() all_images = numpy.ndarray(shape=(num_subiters + 1,) + idata.shape) all_images[0,:,:,:] = idata; # - #%% perform GA iterations # executing this cell might take a while for i in range(1, num_subiters+1): # obtain gradient for subset 0 # with current settings, this means we will only use the data of that subset # (gradient ascent with subsets is too complicated for this demo) grad = obj_fun.gradient(current_image, 0) grad_array = grad.as_array() # compute step-size as relative to current image-norm step_size = tau * norm(idata) / norm(grad_array) # perform gradient ascent step and truncate to positive values idata = make_positive(idata + step_size*grad_array) current_image.fill(idata) # compute objective function value for plotting, and write some diagnostics obj_fun_value = obj_fun.value(current_image) GA_objective_function_values.append(obj_fun_value) all_images[i,:,:,:] = idata; #%% Plot objective function values plt.figure() #plt.hold('on') plt.title('Objective function value vs subiterations') plt.plot(GA_objective_function_values,'b') plt.plot(osem_objective_function_values,'r') 
plt.legend(('gradient ascent', 'OSEM'),loc='lower right'); #%% compare GA and OSEM images plot_progress([all_images, all_osem_images], ['GA' ,'OSEM'],[2,4,8,16,32]) # The above implementation used a fixed (relative) step-size. Experiment with different values for `tau` and see how that influences convergence. # # Steepest gradient ascent will include a line search to estimate the step size. There is a demo # in the SIRF code on this. You can [find the code here as well](https://github.com/CCPPETMR/SIRF/blob/master/examples/Python/PET/steepest_ascent.py). You could implement this here.
notebooks/PET/ML_reconstruction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab={"base_uri": "https://localhost:8080/"} id="AbzZLqIPv6b7" outputId="19f2fc2b-6f1d-4b43-fd50-4c513e3936fd"
# # Transformer Network
#
# Welcome to Week 4's assignment, the last assignment of Course 5 of the Deep Learning Specialization! And congratulations on making it to the last assignment of the entire Deep Learning Specialization - you're almost done!
#
# Earlier in the course, you've implemented sequential neural networks such as RNNs, GRUs, and LSTMs. In this notebook you'll explore the Transformer architecture, a neural network that takes advantage of parallel processing and allows you to substantially speed up the training process.
#
# **After this assignment you'll be able to**:
#
# * Create positional encodings to capture sequential relationships in data
# * Calculate scaled dot-product self-attention with word embeddings
# * Implement masked multi-head attention
# * Build and train a Transformer model
#
# For the last time, let's get started!
#
# ## Important Note on Submission to the AutoGrader
#
# Before submitting your assignment to the AutoGrader, please make sure you are not doing the following:
#
# 1. You have not added any _extra_ `print` statement(s) in the assignment.
# 2. You have not added any _extra_ code cell(s) in the assignment.
# 3. You have not changed any of the function parameters.
# 4. You are not using any global variables inside your graded exercises. Unless specifically instructed to do so, please refrain from it and use the local variables instead.
# 5. You are not changing the assignment code where it is not required, like creating _extra_ variables.
# # If you do any of the following, you will get something like, `Grader not found` (or similarly unexpected) error upon submitting your assignment. Before asking for help/debugging the errors in your assignment, check for these first. If this is the case, and you don't remember the changes you have made, you can get a fresh copy of the assignment by following these [instructions](https://www.coursera.org/learn/nlp-sequence-models/supplement/qHIve/h-ow-to-refresh-your-workspace). # - # ## Table of Contents # # - [Packages](#0) # - [1 - Positional Encoding](#1) # - [1.1 - Sine and Cosine Angles](#1-1) # - [Exercise 1 - get_angles](#ex-1) # - [1.2 - Sine and Cosine Positional Encodings](#1-2) # - [Exercise 2 - positional_encoding](#ex-2) # - [2 - Masking](#2) # - [2.1 - Padding Mask](#2-1) # - [2.2 - Look-ahead Mask](#2-2) # - [3 - Self-Attention](#3) # - [Exercise 3 - scaled_dot_product_attention](#ex-3) # - [4 - Encoder](#4) # - [4.1 Encoder Layer](#4-1) # - [Exercise 4 - EncoderLayer](#ex-4) # - [4.2 - Full Encoder](#4-2) # - [Exercise 5 - Encoder](#ex-5) # - [5 - Decoder](#5) # - [5.1 - Decoder Layer](#5-1) # - [Exercise 6 - DecoderLayer](#ex-6) # - [5.2 - Full Decoder](#5-2) # - [Exercise 7 - Decoder](#ex-7) # - [6 - Transformer](#6) # - [Exercise 8 - Transformer](#ex-8) # - [7 - References](#7) # <a name='0'></a> # ## Packages # # Run the following cell to load the packages you'll need. # + id="_OpwqWL2QH5G" import tensorflow as tf import time import numpy as np import matplotlib.pyplot as plt from tensorflow.keras.layers import Embedding, MultiHeadAttention, Dense, Input, Dropout, LayerNormalization from transformers import DistilBertTokenizerFast #, TFDistilBertModel from transformers import TFDistilBertForTokenClassification # - # <a name='1'></a> # ## 1 - Positional Encoding # # In sequence to sequence tasks, the relative order of your data is extremely important to its meaning. 
When you were training sequential neural networks such as RNNs, you fed your inputs into the network in order. Information about the order of your data was automatically fed into your model. However, when you train a Transformer network using multi-head attention, you feed your data into the model all at once. While this dramatically reduces training time, there is no information about the order of your data. This is where positional encoding is useful - you can specifically encode the positions of your inputs and pass them into the network using these sine and cosine formulas: # # $$ # PE_{(pos, 2i)}= sin\left(\frac{pos}{{10000}^{\frac{2i}{d}}}\right) # \tag{1}$$ # <br> # $$ # PE_{(pos, 2i+1)}= cos\left(\frac{pos}{{10000}^{\frac{2i}{d}}}\right) # \tag{2}$$ # # * $d$ is the dimension of the word embedding and positional encoding # * $pos$ is the position of the word. # * $i$ refers to each of the different dimensions of the positional encoding. # # To develop some intuition about positional encodings, you can think of them broadly as a feature that contains the information about the relative positions of words. The sum of the positional encoding and word embedding is ultimately what is fed into the model. If you just hard code the positions in, say by adding a matrix of 1's or whole numbers to the word embedding, the semantic meaning is distorted. Conversely, the values of the sine and cosine equations are small enough (between -1 and 1) that when you add the positional encoding to a word embedding, the word embedding is not significantly distorted, and is instead enriched with positional information. Using a combination of these two equations helps your Transformer network attend to the relative positions of your input data. This was a short discussion on positional encodings, but develop further intuition, check out the *Positional Encoding Ungraded Lab*. 
# # **Note:** In the lectures Andrew uses vertical vectors, but in this assignment all vectors are horizontal. All matrix multiplications should be adjusted accordingly. # # <a name='1-1'></a> # ### 1.1 - Sine and Cosine Angles # # Notice that even though the sine and cosine positional encoding equations take in different arguments (`2i` versus `2i+1`, or even versus odd numbers) the inner terms for both equations are the same: $$\theta(pos, i, d) = \frac{pos}{10000^{\frac{2i}{d}}} \tag{3}$$ # # Consider the inner term as you calculate the positional encoding for a word in a sequence.<br> # $PE_{(pos, 0)}= sin\left(\frac{pos}{{10000}^{\frac{0}{d}}}\right)$, since solving `2i = 0` gives `i = 0` <br> # $PE_{(pos, 1)}= cos\left(\frac{pos}{{10000}^{\frac{0}{d}}}\right)$, since solving `2i + 1 = 1` gives `i = 0` # # The angle is the same for both! The angles for $PE_{(pos, 2)}$ and $PE_{(pos, 3)}$ are the same as well, since for both, `i = 1` and therefore the inner term is $\left(\frac{pos}{{10000}^{\frac{2}{d}}}\right)$. This relationship holds true for all paired sine and cosine curves: # # | k | <code> 0 </code>|<code> 1 </code>|<code> 2 </code>|<code> 3 </code>| <code> ... </code> |<code> d - 2 </code>|<code> d - 1 </code>| # | ---------------- | :------: | ----------------- | ----------------- | ----------------- | ----- | ----------------- | ----------------- | # | encoding(0) = |[$sin(\theta(0, 0, d))$| $cos(\theta(0, 0, d))$| $sin(\theta(0, 1, d))$| $cos(\theta(0, 1, d))$|... |$sin(\theta(0, d//2, d))$| $cos(\theta(0, d//2, d))$]| # | encoding(1) = | [$sin(\theta(1, 0, d))$| $cos(\theta(1, 0, d))$| $sin(\theta(1, 1, d))$| $cos(\theta(1, 1, d))$|... |$sin(\theta(1, d//2, d))$| $cos(\theta(1, d//2, d))$]| # ... # | encoding(pos) = | [$sin(\theta(pos, 0, d))$| $cos(\theta(pos, 0, d))$| $sin(\theta(pos, 1, d))$| $cos(\theta(pos, 1, d))$|... 
|$sin(\theta(pos, d//2, d))$| $cos(\theta(pos, d//2, d))]$|
#
#
# <a name='ex-1'></a>
# ### Exercise 1 - get_angles
#
# Implement the function `get_angles()` to calculate the possible angles for the sine and cosine positional encodings
#
# **Hints**
#
# - If `k = [0, 1, 2, 3, 4, 5]`, then, `i` must be `i = [0, 0, 1, 1, 2, 2]`
# - `i = k//2`

# + id="bPzwMVfcQpT-"
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION get_angles
def get_angles(pos, k, d):
    """
    Get the angles for the positional encoding

    Arguments:
        pos -- Column vector containing the positions [[0], [1], ...,[N-1]]
        k -- Row vector containing the dimension span [[0, 1, 2, ..., d-1]]
        d(integer) -- Encoding size

    Returns:
        angles -- (pos, d) numpy array
    """
    # START CODE HERE
    # Get i from dimension span k
    # (paired sine/cosine dimensions 2i and 2i+1 share the same angle)
    i = k // 2
    # Calculate the angles using pos, i and d
    # NumPy broadcasting: (N, 1) positions against (1, d) dims -> (N, d)
    angles = pos / 10_000 ** (2 * i / d)
    # END CODE HERE

    return angles

# +
from public_tests import *
get_angles_test(get_angles)

# Example
position = 4
d_model = 8
pos_m = np.arange(position)[:, np.newaxis]
dims = np.arange(d_model)[np.newaxis, :]
get_angles(pos_m, dims, d_model)
# -

# <a name='1-2'></a>
# ### 1.2 - Sine and Cosine Positional Encodings
#
# Now you can use the angles you computed to calculate the sine and cosine positional encodings.
#
# $$
# PE_{(pos, 2i)}= sin\left(\frac{pos}{{10000}^{\frac{2i}{d}}}\right)
# $$
# <br>
# $$
# PE_{(pos, 2i+1)}= cos\left(\frac{pos}{{10000}^{\frac{2i}{d}}}\right)
# $$
#
# <a name='ex-2'></a>
# ### Exercise 2 - positional_encoding
#
# Implement the function `positional_encoding()` to calculate the sine and cosine positional encodings
#
# **Reminder:** Use the sine equation when $i$ is an even number and the cosine equation when $i$ is an odd number.
#
# #### Additional Hints
# * You may find
# [np.newaxis](https://numpy.org/doc/stable/reference/arrays.indexing.html) useful depending on the implementation you choose.
# + id="y78txxoHQtwG" # UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION positional_encoding def positional_encoding(positions, d): """ Precomputes a matrix with all the positional encodings Arguments: positions (int) -- Maximum number of positions to be encoded d (int) -- Encoding size Returns: pos_encoding -- (1, position, d_model) A matrix with the positional encodings """ # START CODE HERE # initialize a matrix angle_rads of all the angles angle_rads = get_angles(np.arange(positions)[:, np.newaxis], np.arange(d)[np.newaxis,:], d) # apply sin to even indices in the array; 2i angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2]) # apply cos to odd indices in the array; 2i+1 angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2]) # END CODE HERE pos_encoding = angle_rads[np.newaxis, :] return tf.cast(pos_encoding, dtype=tf.float32) # + colab={"base_uri": "https://localhost:8080/", "height": 300} id="jYiWrawRQvuv" outputId="cfccc7c9-428e-4b08-d969-e3090fafc1ad" # UNIT TEST positional_encoding_test(positional_encoding, get_angles) # - # Nice work calculating the positional encodings! Now you can visualize them. # + pos_encoding = positional_encoding(50, 512) print (pos_encoding.shape) plt.pcolormesh(pos_encoding[0], cmap='RdBu') plt.xlabel('d') plt.xlim((0, 512)) plt.ylabel('Position') plt.colorbar() plt.show() # - # Each row represents a positional encoding - notice how none of the rows are identical! You have created a unique positional encoding for each of the words. # <a name='2'></a> # ## 2 - Masking # # There are two types of masks that are useful when building your Transformer network: the *padding mask* and the *look-ahead mask*. Both help the softmax computation give the appropriate weights to the words in your input sentence. # # <a name='2-1'></a> # ### 2.1 - Padding Mask # # Oftentimes your input sequence will exceed the maximum length of a sequence your network can process. 
Let's say the maximum length of your model is five, it is fed the following sequences: # # [["Do", "you", "know", "when", "Jane", "is", "going", "to", "visit", "Africa"], # ["Jane", "visits", "Africa", "in", "September" ], # ["Exciting", "!"] # ] # # which might get vectorized as: # # [[ 71, 121, 4, 56, 99, 2344, 345, 1284, 15], # [ 56, 1285, 15, 181, 545], # [ 87, 600] # ] # # When passing sequences into a transformer model, it is important that they are of uniform length. You can achieve this by padding the sequence with zeros, and truncating sentences that exceed the maximum length of your model: # # [[ 71, 121, 4, 56, 99], # [ 2344, 345, 1284, 15, 0], # [ 56, 1285, 15, 181, 545], # [ 87, 600, 0, 0, 0], # ] # # Sequences longer than the maximum length of five will be truncated, and zeros will be added to the truncated sequence to achieve uniform length. Similarly, for sequences shorter than the maximum length, they zeros will also be added for padding. However, these zeros will affect the softmax calculation - this is when a padding mask comes in handy! You will need to define a boolean mask that specifies which elements you must attend(1) and which elements you must ignore(0). Later you will use that mask to set all the zeros in the sequence to a value close to negative infinity (-1e9). We'll implement this for you so you can get to the fun of building the Transformer network! 😇 Just make sure you go through the code so you can correctly implement padding when building your model. # # After masking, your input should go from `[87, 600, 0, 0, 0]` to `[87, 600, -1e9, -1e9, -1e9]`, so that when you take the softmax, the zeros don't affect the score. # # The [MultiheadAttention](https://keras.io/api/layers/attention_layers/multi_head_attention/) layer implemented in Keras, use this masking logic. 
# + id="JOL9XWsFQxxo" def create_padding_mask(decoder_token_ids): """ Creates a matrix mask for the padding cells Arguments: decoder_token_ids -- (n, m) matrix Returns: mask -- (n, 1, 1, m) binary tensor """ seq = 1 - tf.cast(tf.math.equal(decoder_token_ids, 0), tf.float32) # add extra dimensions to add the padding # to the attention logits. return seq[:, tf.newaxis, :] # + colab={"base_uri": "https://localhost:8080/"} id="5J5FFjklQ1Fz" outputId="8319446f-3ed4-406a-cf38-ca2b08142ff4" x = tf.constant([[7., 6., 0., 0., 1.], [1., 2., 3., 0., 0.], [0., 0., 0., 4., 5.]]) print(create_padding_mask(x)) # - # If we multiply (1 - mask) by -1e9 and add it to the sample input sequences, the zeros are essentially set to negative infinity. Notice the difference when taking the softmax of the original sequence and the masked sequence: print(tf.keras.activations.softmax(x)) print(tf.keras.activations.softmax(x + (1 - create_padding_mask(x)) * -1.0e9)) # <a name='2-2'></a> # ### 2.2 - Look-ahead Mask # # The look-ahead mask follows similar intuition. In training, you will have access to the complete correct output of your training example. The look-ahead mask helps your model pretend that it correctly predicted a part of the output and see if, *without looking ahead*, it can correctly predict the next output. # # For example, if the expected correct output is `[1, 2, 3]` and you wanted to see if given that the model correctly predicted the first value it could predict the second value, you would mask out the second and third values. So you would input the masked sequence `[1, -1e9, -1e9]` and see if it could generate `[1, 2, -1e9]`. # # Just because you've worked so hard, we'll also implement this mask for you 😇😇. Again, take a close look at the code so you can effectively implement it later. 
# + id="9O9UbM31Q3hK" def create_look_ahead_mask(sequence_length): """ Returns an upper triangular matrix filled with ones Arguments: sequence_length -- matrix size Returns: mask -- (size, size) tensor """ mask = tf.linalg.band_part(tf.ones((1, sequence_length, sequence_length)), -1, 0) return mask # + colab={"base_uri": "https://localhost:8080/"} id="nfzHoVj9Q5nG" outputId="300e76ec-77d0-460a-b6df-71e40de86606" x = tf.random.uniform((1, 3)) temp = create_look_ahead_mask(x.shape[1]) temp # + [markdown] id="VG0gPyv0oDBi" # <a name='3'></a> # ## 3 - Self-Attention # # As the authors of the Transformers paper state, "Attention is All You Need". # # <img src="self-attention.png" alt="Encoder" width="600"/> # <caption><center><font color='purple'><b>Figure 1: Self-Attention calculation visualization</font></center></caption> # # The use of self-attention paired with traditional convolutional networks allows for parallelization which speeds up training. You will implement **scaled dot product attention** which takes in a query, key, value, and a mask as inputs to returns rich, attention-based vector representations of the words in your sequence. This type of self-attention can be mathematically expressed as: # $$ # \text { Attention }(Q, K, V)=\operatorname{softmax}\left(\frac{Q K^{T}}{\sqrt{d_{k}}}+{M}\right) V\tag{4}\ # $$ # # * $Q$ is the matrix of queries # * $K$ is the matrix of keys # * $V$ is the matrix of values # * $M$ is the optional mask you choose to apply # * ${d_k}$ is the dimension of the keys, which is used to scale everything down so the softmax doesn't explode # # <a name='ex-3'></a> # ### Exercise 3 - scaled_dot_product_attention # # Implement the function `scaled_dot_product_attention()` to create attention-based representations # **Reminder**: The boolean mask parameter can be passed in as `none` or as either padding or look-ahead. # # Multiply (1. - mask) by -1e9 before applying the softmax. 
#
# **Additional Hints**
# * You may find [tf.matmul](https://www.tensorflow.org/api_docs/python/tf/linalg/matmul) useful for matrix multiplication.

# + id="CSysk_rjQ7lp"
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION scaled_dot_product_attention
def scaled_dot_product_attention(q, k, v, mask):
    """
    Calculate the attention weights.
      q, k, v must have matching leading dimensions.
      k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
      The mask has different shapes depending on its type(padding or look ahead)
      but it must be broadcastable for addition.

    Arguments:
        q -- query shape == (..., seq_len_q, depth)
        k -- key shape == (..., seq_len_k, depth)
        v -- value shape == (..., seq_len_v, depth_v)
        mask: Float tensor with shape broadcastable
              to (..., seq_len_q, seq_len_k). Defaults to None.

    Returns:
        output -- attention-weighted combination of v, shape (..., seq_len_q, depth_v)
        attention_weights -- softmax scores, shape (..., seq_len_q, seq_len_k)
    """
    # START CODE HERE

    # Q @ K^T. transpose_b transposes only the last two axes of k, keeping any
    # leading batch/head dimensions intact. (The previous k.transpose() call
    # fails on tf tensors, which have no transpose() method, and would flip
    # every axis of a batched numpy array.)
    matmul_qk = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)

    # scale matmul_qk by sqrt(d_k) so the logits don't saturate the softmax
    dk = tf.cast(k.shape[-1], tf.float32)
    scaled_attention_logits = matmul_qk / tf.sqrt(dk)

    # add the mask to the scaled tensor: positions the mask marks 0 receive
    # -1e9, contributing ~0 probability after the softmax.
    if mask is not None:  # Don't replace this None
        scaled_attention_logits += (1 - mask) * -1.0e9

    # softmax is normalized on the last axis (seq_len_k) so that the scores
    # add up to 1.
    attention_weights = tf.keras.activations.softmax(scaled_attention_logits, axis=-1)  # (..., seq_len_q, seq_len_k)

    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)

    # END CODE HERE
    return output, attention_weights
# -

# UNIT TEST
scaled_dot_product_attention_test(scaled_dot_product_attention)

# Excellent work! You can now implement self-attention. With that, you can start building the encoder block!

# + [markdown] id="blS0pEpTqRVI"
# <a name='4'></a>
# ## 4 - Encoder
#
# The Transformer Encoder layer pairs self-attention and convolutional neural network style of processing to improve the speed of training and passes K and V matrices to the Decoder, which you'll build later in the assignment. In this section of the assignment, you will implement the Encoder by pairing multi-head attention and a feed forward neural network (Figure 2a).
# <img src="encoder_layer.png" alt="Encoder" width="250"/>
# <caption><center><font color='purple'><b>Figure 2a: Transformer encoder layer</font></center></caption>
#
# * `MultiHeadAttention` you can think of as computing the self-attention several times to detect different features.
# * Feed forward neural network contains two Dense layers which we'll implement as the function `FullyConnected`
#
# Your input sentence first passes through a *multi-head attention layer*, where the encoder looks at other words in the input sentence as it encodes a specific word. The outputs of the multi-head attention layer are then fed to a *feed forward neural network*. The exact same feed forward network is independently applied to each position.
#
# * For the `MultiHeadAttention` layer, you will use the [Keras implementation](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention). If you're curious about how to split the query matrix Q, key matrix K, and value matrix V into different heads, you can look through the implementation.
# * You will also use the [Sequential API](https://keras.io/api/models/sequential/) with two dense layers to build the feed forward neural network layers.

# + id="sC5vJhz29vZR"
def FullyConnected(embedding_dim, fully_connected_dim):
    """
    Build the position-wise feed-forward sub-network: Dense(ReLU) -> Dense.

    Arguments:
        embedding_dim -- output size of the block (d_model)
        fully_connected_dim -- size of the hidden ReLU layer (dff)

    Returns:
        a tf.keras.Sequential mapping (batch_size, seq_len, embedding_dim)
        -> (batch_size, seq_len, embedding_dim)
    """
    return tf.keras.Sequential([
        tf.keras.layers.Dense(fully_connected_dim, activation='relu'),  # (batch_size, seq_len, dff)
        tf.keras.layers.Dense(embedding_dim)  # (batch_size, seq_len, d_model)
    ])


# + [markdown] id="R65WbX5wqYYH"
# <a name='4-1'></a>
# ### 4.1 Encoder Layer
#
# Now you can pair multi-head attention and feed forward neural network together in an encoder layer! You will also use residual connections and layer normalization to help speed up training (Figure 2a).
# # <a name='ex-4'></a> # ### Exercise 4 - EncoderLayer # # Implement `EncoderLayer()` using the `call()` method # # In this exercise, you will implement one encoder block (Figure 2) using the `call()` method. The function should perform the following steps: # 1. You will pass the Q, V, K matrices and a boolean mask to a multi-head attention layer. Remember that to compute *self*-attention Q, V and K should be the same. Let the default values for `return_attention_scores` and `training`. You will also perform Dropout in this multi-head attention layer during training. # 2. Now add a skip connection by adding your original input `x` and the output of the your multi-head attention layer. # 3. After adding the skip connection, pass the output through the first normalization layer. # 4. Finally, repeat steps 1-3 but with the feed forward neural network with a dropout layer instead of the multi-head attention layer. # # <details> # <summary><font size="2" color="darkgreen"><b>Additional Hints (Click to expand)</b></font></summary> # # * The `__init__` method creates all the layers that will be accesed by the the `call` method. Wherever you want to use a layer defined inside the `__init__` method you will have to use the syntax `self.[insert layer name]`. # * You will find the documentation of [MultiHeadAttention](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention) helpful. *Note that if query, key and value are the same, then this function performs self-attention.* # * The call arguments for `self.mha` are (Where B is for batch_size, T is for target sequence shapes, and S is output_shape): # - `query`: Query Tensor of shape (B, T, dim). # - `value`: Value Tensor of shape (B, S, dim). # - `key`: Optional key Tensor of shape (B, S, dim). If not given, will use value for both key and value, which is the most common case. # - `attention_mask`: a boolean mask of shape (B, T, S), that prevents attention to certain positions. 
The boolean mask specifies which query elements can attend to which key elements, 1 indicates attention and 0 indicates no attention. Broadcasting can happen for the missing batch dimensions and the head dimension. # - `return_attention_scores`: A boolean to indicate whether the output should be attention output if True, or (attention_output, attention_scores) if False. Defaults to False. # - `training`: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (no dropout). Defaults to either using the training mode of the parent layer/model, or False (inference) if there is no parent layer. # + id="tIufbrc-9_2u" # UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION EncoderLayer class EncoderLayer(tf.keras.layers.Layer): """ The encoder layer is composed by a multi-head self-attention mechanism, followed by a simple, positionwise fully connected feed-forward network. This archirecture includes a residual connection around each of the two sub-layers, followed by layer normalization. 
""" def __init__(self, embedding_dim, num_heads, fully_connected_dim, dropout_rate=0.1, layernorm_eps=1e-6): super(EncoderLayer, self).__init__() self.mha = MultiHeadAttention(num_heads=num_heads, key_dim=embedding_dim, dropout=dropout_rate) self.ffn = FullyConnected(embedding_dim=embedding_dim, fully_connected_dim=fully_connected_dim) self.layernorm1 = LayerNormalization(epsilon=layernorm_eps) self.layernorm2 = LayerNormalization(epsilon=layernorm_eps) self.dropout_ffn = Dropout(dropout_rate) def call(self, x, training, mask): """ Forward pass for the Encoder Layer Arguments: x -- Tensor of shape (batch_size, input_seq_len, fully_connected_dim) training -- Boolean, set to true to activate the training mode for dropout layers mask -- Boolean mask to ensure that the padding is not treated as part of the input Returns: encoder_layer_out -- Tensor of shape (batch_size, input_seq_len, fully_connected_dim) """ # START CODE HERE # calculate self-attention using mha(~1 line) attn_output = self.mha(x, x, x, mask) # Self attention (batch_size, input_seq_len, fully_connected_dim) # apply layer normalization on sum of the input and the attention output to get the # output of the multi-head attention layer (~1 line) out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, fully_connected_dim) # pass the output of the multi-head attention layer through a ffn (~1 line) ffn_output = self.ffn(out1) # (batch_size, input_seq_len, fully_connected_dim) # apply dropout layer to ffn output during training (~1 line) ffn_output = self.dropout_ffn(ffn_output) # apply layer normalization on sum of the output from multi-head attention and ffn output to get the # output of the encoder layer (~1 line) encoder_layer_out = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, fully_connected_dim) # END CODE HERE return encoder_layer_out # - # UNIT TEST EncoderLayer_test(EncoderLayer) # <a name='4-2'></a> # ### 4.2 - Full Encoder # # Awesome job! 
You have now successfully implemented positional encoding, self-attention, and an encoder layer - give yourself a pat on the back. Now you're ready to build the full Transformer Encoder (Figure 2b), where you will embedd your input and add the positional encodings you calculated. You will then feed your encoded embeddings to a stack of Encoder layers. # # <img src="encoder.png" alt="Encoder" width="330"/> # <caption><center><font color='purple'><b>Figure 2b: Transformer Encoder</font></center></caption> # # # <a name='ex-5'></a> # ### Exercise 5 - Encoder # # Complete the `Encoder()` function using the `call()` method to embed your input, add positional encoding, and implement multiple encoder layers # # In this exercise, you will initialize your Encoder with an Embedding layer, positional encoding, and multiple EncoderLayers. Your `call()` method will perform the following steps: # 1. Pass your input through the Embedding layer. # 2. Scale your embedding by multiplying it by the square root of your embedding dimension. Remember to cast the embedding dimension to data type `tf.float32` before computing the square root. # 3. Add the position encoding: self.pos_encoding `[:, :seq_len, :]` to your embedding. # 4. Pass the encoded embedding through a dropout layer, remembering to use the `training` parameter to set the model training mode. # 5. Pass the output of the dropout layer through the stack of encoding layers using a for loop. 
# + id="7j2Tjr0K0t0I" # UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION class Encoder(tf.keras.layers.Layer): """ The entire Encoder starts by passing the input to an embedding layer and using positional encoding to then pass the output through a stack of encoder Layers """ def __init__(self, num_layers, embedding_dim, num_heads, fully_connected_dim, input_vocab_size, maximum_position_encoding, dropout_rate=0.1, layernorm_eps=1e-6): super(Encoder, self).__init__() self.embedding_dim = embedding_dim self.num_layers = num_layers self.embedding = Embedding(input_vocab_size, self.embedding_dim) self.pos_encoding = positional_encoding(maximum_position_encoding, self.embedding_dim) self.enc_layers = [EncoderLayer(embedding_dim=self.embedding_dim, num_heads=num_heads, fully_connected_dim=fully_connected_dim, dropout_rate=dropout_rate, layernorm_eps=layernorm_eps) for _ in range(self.num_layers)] self.dropout = Dropout(dropout_rate) def call(self, x, training, mask): """ Forward pass for the Encoder Arguments: x -- Tensor of shape (batch_size, input_seq_len) training -- Boolean, set to true to activate the training mode for dropout layers mask -- Boolean mask to ensure that the padding is not treated as part of the input Returns: out2 -- Tensor of shape (batch_size, input_seq_len, fully_connected_dim) """ #mask = create_padding_mask(x) seq_len = tf.shape(x)[1] # START CODE HERE # Pass input through the Embedding layer x = self.embedding(x) # (batch_size, input_seq_len, embedding_dim) # Scale embedding by multiplying it by the square root of the embedding dimension x *= tf.math.sqrt(tf.cast(self.embedding_dim, tf.float32)) # Add the position encoding to embedding x += self.pos_encoding[:, :seq_len, :] # Pass the encoded embedding through a dropout layer x = self.dropout(x, training = training) # Pass the output through the stack of encoding layers for i in range(self.num_layers): x = self.enc_layers[i](x, training, mask) # END CODE HERE return x # 
(batch_size, input_seq_len, fully_connected_dim) # - # UNIT TEST Encoder_test(Encoder) # <a name='5'></a> # ## 5 - Decoder # # The Decoder layer takes the K and V matrices generated by the Encoder and in computes the second multi-head attention layer with the Q matrix from the output (Figure 3a). # # <img src="decoder_layer.png" alt="Encoder" width="250"/> # <caption><center><font color='purple'><b>Figure 3a: Transformer Decoder layer</font></center></caption> # # <a name='5-1'></a> # ### 5.1 - Decoder Layer # Again, you'll pair multi-head attention with a feed forward neural network, but this time you'll implement two multi-head attention layers. You will also use residual connections and layer normalization to help speed up training (Figure 3a). # # <a name='ex-6'></a> # ### Exercise 6 - DecoderLayer # # Implement `DecoderLayer()` using the `call()` method # # 1. Block 1 is a multi-head attention layer with a residual connection, and look-ahead mask. Like in the `EncoderLayer`, Dropout is defined within the multi-head attention layer. # 2. Block 2 will take into account the output of the Encoder, so the multi-head attention layer will receive K and V from the encoder, and Q from the Block 1. You will then apply a normalization layer and a residual connection, just like you did before with the `EncoderLayer`. # 3. Finally, Block 3 is a feed forward neural network with dropout and normalization layers and a residual connection. # # **Additional Hints:** # * The first two blocks are fairly similar to the EncoderLayer except you will return `attention_scores` when computing self-attention # + id="wEouNFvCzMeT" # UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION DecoderLayer class DecoderLayer(tf.keras.layers.Layer): """ The decoder layer is composed by two multi-head attention blocks, one that takes the new input and uses self-attention, and the other one that combines it with the output of the encoder, followed by a fully connected block. 
""" def __init__(self, embedding_dim, num_heads, fully_connected_dim, dropout_rate=0.1, layernorm_eps=1e-6): super(DecoderLayer, self).__init__() self.mha1 = MultiHeadAttention(num_heads=num_heads, key_dim=embedding_dim, dropout=dropout_rate) self.mha2 = MultiHeadAttention(num_heads=num_heads, key_dim=embedding_dim, dropout=dropout_rate) self.ffn = FullyConnected(embedding_dim=embedding_dim, fully_connected_dim=fully_connected_dim) self.layernorm1 = LayerNormalization(epsilon=layernorm_eps) self.layernorm2 = LayerNormalization(epsilon=layernorm_eps) self.layernorm3 = LayerNormalization(epsilon=layernorm_eps) self.dropout_ffn = Dropout(dropout_rate) def call(self, x, enc_output, training, look_ahead_mask, padding_mask): """ Forward pass for the Decoder Layer Arguments: x -- Tensor of shape (batch_size, target_seq_len, fully_connected_dim) enc_output -- Tensor of shape(batch_size, input_seq_len, fully_connected_dim) training -- Boolean, set to true to activate the training mode for dropout layers look_ahead_mask -- Boolean mask for the target_input padding_mask -- Boolean mask for the second multihead attention layer Returns: out3 -- Tensor of shape (batch_size, target_seq_len, fully_connected_dim) attn_weights_block1 -- Tensor of shape(batch_size, num_heads, target_seq_len, input_seq_len) attn_weights_block2 -- Tensor of shape(batch_size, num_heads, target_seq_len, input_seq_len) """ # START CODE HERE # enc_output.shape == (batch_size, input_seq_len, fully_connected_dim) # BLOCK 1 # calculate self-attention and return attention scores as attn_weights_block1. # Dropout will be applied during training (~1 line). 
mult_attn_out1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask, return_attention_scores=True) # (batch_size, target_seq_len, d_model) # apply layer normalization (layernorm1) to the sum of the attention output and the input (~1 line) Q1 = self.layernorm1(mult_attn_out1 + x) # BLOCK 2 # calculate self-attention using the Q from the first block and K and V from the encoder output. # Dropout will be applied during training # Return attention scores as attn_weights_block2 (~1 line) mult_attn_out2, attn_weights_block2 = self.mha2(Q1, enc_output, enc_output, padding_mask, return_attention_scores=True) # (batch_size, target_seq_len, d_model) # apply layer normalization (layernorm2) to the sum of the attention output and the output of the first block (~1 line) mult_attn_out2 = self.layernorm2(mult_attn_out2 + Q1) # (batch_size, target_seq_len, fully_connected_dim) #BLOCK 3 # pass the output of the second block through a ffn ffn_output = self.ffn(mult_attn_out2) # (batch_size, target_seq_len, fully_connected_dim) # apply a dropout layer to the ffn output ffn_output = self.dropout_ffn(ffn_output, training = training) # apply layer normalization (layernorm3) to the sum of the ffn output and the output of the second block out3 = self.layernorm3(ffn_output + mult_attn_out2) # (batch_size, target_seq_len, fully_connected_dim) # END CODE HERE return out3, attn_weights_block1, attn_weights_block2 # - # UNIT TEST DecoderLayer_test(DecoderLayer, create_look_ahead_mask) # <a name='5-2'></a> # ### 5.2 - Full Decoder # You're almost there! Time to use your Decoder layer to build a full Transformer Decoder (Figure 3b). You will embedd your output and add positional encodings. You will then feed your encoded embeddings to a stack of Decoder layers. 
# # # <img src="decoder.png" alt="Encoder" width="300"/> # <caption><center><font color='purple'><b>Figure 3b: Transformer Decoder</font></center></caption> # # <a name='ex-7'></a> # ### Exercise 7 - Decoder # # Implement `Decoder()` using the `call()` method to embed your output, add positional encoding, and implement multiple decoder layers # # In this exercise, you will initialize your Decoder with an Embedding layer, positional encoding, and multiple DecoderLayers. Your `call()` method will perform the following steps: # 1. Pass your generated output through the Embedding layer. # 2. Scale your embedding by multiplying it by the square root of your embedding dimension. Remember to cast the embedding dimension to data type `tf.float32` before computing the square root. # 3. Add the position encoding: self.pos_encoding `[:, :seq_len, :]` to your embedding. # 4. Pass the encoded embedding through a dropout layer, remembering to use the `training` parameter to set the model training mode. # 5. Pass the output of the dropout layer through the stack of Decoding layers using a for loop. 
# + id="McS3by6k4pnP" # UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION Decoder class Decoder(tf.keras.layers.Layer): """ The entire Encoder is starts by passing the target input to an embedding layer and using positional encoding to then pass the output through a stack of decoder Layers """ def __init__(self, num_layers, embedding_dim, num_heads, fully_connected_dim, target_vocab_size, maximum_position_encoding, dropout_rate=0.1, layernorm_eps=1e-6): super(Decoder, self).__init__() self.embedding_dim = embedding_dim self.num_layers = num_layers self.embedding = Embedding(target_vocab_size, self.embedding_dim) self.pos_encoding = positional_encoding(maximum_position_encoding, self.embedding_dim) self.dec_layers = [DecoderLayer(embedding_dim=self.embedding_dim, num_heads=num_heads, fully_connected_dim=fully_connected_dim, dropout_rate=dropout_rate, layernorm_eps=layernorm_eps) for _ in range(self.num_layers)] self.dropout = Dropout(dropout_rate) def call(self, x, enc_output, training, look_ahead_mask, padding_mask): """ Forward pass for the Decoder Arguments: x -- Tensor of shape (batch_size, target_seq_len, fully_connected_dim) enc_output -- Tensor of shape(batch_size, input_seq_len, fully_connected_dim) training -- Boolean, set to true to activate the training mode for dropout layers look_ahead_mask -- Boolean mask for the target_input padding_mask -- Boolean mask for the second multihead attention layer Returns: x -- Tensor of shape (batch_size, target_seq_len, fully_connected_dim) attention_weights - Dictionary of tensors containing all the attention weights each of shape Tensor of shape (batch_size, num_heads, target_seq_len, input_seq_len) """ seq_len = tf.shape(x)[1] attention_weights = {} # START CODE HERE # create word embeddings x = self.embedding(x) # (batch_size, target_seq_len, fully_connected_dim) # scale embeddings by multiplying by the square root of their dimension x *= tf.math.sqrt(tf.cast(self.embedding_dim, tf.float32)) # calculate 
positional encodings and add to word embedding x += self.pos_encoding[:, :seq_len, :] # apply a dropout layer to x x = self.dropout(x, training = training) # use a for loop to pass x through a stack of decoder layers and update attention_weights (~4 lines total) for i in range(self.num_layers): # pass x and the encoder output through a stack of decoder layers and save the attention weights # of block 1 and 2 (~1 line) x, block1, block2 = self.dec_layers[i](x, enc_output, training, look_ahead_mask, padding_mask) #update attention_weights dictionary with the attention weights of block 1 and block 2 attention_weights['decoder_layer{}_block1_self_att'.format(i+1)] = block1 attention_weights['decoder_layer{}_block2_decenc_att'.format(i+1)] = block2 # END CODE HERE # x.shape == (batch_size, target_seq_len, fully_connected_dim) return x, attention_weights # - # UNIT TEST Decoder_test(Decoder, create_look_ahead_mask, create_padding_mask) # <a name='6'></a> # ## 6 - Transformer # # Phew! This has been quite the assignment, and now you've made it to your last exercise of the Deep Learning Specialization. Congratulations! You've done all the hard work, now it's time to put it all together. 
# # <img src="transformer.png" alt="Transformer" width="550"/> # <caption><center><font color='purple'><b>Figure 4: Transformer</font></center></caption> # # The flow of data through the Transformer Architecture is as follows: # * First your input passes through an Encoder, which is just repeated Encoder layers that you implemented: # - embedding and positional encoding of your input # - multi-head attention on your input # - feed forward neural network to help detect features # * Then the predicted output passes through a Decoder, consisting of the decoder layers that you implemented: # - embedding and positional encoding of the output # - multi-head attention on your generated output # - multi-head attention with the Q from the first multi-head attention layer and the K and V from the Encoder # - a feed forward neural network to help detect features # * Finally, after the Nth Decoder layer, two dense layers and a softmax are applied to generate prediction for the next output in your sequence. # # <a name='ex-8'></a> # ### Exercise 8 - Transformer # # Implement `Transformer()` using the `call()` method # 1. Pass the input through the Encoder with the appropiate mask. # 2. Pass the encoder output and the target through the Decoder with the appropiate mask. # 3. Apply a linear transformation and a softmax to get a prediction. 
# + id="QHymPmaj-2ba" # UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION Transformer class Transformer(tf.keras.Model): """ Complete transformer with an Encoder and a Decoder """ def __init__(self, num_layers, embedding_dim, num_heads, fully_connected_dim, input_vocab_size, target_vocab_size, max_positional_encoding_input, max_positional_encoding_target, dropout_rate=0.1, layernorm_eps=1e-6): super(Transformer, self).__init__() self.encoder = Encoder(num_layers=num_layers, embedding_dim=embedding_dim, num_heads=num_heads, fully_connected_dim=fully_connected_dim, input_vocab_size=input_vocab_size, maximum_position_encoding=max_positional_encoding_input, dropout_rate=dropout_rate, layernorm_eps=layernorm_eps) self.decoder = Decoder(num_layers=num_layers, embedding_dim=embedding_dim, num_heads=num_heads, fully_connected_dim=fully_connected_dim, target_vocab_size=target_vocab_size, maximum_position_encoding=max_positional_encoding_target, dropout_rate=dropout_rate, layernorm_eps=layernorm_eps) self.final_layer = Dense(target_vocab_size, activation='softmax') def call(self, input_sentence, output_sentence, training, enc_padding_mask, look_ahead_mask, dec_padding_mask): """ Forward pass for the entire Transformer Arguments: input_sentence -- Tensor of shape (batch_size, input_seq_len, fully_connected_dim) An array of the indexes of the words in the input sentence output_sentence -- Tensor of shape (batch_size, target_seq_len, fully_connected_dim) An array of the indexes of the words in the output sentence training -- Boolean, set to true to activate the training mode for dropout layers enc_padding_mask -- Boolean mask to ensure that the padding is not treated as part of the input look_ahead_mask -- Boolean mask for the target_input dec_padding_mask -- Boolean mask for the second multihead attention layer Returns: final_output -- Describe me attention_weights - Dictionary of tensors containing all the attention weights for the decoder each of shape Tensor of 
shape (batch_size, num_heads, target_seq_len, input_seq_len) """ # START CODE HERE # call self.encoder with the appropriate arguments to get the encoder output enc_output = self.encoder(input_sentence, training, enc_padding_mask) # (batch_size, inp_seq_len, fully_connected_dim) # call self.decoder with the appropriate arguments to get the decoder output # dec_output.shape == (batch_size, tar_seq_len, fully_connected_dim) dec_output, attention_weights = self.decoder(output_sentence, enc_output, training, look_ahead_mask, dec_padding_mask) # pass decoder output through a linear layer and softmax (~2 lines) final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size) # END CODE HERE return final_output, attention_weights # - # UNIT TEST Transformer_test(Transformer, create_look_ahead_mask, create_padding_mask) # ## Conclusion # # You've come to the end of the graded portion of the assignment. By now, you've: # # * Create positional encodings to capture sequential relationships in data # * Calculate scaled dot-product self-attention with word embeddings # * Implement masked multi-head attention # * Build and train a Transformer model # <font color='blue'> # <b>What you should remember</b>: # # - The combination of self-attention and convolutional network layers allows of parallization of training and *faster training*. # - Self-attention is calculated using the generated query Q, key K, and value V matrices. # - Adding positional encoding to word embeddings is an effective way of include sequence information in self-attention calculations. # - Multi-head attention can help detect multiple features in your sentence. # - Masking stops the model from 'looking ahead' during training, or weighting zeroes too much when processing cropped sentences. 
# Now that you have completed the Transformer assignment, make sure you check out the ungraded labs to apply the Transformer model to practical use cases such as Named Entity Recognition (NER) and Question Answering (QA).
#
#
#
# # Congratulations on finishing the Deep Learning Specialization!!!!!! 🎉🎉🎉🎉🎉
#
# This was the last graded assignment of the specialization. It is now time to celebrate all your hard work and dedication!
#
# <a name='7'></a>
# ## 7 - References
#
# The Transformer algorithm was due to Vaswani et al. (2017).
#
# - <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2017). [Attention Is All You Need](https://arxiv.org/abs/1706.03762)
DeepLearning_course5/W4/Transformer Architecture with Tensorflow/C5_W4_A1_Transformer_Subclass_v1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 1. Multiple choice concept questions

# +
Q1. What is Not a technique for Machine Learning?
a. Supervised learning
b. Intelligent learning
c. Deep Learning
d. Reinforcement learning
Answer: (b)

Q2. Here is a Set s = {23, 2, 8}. What may be the result after operating s.add(8)?
a. {23, 2, 8, 8}
b. {8, 23, 2, 8,}
c. {23, 8, 2}
d. {23, 2}
Answer: (c)

Q3. What is Not an appropriate situation for applying Tuples?
a. Useful when you want to provide information that cannot be changed
b. Stores unique dates
c. Most extreme case since structure has no function
d. Supports iteration, indexing, contains
Answer: (d)

Q4. Bayes theorem is often used to?
a. compute P(A|B) when P(A) and P(B) are known
b. relate P(A|B) to P(B|A)
c. compute P(A,B) from P(C)
d. relate P(A,B) to P(C)
Answer: (b)

Q5. What is Not a function of Explanatory Data Visualization?
a. Having an understanding of the data outcome
b. Communicate data outcome to clients
c. Particularly useful for data scientists
d. Make data look tidy
Answer: (d)

Q6. Time series analysis basic statistic models are great at
a. recognizing a trend
b. evaluating the performance of model
c. predicting the next value in a series
d. capturing the periodic patterns
Answer: (a)

Q7. What is most important for data storytelling?
a. Elaborate explanation
b. Emotional connection
c. Unbiased statement
d. Clear data visualization
Answer: (b)

Q8. Vector Data GIS data is best for storing?
a. Points, Circle and Rectangle
b. Points, Lines and Circle
c. Points, Lines and Rectangle
d. Points, Lines and regional polygons
Answer: (d)
# -

# # 2. True-false choice

# +
Q1. Data visualization is the mouthpiece of the data.
a. True
b. False
Answer: a

Q2. Statistical Time series analysis can predict black swans.
a. True
b. False
Answer: b (It can't predict black swans)

Q3. Google's alpha go beat the Go champion using supervised learning.
a. True
b. False
Answer: b (using Reinforcement learning)

Q4. To master the art of data analysis, you need to be a good story teller and EDV helps you achieve that.
a. True
b. False
Answer: a

Q5. Supervised learning finds patterns in data. Unsupervised learning finds patterns for a prediction task.
a. True
b. False
Answer: b (It would be right if exchanging the subjects of the two sentences)

Q6. Neural networks account for interactions really well.
a. True
b. False
Answer: a

Q7. Machine learning can give computers the ability to learn to make decisions from data.
a. True
b. False
Answer: a
# -

# # 3. Short-answer Data Camp-Style Notebook questions

# +
# Import what we need
import requests
from bs4 import BeautifulSoup
import nltk

# the e-book url of Alice's Adventures in Wonderland
url = "http://www.gutenberg.org/files/11/11-h/11-h.htm"

# Part 1 Getting the HTML of Alice's Adventures in Wonderland
respond = ----------------------

# Part 2 Setting the correct text encoding of the HTML page
----------------------

# Part 3 Extracting the HTML from the request object
html = ----------------------

# Part 4 Creating a BeautifulSoup object from the HTML
soup = ----------------------

# Part 5 Getting the text out of the soup
text = ----------------------

# Part 6 Creating a tokenizer
tokenizer = ----------------------

# Part 7 Tokenizing the text
tokens = ----------------------

# Part 8 Printing out the first 10 words / tokens
----------------------

Answer:

# Import what we need
import requests
from bs4 import BeautifulSoup
import nltk

# Part 1 Getting the HTML of Alice's Adventures in Wonderland
respond = requests.get("http://www.gutenberg.org/files/11/11-h/11-h.htm")

# Part 2 Setting the correct text encoding of the HTML page
respond.encoding = 'utf-8'

# Part 3 Extracting the HTML from the request object
html = respond.text

# Part 4 Creating a BeautifulSoup object from the HTML
soup = BeautifulSoup(html)

# Part 5 Getting the text out of the soup
text = soup.text

# Part 6 Creating a tokenizer
tokenizer = nltk.tokenize.RegexpTokenizer('\w+')

# Part 7 Tokenizing the text
tokens = tokenizer.tokenize(text)

# Part 8 Printing out the first 10 words / tokens
print(tokens[:10])
# -

# # 4. Task on Data Set Mini
# <p>Load the Boston Data Set in Sklearn. Creating LinearRegression to fit feature and price of house. </p>
# <p>Using the LinearRegression with features of house to predict price. </p>
# <p>Print the scatter of actual price and predicted price. It should look like this:</p>
#
#
# <p>Answer:</p>

# +
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
import pandas as pd

# Load the boston data base
boston = load_boston()
bos = pd.DataFrame(boston.data)
bos.columns = boston.feature_names
bos['Price'] = boston.target

lm = LinearRegression()
X = bos.iloc[:,0:-1]
y_actual = bos.iloc[:,-1]
lm.fit(X, y_actual)
y_pred = lm.predict(X)

plt.scatter(y_actual, y_pred)
plt.xlabel("actual price")
plt.ylabel("predicted price")
plt.show()
# -
Exam.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.4 (''env'': venv)' # language: python # name: python3 # --- import RoiCalc # + attorney_count = 20 attorney_cost = 200 critical_services = 2 it_cost = 3 * 37 issue_frequency = 4 issue_duration_minutes = 36 pingplotter_downtime_impact = 0.5 legal_downtime_cost = RoiCalc.calc_downtime_cost( attorney_cost, it_cost, issue_frequency, issue_duration_minutes ) legal = RoiCalc.PingPlotterRoi( attorney_count, critical_services, legal_downtime_cost, pingplotter_downtime_impact, ) # - # ## Contact center # # + csr_count = 150 csr_cost = 20 critical_services = 1 it_cost = 10 * 25 issue_frequency = 30 issue_duration_minutes = 20 pingplotter_downtime_impact = 0.5 contact_downtime_cost = RoiCalc.calc_downtime_cost( csr_cost, it_cost, issue_frequency, issue_duration_minutes ) contact_center = RoiCalc.PingPlotterRoi( csr_count, critical_services, contact_downtime_cost, pingplotter_downtime_impact, ) display(contact_downtime_cost) contact_center.plotBreakeven("be")
Cloud ROI Calculator.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .java // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Java // language: java // name: java // --- // %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/ // %maven ai.djl:api:0.12.0 // %maven org.slf4j:slf4j-api:1.7.26 // %maven org.slf4j:slf4j-simple:1.7.26 // %maven ai.djl.mxnet:mxnet-engine:0.12.0 // %maven ai.djl.mxnet:mxnet-native-auto:1.8.0 import ai.djl.Device; import ai.djl.ndarray.*; import ai.djl.ndarray.types.*; import ai.djl.ndarray.index.*; NDManager manager = NDManager.newBaseManager(); var x = manager.arange(12) x // %system nvidia-smi NDManager manager = NDManager.newBaseManager(); manager.ones(new Shape(4))
notebooks/djl/Cap 2 - DJL - Preliminares 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self, input_dim, hidden_dim=[128, 128, 64], output_dim=1): super(Net, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.layer1 = nn.Linear(in_features=self.input_dim, out_features=self.hidden_dim[0]) self.layer2 = nn.Linear(in_features=self.hidden_dim[0], out_features=self.hidden_dim[1]) self.layer3 = nn.Linear(in_features=self.hidden_dim[1], out_features=self.hidden_dim[2]) self.y1_out = nn.Linear(in_features=self.hidden_dim[1], out_features=output_dim) self.y2_out = nn.Linear(in_features=self.hidden_dim[2], out_features=self.output_dim) def forward(self, x): x = F.relu(self.layer1(x)) x = F.relu(self.layer2(x)) y1_out = self.y1_out(x) x = F.relu(self.layer3(x)) y2_out = self.y2_out(x) return y1_out, y2_out model = Net(input_dim=8) print(model)
Course-1 Custom Models, Layers and Loss Functions/Week-1/C1_W1_Lab_2_multi-output_PyTorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # # 📃 Solution for Exercise M6.02 # # The aim of this exercise it to explore some attributes available in # scikit-learn's random forest. # # First, we will fit the penguins regression dataset. # + import pandas as pd from sklearn.model_selection import train_test_split penguins = pd.read_csv("../datasets/penguins_regression.csv") feature_name = "Flipper Length (mm)" target_name = "Body Mass (g)" data, target = penguins[[feature_name]], penguins[target_name] data_train, data_test, target_train, target_test = train_test_split( data, target, random_state=0) # - # <div class="admonition note alert alert-info"> # <p class="first admonition-title" style="font-weight: bold;">Note</p> # <p class="last">If you want a deeper overview regarding this dataset, you can refer to the # Appendix - Datasets description section at the end of this MOOC.</p> # </div> # Create a random forest containing three trees. Train the forest and # check the generalization performance on the testing set in terms of mean # absolute error. # + # solution from sklearn.metrics import mean_absolute_error from sklearn.ensemble import RandomForestRegressor forest = RandomForestRegressor(n_estimators=3) forest.fit(data_train, target_train) target_predicted = forest.predict(data_test) print(f"Mean absolute error: " f"{mean_absolute_error(target_test, target_predicted):.3f} grams") # - # The next steps of this exercise are to: # # - create a new dataset containing the penguins with a flipper length # between 170 mm and 230 mm; # - plot the training data using a scatter plot; # - plot the decision of each individual tree by predicting on the newly # created dataset; # - plot the decision of the random forest using this newly created dataset. 
# # <div class="admonition tip alert alert-warning"> # <p class="first admonition-title" style="font-weight: bold;">Tip</p> # <p class="last">The trees contained in the forest that you created can be accessed # with the attribute <tt class="docutils literal">estimators_</tt>.</p> # </div> # + [markdown] tags=["solution"] # In a first cell, we will collect all the required predictions from the # different trees and forest. # + # solution import numpy as np data_range = pd.DataFrame(np.linspace(170, 235, num=300), columns=data.columns) tree_predictions = [] for tree in forest.estimators_: # we convert `data_range` into a NumPy array to avoid a warning raised in scikit-learn tree_predictions.append(tree.predict(data_range.to_numpy())) forest_predictions = forest.predict(data_range) # + [markdown] tags=["solution"] # Now, we can plot the predictions that we collected. # + tags=["solution"] import matplotlib.pyplot as plt import seaborn as sns sns.scatterplot(data=penguins, x=feature_name, y=target_name, color="black", alpha=0.5) # plot tree predictions for tree_idx, predictions in enumerate(tree_predictions): plt.plot(data_range[feature_name], predictions, label=f"Tree #{tree_idx}", linestyle="--", alpha=0.8) plt.plot(data_range[feature_name], forest_predictions, label=f"Random forest") _ = plt.legend(bbox_to_anchor=(1.05, 0.8), loc="upper left")
notebooks/ensemble_sol_02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow.keras.datasets.mnist as mnist (features_train, label_train), (features_test, label_test) = mnist.load_data() label_train features_train.shape features_test.shape features_train = features_train.reshape(60000, 28, 28, 1) features_test = features_test.reshape(10000, 28, 28, 1) features_train = features_train / 255.0 features_test = features_test / 255.0 import numpy as np import tensorflow as tf from tensorflow.keras import layers np.random.seed(8) tf.random.set_seed(8) model = tf.keras.Sequential() conv_layer1 = layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)) conv_layer2 = layers.Conv2D(64, (3,3), activation='relu') fc_layer1 = layers.Dense(128, activation='relu') fc_layer2 = layers.Dense(10, activation='softmax') model.add(conv_layer1) model.add(layers.MaxPooling2D(2, 2)) model.add(conv_layer2) model.add(layers.MaxPooling2D(2, 2)) model.add(layers.Flatten()) model.add(fc_layer1) model.add(fc_layer2) optimizer = tf.keras.optimizers.Adam(0.001) model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) model.summary() model.fit(features_train, label_train, epochs=5, validation_split=0.2, verbose=2) model.evaluate(features_test, label_test)
Exercise02/Exercise02.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# load the raw dataset: PANCAN RNA-Seq gene expression, genes x samples.
import numpy as np
import pandas as pd
from pandas import DataFrame
rawdf = pd.read_csv("unc.edu_PANCAN_IlluminaHiSeq_RNASeqV2.geneExp.tsv", sep="\t", index_col=0)
rawdf.head()
# -

# transpose raw dataset so row is patient samples and column is list of genes
processeddf = rawdf.transpose()

# load class label (cancer type per sample)
dflabels = pd.read_csv('project_class_labels_original_10471.csv',index_col='Unnamed: 0')

# check to see if there is any feature (i.e. column) has all zero values so we will delete them
removedAllZeroColdf = processeddf.loc[:, (processeddf != 0).any(axis=0)]
removedAllZeroColdf.shape

# +
# data scaling
# method 1 : standardization (zero mean, unit variance per gene)
from sklearn.preprocessing import StandardScaler
stdscaler = StandardScaler()
stdscalerfit = stdscaler.fit_transform(removedAllZeroColdf)
# fit_transform returns a bare ndarray; rebuild a DataFrame with the
# original sample index and gene columns.
stddf = DataFrame(stdscalerfit,index = removedAllZeroColdf.index, columns = removedAllZeroColdf.columns)
stddf.head()

# +
# split 80% training set; 20% testing set
# NOTE(review): no random_state, so the split (and downstream accuracies)
# changes on every run -- confirm whether reproducibility matters here.
from sklearn.model_selection import train_test_split
trainData, testData, trainLabel, testLabel = train_test_split(stddf, dflabels, test_size=0.20)

# +
# optimization of number of neighbors: evaluate test accuracy for several k
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import pylab as pl
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score

numberOfNeighbors = [1, 5, 10, 25, 50]
results=[]

for i in numberOfNeighbors:
    clf = KNeighborsClassifier(n_neighbors=i)
    # ravel() flattens the one-column label DataFrame into the 1-D array
    # scikit-learn expects.
    clf.fit(trainData, trainLabel.values.ravel())
    pred = clf.predict(testData)
    accuracy = accuracy_score(testLabel, pred)
    print("Accuracy Score (k = ", i,"):", accuracy)
    results.append([i,accuracy])

resultsMatrix = pd.DataFrame(results, columns=["i","accuracy"])
pl.plot(resultsMatrix.i, resultsMatrix.accuracy)
pl.title("Accuracy Score when K goes up")
pl.show()

# +
# distance weighting function; k = 5 (closer neighbors get larger votes)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import pandas as pd
import time

clf_weighted = KNeighborsClassifier(n_neighbors=5,weights='distance')

start = time.time()
clf_weighted.fit(trainData, trainLabel.values.ravel())
end = time.time()
# NOTE(review): message says "(normal)" but this is the distance-weighted
# classifier -- the label in the printed string is misleading.
print("5 nearest neighbor (normal) training time: ", end - start)

pred = clf_weighted.predict(testData)
accuracy = accuracy_score(testLabel, pred)
print("Accuracy Score (distance weighted) k = 5:", accuracy)

# confusion matrix
# NOTE(review): this assignment shadows the imported confusion_matrix
# function with its result; a second call in this session would fail.
from sklearn.metrics import confusion_matrix
confusion_matrix = confusion_matrix(testLabel, pred)
print(confusion_matrix)

# classification report (per-class precision/recall/F1)
from sklearn.metrics import classification_report
print(classification_report(testLabel, pred))
# -
project_k_neighbor_baseline.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import seaborn as sns import numpy as np import matplotlib.pyplot as plt # %matplotlib inline pd.options.mode.chained_assignment=None pd.options.display.max_columns = None # - df_train_variants = pd.read_csv("training_variants.zip") df_test_variants = pd.read_csv("test_variants.zip") df_train_text = pd.read_csv("training_text.zip", sep="\|\|", engine='python', header=None, skiprows=1, names=["ID", "Text"]) df_test_text = pd.read_csv("test_text.zip", sep="\|\|", engine='python', header=None, skiprows=1, names=["ID", "Text"]) print("Train and Test text variants shape:", df_train_variants.shape, df_test_variants.shape) print("Train and Test text shape:", df_train_text.shape, df_test_text.shape) df_train_variants.head() df_test_variants.head() df_train_text.head() df_train_variants.describe(include="object") df_train_variants.nunique() sns.countplot(data=df_train_variants, x="Class")
msk-redefining-cancer-treatment/basic-eda.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # 8. Cross-Validation
#
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rhennig/EMA6938/blob/main/Notebooks/8.Cross-Validation.ipynb)
#
# (Based on https://aamir07.medium.com/polynomial-regression-with-k-fold-cross-validation-bc5275137546)
#
# In this notebook, we will apply k-fold cross-validation to determine the optimal degree of polynomial features in polynomial regression, a model hyperparameter.

# ## Polynomial Regression
#
# Polynomial regression is actually a form of linear regression. We simply change the features to be polynomials of our input data $x_i$. It is linear regression, because the resulting model is linear in the parameters, $\beta_i$:
# $$
# \hat y_i = \beta_0 + \beta_1 x_i + \beta_2 x_i^2 + \beta_3 x_i^3 + \dots
# $$
# or in matrix form
# $$
# \begin{bmatrix} \hat y_1 \\ \hat y_2 \\ \vdots \\ \hat y_n \end{bmatrix} =
# \begin{bmatrix}
# 1 & x_1 & x_1^2 & x_1^3 & \dots \\
# 1 & x_2 & x_2^2 & x_2^3 & \dots \\
# \vdots \\
# 1 & x_n & x_n^2 & x_n^3 & \dots
# \end{bmatrix}
# \begin{bmatrix}
# \beta_0 \\
# \beta_1 \\
# \beta_2 \\
# \beta_3 \\
# \vdots
# \end{bmatrix}.
# $$
# $$
# \hat{\bf y} = {\bf X}\beta.
# $$
#
# The coefficients, $\beta_i$, that minimize our cost function, given by the mean-squared error, fulfill the normal equation:
# $$
# {\bf X}^\mathrm{T} {\bf X} \beta = {\bf X}^\mathrm{T} {\bf y}.
# $$

# ## K-fold Cross Validation
#
# k-fold Cross Validation is a technique for model selection where the training data set is divided into $k$ equal groups. The first group is considered as the validation set and the rest $k-1$ groups as training data and the model is fit on it. This process is iteratively repeated $k$ times, so that each group serves once as the validation set.
Each time the $k^\mathrm{th}$ group will be selected as validation and the remaining $k-1$ groups will be used for optimizing the model parameters, $\beta$. In each iteration, the validation MSE is calculated, and after $k$ iterations the cross-validation MSE is given as the average:
# $$
# \text{CV}_k = \frac{1}{k} \sum_{i=1}^k \text{MSE}_i
# $$
# This validation MSE is the estimate for our test data MSE.
#
# <img src="https://github.com/rhennig/EMA6938/blob/main/Notebooks/Figures/Cross-Validation.png?raw=1" alt="Confusion Matrix" align="right" style="width:400px; float:right"/>
#
# Importantly, **cross-validation is for model selection or hyperparameter optimization** and utilizes the training data. To measure the **performance of our model**, we need to apply it to data that was not used in the optimization of the hyperparameters or model parameters. Usually, we keep a fraction of 20% of the data aside as a **holdout test set** and use 80% of the data for the training set used in cross-validations. We measure the model performance of the optimized model on the holdout test set.
# +
# Import the numpy, panda, sklearn, and matplotlib libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split

plt.rc('xtick', labelsize=18)
plt.rc('ytick', labelsize=18)
# -

# Generate a synthetic data set: noisy x positions and a noisy trigonometric
# target, so the "true" model is not a polynomial of any finite degree.
np.random.seed(seed=0)
x = np.linspace(0, 2, 300)
x = x + np.random.normal(0, .3, x.shape)
y = np.cos(x) + 2*np.sin(x) + 3*np.cos(x*2) + np.random.normal(0, 1, x.shape)

# Plot the whole dataset
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(x, y)
ax.set_xlabel('X Values', fontsize=20)
ax.set_ylabel('cos(x)+2sin(x)+3cos(2x)', fontsize=20)
ax.set_title('Scatter Plot', fontsize=25)
plt.show()

# Split the dataset into 80% for training and 20% for testing.
# reshape to (n_samples, 1): scikit-learn requires 2-D feature arrays.
x = x.reshape((300, 1))
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8)

# +
# Hyperparameter search: sweep the polynomial degree and compare the
# training MSE against the 5-fold cross-validation MSE.
maxdegree = 10  # The maximum degree we would like to test

training_error = []
cross_validation_error = []

for d in range(1, maxdegree):
    # Polynomial features for the training data only; the holdout test set
    # is deliberately untouched until the final validation cell below.
    x_poly_train = PolynomialFeatures(degree=d).fit_transform(x_train)

    # fit_intercept=False because PolynomialFeatures already adds the bias column.
    lr = LinearRegression(fit_intercept=False)
    model = lr.fit(x_poly_train, y_train)      # Optimize the linear regression coefficients
    y_train_pred = model.predict(x_poly_train) # Predictions of the model

    mse_train = mean_squared_error(y_train, y_train_pred)  # MSE on training data
    training_error.append(mse_train)

    # 5-fold cross validation to calculate the cross-validation MSE.
    # scoring is "neg_mean_squared_error", hence the absolute value below.
    cve = cross_validate(lr, x_poly_train, y_train,
                         scoring='neg_mean_squared_error', cv=5,
                         return_train_score=True)
    cross_validation_error.append(np.mean(np.absolute(cve['test_score'])))

# Training error decreases monotonically with degree; the CV error exposes
# where overfitting starts.
fig, ax = plt.subplots(figsize=(8, 8))
ax.plot(range(1, maxdegree), cross_validation_error, label="CV Error")
ax.plot(range(1, maxdegree), training_error, label="Training Error")
ax.set_xlabel('Degree', fontsize=20)
ax.set_ylabel('MSE', fontsize=20)
ax.set_title('Hyperparameter Optimization', fontsize=25)
ax.legend(loc='upper right', fontsize=20)
plt.show()

# +
# Validate optimal model on holdout data set

# Select the optimal hyperparameter (argmin over CV error; +1 because the
# sweep started at degree 1).
degree = np.argmin(cross_validation_error)+1
print("Optimal degree of polynomial regression = ", degree)

# Fit the model parameters using the optimal hyperparameter
x_poly_train = PolynomialFeatures(degree).fit_transform(x_train)  # Create the polynomial features for the training set
lr = LinearRegression(fit_intercept=False)
model = lr.fit(x_poly_train, y_train)  # Optimize the linear regression coefficient

# Measure performance of optimal model on holdout dataset
x_poly_test = PolynomialFeatures(degree).fit_transform(x_test)  # Create polynomial features on holdout set
y_test_pred = model.predict(x_poly_test)  # Predictions of the model on holdout set
mse_test = mean_squared_error(y_test, y_test_pred)  # MSE on holdout set
print("MSE on holdout set = ", mse_test)

# Calculate optimal polynomial regression on a smooth grid, plus the
# noise-free ground truth for visual comparison.
x_model = np.linspace(np.min(x), np.max(x), 100)
X_model = PolynomialFeatures(degree).fit_transform(np.array([x_model.flatten()]).T)
y_model_pred = model.predict(X_model)
y_truth = np.cos(x_model)+2*np.sin(x_model)+3*np.cos(x_model*2)

# Plot the whole dataset
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(x, y, label='Data')
ax.plot(x_model, y_model_pred, color='orange', label='Model')
ax.plot(x_model, y_truth, color='green', label='Truth')
ax.set_xlabel('x-Values', fontsize=20)
ax.set_ylabel('y-Values', fontsize=20)
ax.set_title('Performance', fontsize=25)
ax.legend(loc='upper right', fontsize=20)
plt.show()
# -
Notebooks/8.Cross-Validation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # La conjecture de Syracuse # + [markdown] slideshow={"slide_type": "subslide"} # # Présentation du problème # La suite de Syracuse est définie comme suit : # Soit N un nombre entier # Si N est pair alors N est remplacé par N/2 # Sinon N est remplacé par 3N+1 # # et ainsi de suite... # - # # Conjecture # Quel que soit le nombre entier choisi au départ, il semble que la suite finisse toujours par atteindre le cycle infini 4,2,1... # # Simulation # A ce jour la conjecture n'a été ni démontrée ni démentie. # Cependant la simulation par ordinateur tend à montrer que celle-ci est vraie. # Voici un exemple de programme, écrit en langage Python. # A vous de le tester et de le modifier en fonction de vos besoins. # En particulier, on pourra proposer à l'utilisateur de saisr le plus grand entier à tester. (ici 999) # # + slideshow={"slide_type": "-"} c=0 M=0 T=0 for n in range (1,1000): u=n while u>1: if u%2==0: u=u/2 else: u=3*u+1 c=c+1 if c>M: M=c T=n c=0 print("Pour tous les nombres entiers entre 1 et 999:") print("Le nombre nécessitant le plus d'étapes pour atteindre 4,2,1 est :",T) print("Le nombre 1 est atteint au bout de",M,"étapes.") # + [markdown] slideshow={"slide_type": "-"} # <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Licence Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by-sa/4.0/88x31.png" /></a><br />Ce document est mis à disposition selon les termes de la <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">Licence Creative Commons Attribution - Partage dans les Mêmes Conditions 4.0 International</a>. # # Pour toute question, suggestion ou commentaire : <a href="mailto:<EMAIL>"><EMAIL></a>
Conjecture de Syracuse.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Review of 2018 Vaccine Adverse Event Reporting System

# ## Introduction
#
# ##### The data set tracks VAERS, which is the Vaccine Adverse Event Reporting System for the year 2018. This information is obtained by the Centers for Disease Control and Prevention (CDC).
#
# ##### The data set details include vaccine names, manufacturers, manufacturer’s lot numbers, vaccine doses, administration routes, and anatomical sites where the vaccine was administered.
#
# ##### Additionally, the primary information data provides age, sex, symptoms reported, and the following adverse events outcomes: Died, Life Threatening, ER visit, Hospitalized, Days Hospitalized, Disabled, Recovered, Vaccination Date, Onset Date, Onset Interval, Other Medications, Current Illnesses, Prior Vaccination and Condition History.

# ## Purpose
#
# ##### Vaccination is facing increasing public health pressures and it is important to note if the drug formulations themselves are becoming more advanced through new administration techniques that could be more direct and less potentially prone to adverse events. Additionally, there is increased public debate regarding the appropriate age of vaccination and what combinations of vaccinations to be given at a time.
#
# ##### The VAERS form strives to collect data across a broad spectrum of variables, however, it is biased in the sense that vaccines where no adverse events occurred are not recorded by the reporting system, or at least that data is not available in the 2018 sample set.
#
# ##### With any incomplete data set, especially one that has the potential to influence public health policy and that has the power to sway decision making in the regulatory sphere it is important to have clear transparency in the data you are utilizing to guide your predictive models and to state its limitations within the analysis.
#
# ##### It is further important to make an inference on how the outcomes can change with the introduction of missing variables and the impact that would have on the model. In this particular scenario, it would be necessary to understand why the surveys were not collected on no adverse events and address if it would play a significant role on the questions posed. Based on literature review on adverse event reporting within the clinical setting, the nature of the way the data is collected is misleading. The survey is only available when an event occurs that signals a negative adverse event. The system itself is not set up to collect all vaccination events and thus record all outcomes, both positive and negative. This will be critical to note in the analysis and in the recommendations made moving forward.
#

import pandas as pd
import seaborn as sns
import pdb

# ## Datasets
#
# ##### df_vax denotes half of the vaccination data collected (which includes information about the vaccinations themselves)
# ##### df_sym denotes the data surrounding the adverse events

# NOTE(review): filenames say 2019 although the narrative describes 2018 data
# -- confirm which reporting year is actually loaded.
df_vax = pd.read_csv('./2019VAERSVAX.csv')

# cp1252 encoding: the symptom file contains Windows-1252 characters.
df_sym = pd.read_csv('./2019VAERSDATA.csv', encoding='cp1252')

df_sym.head()

# ## Merged Datasets

# This first merge is only echoed in the notebook output; the result is
# recomputed and bound to `finaldata` two cells below.
df_vax.merge(df_sym, left_on='VAERS_ID' , right_on='VAERS_ID')

df_vax.head()

# Inner join of vaccine records with their adverse-event records on the
# shared report identifier.
finaldata = df_vax.merge(df_sym, left_on='VAERS_ID' , right_on='VAERS_ID')

# ## Name of Vaccine Providers (Pharmaceuticals)

finaldata['VAX_MANU'].value_counts()

# ## Route of Administration
#
# ##### __*IM*__: intramuscular injection (ex. tetanus, HepB, flu vaccine) --- delivered into muscle
#
# ##### __*UN*__: unknown site injection
#
# ##### __*SYR*__: syringe (undisclosed if intramuscular or subcutaneous)
#
# ##### __*SC*__: subcutaneous injection (ex. MMR, Varicella) ---- in the fat layer underneath skin
#
# ##### __*PO*__: per oral (medication)
#
# ##### __*OT*__: other
#
# ##### __*IN*__: intranasal (flu vaccine)
#
# ##### __*ID*__: intradermal (ex. TB test) --- skin layer underneath epidermis, upper layer
#

finaldata['VAX_ROUTE'].value_counts()

# ## Visualization of Datasets

import matplotlib.pyplot as plt

finaldata['AGE_YRS'].hist(bins=10)
plt.xlabel('Age')
plt.ylabel('Count')
#newdf = finaldata['AGE_YRS'].value_counts()
#range of age and histo

# ##### As can be visualised above, the number of vaccines administered are higher in specific subsectors of the population, particularly higher for children and young adults under 20, and for adults between 60 and 70.

finaldata=finaldata.set_index('VAERS_ID')
finaldata.head()

# Pairwise correlation over numeric columns, rendered as a colored gradient.
corr = finaldata.corr()
corr.style.background_gradient(cmap='RdBu_r', axis=None).set_precision(3)
# 'coolwarm', RdBu_r', & 'BrBG' good color maps

# ## Selected categories of interest

selection=['VAX_MANU','VAX_ROUTE','VAX_SITE','VAX_NAME','AGE_YRS','ER_VISIT']
cutfinaldata=pd.DataFrame(finaldata,columns=selection)
cutfinaldata.head()

cutfinaldata.count()
# ER visit (not collected or not needed) and no way it was reported

cutfinaldata['AGE_YRS'].hist(bins=10)

cutfinaldata.dtypes

# Encode each categorical column as integer category codes so .corr() can
# include them; codes are arbitrary labels, so treat resulting correlations
# with caution.
cutfinaldata['VAX_MANU']=cutfinaldata['VAX_MANU'].astype('category').cat.codes
cutfinaldata['VAX_ROUTE']=cutfinaldata['VAX_ROUTE'].astype('category').cat.codes
cutfinaldata['VAX_SITE']=cutfinaldata['VAX_SITE'].astype('category').cat.codes
cutfinaldata['VAX_NAME']=cutfinaldata['VAX_NAME'].astype('category').cat.codes
cutfinaldata['ER_VISIT']=cutfinaldata['ER_VISIT'].astype('category').cat.codes

cutfinaldata.dtypes

# ## Correlation Analysis

corr2=cutfinaldata.corr()
corr2.style.background_gradient(cmap='RdBu_r', axis=None).set_precision(3)
# 'coolwarm', RdBu_r', & 'BrBG' good color maps
#slight correlation between age and vax_name, which makes sense as many vaccines are given based on age type
#could show a stronger correlation if data is cleaner

# ##### As seen above, there is a slight correlation between age and vax_name, which makes sense as many vaccines are given based on age type. A stronger correlation could be shown if the data would be cleaner.

# ## Limitations of VAERS datasets
#
# ##### Given the limitations of the dataset, I wanted to expand on the bias issue and merge in a new dataset on childhood vaccination to show relative frequency of bias in the VAERS dataset.

# ## Frequency of Childhood Vaccination Dataset

df_dpvax = pd.read_csv('./DPdataset.csv', encoding='cp1252')
df_dpvax.head()

df_dpvax.plot(kind='scatter',x='Value',y='TIME',color='red')
plt.show()

df_dpvax.count()

df_dpvax['SUBJECT'].value_counts()

selection2=['LOCATION','SUBJECT','Value']
df_dpvax2=pd.DataFrame(df_dpvax,columns=selection2)
df_dpvax2.head()

# Wide table: one column per SUBJECT, holding Value.
table1=df_dpvax2.pivot(index = None, columns='SUBJECT', values = 'Value')
table1
ltarab/Laura.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Final project # # <img style="float: right; margin: 0px 0px 15px 15px;" src="https://p0.pikrepo.com/preview/226/186/artificial-intelligence-concept.jpg" width="350px" height="180px" /> # # In the final proyect you will tackle some problem using a probabilistic graphical model that you will propose. You are allowed to use all the tools we saw throughout the course to this end, and then you will evaluate the performance of your model in terms of the problem. # # The project will be done by pairs. # ___ # ## 1. Problem description # # First of all, you should clearly describe the problem you are trying to solve, and how the data is generated. # # Not only text should be included. A diagram of the situation, pictures or some visual material is of great help. # ## 2. Probabilistic graphical model proposal # # Once you describe the problem, you should propose a PGM that attempts to model the situation of interest. This model can come from various sources: # # - The knowledge of an expert. # - Your own knowledge. # - Literature. # - A structure learning scheme. # # In any case, you must document the whole process of construction of the model. # ## 3. Model evaluation # # The problem might provide an inherent evaluation metric for the model you propose. If this is not the case, you should propose a metric to evaluate your model and justify why it is a proper metric for the specific problem you are trying to solve. # # According to this metric, you should evaluate your model. You may use a benchmark model for comparison purposes. # ## 4. Presentation # # All the points described above must be done in a jupyter notebook (including the pieces of code you use), considering that you will present the project using that notebook. 
# <script> # $(document).ready(function(){ # $('div.prompt').hide(); # $('div.back-to-top').hide(); # $('nav#menubar').hide(); # $('.breadcrumb').hide(); # $('.hidden-print').hide(); # }); # </script> # # <footer id="attribution" style="float:right; color:#808080; background:#fff;"> # Created with Jupyter by <NAME>. # </footer>
ProjectDescription.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: python38
#     language: python
#     name: python38
# ---

# +
#default_exp github
# -

# # github
# apis of github

#export
import requests
from requests import get, Response
from beartype import beartype


#export
@beartype
def githubGet(url:str, token:str)->Response:
    '''
    get request to github using token

    parameters:
        url:str: github raw file url
        token:str: your personal access token
    response:
        response: requests.Response: response object
    '''
    # GitHub expects a personal access token in an Authorization header of
    # the form "token <PAT>".
    r = requests.get(url, headers={"Authorization": f"token {token}"})
    return r


#hide
from pathlib import Path
with open(f'{Path.home()}/.githubtoken.test') as f:
    # strip() guards against a trailing newline in the token file, which
    # would otherwise end up inside the Authorization header and break auth
    token = f.read().strip()
url = 'https://raw.githubusercontent.com/thanakijwanavit/villaConfig/main/cloudsearch.yaml'
githubGet(url, token).text


#export
@beartype
def githubGetYaml(url:str, token:str)->dict:
    '''
    get yaml dictionary from github using token

    parameters:
        url:str: github raw file url
        token:str: your personal access token
    response:
        response: dict: yaml dictionary
    '''
    import yaml
    r = githubGet(url, token)
    # safe_load only constructs plain Python objects; the file comes from a
    # remote repository, so avoid loaders that can instantiate arbitrary
    # classes (yaml.load with FullLoader is broader than needed here).
    return yaml.safe_load(r.text)


url = 'https://raw.githubusercontent.com/thanakijwanavit/villaConfig/main/cloudsearch.yaml'
githubGetYaml(url, token)
nbs/github.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Summary Statistics of SDSS Quiescent galaxies # There's been some issues dealing with simulated galaxies with instantaneous SFR=0 (see Issues [#31](https://github.com/IQcollaboratory/galpopFM/issues/31). These galaxies **in principle** because they don't have gas should not have dust. However, they have a sharp feature in the observable space and as a result impact the DEM parameters we infer. # # It's clear that these simulated galaxies disagree with observables. More importantly, we want to marginalize out this population in our DEM parameter infernece. One way to do this would be to assign quiescent galaxy observables consistent with DSSS to the SFR=0 population. Then these galaxies will not contribute to the discrepancy between the FM and observations. # # In this notebook we're going to examine the observables of SDSS quiescent galaxies. import os import h5py import numpy as np # -- galpopfm -- from galpopfm.catalogs import Catalog # -- plotting -- import matplotlib as mpl import matplotlib.pyplot as plt mpl.rcParams['text.usetex'] = True mpl.rcParams['font.family'] = 'serif' mpl.rcParams['axes.linewidth'] = 1.5 mpl.rcParams['axes.xmargin'] = 1 mpl.rcParams['xtick.labelsize'] = 'x-large' mpl.rcParams['xtick.major.size'] = 5 mpl.rcParams['xtick.major.width'] = 1.5 mpl.rcParams['ytick.labelsize'] = 'x-large' mpl.rcParams['ytick.major.size'] = 5 mpl.rcParams['ytick.major.width'] = 1.5 mpl.rcParams['legend.frameon'] = False dat_dir = os.environ['GALPOPFM_DIR'] sdss = Catalog('tinker') # + M_fuv, M_nuv, _, M_g, M_r, _, _ = sdss.data['NSA_ABSMAG'].T cuts = ( (M_r < -20.) 
& (M_fuv != -999) & (M_nuv != -999)) z_sdss = sdss.data['redshift'][cuts] Fmag = M_fuv[cuts] Nmag = M_nuv[cuts] Rmag = M_r[cuts] Gmag = M_g[cuts] FUV_NUV = Fmag - Nmag G_R = Gmag - Rmag # - logmstar = sdss.data['log.M_star'][cuts] logssfr = sdss.data['log.ssfr'][cuts] logsfr = logssfr + logmstar # Lets impose a simple SSFR based classication for quiescent galaxies and examine how their observables look # simple SSFR Q cut quiescent = (logssfr < -11.) fig = plt.figure(figsize=(6,4)) sub = fig.add_subplot(111) sub.scatter(logmstar, logssfr, c='C0', s=1) sub.plot([9., 12.], [-11., -11.], c='k', ls='--') sub.scatter(logmstar[quiescent], logssfr[quiescent], c='C1', s=1, label='$\sim$Quiescent') sub.legend(loc='upper right', markerscale=5, handletextpad=0., fontsize=20) sub.set_xlabel('$\log M_*$', fontsize=25) sub.set_xlim(9.5, 12.) sub.set_ylabel('$\log$ SSFR', fontsize=25) sub.set_ylim(-15., -7) # + fig = plt.figure(figsize=(14, 6)) sub = fig.add_subplot(121) sub.scatter(Rmag, G_R, c='C0', s=2) sub.scatter(Rmag[quiescent], G_R[quiescent], c='C1', s=1) sub.set_xlabel('$R$ mag', fontsize=25) sub.set_xlim(-20., -23) sub.set_ylabel('$G-R$', fontsize=25) sub.set_ylim(0., 1.5) sub = fig.add_subplot(122) sub.scatter(Rmag, FUV_NUV, c='C0', s=2) sub.scatter(Rmag[quiescent], FUV_NUV[quiescent], c='C1', s=1, label='$\sim$Quiescent') sub.legend(loc='upper right', markerscale=5, handletextpad=0., fontsize=20) sub.set_xlabel('$R$ mag', fontsize=25) sub.set_xlim(-20., -23) sub.set_ylabel('$FUV-NUV$', fontsize=25) sub.set_ylim(-1., 5.) # - # As expected, redder galaxies are quiescent. 
Lets take a closer look at the distribution of the colors for quiescent galaxies # + fig = plt.figure(figsize=(14, 6)) sub = fig.add_subplot(121) _ = sub.hist(G_R, range=(0.2, 1.2), bins=50, color='C0') _ = sub.hist(G_R[quiescent], range=(0.2, 1.2), bins=50, alpha=0.75, color='C1') sub.set_xlabel('$G-R$', fontsize=25) sub.set_xlim(0.2, 1.2) sub = fig.add_subplot(122) _ = sub.hist(FUV_NUV, range=(-1., 5.), bins=100, color='C0') _ = sub.hist(FUV_NUV[quiescent], range=(-1., 5.), bins=100, color='C1', alpha=0.75, label='$\sim$Quiescent') sub.legend(loc='upper right', markerscale=5, handletextpad=0., fontsize=20) sub.set_xlabel('$FUV-NUV$', fontsize=25) sub.set_xlim(-1., 5.) # - # Is there a significant luminosity dependence on the distrubtion? # + fig = plt.figure(figsize=(14, 20)) for i in range(5): rbin = (Rmag < -20-0.5*i) & (Rmag > -20-0.5*(i+1)) sub1 = fig.add_subplot(5,2,2*i+1) _ = sub1.hist(G_R[rbin], range=(0.2, 1.2), bins=50, color='k') _ = sub1.hist(G_R[~quiescent & rbin], range=(0.2, 1.2), bins=50, alpha=0.75, color='C0') _ = sub1.hist(G_R[quiescent & rbin], range=(0.2, 1.2), bins=50, alpha=0.75, color='C1') sub1.set_xlim(0.2, 1.2) sub1.text(0.05, 0.95, r'$%.1f < R < %.1f$' % (-20-0.5*i, -20-0.5*(i+1)), ha='left', va='top', transform=sub1.transAxes, fontsize=20) sub2 = fig.add_subplot(5,2,2*i+2) _ = sub2.hist(FUV_NUV[rbin], range=(-1., 5.), bins=100, color='C0') _ = sub2.hist(FUV_NUV[~quiescent & rbin], range=(-1., 5.), bins=100, color='C0', alpha=0.75) _ = sub2.hist(FUV_NUV[quiescent & rbin], range=(-1., 5.), bins=100, color='C1', alpha=0.75, label='$\sim$Quiescent') sub2.set_xlim(-1., 5.) 
sub1.set_xlabel('$G-R$', fontsize=25) sub2.set_xlabel('$FUV-NUV$', fontsize=25) sub2.legend(loc='upper right', markerscale=5, handletextpad=0., fontsize=20) # + fig = plt.figure(figsize=(14, 6)) sub1 = fig.add_subplot(121) sub2 = fig.add_subplot(122) for i in range(4): rbin = (Rmag < -20-0.5*i) & (Rmag > -20-0.5*(i+1)) _ = sub1.hist(G_R[quiescent & rbin], density=True, histtype='step', range=(0.2, 1.2), bins=50, linewidth=2, color='C%i' % i) sub1.set_xlim(0.2, 1.2) _ = sub2.hist(FUV_NUV[quiescent & rbin], density=True, histtype='step', range=(-1., 5.), bins=100, linewidth=2, color='C%i' % i, label='$%.1f < R < %.1f$' % (-20-0.5*i, -20-0.5*(i+1))) sub2.set_xlim(-1., 5.) sub1.set_xlabel('$G-R$', fontsize=25) sub2.legend(loc='upper right', markerscale=5, handletextpad=0., fontsize=20) sub2.set_xlabel('$FUV-NUV$', fontsize=25) # - # Not so much. We can sample G-R and FUV-NUV directly from the total SDSS quiescent galaxy population color distributions. # + # save the histograms to sample the distribution nbin, edges = np.histogram(G_R[quiescent], range=(0.2, 1.2), bins=50) f_hist = os.path.join(dat_dir, 'obs', 'tinker.Mr_20.quiescent.G_R_dist.npy') np.save(f_hist, [edges, nbin], allow_pickle=True) # + nbin, edges = np.histogram(FUV_NUV[quiescent],range=(-1., 5.), bins=100) f_hist = os.path.join(dat_dir, 'obs', 'tinker.Mr_20.quiescent.FUV_NUV_dist.npy') np.save(f_hist, [edges, nbin], allow_pickle=True) # - # # test `dust_infer` implementation # + from galpopfm import dust_infer as dustInfer sim_sed = dustInfer._read_sed('simba') wlim = (sim_sed['wave'] > 1e3) & (sim_sed['wave'] < 8e3) cens = sim_sed['censat'].astype(bool) # centrals mlim = (sim_sed['logmstar'] > 9.4) # mass limit zerosfr = sim_sed['logsfr.inst'] == -999 # - zerosfr = dustInfer._observable_zeroSFR(sim_sed['wave'][wlim], sim_sed['sed_neb'][cens & mlim & zerosfr,:][:,wlim]) # + fig = plt.figure(figsize=(14, 6)) sub = fig.add_subplot(121) _ = sub.hist(G_R[quiescent], range=(0.2, 1.2), bins=50, alpha=0.75, 
color='C1', density=True) _ = sub.hist(zerosfr[1], range=(0.2, 1.2), bins=50, alpha=0.75, color='k', histtype='step', linewidth=2, density=True) sub.set_xlabel('$G-R$', fontsize=25) sub.set_xlim(0.2, 1.2) sub = fig.add_subplot(122) _ = sub.hist(FUV_NUV[quiescent], range=(-1., 5.), bins=100, color='C1', alpha=0.75, density=True, label='SDSS Quiescent') _ = sub.hist(zerosfr[2], range=(-1., 5.), bins=100, alpha=0.75, color='k', histtype='step', density=True, linewidth=2, label='sampled for SFR=0') sub.legend(loc='upper right', markerscale=5, handletextpad=0.2, fontsize=20) sub.set_xlabel('$FUV-NUV$', fontsize=25) sub.set_xlim(-1., 5.) # -
nb/sdss_quiescent_sumstat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Homework 05 # ### Exercise 1 - Terminology # # Describe the following terms with your own words: # # ***boolean array:*** is a numpy array but with true/false values # # ***shape:*** shape tell about the dimensions and their length of an array # # ***axis:*** directions along the rows and columns(dimension of an array?) # Answer the following questions: # # ***Which ways are there to select one or more elements from a Numpy array?*** with indexing # # ***What is the difference between Numpy and Scipy?*** Scipy has more advanced functions, than in numpy # ### Exercise 2 - Download data from entsoe-e for Lecture 6 # For lecture 6, we need to download data from the Entso-e [transparency platform](https://transparency.entsoe.eu/): Entso-e provides (almost) real-time data on European electricity systems. We will download hourly load data (i.e. electricity demand) for all systems in Europe. First, you need to get a user account at Entsoe-e [here](https://transparency.entsoe.eu/usrm/user/createPublicUser). # # We are going to use the S-FTP server of Entso-e. To use S-FTP in Python, you have to install the package pysftp. You can do so here in the notebook by executing the following command (please be aware that this may take some time): # !conda install -c conda-forge pysftp --yes # Now we are ready to download the data. In principle, you simply have to fill out your account information (by setting ```USER``` and ```PWD```), decide where to put the data locally by assigning a path to a ```DOWNLOAD_DIR``` and run the 4 cells below. If the download directory does not exist, it will be created. The download will take some time, so you may want to run the script overnight. 
# # If the download fails at some point, you can restart it by simply executing the cell again. Files which are already downloaded will not be downloaded again. ***Hint:*** I had problems downloading to a directory which was on a google drive - so if you run into an error message, which says ```OSError: size mismatch in get!``` you may want to choose a directory which is not on a google drive or possibly a dropbox. Also, this error may occur if your disk is full.

# +
import os

import pysftp

# if you want, you can modify this too, per default it will create a folder
# in the parent folder of the homework repository:
DOWNLOAD_DIR = '../../entsoe-data'

CATEGORIES = [
    'ActualTotalLoad'
]
# -

# To avoid storing the user credentials in the public Github repository,
# these commands will ask you to enter them interactively:

from getpass import getpass
user = getpass('User for ENTSO-E API:')
pwd = getpass('Password for ENTSO-E API:')


def download_entsoe_data(user, pwd, category, output_dir,
                         server_uri='sftp-transparency.entsoe.eu'):
    """Download a dataset from ENTSO-E's transparency data sftp server.

    Contact ENTSO-E to receive login credentials:
    https://transparency.entsoe.eu/usrm/user/createPublicUser

    :param user: user name required for connecting with sftp server
    :param pwd: password required for connecting with sftp server
    :param category: ENTSO-E data category to be downloaded
    :param output_dir: directory where downloaded data is saved to,
        a separate subdirectory is created for each category.
    :param server_uri: URI of ENTSO-E transparency server (default last
        updated on 2020-05-01)
    """
    abspath = os.path.abspath(output_dir)

    # create the output directory (including missing parents) if necessary
    if not os.path.exists(abspath):
        os.makedirs(abspath)
        print(f'Successfully created the directory {abspath} and using it for download')
    else:
        print(f'{abspath} exists and will be used for download')

    print("\nCopy this path for other notebooks, e.g. the next lecture or homework:\n"
          f"DOWNLOAD_DIR = '{abspath}'\n")

    cnopts = pysftp.CnOpts()
    # NOTE(review): disabling host key checking makes the connection
    # vulnerable to MITM; kept as-is because the course setup does not
    # distribute ENTSO-E's host key.
    cnopts.hostkeys = None

    # connect to entsoe server via sFTP
    # (was `password=<PASSWORD>`, a redaction artifact that is a syntax
    # error; the credential entered above is `pwd`)
    entsoe_dir = f'/TP_export/{category}'
    with pysftp.Connection(server_uri, username=user, password=pwd,
                           cnopts=cnopts) as sftp:
        sftp.chdir(entsoe_dir)
        files_entsoe = sftp.listdir()

        to_download = list(files_entsoe)
        print(f'In total, {len(to_download)} files are going to be downloaded')

        # download files not on disk; writing to a '.partial' file first
        # ensures an interrupted transfer is never mistaken for a complete
        # download on the next run
        for file in to_download:
            print(f'Downloading file {file}...')
            dest_file = os.path.join(abspath, file)
            if not os.path.exists(dest_file):
                temp_file = os.path.join(abspath, f'{file}.partial')
                sftp.get(f'{entsoe_dir}/{file}', temp_file)
                os.rename(temp_file, dest_file)
                print(f'{file} downloaded successfully.')
            else:
                print(f'{file} already present locally, skipping download.')

    # the `with` block closes the connection; no explicit sftp.close() needed
    print("All downloads completed")


# download data...
for category in CATEGORIES:
    download_entsoe_data(user, pwd, category, DOWNLOAD_DIR)

# **Privacy note:** If you don't want to publish the path to your repository on Github (it may contain your Windows user name for example), clear the output of the cell above before saving the Notebook! (In the menu via Cell -> Current outputs -> Clear.)

# ### Exercise 3 - Create a diagonal matrix
#
# Create a matrix `m` with shape `(4, 4)` by using `np.zeros()` and set the 4 diagonal elements to `1` by using indexing using `np.arange()`. Do not use more two assign statements in total for this exercise!
#
# Bonus: Find multiple ways to avoid calling `np.arange()` twice and analyze which is the best regarding readability, performance and memory usage!
#
# Note: Normally you would use `np.diag()` to do this. You can also have a look into the code using `np.diag??`, but it's probably easier to write your own implementation (which might be less generic and slower, but way simpler).
# +
import math
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import urllib
import os.path
from pathlib import Path

# First variant: np.fill_diagonal() writes 1 onto the main diagonal in place.
m = np.zeros((4, 4))
print(m)
np.fill_diagonal(m, 1)
print(m)

# Second variant, as asked for in the exercise: fancy indexing with a single
# np.arange(); indexing with (i, i) addresses exactly the diagonal elements.
f = np.zeros((4, 4))
i = np.arange(4)  # was np.arange(3), which left the last diagonal element 0
f[i, i] = 1
print(f)  # was print(m), which displayed the wrong matrix
# -

# ### Exercise 4 - Invasion
#
# Create a canvas using `np.zeros()` of shape `(8, 11)`. Then set the following elements to one using fancy slicing techniques:
#
# - Rows 4 and 5 completely.
# - In row 3 all elements except the first one.
# - In row 2 all elements except the first two ones.
# - The two elements defined by: `row_idcs, column_idcs = [0, 1], [2, 3]`
# - In row 6 the elements in column 0 and 2.
# - In row 7 all elements except the first three and the last three.
#
# And then afterwards the following elements to zero:
# - The three elements defined by: `row_idcs, column_idcs = [3, 5, 7], [3, 1, 5]`
#
# As a last step, assign the content of the first five columns to the last five columns in reversed order. This can be done by using a `step=-1` and starting with 4, i.e. the first five columns in reversed order are indexed by `canvas[:, 4::-1]`.
#
# Then plot the canvas using `plt.imshow()` with the parameter `cmap='gray'`!
#
# **Hint:** it helps a lot to have all commands in one cell (including the `imshow()` command) and execute the cell often, to check the result.
#
# **Note:** When ever the instruction says "first element" it is something like `x[0]`, because it refers to the first one in the array. If it is column 1 or row 1 it is `x[1]`, because it refers then to the index of the column/row.
#
# **Note:** It is `canvas[row_index, column_index]`, so if you are thinking in x/y coordinates, it is `canvas[y, x]` and the y axis goes downwards.
# +
canvas = np.zeros((8,11))
# rows 4 and 5 completely
canvas[4:6,:]=1
# row 3: everything except the first element
canvas[3,1:]=1
# row 2: everything except the first two elements
canvas[2,2:]=1
# single elements (0,2) and (1,3)
canvas[[0,1],[2,3]]=1
# row 6: columns 0 and 2
canvas[6,[0,2]]=1
# row 7: all but the first three and last three columns
canvas[7,3:-3]=1
# then clear the three elements (3,3), (5,1), (7,5)
canvas[[3,5,7],[3,1,5]]=0
# mirror the first five columns into the last five (order matters: this must
# happen after all the assignments above)
canvas[:,-5:] = canvas[:, 4::-1]

plt.imshow(canvas, cmap='gray')
plt.show()
# -

# ### Exercise 5 - Draw a circle
#
# Draw a full circle: first define a resolution e.g. $N=50$. Then define coordinates $x$ and $y$ using `np.linspace()` and pass the resolution as parameter `num=N`. Use `np.meshgrid()` to define a grid `xx` and `yy`. Define a canvas of shape `(N, N)` using `np.zeros()`. Then use the circle formula $x^2 + y^2 < r^2$ to define all circle points on the grid (use $r=2$). Then use the boolean 2D expression to set the inside of the circle to 1. Finally plot the canvas using `imshow()`.

# +
N = 50

# np.linspace: Return evenly spaced numbers over a specified interval.
x_coordinates = np.linspace(-7, 7, N)
y_coordinates = np.linspace(-7, 7, N)

# np.meshgrid: Returns coordinate matrices from coordinate vectors.
xx, yy = np.meshgrid(x_coordinates, y_coordinates)

canvas = np.zeros((N, N))

# boolean mask of all grid points strictly inside the circle of radius r
r = 2
circle = xx**2 + yy**2 < r**2
canvas[circle] = 1

plt.imshow(canvas)
plt.xlabel("x")
plt.ylabel("y")
# -

# ### Exercise 6 - Frequency of shades of gray
#
# Convert the picture `numpy-meme.png` to gray scale and plot a histogram!
#
# **Instructions:** Load the image by using `plt.imread()`. This will return a three dimensional array (width, height and colors) with values between zero and one. Using the formula `gray = red * 0.2125 + green * 0.7154 + blue * 0.0721`, convert the picture to shades of gray. Look at the shape of the image and pick the right axis by looking at the length of the array in this axis! You can first calculate a weighted version of the array by multiplying with a vector of length 3 (and the three weights) and then sum along the right axis. Check the shape of the gray image afterwards and plot it using `plt.imshow()` with the parameter `cmap='gray'`. It should be only two dimensional now.
# Use `image_gray.flatten()` to get all pixels as one-dimensional vector and pass this to the function `plt.hist()` with the parameter `bins=50` to get 50 bins with different gray values.

import numpy as np
import matplotlib
import matplotlib.pyplot as plt

# +
matplotlib.rc('figure', figsize=(15, 10))

image = plt.imread('numpy-meme.png')
print(np.shape(image))

# PNGs can carry an alpha channel; keep only the RGB planes so the
# length-3 weight vector broadcasts over the color axis.
image = image[:, :, :3]

# Luminance weights per the exercise text: the green weight is 0.7154
# (the previous 0.7254 was a typo and made the weights sum to more than 1).
image_weighted = image * np.array([0.2125, 0.7154, 0.0721])
# sum over the color axis (the axis of length 3) -> 2D gray image
image_gray = np.sum(image_weighted, axis=2)
print(np.shape(image_gray))

plt.imshow(image_gray, cmap='gray')
plt.show()
# -

image_gray_flat = image_gray.flatten()
plt.hist(image_gray_flat, bins=50, color='yellow')

# ### Exercise 7 - Count colors (optional)
#
# Calculate the number of colors used in the picture `numpy-meme.png` and the percentage of the color space (3 x 8bit, i.e. 256 values per color) used!
#
# **Instructions:** Load the image by using `plt.imread()`. This will return a three dimensional array (width, height and colors) with values between zero and one. Multiplying the array with 255 will restore the original 8bit values (integer values between 0 and 255). After multiplying by 255 use `image = image.astype(int)` to convert the image to integer type. Plot the `image` using `plt.imshow()` to see the image and guess the result. Check the shape of the array. One of the axes is of length three - this is the color axis (red, green and blue). We want to map all colors to unique integers. This can be done by defining `colors = red + green * 256 + blue * 256**2`. This is a unique mapping between the triples `(red, green, blue)` and the integers `color` similar to decimal digits (three values between 0 and 9 e.g. `(3, 5, 1)` can be mapped to a three digit number `3 + 5 * 10 + 1 * 100 = 153`). Then use `np.unique()` to get an array with unique colors (in the mapped form as in `color`). This can be used to determine the number of unique colors in the image. This value can also be used to calculate the percentage of the color space used.
# # <small>Image source: https://me.me/i/1-import-numpy-1-import-numpy-as-np-there-is-e4a6fb9cf75b413dbb3154794fd3d603</small> # Inspired by [this exercise](https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md#66-considering-a-wh3-image-of-dtypeubyte-compute-the-number-of-unique-colors-) (MIT licensed, [DOI](https://zenodo.org/badge/latestdoi/10173/rougier/numpy-100))
homework05-numpy-scipy/homework05_haemmerlemert.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Nikoladu # # This script opens a json file of jobs at Ministry Of Education. It creates a rst file for each job - along with a meta file. This is then turned into a static blogs. # The title of each blog post is the ID of the job. I used this as it is unique. It ignores the first 3 characters (edu) and just focus on the number. # The post content was just the job title, but I have updated it to include helpful skills to have. # Location and category of job is added as tags to the post. # + #import nikola import requests import json import getpass import pandas import os # - myusr = getpass.getuser() with open('/home/{}/moejobs/index.json'.format(myusr), 'r') as opedu: dicminj = json.loads(opedu.read()) # + #opedu = open('/home/{}/moejobs/index.json'.format(myusr), 'rb', 'utf-8') # - # + #minjob = opedu.read() # + #dicminj = json.loads(minjob) # - ldic = len(dicminj) # + #print(dicminj) # + catlis = list() loclis = list() datlis = list() jobti = list() # - numdic = dict() # + #catedi # - #for hel in helpth: #print(hel[0] + '/n') testlay = list() # + for ldi in range(ldic): dicjob = dict() catedi = (dicminj[str(ldi)]['Category']) locdi = (dicminj[str(ldi)]['Location']) datdi = (dicminj[str(ldi)]['Date Advertised']) pandatz = pandas.to_datetime(datdi) pdate = pandatz.date() titdi = (dicminj[str(ldi)]['Job Title']) helpth = (dicminj[str(ldi)]['lidocend']) jobref = (dicminj[str(ldi)]['Job Reference']) jorefd = jobref.replace('/', '-') print (jorefd) with open('/home/{}/minstryofedu/posts/{}.meta'.format(myusr, jorefd), 'w') as moemeta: moemeta.write(jorefd + '\n' + jorefd + '\n' + str(pdate) + ' ' + str('09:00:00') + '\n' + catedi + ', ' + locdi) #opmetf = open('/home/{}/minstryofedu/posts/{}.meta'.format(myusr, jorefd), 'w') #opmetf.write(jorefd + 
'\n' + jorefd + '\n' + str(pdate) + ' ' + str('09:00:00') + '\n' + catedi + ', ' + locdi) #opmetf.close() #print(helpth) for hel in helpth: print(hel[0] + '/n') testlay.append(hel[0]) with open('/home/{}/minstryofedu/posts/{}.rst'.format(myusr, jorefd), 'w') as moerst: moerst.write(titdi) #oprstfi = open('/home/{}/minstryofedu/posts/{}.rst'.format(myusr, jorefd), 'w') #oprstfi.write(titdi) #oprstfi.close() dicjob.update({'Category' : catedi, 'Date Advertised' : str(pdate), 'Job Title' : titdi, 'Location' : locdi, 'Job Reference' : jobref, 'reqs' : helpth}) numdic.update({ldi : dicjob}) #numdic.update({ldi : dicjob}) loclis.append(locdi) datlis.append(datdi) jobti.append(titdi) nedicf = dicjob.copy() nedicf.update(nedicf) numdic.update({ldi : nedicf}) #if 'education' in catedi: # print (catedi) # - # + #json.dumps(numdic) # - with open('/home/{}/minstryofedu/output/index.json'.format(myusr), 'w') as moerst: moerst.write(json.dumps(numdic)) testlay os.chdir('/home/{}/minstryofedu/'.format(myusr)) os.system('nikola build') os.system('aws s3 sync /home/{}/minstryofedu/output/ s3://moejobs'.format(myusr))
nikoladu.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] toc="true"
# # Table of Contents
# <p>
# -

import random
import numpy as np
from itertools import combinations
from collections import defaultdict
# Iterable lives in collections.abc; importing it from collections was
# removed in Python 3.10.
from collections.abc import Iterable


# + code_folding=[8, 10]
class Apriori:
    """Plain-Python Apriori frequent-itemset miner.

    minSupport : minimum relative support (fraction of transactions,
        exclusive) an itemset must exceed to count as frequent.
    numbers : largest itemset size to mine; train() returns the frequent
        itemsets of that size.
    """

    def __init__(self, minSupport, numbers):
        self.numbers = numbers
        self.minSupport = minSupport

    def flattern(self, items, ignore_type=(tuple,)):
        """Recursively flatten *items*, yielding ignore_type instances intact."""
        for x in items:
            if isinstance(x, Iterable) and not isinstance(x, ignore_type):
                # was `yield from flattern(x)` -- a NameError: the recursive
                # call must go through self, and should keep ignore_type
                yield from self.flattern(x, ignore_type)
            else:
                yield x

    def train(self, X):
        """Return the frequent itemsets (tuples) of size self.numbers.

        X is a sequence of transactions, each a collection of hashable,
        orderable items.  Only transactions that contributed a frequent
        itemset at size k are scanned at size k+1 (Apriori pruning).
        """
        len_data = len(X)
        freq_parent = range(len_data)
        for size in range(1, self.numbers + 1):
            # was combinations(raw_data[idx], ...): the method silently read a
            # global defined in the __main__ block instead of its argument X.
            # sorted() canonicalises the tuples so the same itemset drawn from
            # different transactions is counted together.
            data_use = [(idx, combinations(sorted(X[idx]), size))
                        for idx in freq_parent]
            item_cnt = defaultdict(int)
            item_parent = defaultdict(list)
            # loop variable renamed from `i`, which shadowed the outer loop
            for idx, combos in data_use:
                for itemset in combos:
                    item_cnt[itemset] += 1
                    item_parent[itemset].append(idx)
            freq_set = {k for k, v in item_cnt.items()
                        if v / len_data > self.minSupport}
            if not freq_set:
                # nothing frequent at this size, so no larger itemset can be
                # frequent either; the result is the empty set
                break
            freq_parent = [item_parent.get(k) for k in freq_set]
            freq_parent = set(self.flattern(freq_parent))
        return freq_set
# -

# Generate random test data and run the miner.

if __name__ == '__main__':
    data_set = []
    for i in range(100):
        data_set.append(random.sample(range(100), random.randint(1, 5)))
    # duplicate "neighbouring" items a few times so frequent pairs exist
    for a in range(3):
        data_setnew = []
        for i in data_set:
            box = []
            for j in i:
                box.append(j)
                if (j % 10) < 5:
                    box.append(j + 1)
            data_setnew.append(box)
        data_set = data_setnew
    raw_data = [set(i) for i in data_set]
    Ap = Apriori(0.05, 4)
    freq_set = Ap.train(raw_data)
Apriori.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Machine Learning with Microsoft Outlook # # Do you spend a lot of time sorting and filtering your mails in Outlook? # Have you enough to create filtering rules for each new type of mail and modify them each time a mail is a little different that what you specified? # If yes, this tutorial is for you! # # I had the same problem, receiving several hundreds to thousands of mail each day, and creating endless rules that slowed down my outlook a lot. I then developed a script in Python using sklearn (a machine learning library for Python) to automatically sort my mails. # # Indeed, machine learning applies perfectly in this case, because : # - I have already a lot of mails (lots of data) # - These mails are already well sorted # - They don't follow extremely simple rules (such as "move all the mails containing 'order' in the folder 'Order') but rather more complex rules that will take some times to define and which are prone to be modified with future mails (such as "move all the mails containing "order" from this person to this other person with these other keyword A,B,...,Z and without this keyword 1,2,... from this date to this date..) # # # The code I will introduce below contains two main parts : # - The trainer : part that will train the model using all the folders specified (*) with the emails inside # - The predictor : part that will predict the category of a new received mail # # I use in the model a random forest (RF) classifier which, after experimentations, is in this case the fastest and efficient model to classify the mails. # For information, I tried to use neural networks (MLP with Keras) for this task but it was much more time and CPU consumming for no or very little gain. Better use the best tools for a specific task ! 
# # # # # + # coding: utf8 import win32com.client #win32 allows to access microsoft applications like outlook from sklearn.ensemble import RandomForestClassifier #our model with use a random forest from sklearn.feature_extraction.text import CountVectorizer #we will need to normalise our input data before feeding it to our model from sklearn.externals import joblib #we will need to save our model after the training to retrieve it before the prediction from os import listdir from os.path import isfile, join import os from datetime import datetime,timedelta import pytz from timeit import default_timer as timer from bs4 import BeautifulSoup #contains a method to extract raw text from html data from nltk.corpus import stopwords # Import the stop word list import re #regular expressions import sys import pdb # + raw_mimetype="text/markdown" active="" # Note that above we import nltk.corpus # If you haven't already installed the Natural Language Toolkit or one of the corpus needed, you will have to do the following : # - pip install nltk # - python # - import nltk # - nltk.download() # - Select "Corpora/Stopwords" and download # - # Below we define the global definitions : # X is the input vector # y is output vector # outlook_folders will list all the folders containing the intelligent_folder_identifier (by default, the folders containing '.' 
# path_file_vectorizer and path_file_classifier are the paths where we will save the vectorize and the classifier # nb_clf_estimators is the number used for the random forest (1000 is a good value, not too big, not too small) # nb_max_minutes_to_classify_mail is the number maximum of ancienty for a mail used when we retrieve the mails for predictions, -1 to classify all the mails not already classified # + #start global definitions outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI") inbox = outlook.GetDefaultFolder(6) #6= Inbox X=[] y=[] outlook_folders=[] #you might want to modify the following variables path_file_vectorizer='data//vectorizer.pkl' path_file_classifier='data//clf.pkl' intelligent_folder_identifier="." #We define "." as the identifier to identify the 'smart' folders nb_clf_estimators=1000 #Number of estimators used in our model (random forest classifier) nb_max_minutes_to_classify_mail=-1 #We will each classify each mail older than less than nb_max_minutes_to_classify_mail in each folder #end global definitions # - # The function 'get_relevant_info_from_mail' keeps only essential informations about the email, here : # the sender name, the sender email address, the recipient, the message, the CC, the BCC, the subject and the body. # I don't store the dates but that would be interesting if a certain order in the mail will change the category. In this case a LSTM neural network might be more efficient... 
def get_relevant_info_from_mail(msg):
    """Return one string holding the essential fields of an Outlook mail item.

    Fields kept: sender name, sender email address, recipient (To), CC, BCC,
    subject and body.  They are joined with spaces: the previous version glued
    the fields together with "+", which could merge the end of one field with
    the start of the next into a single spurious token for the vectorizer.
    Returns None (implicitly) when a field access fails, e.g. for a
    non-standard item.
    """
    try:
        return " ".join([msg.SenderName, msg.SenderEmailAddress, msg.To,
                         msg.CC, msg.BCC, msg.Subject, msg.Body])
    except Exception as e:
        print("Error in get_relevant_info_from_mail")
        try:
            print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
        except Exception:
            pass

# The function 'strip_text' takes the output of 'get_relevant_info_from_mail' and strips it
# from all undesirable data like html tags and non-letter characters, removes the stop words
# and finally returns a cleaned string containing only meaningful words

def strip_text(raw_text):
    """Strip HTML, non-letter characters and English stop words from raw mail text.

    Returns a single lower-case string of space-separated meaningful words.
    """
    # 1. Remove HTML markup
    review_text = BeautifulSoup(raw_text, "lxml").get_text()
    # 2. Keep only letters (including accented ones) and dots
    accentedCharacters = "àèìòùÀÈÌÒÙáéíóúýÁÉÍÓÚÝâêîôûÂÊÎÔÛãñõÃÑÕäëïöüÿÄËÏÖÜŸçÇߨøÅ寿œ"
    letters_only = re.sub("[^a-zA-Z" + accentedCharacters + r"\.]", " ", review_text)
    # 3. Convert to lower case and split into individual words
    words = letters_only.lower().split()
    # 4. Searching a set is much faster than searching a list, so convert the stop words to a set
    stops = set(stopwords.words("english"))
    # 5. Remove stop words
    meaningful_words = [w for w in words if w not in stops]
    # 6. Join the words back into one string separated by spaces
    return " ".join(meaningful_words)

# The function train_model is the most interesting part.
# X is the input dataset: each element is a cleaned string of words (the output of strip_text).
# A random forest cannot consume strings directly, so each string is first converted into a
# vector of 0s and 1s with a CountVectorizer: the vectorizer builds a global dictionary of
# the words used across X, and each string is encoded by which dictionary words it contains.
#
# For example with the dictionary ["Machine","Learning","is","very","great","awesome","good"],
# the string "Machine Learning is awesome" becomes [1,1,1,0,0,1,0]
# and the string "Learning is great" becomes [0,1,1,0,1,0,0].
#
# Two parameters reduce the dimensionality of the encoded matrix:
# max_df = 0.95 excludes words that occur in nearly every document (stop-word-like terms),
# min_df = 0.005 excludes words that occur very rarely (most probably names).
# Words that occur in too many or too few documents are very unlikely to be discriminative.
#
# With the normalised input X1 and the output vector y (0 for the first category, 1 for the
# second category, ...), we can train the model.
# The function returns the vectorizer, the classifier and the score; the score is not needed
# for the training itself but is useful for benchmarking or for following quality over time.

def train_model(nb_clf_estimators):
    """Vectorize the global dataset X and fit a random forest on it.

    nb_clf_estimators : number of trees of the random forest.
    Returns [vectorizer, clf, score] where score is the training accuracy.
    Reads the module-level lists X (cleaned mail texts) and y (category ids).
    """
    # max_df / min_df exclude words occurring in too many / too few mails,
    # see https://spandan-madan.github.io/ for more about these parameters
    vectorizer = CountVectorizer(analyzer="word",
                                 tokenizer=None,
                                 preprocessor=None,
                                 stop_words=None,
                                 max_features=5000,
                                 max_df=0.95,
                                 min_df=0.005)
    X1 = vectorizer.fit_transform(X)
    # A random forest is quick to train and works very well for text classification
    clf = RandomForestClassifier(n_estimators=nb_clf_estimators)
    clf = clf.fit(X1, y)
    score = clf.score(X1, y)
    return [vectorizer, clf, score]

# The function 'set_dataset' retrieves the relevant information from each mail of the folder
# passed as parameter and appends, for each mail, one row to the global dataset:
# the cleaned text to X and the category identifier `value` to y.
# Note that we only deal with messages with Class == 43 because events and notes do not
# necessarily expose the same fields as standard messages and would need a specific
# treatment (not implemented in this tutorial).

def set_dataset(folder, value):
    """Append (cleaned mail text, category `value`) to the global X / y for every mail of `folder`."""
    for msg in reversed(folder.Items):
        try:
            if msg.Class == 43:  # keep pure mails only (excludes appointments, notes, ...)
                txt0 = get_relevant_info_from_mail(msg)
                if txt0 != None:
                    txt = strip_text(txt0)
                    X.append(txt)
                    y.append(value)
        except Exception as e:
            print("error when adding this mail to our dataset", msg.Subject)
            try:
                print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
            except Exception:
                pass

# The function 'predict_mail_category' is used to predict the category and move accordingly all the mails inside the folder passed as parameters.
# The function takes into input the vectorizer and the classifier and the folders (array that contains all the smart folders).

def predict_mail_category(vectorizer, clf, folders, folder):
    """Predict a category for each recent mail of `folder` and move it to the matching smart folder.

    vectorizer / clf : the fitted CountVectorizer and RandomForestClassifier
    folders          : ordered list of smart folders; the predicted label is an index into it
    folder           : the Outlook folder whose items are classified (normally the inbox)
    """
    # Only mails sent after `dt` are classified, unless
    # nb_max_minutes_to_classify_mail == -1, which means "classify everything".
    dt = pytz.utc.localize(datetime.utcnow()) + timedelta(minutes=-nb_max_minutes_to_classify_mail)
    for msg in reversed(folder.Items):
        try:
            if msg.Class == 43:  # 43 = pure mail item (excludes appointments, notes, ...)
                if msg.SentOn > dt or nb_max_minutes_to_classify_mail == -1:
                    txt0 = get_relevant_info_from_mail(msg)
                    if txt0 != None:
                        txt = strip_text(txt0)
                        X1 = vectorizer.transform([txt])
                        prediction = clf.predict(X1)
                        msg.Move(folders[prediction[0]])
        except Exception as e:
            # Fixed message: this function predicts a category, it does not build the dataset.
            print("error when predicting the category of this mail", msg.Subject)
            try:
                print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
            except Exception:
                pass

# The function 'loop_through_folder' iterates recursively through all the smart folders and
# smart subfolders in the inbox, launching the function `action` on each of them.

def loop_through_folder(k, folders, action):
    """Depth-first walk over the folder tree, calling action(folder, k) on every smart folder.

    A folder is "smart" when its name contains `intelligent_folder_identifier`.
    Returns the updated counter k.  Bug fix: the original dropped the increments
    made inside sub-folders (local `k` never propagated back to the caller), so
    two different folders could receive the same index and the training labels
    produced by set_dataset would disagree with the folder order collected for
    prediction.  Returning k keeps one globally consistent numbering; existing
    callers that ignore the return value keep working.
    """
    for folder in folders:
        if intelligent_folder_identifier in folder.Name:
            action(folder, k)
            k = k + 1
        try:
            # Recurse into sub-folders; accessing folder.Folders may raise for leaf items.
            k = loop_through_folder(k, folder.Folders, action)
        except Exception:
            pass
    return k

# 'main_train_model_from_folders' is the main function to train the random forest.
# It will display the total score and time taken to train the model
# and then will save the vectorizer and the classifier for future use.
def main_train_model_from_folders():
    """Build the dataset from the smart folders, fit the model and persist it to disk."""
    print("Start training")
    start = timer()
    # Walk every smart folder of the inbox and fill the global X / y dataset.
    loop_through_folder(0, inbox.Folders, set_dataset)
    vectorizer, clf, score = train_model(nb_clf_estimators)
    end = timer()
    print("Score=", score * 100, "%")
    print("Total time taken to train the model:", end - start, "s")
    # Persist both objects so that a later prediction run can reload them.
    joblib.dump(vectorizer, path_file_vectorizer)
    joblib.dump(clf, path_file_classifier)
    print("End training")

# 'main_predict_category_for_each_mail' moves every mail of the inbox into the folder
# predicted by the previously saved model.

def main_predict_category_for_each_mail():
    """Reload the saved vectorizer/classifier and classify every mail of the inbox."""
    print("Start prediction")
    start = timer()
    # Load the vectorizer and classifier stored by main_train_model_from_folders.
    vectorizer = joblib.load(path_file_vectorizer)
    clf = joblib.load(path_file_classifier)
    # Rebuild the ordered list of smart folders: its positions must match the
    # category labels that were assigned at training time.
    loop_through_folder(0, inbox.Folders, lambda folder, k: outlook_folders.append(folder))
    predict_mail_category(vectorizer, clf, outlook_folders, inbox)
    end = timer()
    print("End prediction")
    print("Total time taken to classify the mails:", end - start, "s")

# When executing the program outside jupyter, you can specify if you want to train or predict.
# To do so :
# - To train : python outlook_train_and_predict.py train
# - To predict : python outlook_train_and_predict.py predict

if __name__ == '__main__':
    action = sys.argv[1] if len(sys.argv) == 2 else ""
    if action == "train":
        main_train_model_from_folders()
    elif action == "predict":
        main_predict_category_for_each_mail()
    else:
        # Default (no keyword or unknown keyword): run both phases in sequence.
        print("Unknown keyword, use 'train' to train the model and 'predict' to predict the category of each mail")
        main_train_model_from_folders()
        main_predict_category_for_each_mail()

# # Author
#
# <NAME>
#
# - https://www.linkedin.com/in/axelderaismes/
# - http://www.thegeeklegacy.com
# - https://twitter.com/axelderaismes
#
outlook_train_and_predict.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <center><h1> bsolar demonstration </h1></center>
#
# ## In this file, we carefully show the output of the key steps in "bsolar_parallel.py" (the parallel version of bsolar)
# ## Please read the comments and explanations in "bsolar_parallel.py" first.
#
# ---
# ## Preparation 1: we import all the packages

# +
# %reset -f

import numpy as np
import matplotlib.pyplot as plt
import time
import warnings

from matplotlib.ticker import MaxNLocator
from joblib import Parallel, delayed
from sklearn.linear_model import LinearRegression
from solar import solar
from sklearn.exceptions import ConvergenceWarning

# Recent scikit-learn versions: the 'Lars' class may rely on the Cholesky decomposition and
# can emit a convergence warning on high-dimensional data (p much larger than n);
# the following command silences that warning.
warnings.filterwarnings("ignore", category=ConvergenceWarning, module="sklearn")
# -

# ## Preparation 2 : we define the data generator for simulations (which is copied from "simulator.py")

class simul:
    """Data generator for the simulation study.

    Draws a correlated Gaussian design X (pairwise correlation 0.5, unit variance)
    and a response Y = X @ beta + noise, where only the first `n_info` entries of
    beta are non-zero (the increasing sequence 2, 3, ..., n_info + 1, as in the paper).
    """

    def __init__(self, sample_size, n_dim, n_info):
        # Settings shared by the generator: number of observations, number of
        # variables, and number of informative (non-zero coefficient) variables.
        self.sample_size = sample_size
        self.n_dim = n_dim
        self.n_info = n_info

    # data-generating process
    def data_gen(self):
        """Return one simulated sample (X, Y)."""
        # 1. covariance matrix of X: 0.5 everywhere, 1.0 on the diagonal
        cov_x = np.full((self.n_dim, self.n_dim), 0.5) + 0.5 * np.eye(self.n_dim)
        # 2. every column of X has mean 0
        mean_x = np.zeros(self.n_dim)
        # 3. draw X as a multivariate Gaussian
        X = np.random.multivariate_normal(mean_x, cov_x, self.sample_size)
        # 4. regression coefficients: increasing sequence (2, 3, ...) for the
        #    informative variables, 0 for the redundant ones
        beta = np.concatenate((np.arange(2, self.n_info + 2),
                               np.zeros(self.n_dim - self.n_info)), axis=0)
        # 5. standard Gaussian noise
        noise = np.random.normal(0, 1, self.sample_size)
        # reshape for np.inner: noise as a column vector, beta as a row vector
        noise.shape = (self.sample_size, 1)
        beta.shape = (1, self.n_dim)
        # 6. Y is the inner product of X and beta plus the noise
        Y = np.inner(X, beta) + noise
        return X, Y

# ---
# ## Now we show the output of key steps in "bsolar_parallel.py"
# * <font size="4.5"> using a parallel for-loop (coded with Joblib), bsolar trains a solar on each random subsample (based on a child random seed)</font>
#
# ## #1. firstly, we only run the parallel for-loop once and check
# * <font size="4.5"> the random seed generation </font>
# * <font size="4.5"> the solar result on the first subsample </font>
# * <font size="4.5"> the variable "qhat_k" that we use to represent which variable is picked by solar </font>
#
# ### after turning step 3 and 4 into comments, we only show the output of the first repetition in step 2.
class bsolar: def __init__(self, X, Y, n_repeat_solar, n_repeat_bsolar, step_size, rnd=0): # for convinience, we define the common variable (variables we need to use for each of the following functions) in the class as follows (the common variable is defined as self.xxxx) # sample size self.sample_size = X.shape[0] # the number of subsamples generated in solar self.n_repeat_solar = n_repeat_solar # the number of subsamples generated in bsolar self.n_repeat_bsolar = n_repeat_bsolar # (grid search) step size for tuning the threshold of subsample selection frequency for bsolar self.step_size = -0.02 # the Numpy random seed for replication self.rnd = rnd # the size of each subsample self.subsample_size = int(self.sample_size * 0.9) # the number of total variables in X self.n_dim = X.shape[1] # the maximum value of c in its grid search (for plotting only) self.q_start = 1 # the minimum value of c in its grid search (for plotting only) self.q_end = 0.1 # step size of c in its grid search (for plotting only) self.q_step = -0.02 # the sample we generate via data-generating process self.X = X; self.y = Y def fit(self): #1. construct a placeholder called 'qhat_k_container', which is the list of all qhat^k (a binary string representing whether each variable is selected by solar on subsample k) of each subsample qhat_k_container = list() #2. train a solar on each subsample, find out which variable is selected on a given sample and save the corresponding selection result on subsample k as qhat^k # parallel computing starts # 2a. to make parallel computing replicable, set random seeds np.random.seed(self.rnd) # 2b. Spawn off child seed sequences to pass to child processes. seeds = np.random.randint(2e8, size=self.n_repeat_bsolar) # 2c. first we define what we do in each stage of the loop def loop_fun(self, j, seeds, qhat_k_container): # 2c(1). fix random seed for replication np.random.seed(seeds[j]) # 2c(2). 
randomly choose a subset of sample points (whose index is 'index_subsample') and use them to generate a subsample in the given repeat of for-loop index_subsample = np.random.choice(self.sample_size, self.subsample_size, replace=False) # 2c(3). based on 'index_subsample', take the corresponding observations of X as the "X_subample" X_subsample = self.X[index_subsample] # 2c(4).based on 'index_subsample', take the corresponding observations of Y out and save them as the subample y_subsample = self.y[index_subsample] # 2c(5). change dimension for solar training y_subsample.shape = (y_subsample.shape[0],1) # 2c(6). given a subsample, compute solar on it # call the class 'solar' trial2 = solar( X_subsample, y_subsample, self.n_repeat_solar, self.step_size, lasso=False) # compute solar on the subsample solar_coef, _, _, _, _, _, _, _,_ = trial2.fit() # save the active set of solar on this subsample (indices of variables select by solar) as 'active'. active = np.nonzero(solar_coef)[0] # 2c(7). based on the active set of solar, we compute qhat^k as the binary string of whether each variable is selected by solar on subsample K # we generate 'qhat_k' as a row of zeros; qhat_k = np.zeros((1, self.n_dim)) # if a variable (the ith column in matrix X) is selected by solar, we change the ith value of qhat_k as 1 for i in active: qhat_k[0, i] = 1 # we append the result into 'qhat_k_container' and save it as one element of the list qhat_k_container.append(qhat_k) return seeds, active, qhat_k_container seeds, active, qhat_k_container = loop_fun(self, 0, seeds, qhat_k_container) return seeds, active, qhat_k_container ''' # 2d. parallel the whole for-loop using the function we define previously and save the result qhat_k_container = Parallel(n_jobs=-1, prefer="processes")(delayed(loop_fun)(self, j, seeds, qhat_k_container) for j in range(self.n_repeat_bsolar)) # 3. compute the subsample selection frequency for all variables # 3a. 
we transform the list of all qhat^k ('qhat_k_container') into a matrix ('qhat_k_container_matrix') # row of the matrix : the qhat^k on a given subsample for all variables # column of the matrix : the corresponding values of qhat^k for variable "X_i" on all subsamples # axis =0 means that we treat each item as a row; qhat_k_container_matrix = np.concatenate(qhat_k_container, axis=0) # 3b. compute the the value of qhat for each variable (the subsample selection frequency of each variable) # e.g., compute the mean of each column qhat_value = np.mean(qhat_k_container_matrix, axis=0) # 3c. set 'Qc_list' as the container for the subsample selection frequencies for all variables, ranking in decreasing order. Qc_list = list() # 3d. set 'c_seq' as the sequence of c (the threshold of subsample selection frequency in bsolar) c_seq = np.arange(1, 0.1, -0.02) # 3e. for each value of c, generate Q(c) --- the set of variables with subsample selection frequency larger or equal to c for j in c_seq: # 3e(1). define 'container' as the placeholder of Q(c) when c == j; container = list() for i in range(self.X.shape[1]): # 3e(2). include all variables into 'container' if their corresponding values in q-hat are larger or equal to j; if (qhat_value[0][i] >= j): container.append(i) # 3e(3). append 'container' (Q(c) when c == j) into 'Qc_list' (the container of Q(c) for all value of c); Qc_list.append(container) # 4. pick the variable that are selected most of the time; # 4a. if it is bsolar-S, choose c = 0.9 Q_opt_c_S = Qc_list[5] # if it is bsolar-H, choose c = 1 Q_opt_c_H = Qc_list[0] # 5. output the bsolar-S result (Q_opt_c_S is the active set of bolasso-S) # 5a. if Q_opt_c_S is empty, return a zero array and empty active set if Q_opt_c_S == []: bsolar_coef_S = np.zeros([self.n_dim, 1]) # 5b. otherwise, regress Y onto the selected variables in X (variables in Q_opt_c_S) else : # 5b(1). call the LinearRegression class; OLS_S = LinearRegression() # 5b(2). 
fit OLS of Y to the variables of Q_opt_c_S on X; OLS_S.fit(self.X[:, Q_opt_c_S], self.y) # 5b(3). set 'bsolar_coef_S' (an array of zeros) as the placeholder of bsolar-S regression coefficents bsolar_coef_S = np.zeros([self.n_dim, 1]) # 5b(4). put the estimated regression coefficents into their corresponding place of 'bsolarS_coef' bsolar_coef_S[Q_opt_c_S, 0] = OLS_S.coef_ # 5c. output the bsolar-H result (Q_opt_c_H is the active set of bolasso-H) # if Q_opt_c_H is empty, return a zero array and empty active set if Q_opt_c_H == []: bsolar_coef_H = np.zeros([self.n_dim, 1]) # 5d. otherwise, regress Y onto the selected variables in X (variables in Q_opt_c_H) else : # 5d(1). call the LinearRegression class; OLS_H = LinearRegression() # 5d(2). fit OLS of Y on the variables of Q(c*) in X; OLS_H.fit(self.X[:, Q_opt_c_H], self.y) # 5d(3). set 'bsolar_coef_H' (an array of zeros) as the placeholder of bsolar regression coefficents bsolar_coef_H = np.zeros([self.n_dim, 1]) # 5d(4). put the estimated regression coefficents into their corresponding place of 'bsolarH_coef' bsolar_coef_H[Q_opt_c_H, 0] = OLS_H.coef_ return bsolar_coef_H, bsolar_coef_S, Qc_list, Q_opt_c_H, Q_opt_c_S # return the full list of subsample selection frequency for each variable in bsolar def q_list(self, Qc_list): # 1. concatenate Qc_list into a matrix var_mark_plot = np.concatenate(Qc_list) # 2. compute the value of c for each Q(c) and the corresponding variables in each Q(c) var_index, counts = np.unique(var_mark_plot, return_counts=True) var_index_ordered = [x for _, x in sorted(zip(counts, var_index))] var_plot = var_index_ordered[::-1] cou_plot = np.sort(counts)[::-1] / \ ((self.q_end - self.q_start) / self.q_step) var_plot = ['X' + str(i) for i in var_plot] # 3. 
print the list of variables with different value of c var_loc_list = list() var_q_list = list() q_value_list = np.unique(cou_plot)[::-1] i = 1 for j in q_value_list: ans_ind = np.where([cou_plot == j])[1] ans_var = [var_plot[i] for i in ans_ind] var_loc_list.append(ans_ind) var_q_list.append(ans_var) print('selection frequency >= ', j) print(var_q_list[:i]) i += 1 ''' # ### now run the function above under the following simulation setting # + sample_size = 20 n_dim = 12 n_info = 5 n_repeat_solar = 10 n_repeat_bsolar = 3 step_size = -0.02 np.random.seed(0) # generate X and Y trial1 = simul(sample_size, n_dim, n_info) X, Y = trial1.data_gen() # train bsolar trial2 = bsolar(X, Y, n_repeat_solar, n_repeat_bsolar, step_size) seeds, active, qhat_k_container = trial2.fit() # - # ## Now let's check the result # # ### all 3 random seeds for each solar repetition in bsolar-3 print(seeds) # ## check the solar active set on the first subsample print("the active set of solar :", active) # ## check variable "qhat_k" on the first subsample # # * <font size="4.5"> if you find the $i^{th}$ value in "qhat_k" is 1, the $i^{th}$ variable is selected on this bootstrap sample</font> print("the qhat for each varaible in X on the first bootstrap sample") print(qhat_k_container) # --- # # ## #2. now, let's check # * <font size="4.5"> the qhat for all solar repetitions </font> # * <font size="4.5"> the subsample selection frequency of all variables </font> # * <font size="4.5"> active set of bsolar3-S and bsolar3-H </font> # # ### we turning the last step into comments since it only produces the post-bsolar OLS coefficients. 
class bsolar: def __init__(self, X, Y, n_repeat_solar, n_repeat_bsolar, step_size, rnd=0): # for convinience, we define the common variable (variables we need to use for each of the following functions) in the class as follows (the common variable is defined as self.xxxx) # sample size self.sample_size = X.shape[0] # the number of subsamples generated in solar self.n_repeat_solar = n_repeat_solar # the number of subsamples generated in bsolar self.n_repeat_bsolar = n_repeat_bsolar # (grid search) step size for tuning the threshold of subsample selection frequency for bsolar self.step_size = -0.02 # the Numpy random seed for replication self.rnd = rnd # the size of each subsample self.subsample_size = int(self.sample_size * 0.9) # the number of total variables in X self.n_dim = X.shape[1] # the maximum value of c in its grid search (for plotting only) self.q_start = 1 # the minimum value of c in its grid search (for plotting only) self.q_end = 0.1 # step size of c in its grid search (for plotting only) self.q_step = -0.02 # the sample we generate via data-generating process self.X = X; self.y = Y def fit(self): #1. construct a placeholder called 'qhat_k_container', which is the list of all qhat^k (a binary string representing whether each variable is selected by solar on subsample k) of each subsample qhat_k_container = list() #2. train a solar on each subsample, find out which variable is selected on a given sample and save the corresponding selection result on subsample k as qhat^k # parallel computing starts # 2a. to make parallel computing replicable, set random seeds np.random.seed(self.rnd) # 2b. Spawn off child seed sequences to pass to child processes. seeds = np.random.randint(2e8, size=self.n_repeat_bsolar) # 2c. first we define what we do in each stage of the loop def loop_fun(self, j, seeds, qhat_k_container): # 2c(1). fix random seed for replication np.random.seed(seeds[j]) # 2c(2). 
randomly choose a subset of sample points (whose index is 'index_subsample') and use them to generate a subsample in the given repeat of for-loop index_subsample = np.random.choice(self.sample_size, self.subsample_size, replace=False) # 2c(3). based on 'index_subsample', take the corresponding observations of X as the "X_subample" X_subsample = self.X[index_subsample] # 2c(4).based on 'index_subsample', take the corresponding observations of Y out and save them as the subample y_subsample = self.y[index_subsample] # 2c(5). change dimension for solar training y_subsample.shape = (y_subsample.shape[0],1) # 2c(6). given a subsample, compute solar on it # call the class 'solar' trial2 = solar( X_subsample, y_subsample, self.n_repeat_solar, self.step_size, lasso=False) # compute solar on the subsample solar_coef, _, _, _, _, _, _, _,_ = trial2.fit() # save the active set of solar on this subsample (indices of variables select by solar) as 'active'. active = np.nonzero(solar_coef)[0] # 2c(7). based on the active set of solar, we compute qhat^k as the binary string of whether each variable is selected by solar on subsample K # we generate 'qhat_k' as a row of zeros; qhat_k = np.zeros((1, self.n_dim)) # if a variable (the ith column in matrix X) is selected by solar, we change the ith value of qhat_k as 1 for i in active: qhat_k[0, i] = 1 # we append the result into 'qhat_k_container' and save it as one element of the list qhat_k_container.append(qhat_k) return qhat_k_container # 2d. parallel the whole for-loop using the function we define previously and save the result qhat_k_container = Parallel(n_jobs=-1, prefer="processes")(delayed(loop_fun)(self, j, seeds, qhat_k_container) for j in range(self.n_repeat_bsolar)) # 3. compute the subsample selection frequency for all variables # 3a. 
we transform the list of all qhat^k ('qhat_k_container') into a matrix ('qhat_k_container_matrix') # row of the matrix : the qhat^k on a given subsample for all variables # column of the matrix : the corresponding values of qhat^k for variable "X_i" on all subsamples # axis =0 means that we treat each item as a row; qhat_k_container_matrix = np.concatenate(qhat_k_container, axis=0) # 3b. compute the the value of qhat for each variable (the subsample selection frequency of each variable) # e.g., compute the mean of each column qhat_value = np.mean(qhat_k_container_matrix, axis=0) # 3c. set 'Qc_list' as the container for the subsample selection frequencies for all variables, ranking in decreasing order. Qc_list = list() # 3d. set 'c_seq' as the sequence of c (the threshold of subsample selection frequency in bsolar) c_seq = np.arange(1, 0.1, -0.02) # 3e. for each value of c, generate Q(c) --- the set of variables with subsample selection frequency larger or equal to c for j in c_seq: # 3e(1). define 'container' as the placeholder of Q(c) when c == j; container = list() for i in range(self.X.shape[1]): # 3e(2). include all variables into 'container' if their corresponding values in q-hat are larger or equal to j; if (qhat_value[0][i] >= j): container.append(i) # 3e(3). append 'container' (Q(c) when c == j) into 'Qc_list' (the container of Q(c) for all value of c); Qc_list.append(container) # 4. pick the variable that are selected most of the time; # 4a. if it is bsolar-S, choose c = 0.9 Q_opt_c_S = Qc_list[5] # if it is bsolar-H, choose c = 1 Q_opt_c_H = Qc_list[0] return qhat_k_container, qhat_value, Qc_list, Q_opt_c_H, Q_opt_c_S ''' # 5. output the bsolar-S result (Q_opt_c_S is the active set of bolasso-S) # 5a. if Q_opt_c_S is empty, return a zero array and empty active set if Q_opt_c_S == []: bsolar_coef_S = np.zeros([self.n_dim, 1]) # 5b. otherwise, regress Y onto the selected variables in X (variables in Q_opt_c_S) else : # 5b(1). 
call the LinearRegression class; OLS_S = LinearRegression() # 5b(2). fit OLS of Y to the variables of Q_opt_c_S on X; OLS_S.fit(self.X[:, Q_opt_c_S], self.y) # 5b(3). set 'bsolar_coef_S' (an array of zeros) as the placeholder of bsolar-S regression coefficents bsolar_coef_S = np.zeros([self.n_dim, 1]) # 5b(4). put the estimated regression coefficents into their corresponding place of 'bsolarS_coef' bsolar_coef_S[Q_opt_c_S, 0] = OLS_S.coef_ # 5c. output the bsolar-H result (Q_opt_c_H is the active set of bolasso-H) # if Q_opt_c_H is empty, return a zero array and empty active set if Q_opt_c_H == []: bsolar_coef_H = np.zeros([self.n_dim, 1]) # 5d. otherwise, regress Y onto the selected variables in X (variables in Q_opt_c_H) else : # 5d(1). call the LinearRegression class; OLS_H = LinearRegression() # 5d(2). fit OLS of Y on the variables of Q(c*) in X; OLS_H.fit(self.X[:, Q_opt_c_H], self.y) # 5d(3). set 'bsolar_coef_H' (an array of zeros) as the placeholder of bsolar regression coefficents bsolar_coef_H = np.zeros([self.n_dim, 1]) # 5d(4). put the estimated regression coefficents into their corresponding place of 'bsolarH_coef' bsolar_coef_H[Q_opt_c_H, 0] = OLS_H.coef_ return bsolar_coef_H, bsolar_coef_S, Qc_list, Q_opt_c_H, Q_opt_c_S # return the full list of subsample selection frequency for each variable in bsolar def q_list(self, Qc_list): # 1. concatenate Qc_list into a matrix var_mark_plot = np.concatenate(Qc_list) # 2. compute the value of c for each Q(c) and the corresponding variables in each Q(c) var_index, counts = np.unique(var_mark_plot, return_counts=True) var_index_ordered = [x for _, x in sorted(zip(counts, var_index))] var_plot = var_index_ordered[::-1] cou_plot = np.sort(counts)[::-1] / \ ((self.q_end - self.q_start) / self.q_step) var_plot = ['X' + str(i) for i in var_plot] # 3. 
print the list of variables with different value of c var_loc_list = list() var_q_list = list() q_value_list = np.unique(cou_plot)[::-1] i = 1 for j in q_value_list: ans_ind = np.where([cou_plot == j])[1] ans_var = [var_plot[i] for i in ans_ind] var_loc_list.append(ans_ind) var_q_list.append(ans_var) print('selection frequency >= ', j) print(var_q_list[:i]) i += 1 ''' # ### now run the function above under the following simulation setting # + sample_size = 100 n_dim = 12 n_info = 5 n_repeat_solar = 10 n_repeat_bsolar = 3 step_size = -0.02 np.random.seed(0) # generate X and Y trial1 = simul(sample_size, n_dim, n_info) X, Y = trial1.data_gen() # train bsolar trial2 = bsolar(X, Y, n_repeat_solar, n_repeat_bsolar, step_size) qhat_k_container, qhat_value, Qc_list, Q_opt_c_H, Q_opt_c_S = trial2.fit() # - # ## Now let's check the result # # ### the qhat for first 3 subsamples of bsolar-3 qhat_k_container[0:3] # ### the subsample selection frequency of each varaible on 256 subsamples print(qhat_value) # ### the correpsonding barplot # + ind = np.arange(12) fig, ax = plt.subplots(figsize=(12,3)) ax.bar(ind, qhat_value[0], 0.35, edgecolor="black", color="white") ax.hlines(1, -1, 13, linewidth=3, color='red', linestyles='dashed') ax.hlines(0.9, -1, 13, linewidth=3, color='green', linestyles='dashed') ax.set_xlim([-0.5, 11.5]) ax.set_ylim([0.3, 1.02]) ax.xaxis.set_major_locator(MaxNLocator(integer=True)) plt.xlabel("the index of each varaible in X") plt.ylabel("subsample selection frequency") plt.show() # - # ## check the active set of bsolar print("the active set with f=1.0 :", Q_opt_c_H) print("the active set with f=0.9 :", Q_opt_c_S) # ## finaly, producing this into html # !rm -rf bsolar_walkthrough.html # !jupyter nbconvert --to html bsolar_walkthrough.ipynb
Demo/bsolar_walkthrough.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Remove duplicated questions and rows with an empty question index from index.csv.

import ast

import pandas as pd

df = pd.read_csv('index.csv')
df.drop_duplicates('question_index', inplace=True)

# The index columns are stored as strings such as "[1, 2, 3]".
# ast.literal_eval parses them safely; eval() would execute arbitrary code
# found in the CSV.
df['question_index'] = df['question_index'].apply(ast.literal_eval)
df['answer_index'] = df['answer_index'].apply(ast.literal_eval)

# Keep only rows whose question_index is non-empty.
# Bug fixes vs the original: (1) it called eval() a second time on the already
# parsed lists, which raises TypeError; (2) it dropped rows with df.drop(index=i)
# where i was the positional counter, but after drop_duplicates the row labels
# no longer match positions, so the wrong rows could be removed.
df = df[df['question_index'].map(len) > 0].reset_index(drop=True)

# Sanity check: report any empty question_index left (there should be none).
for i, d in enumerate(df['question_index']):
    if d == []:
        print(i)

df.to_csv('index_without_duplicates.csv', index=False)
data/remove_empty_and_duplicate_questions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RoshaniMallav/letUpgrade_Python_Essential/blob/master/python2List.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="DpROM7Cmw0Kt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="cf28548d-3bc9-44b3-db00-4023ce0d0093"
# Demo of basic list operations.
# Fixed: the variable was named `list`, which shadows the builtin `list` type
# and would break any later call such as list(...) or isinstance(x, list).
my_list = ['Roshani', 'Mallav', 1999, 20000]

# 1. Updating an element
print("Value available at index 2 : ")
print(my_list[2])
my_list[2] = 22222
print("New value available at index 2 : ")
print(my_list[2])

# 2. Deleting an element
print(my_list)
del my_list[2]
print("After deleting value at index 2 : ")
print(my_list)

# 3. Length of the list
print("The length of the list is:")
print(len(my_list))

# 4. Append operation
print(my_list)
print("After appending the list is:")
my_list.append("orange")
print(my_list)

# 5. Copy (a shallow copy: the new list is an independent container)
print("Copy list from one list to another:")
mylist = my_list.copy()
print(mylist)
python2List.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Fixing Negative Concentrations
#
# Given a stoichiometric matrix A, an initial concentration vector C0, and a
# (machine-learned) flux vector S, detect any species driven negative by the
# update C1 = C0 + A@S and proportionally shrink the depleting fluxes so the
# species is exactly emptied instead of overdrawn.

import numpy as np
import copy

# Stoichiometric matrix: rows are species, columns are reactions/fluxes.
A = np.array([[ 0, 1,-1, 0, 0, 0, 0, 0, 0, 0],
              [ 1, 0,-1, 0, 0, 0,-1, 0, 0, 0],
              [-1, 0, 1, 0, 0, 0, 1,-1, 0, 0],
              [ 0, 0, 0,-1,-1,-1, 0, 0, 0, 0],
              [ 0, 0, 0, 2, 0, 1,-1, 0, 0, 1],
              [ 0, 0, 0, 0, 0, 0, 0, 0,-1,-1]])

# ### Step 1: Machine learning step, let `C0` be the concentration input and `S` be the flux output
C0 = np.array([0.0445, 0.0001, 0.0007, 0.0002, 0.0001, 0.0059])
S = np.array([0.00070, 0.00070, 0.00091, 0, 0, 0, 0.00011, 0, 0, 0])

# ### Step 2: Finding negative concentrations
# Multiply by the stoichiometric matrix to get the concentration change,
# dC = A@S, then add it to C0 to get C1, the next timestep.  If any entry of
# C1 is negative, locate the most negative one.
delC = A @ S
C1 = C0 + delC
loc = C1.argmin()  # hoisted out of the `if` so Step 3 works even at the boundary
if any(C1 < 0):
    print(C1)
    # BUGFIX: the original printed {location}, a name that does not exist
    # (NameError at runtime); the index variable is `loc`.
    print(f"The concentration at index {loc} is negative!")

# ### Step 3: Shrinking the fluxes that led to negative concentrations
#
# Only the fluxes that deplete the negative species are adjusted — i.e. the
# columns of A with a negative entry in row `loc`.  Each is scaled by the same
# factor so that the species lands exactly at zero.
# + 
S_corrected = copy.deepcopy(S)
C_corrected = copy.deepcopy(C1)

depleting = np.where(A[loc, :] < 0)[0]  # indices of the depleting fluxes
for n in depleting:
    S_corrected[n] = (1 - (delC[loc] + C0[loc]) / sum(S[depleting]) / A[loc, n]) * S_corrected[n]

print("Nonphysical ML prediction S:")
print(np.round(S, 5))
print("Adjusted S vector")
print(np.round(S_corrected, 5))
# -

# ### Step 4: Calculate the adjusted C
#
# Use `S_corrected` in dC = A@S and add it to `C0`.
# +
delC_corrected = A @ S_corrected
C_corrected = C0 + A @ S_corrected

print("Initial concentration C0:")
print(np.round(C0, 5))
print("Nonphysical ML prediction C1:")
print(np.round(C1, 5))
print("Adjusted concentration:")
print(np.round(C_corrected, 5))
# -
initial_attempts/NonNegativeExample.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Traffic sign recognition following German standard
#
# In this project, I used the dataset of [German Traffic Sign Recognition Benchmark on Kaggle](https://www.kaggle.com/meowmeowmeowmeowmeow/gtsrb-german-traffic-sign),
# which consists of 40+ classes and more than 50,000 images in total.

# ### Visualizing the data

# Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
from PIL import Image
import os
# BUGFIX: load_img is used in the very next cell but the original notebook
# only imported it near the end — NameError on a fresh run.
from keras.preprocessing.image import load_img

# + Show a 3x3 grid of random training images.
fig = plt.figure(figsize=(8, 8))
columns = 3
rows = 3
fig_list = []
for i in range(1, columns * rows + 1):
    rand_class = np.random.randint(43)
    show_random = os.listdir("Train/{}/".format(str(rand_class)))
    rand = np.random.randint(len(show_random))
    fig_list.append(fig.add_subplot(rows, columns, i))
    img = load_img("Train/{}/{}".format(str(rand_class), show_random[rand]), target_size=(30, 30))
    plt.imshow(img)
plt.tight_layout()  # BUGFIX: tight_layout(True) passed True as the `pad` argument
plt.show()
# -

# ### Pre-processing data

# +
# Read the images with cv2, convert them to RGB, resize to 30x30 and stack
# into numpy arrays.
data = []
labels = []

height = 30
width = 30
channels = 3
classes = 43
n_inputs = height * width * channels

for i in range(classes):
    path = "Train/{0}/".format(i)
    class_files = os.listdir(path)
    for a in class_files:
        try:
            image = cv2.imread(path + a)
            image_from_array = Image.fromarray(image, "RGB")
            size_image = image_from_array.resize((height, width))
            data.append(np.array(size_image))
            labels.append(i)
        except AttributeError:
            # cv2.imread returns None for unreadable files; skip them.
            pass

Cells = np.array(data)    # array of train image data
labels = np.array(labels)  # array of class ids, parallel to Cells
print(Cells.shape)
print(labels.shape)
# -

# + Randomize the order of the input images (seeded for reproducibility).
s = np.arange(Cells.shape[0])
np.random.seed(43)
np.random.shuffle(s)
Cells = Cells[s]
labels = labels[s]
# -

# + Split train/validation 80%/20% and scale pixel values to [0, 1].
split = (int)(0.2 * len(labels))
(X_train, X_val) = Cells[split:], Cells[:split]
X_train = X_train.astype('float32') / 255
X_val = X_val.astype('float32') / 255
(y_train, y_val) = labels[split:], labels[:split]

# One-hot encode the 43 classes for categorical_crossentropy.
from keras.utils import to_categorical
y_train = to_categorical(y_train, 43)
y_val = to_categorical(y_val, 43)
# -

X_train.shape[1:]

# ### Create and train model

# +
# Define the CNN: two conv stages with pooling/dropout, then a dense head.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout

model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu', input_shape=X_train.shape[1:]))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(43, activation='softmax'))

# Compilation of the model
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
model.summary()
# -

# + Training
epochs = 20
result = model.fit(X_train, y_train, batch_size=32, epochs=epochs, validation_data=(X_val, y_val))
# -

model.save("Traffic_sign_recognition.h5")

# ### Evaluate the model

# +
# BUGFIX: modern Keras records the metric under 'accuracy'/'val_accuracy',
# older releases under 'acc'/'val_acc'; look up whichever key exists.
acc_key = 'accuracy' if 'accuracy' in result.history else 'acc'
plt.figure(0)
plt.plot(result.history[acc_key], label='training accuracy')
plt.plot(result.history['val_' + acc_key], label='val accuracy')
plt.title('Accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()

plt.figure(1)
plt.plot(result.history['loss'], label='training loss')
plt.plot(result.history['val_loss'], label='val loss')
plt.title('Loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
# -

# Evaluate our model with Test data
y_test = pd.read_csv("Test.csv")
y_test.head()

# +
# BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; use .values.
labels = y_test["Path"].values
y_test = y_test["ClassId"].values

data = []
for f in labels:
    image = cv2.imread(f)
    image_from_array = Image.fromarray(image, 'RGB')
    size_image = image_from_array.resize((height, width))
    data.append(np.array(size_image))
len(data)
# -

X_test = np.array(data)
X_test = X_test.astype('float32') / 255
# BUGFIX: Sequential.predict_classes() was removed in TF 2.6; take the argmax
# of the softmax output instead.
pred = np.argmax(model.predict(X_test), axis=1)

from sklearn.metrics import accuracy_score
accuracy_score(y_test, pred)

from sklearn.metrics import confusion_matrix, classification_report
print(classification_report(y_test, pred))

from keras.models import load_model
model = load_model("Traffic_sign_recognition.h5")


# ### Visualizing predicted data

def show_result(image):
    """Classify a single image file.

    Returns a tuple (human-readable sign name, normalized BGR image array).
    """
    list_sign_name = ["Maximum 20km/h", "Maximum 30km/h", "Maximum 50km/h", "Maximum 60km/h", "Maximum 70km/h", "Maximum 80km/h",
                      "End of limit 80km/h", "Maximum 100km/h", "Maximum 120km/h", "No Passing (all)", "No passing (vehicles>3.5t)",
                      "Priority in front", "Priority road", "Yield", "STOP", "Prohibition (all)", "Prohibition (truck)", "One-way road",
                      "Dange point", "Dangerous left curve ", "Dangerous right curver", "Double curves left", "Bumpy road", "Slippery road",
                      "Road narrows right", "Roadworks", "Traffic signal", "Pedestrian crossing", "Children crossing", "Bicycle lane",
                      "Snow ahead", "Wild animals", "End of limitation", "Turn right", "Turn left", "Go straight", "Go straight or turn right",
                      "Go straight or turn left", "Keep right", "Keep left", "Roundabout", "End of no passing (all)", "End of no passing (truck)"]
    image = cv2.imread(image)
    image = image.astype('float32') / 255
    image_resize = cv2.resize(image, (30, 30))
    image_reshape = image_resize.reshape(1, 30, 30, 3)
    pre = model.predict(image_reshape)
    return list_sign_name[np.argmax(pre)], image


image = "Test/00062.png"
infor, img = show_result(image)
image_plt = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(image_plt)
print(infor)

test_random = os.listdir("Test/")
len(test_random)
print(test_random[5])

# + Show a 3x3 grid of random test images with their predicted labels.
fig = plt.figure(figsize=(10, 10))
columns = 3
rows = 3
fig_list = []
for i in range(1, columns * rows + 1):
    rand = np.random.randint(len(test_random))
    fig_list.append(fig.add_subplot(rows, columns, i))
    fig_list[-1].set_title(show_result("Test/" + test_random[rand])[0])
    img = load_img("Test/{}".format(test_random[rand]), target_size=(30, 30))
    plt.imshow(img)
plt.tight_layout()  # BUGFIX: tight_layout(True) passed True as the `pad` argument
plt.show()
# -

# ### Conclusion:
# - This traffic-sign model achieved very impressive accuracy (~97%) without
#   complex layers, long training or expensive hardware compared to the Dog and
#   Cat model, because traffic signs follow a fixed legal standard while dogs
#   and cats vary widely even within a breed.
#
# - One disadvantage is that in a real driving scenario we need to know not
#   only **what** the traffic signs are but also **where** they are, so the
#   vehicle can adjust its behavior to the distance.  As a next step I will try
#   Yolov3 for traffic sign recognition to get both the content and the
#   location of each sign in the next notebook.
#
# The end!
Traffic_signs_classification/Traffic_sign_recognition.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="4w2e0D-hni2p"
# # HOME ASSIGNMENT #3: SLACK API - TO GSHEET
# **Purpose of this assignment**
# - Fetch the list of users from the DataCracy Slack (organizers, mentors and learners)
# - `**[Optional 1]**` Push the user list to a Google Spreadsheet for tracking
# - `**[Optional 2]**` Collect assignment submissions and review counts from `#atom-assignment2`
#   and update the spreadsheet to track which learners submitted and were reviewed
#
# **Concepts practiced**
# - Review and practice the API concept (here: the Slack API)
# - Extracting information from JSON
# - Using the gspread module to push data to a Google Spreadsheet

# + [markdown] id="yitICPVzpRI2"
# ## 0. Load Modules

# + id="G_HCucZ5rSHq"
import requests  # to call the APIs
import re  # string processing
from datetime import datetime as dt  # datetime handling
import gspread  # to update data on Google Spreadsheet
from gspread_dataframe import set_with_dataframe  # to upload DataFrames to Google Spreadsheet
import pandas as pd  # tabular data handling
import json
from oauth2client.service_account import ServiceAccountCredentials  # Google Spreadsheet credentials
import os

# + [markdown] id="z7HOk6uMp2K2"
# ## 1. Slack API: User List
# * You can re-read the API concept [HERE](https://anhdang.gitbook.io/datacracy/atom/3-data-tools-2/3.2-spotify-api-and-postman)
# * This assignment uses the Slack API to fetch learner info and track submitted/reviewed
#   assignments (then update a Google Spreadsheet)
# * ===> **NOTICE**: the Slack API authorizes with a Bearer Token `<KEY>` (provided separately)
# * Update the `env_variable.json` file as in [Assignment#2](../assignment_2/home_assignment_2.ipynb)
# * ==> If you use Google Colab, upload the file to Colab ([guide](https://colab.research.google.com/notebooks/io.ipynb))

# + colab={"base_uri": "https://localhost:8080/"} id="YoSEHfdTwYRj"
# !ls

# + id="tFPqT2pirhy4"
with open('env_variables.json', 'r') as j:
    json_data = json.load(j)
print(json_data)

# + id="wAzmaVTLrpvH"
## Load SLACK_BEARER_TOKEN into the process environment
os.environ['SLACK_BEARER_TOKEN'] = json_data['SLACK_BEARER_TOKEN']

# + id="K_TrDTx138jD"
## Call the API endpoint (the token goes into the request headers)
## Challenge: try calling this API with Postman
endpoint = "https://slack.com/api/users.list"
headers = {"Authorization": "Bearer {}".format(os.environ['SLACK_BEARER_TOKEN'])}
response_json = requests.post(endpoint, headers=headers).json()
print(response_json)
user_dat = response_json['members']

# + [markdown] id="rqEti4Y50tdB"
# ### TODO #1
# Complete the following code

# + id="P0qbkdhtD9ww"
## Loop over the JSON and extract the important fields
## (id, name, display_name, real_name_normalized, title, phone, is_bot)
## Hint: use Postman or print the user_dat JSON to inspect the schema;
## use Ctrl+F to locate the keys
user_dict = {'user_id':[], 'name':[], 'display_name':[],'real_name':[],'title':[],'phone':[],'is_bot':[]}
for i in range(len(user_dat)):
    user_dict['user_id'].append(user_dat[i]['id'])
    user_dict['name'].append(user_dat[i]['name'])
    user_dict['display_name'].append(user_dat[i]['profile']['display_name'])
    user_dict['real_name'].append(user_dat[i]['profile']['real_name_normalized'])
    user_dict['title'].append(user_dat[i]['profile']['title'])
    user_dict['phone'].append(user_dat[i]['profile']['phone'])
    user_dict['is_bot'].append(user_dat[i]['is_bot'])

# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="yGNvsCauF8_w"
user_df = pd.DataFrame(user_dict)  ## convert the dict of lists into a table with pandas
user_df.head(5)  ## print only the first 5 rows (mainly to inspect the structure)

# + colab={"base_uri": "https://localhost:8080/", "height": 80} id="rjFcaWdt2ABM"
user_df[user_df.display_name == 'MAD']  ## filter MAD's record on the DataFrame (you can Google more)

# + [markdown] id="1TZrcaWZyE07"
# -------------- END OF THE REQUIRED PART ---------------------

# + [markdown] id="P3_b7zw-1A6s"
# ## Option 1: Update data => Google SpreadSheet

# + [markdown] id="UPuXhw3WtmWw"
# ### TODO#2
# Create a service account (output is a JSON file) that grants access to Google Spreadsheet:
#
# 1. Follow the guide: [Google Create a Service Account](https://support.google.com/a/answer/7378726?hl=en)
# ![google_service_account](../img/google_service_account.png)
# 2. Save the credential JSON file locally
# ![gservice_acc_json](../img/gservice_acc_json.png)
# 3. Remember to enable the [Google Drive API](https://console.cloud.google.com/marketplace/product/google/drive.googleapis.com?q=search&referrer=search&project=quickstart-313303)
#    (if the code reports the API is not enabled, follow the link in the error; activation may take a few minutes)
# ![enable_api](../img/enable_api.png)
# * ==> Upload the Gsheet credential JSON if you use Colab
# * ==> If the key lives in a git repo, **REMEMBER** to add the JSON file to `.gitignore` so the key does not leak

# + colab={"base_uri": "https://localhost:8080/"} id="XhrcVjXWwldS"
# !ls

# + colab={"base_uri": "https://localhost:8080/"} id="4NEQFu-DNvFC"
## Authorize with the JSON credential
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    'slack-bot-key.json', scope)
gc = gspread.authorize(credentials)
print("DONE!")

# + [markdown] id="HELBp0Sdxhzc"
# **Create the Spreadsheet**
#
# 1. Create a spreadsheet on Google
# 2. Invite the account in `client_email` (from the credential JSON above) to the spreadsheet (Editor role)
# ![enable_api](../img/enable_api.png)
# 3. Grab the `SPREADSHEET_KEY` (it is part of the spreadsheet URL):
#    `https://docs.google.com/spreadsheets/d/<SPREADSHEET_KEY>/edit#gid=0`
#
# ![add_gsheet](../img/add_gsheet.png)

# + id="XlZ8JNB3bZbw"
# ACCESS GOOGLE SHEET
sheet_index_no = 0
spreadsheet_key = '1mrHOuXzCOtgsIPQcm-pg8VK6uE3DM91u-RGXO_Vfexs' # input SPREADSHEET_KEY HERE
sh = gc.open_by_key(spreadsheet_key)
worksheet = sh.get_worksheet(sheet_index_no) #-> 0 - first sheet, 1 - second sheet etc.

# APPEND DATA TO SHEET
set_with_dataframe(worksheet, user_df) #-> upload user_df into the first sheet of the spreadsheet
# DONE: you can now open the spreadsheet and verify the content was updated
# -

# ![slack_user_gsheet](../img/slack_user_gsheet.png)

# + [markdown] id="5I15m3ilyA3Y"
# -------------- END OF OPTION 1 ---------------------

# + [markdown] id="9aRN9KSczkmr"
# ## Option 2: Who has submitted?

# + [markdown] id="Pw81ioUz2N0v"
# ### Slack API: Channel List

# + id="fcdaDaGdz60p"
## Call the Slack API to list all channels
endpoint = "https://slack.com/api/conversations.list"
headers = {"Authorization": "Bearer {}".format(os.environ['SLACK_BEARER_TOKEN'])}
response = requests.post(endpoint, headers=headers).json()
channel_ls = response['channels']

# + colab={"base_uri": "https://localhost:8080/"} id="47dUJAPi0Jg5"
channel_ls[0]  ## inspect the first record's schema => name: general, id: C01B4PVGLVB
# -

# TODO 3
for i in channel_ls:
    if i['name'] == 'atom-assignment2':
        print(i)

# + [markdown] id="CNFTcFC00l8g"
# ### TODO#3
# * Find the id of the #atom-assignment2 channel

# + [markdown] id="EnCvdYAn2R47"
# ### Slack API: list the messages in one channel

# + id="1UsSESN8rtnk"
endpoint = "https://slack.com/api/conversations.history"
data = {"channel": "C021FSDN7LJ"} ## This is ID of assignment#1 channel
headers = {"Authorization": "Bearer {}".format(os.environ['SLACK_BEARER_TOKEN'])}

# + id="i8eJw3LBr4lY"
response_json = requests.post(endpoint, data=data, headers=headers).json()
msg_ls = response_json['messages']

# + colab={"base_uri": "https://localhost:8080/"} id="KkYO1KcBtZa9"
msg_ls[0]

# + id="BsW8CXAXv-tC"
not_learners_id = ['U01BE2PR6LU']

# + colab={"base_uri": "https://localhost:8080/"} id="XOvJ5kGCsmAD"
## Summarize all submitted assignments + review counts
not_learners_id = ['U01BE2PR6LU'] # -> Remove MA from the user_id list
msg_lk = []  # NOTE(review): never used below
msg_dict = {'github': [], 'reply_count': [], 'reply_users_count': [], 'reply_users': [], 'latest_reply':[] }
for i in range(len(msg_ls)):
    ts = dt.fromtimestamp(float(msg_ls[i]['ts'])) # -> convert the epoch timestamp into a readable datetime
    user = msg_ls[i]['user'] # -> author of the message
    if msg_ls[i]['user'] not in not_learners_id:
        if 'attachments' in msg_ls[i].keys():
            text = msg_ls[i]['text']
            # A submission is a message containing a GitHub link
            github_link = re.findall('(?:https?://)?(?:www[.])?github[.]com/[\w-]+/?', text)
            print(github_link)
            if len(github_link) > 0:
                # NOTE(review): nesting reconstructed from a flattened source —
                # the reply_* appends sit under the github-link check so that
                # every column list in msg_dict stays the same length; verify
                # against the original notebook.
                msg_dict['github'].append(github_link[0])
                if 'reply_count' in msg_ls[i].keys():
                    msg_dict['reply_count'].append(msg_ls[i]['reply_count'])
                else:
                    msg_dict['reply_count'].append('') #-> number of reviews; blank when missing
                if 'reply_users_count' in msg_ls[i].keys():
                    msg_dict['reply_users_count'].append(msg_ls[i]['reply_users_count'])
                else:
                    msg_dict['reply_users_count'].append('')
                if 'reply_users' in msg_ls[i].keys():
                    msg_dict['reply_users'].append(msg_ls[i]['reply_users'])
                else:
                    msg_dict['reply_users'].append('')
                if 'latest_reply' in msg_ls[i].keys():
                    msg_dict['latest_reply'].append(dt.fromtimestamp(float(msg_ls[i]['latest_reply'])))
                else:
                    msg_dict['latest_reply'].append('')
msg_df = pd.DataFrame(msg_dict)
msg_df

# + [markdown] id="SgSC21qO3kKA"
# ### TODO#4
#
# * Build a table with the information above and upload it to the spreadsheet (Sheet: Assignment#2 Submission)
# -
sheet_index_no = 1
spreadsheet_key = '<KEY>' # input SPREADSHEET_KEY HERE
sh = gc.open_by_key(spreadsheet_key)
worksheet2 = sh.get_worksheet(sheet_index_no) #-> 0 - first sheet, 1 - second sheet etc.
set_with_dataframe(worksheet2, msg_df)

# + [markdown] id="698exK3k35RM"
# -------------- END OF OPTION 2 ---------------------
# -
assignment_3/.ipynb_checkpoints/home_assignment_3-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# Scratch notebook: exploring how to save/load PIV datasets to HDF5 with h5py
# and the PIVutils helpers.

# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import h5py
import PIVutils
import os
from importlib import reload

saveFolder = './'
# -

X, Y, U, V, Cond, Prof = PIVutils.importMatlabPIVdata('/Users/Owen/Dropbox/Data/ABL/SBL PIV data/RNV45-RI2.mat',['X','Y','U','V'],['Cond','Prof'])

# Write a single dataset and inspect it.
f = h5py.File('/Users/Owen/Dropbox/Python Codes/ASIIT/Data/testSave3.hdf5','w')
Uset = f.create_dataset("U", data=U)
Uset.dtype
"U" in f
f.close()

# NOTE(review): the file below is opened read-only ('r') but create_dataset is
# called on it further down, which raises — presumably this cell was re-run
# with mode 'a' in the live session; confirm before reuse.
f = h5py.File('/Users/Owen/Dropbox/Python Codes/ASIIT/Data/testSave.hdf5','r')
a = f.keys()
list(f.keys())

names = ['X','Y','V']
data = [X,Y,V]
range(len(names))

for i in range(len(names)):
    print(i)

for i in range(len(names)):
    f.create_dataset(names[i], data=data[i])

f.close()

reload(PIVutils)
PIVutils.saveDataset('/Users/Owen/Dropbox/Python Codes/ASIIT/Data/testSave4.hdf5',\
                     ['X','Y','U','V'],[X,Y,U,V],\
                     ['Cond','Prof'],[Cond,Prof])

f.close()

# Write each (name, value) pair of Cond into its own dataset under a group.
Cond.items()
a = list(Cond.items())
a[0][0]
len(list(Cond.items()))

CondSet = f.create_group("Cond2")  # ,(len(list(Cond.items())),)
for i in list(Cond.items()):
    CondSet.create_dataset(i[0], data=i[1])

reload(PIVutils)
X, Y, U, V, Cond, Prof = PIVutils.loadDataset('/Users/Owen/Dropbox/Python Codes/ASIIT/Data/testSave4.hdf5',\
                                              ['X','Y','U','V'],['Cond','Prof'])
Cond

file = '/Users/Owen/Dropbox/Python Codes/ASIIT/Data/testSave2.hdf5'
if os.path.exists(file):
    # BUGFIX: the original called del(file), which only unbinds the Python
    # variable; os.remove actually deletes the file on disk.
    os.remove(file)
    print("Original file deleted")

f = h5py.File('/Users/Owen/Dropbox/Python Codes/ASIIT/Data/RNV45-thumbs.hdf5','r')
list(f.keys())
list(f['ThumbParams'].keys())

i = 'Cond'
# Build a dictionary linking every value in Cond with its name.
# BUGFIX: Dataset.value was removed in h5py 3.0; index the dataset directly.
TempS = {k : f[i][k][0]
         for k in f[i].keys()}

Cond
Prof
ImportMatPIVdata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import seaborn as sns; sns.set_style('darkgrid') import matplotlib.pyplot as plt import numpy as np # %matplotlib inline df = pd.read_excel(r'D:\Jupyter notebook\Data sets\CCRB-Complaint-Data_202007271729\Cleaned_NYPD_dataset.xlsx') df.info() df.head(5) #checking the dataset to verify if I've got the right data set # ### We'll explore the impact of the police and how it affects the likelihood of an offence happening. # We'll look at categories such as age, Rank,gender and Precinct #We'll create a new officer age column where age is categorised in below 20s,20s,30s,40s and so forth df['Age_group'] = round((df.mos_age_incident//10)*10 , 0) plt.figure(figsize=(12, 8)) sns.countplot(data = df ,y='Age_group',palette="twilight_r",orient='h') df.Age_group.unique() plt.savefig('Officer age group') # + #We can see that NYPD officers in their 30s are more likely to have with complaints than any other age group #let's compare the officer age group that was more likely to be exonerated #First we'll create a column describing if the officer's force was deemed Substantiated,Unsubstantiated or exonerated by the CCRB def ccrb_check(x): if 'unsubstantiated' in x.lower().split(): return 'unsubstantiated' elif 'substantiated' in x.lower().split(): return 'substantiated' elif 'exonerated' in x.lower().split(): return 'exonerated' df['CCRB_CHECK'] = df.board_disposition.apply(lambda x: ccrb_check(x) ) Twenty_not_exonerated = df[(df.CCRB_CHECK == 'substantiated') & (df.Age_group == 20)]['Name'].count()/df[df.Age_group == 20]['Name'].count() Thirty_not_exonerated = df[(df.CCRB_CHECK == 'substantiated') & (df.Age_group == 30)]['Name'].count()/df[df.Age_group == 30]['Name'].count() Forty_not_exonerated = df[(df.CCRB_CHECK == 'substantiated') & 
(df.Age_group == 40)]['Name'].count()/df[df.Age_group == 40]['Name'].count() fifty_not_exonerated = df[(df.CCRB_CHECK == 'substantiated') & (df.Age_group == 50)]['Name'].count()/df[df.Age_group == 50]['Name'].count() sixty_not_exonerated = df[(df.CCRB_CHECK == 'substantiated') & (df.Age_group == 60)]['Name'].count()/df[df.Age_group == 60]['Name'].count() twenty = '20s age group: ' + str(np.ceil(Twenty_not_exonerated*100)) thirties = '30s age group: ' + str(np.ceil(Thirty_not_exonerated*100)) forties = '40s age group: ' +str(np.ceil(Forty_not_exonerated*100)) fifties = '50s age group: ' +str(np.ceil(fifty_not_exonerated*100)) sixties = '60s age group: '+str(np.ceil(sixty_not_exonerated*100)) print(twenty,thirties,forties,fifties,sixties, sep='\n') # - # From the data above, we can see that officers in their 60s and 50s are more likely to be found guilty of miscounduct . In general,by the CCRB. In short, the older the officer, the less likely they are to get exonerated. # ###### Next we're going to look at the precincts df.precinct.value_counts(ascending=False).head(10) #top 10 highest precincts with complaints #We'll explore the percentage of NYPD officers that are exonerated or not per precinct for the top 1o highest precincts pt= pd.pivot_table(df,values='fado_type',aggfunc='count',index='precinct',columns='CCRB_CHECK').sort_values(by='precinct',ascending=False) pt['%exonerated'] = round(pt.exonerated/(pt.exonerated+pt.substantiated+pt.unsubstantiated) *100,1) pt['%substantiated'] = round(pt.substantiated/(pt.exonerated+pt.substantiated+pt.unsubstantiated) *100,1) pt['%unsubstantiated'] = round(pt.unsubstantiated/(pt.exonerated+pt.substantiated+pt.unsubstantiated) *100,1) pt.drop(['exonerated','substantiated','unsubstantiated'],axis=1,inplace=True) pt.sort_values(by='%exonerated',ascending=False,inplace=True) pt['Index'] = np.arange(1,79) pt.reset_index(inplace=True) pt.set_index('Index',inplace=True) 
print(pt,pt.sort_values(by='%substantiated',ascending=False),sep='\n \n \n \n') # #### Precinct 75 has the most number of complaints which are almost 2times the amount of the precinct with the second highest complaints # Officers from precinct 100 had the highest exoneration rate # Officers from precicnt 77 had were most likely to be found in misconduct by the CCRB #### The rank of the officers and the complaints will be explored fig=plt.figure(figsize=(12, 8)) sns.countplot(x='rank_incident',data=df,palette='plasma_r') # The number of complaints goes down with an increase in rank. This could be attributed to more experience and less contact with criminals and civilians #Let's take a look at how rank varies with sex in the NYPD and how an officer's sex affects the way they misconduct themselves fig=plt.figure(figsize=(12, 8)) sns.countplot(x='rank_incident',data=df,hue='mos_gender',palette='BuPu') #There are less female officers with complaints compared to male officers #Let's take a look a the ways both sexes misconduct themselves #first we'll explore the complaint words in each fado_type to get a better understanding of what each fado_type #We'll bring up the glossary and check Sheet4/Tab 3 for unique words per fado type s4 =pd.read_excel(r'D:\Jupyter notebook\Data sets\CCRB-Complaint-Data_202007271729\CCRB Data Layout Table.xlsx',sheet_name=3) Discourtesy = s4[s4['FADO Category'] == 'Discourtesy']['Allegation type'].unique() Force= s4[s4['FADO Category'] == 'Force']['Allegation type'].unique() Abuse_of_Athority = s4[s4['FADO Category'] == 'Abuse of Authority']['Allegation type'].unique() Offensive_language = s4[s4['FADO Category'] == 'Offensive Language']['Allegation type'].unique() print(Abuse_of_Athority) #The unique words above give us an insight into the examples of anofficer abusing his authority print(Discourtesy) #We can see that some words in the Discourtesy fado type are actually better suited for the offensive language fado type #Words such as 
'Gay/Lesbian Slur' 'Sexist Remark','Curse' 'Nasty Words' #It safe to say that the Discourtesy and Offensive language fado types intersect alot print(Force) print(Offensive_language) # ##### After getting a better understanding of each fado type, we can now move onto analysing the fado type common for each sex plt.figure(figsize=(12,8)) sns.countplot(data=df,y='fado_type',hue='mos_gender',palette='YlGnBu_r') plt.savefig('fado_type, hue=officer gender') # ###### The graph above shows that Abuse of authority is a common misconduct amongst both sexes. Let's make a table of the percentage representation of both genders per fado_type # + F_abuse_of_authority = df[(df.fado_type == 'Abuse of Authority') & (df.mos_gender == 'Female')].Name.count() /df[df.fado_type == 'Abuse of Authority'].Name.count() F_Discourtesy = df[(df.fado_type == 'Discourtesy') & (df.mos_gender == 'Female')].Name.count()/df[df.fado_type == 'Discourtesy'].Name.count() F_Offensive_Language = df[(df.fado_type == 'Offensive Language') & (df.mos_gender == 'Female')].Name.count() / df[df.fado_type == 'Offensive Language'].Name.count() F_Force = df[(df.fado_type == 'Force') & (df.mos_gender == 'Female')].Name.count()/df[df.fado_type == 'Force'].Name.count() Female_Officer =[round(F_abuse_of_authority*100,1),round(F_Discourtesy*100,1),round(F_Offensive_Language*100,1),round(F_Force*100,1)] M_abuse_of_authority = df[(df.fado_type == 'Abuse of Authority') & (df.mos_gender == 'Male')].Name.count()/df[df.fado_type == 'Abuse of Authority'].Name.count() M_Discourtesy = df[(df.fado_type == 'Discourtesy') & (df.mos_gender == 'Male')].Name.count()/df[df.fado_type == 'Discourtesy'].Name.count() M_Offensive_Language = df[(df.fado_type == 'Offensive Language') & (df.mos_gender == 'Male')].Name.count()/df[df.fado_type == 'Offensive Language'].Name.count() M_Force = df[(df.fado_type == 'Force') & (df.mos_gender == 'Male')].Name.count()/df[df.fado_type == 'Force'].Name.count() Male_Officer = 
[round(M_abuse_of_authority*100,1),round(M_Discourtesy*100,1),round(M_Offensive_Language*100,1),round(M_Force*100,1)] gend_dict = {'Female Officer':Female_Officer,'Male':Male_Officer} gend_table = pd.DataFrame(gend_dict,index=['Abuse of Authority', 'Discourtesy', 'Offensive Language', 'Force']) gend_table.head(5) # - # ##### As shown from the table above, female officers are less represented in misconduct complaints that involved force and Offensive language compared to their male counterparts. # ##### Female officers are more likely to abuse their authority when misconducting themselves whilst their male counterparts are more likely to use force # ##### Male NYPD officers are more aggressive than their female counterparts # We will now look at how the gender breakdown of the complainants plt.figure(figsize=(15,10)) sns.countplot(hue='fado_type',y='complainant_gender',data=df,palette='Blues') #The graph shows that male New York citizens are more likely to receive harsh treatment by the NYPD. 
# Let's take a look if there are any sexist and anti-trans complaints
# We'll first look for any unique words in the allegations column that denote sexist or anti-LGBTQ misconduct
df.allegation.unique()

# We'll create a list with words in the allegations column that denote sexist misconduct
Sexist_misconduct = ['Gender', 'Sex Miscon (Sexual Harassment, Verbal)','Sex Miscon (Sexual Harassment, Gesture)','Sexual Misconduct (Sexual Humiliation)','Sexual orientation','Sex Miscon (Sexual/Romantic Proposition)','Gender Identity']
# We classified gender identity as a sexist misconduct because it's based on one's gender
Percent_sexist = round(df[df.allegation.isin(Sexist_misconduct)].Name.count()/df.Name.count() *100,2)
print('Percentage of misconducts that are sexist: ' + str(Percent_sexist)+'%')

# Share of sexist complaints the CCRB reviewed as misconduct.
# NOTE(review): this counts both 'substantiated' and 'unsubstantiated' dispositions as
# "found to have been misconducts" — confirm that 'unsubstantiated' really means
# confirmed misconduct in this dataset's CCRB_CHECK encoding.
guilty_sexist = df[(df.allegation.isin(Sexist_misconduct)) & (df['CCRB_CHECK'].isin(['substantiated','unsubstantiated']))].Name.count()/df[df.allegation.isin(Sexist_misconduct)].Name.count()
guilty_sexist = round(guilty_sexist*100,2)
print(str(guilty_sexist)+ '% of sexist complaints were found to have been misconducts by the CCRB.')

# ##### According to the data, all the sexist complaints were successful and for every 1000 complaints, 8 are sexist
#

# +
# We'll now look at any race-related misconducts
# We'll create a list with words in the allegations column that denote race-related misconduct
race_related = ['Race','Ethnicity']
Percent_race = round(df[df.allegation.isin(race_related)].Name.count()/df.Name.count() *100,2)
Percent_race = 'Percentage of misconducts that are race related : ' + str(Percent_race)+'%'

guilty_race = df[(df.allegation.isin(race_related)) & (df['CCRB_CHECK'].isin(['substantiated','unsubstantiated']))].Name.count()/df[df.allegation.isin(race_related)].Name.count()
# BUGFIX: the original rounded and printed guilty_sexist here (copy-paste from the
# cell above), so the reported race-related percentage was actually the sexist one.
guilty_race = round(guilty_race*100,2)
Percent_race_guilty = str(guilty_race)+ '% of race related complaints were found to have been misconducts by the CCRB.'
print(Percent_race,Percent_race_guilty,sep='\n')
# -

# #### All race related complaints were deemed misconducts by the CCRB.

# #### At least 1 in every 100 complaint cases is race related

# Let's take a look at the racial breakdown of complaints:
# pivot to counts per (ethnicity, fado_type), then convert each row to percentages.
rp =df.pivot_table(columns='fado_type',values='Name',aggfunc='count',index='complainant_ethnicity')
Total_fado_type=(rp['Abuse of Authority']+rp['Discourtesy']+ rp['Force']+rp['Offensive Language'])
rp['Abuse of Authority%'] = round( rp['Abuse of Authority']/Total_fado_type *100,2)
rp['Force%'] = round(rp['Force']/Total_fado_type *100,2)
rp['Offensive Language and Discourtesy%'] = round((rp['Offensive Language']+rp['Discourtesy'])/Total_fado_type *100,2)
# Drop the raw counts and keep only the percentage columns
rp.drop(['Offensive Language','Force','Discourtesy','Abuse of Authority'],axis=1,inplace=True)
rp.reset_index(inplace=True)
rp

# #### For each racial group, most complaints were about Abuse of authority

# #### For American Indian, Asian and Black New Yorkers, more than 1 in 3 complaints made about police misconduct are about Abuse of Authority

# #### Hispanic and Black New Yorkers are more likely to make a complaint about being handled aggressively by the NYPD than any other racial group

# #### White New Yorkers are more likely to make a complaint about offensive language and discourtesy than any other racial group

df.complainant_ethnicity.value_counts()

# +
# Lastly we'll look at the commonest complaints per race:
# same pivot as above, but transposed (fado_type rows, ethnicity columns).
pr = df.pivot_table(values='Name',columns='complainant_ethnicity',index='fado_type',aggfunc='count')
Total_ethnic=(pr['American Indian']+pr['Asian']+ pr['Black']+pr['Hispanic'] + pr['White'])
pr['American Indian%']= round( pr['American Indian']/Total_ethnic *100,2)
pr['Asian%'] = round( pr['Asian']/Total_ethnic *100,2)
pr['Black%'] = round( pr['Black']/Total_ethnic *100,2)
pr['Hispanic%'] = round( pr['Hispanic']/Total_ethnic *100,2)
pr['White%'] = round( pr['White']/Total_ethnic *100,2)
# Drop the raw counts and keep only the percentage columns
pr.drop(['American Indian','Black','White','Hispanic','Asian'],axis=1,inplace=True)
# NOTE(review): this adds a literal column named 'index' holding 0..3 (one value per
# fado_type row). It only changes the displayed table below — presumably a leftover
# from experimenting with reset_index; confirm it is still wanted.
pr['index']=np.arange(4)
pr
# -

# #### The data tells us that black New Yorkers are more likely to receive harsh treatment from the NYPD followed by Hispanic New Yorkers

# #### Asian and American Indian New Yorkers are less likely to receive harsh treatment than any other racial group
#

# Stacked bar chart of the per-ethnicity percentage columns, one bar per fado_type.
# NOTE(review): pr.plot.bar opens its own figure, so the figsize set by the
# plt.figure call below is presumably not applied to the bars — confirm.
plt.figure(figsize=(12,8))
pr.plot.bar(stacked=True)
lgd = plt.subplot(111).legend(loc='center left', bbox_to_anchor=(1, 0.5)) #move legend to the side
plt.savefig('fado_type vs Race', bbox_extra_artists=(lgd,), bbox_inches='tight') #show legend in saved figure

# ## The End
NYPD Officer complaints - Exploratory Data Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib import wradlib import cv2 import numpy as np import os import matplotlib.pyplot as plt from matplotlib import animation import matplotlib.patches as mpatches from matplotlib.collections import PatchCollection from scipy.ndimage import zoom import datetime import warnings warnings.simplefilter('once', DeprecationWarning) # ## Read sample data # # Data is from the German Weather Service: the so called RY product represents rainfall intensity composite for the whole of Germany in 5 minute intervals. # # Spatial resolution: `1 x 1 km`; spatial extent: `900 x 900 km`. # # **Information required from user** # # - specify the directory `datadir` where you store the RY data (unpack the ry archives there). # - select a specific interval by commenting/uncommenting the `dtimes` lines. # - decide whether you need to reduce the resolution (downsize the image by a `downsizeby`) in order to avoid memory problems (this becomes relevant once you solve the 2D-adveciton equation...) # + # Set data directory datadir = "data/ry" # Original grid dimensions nx = 900 ny = 900 # pixel size (in meters) dx = 1000. dy = 1000. 
# Downsize by factor "downsizeby" # downsizeby = 1 will leave the dimensions unchanged, # but for a 900x900 km grid, downsizing might be # required in order to avoid MemoryError downsizeby = 1 # interval between observations (in seconds) interval = 300 # Set time window ##dtimes = wradlib.util.from_to("2008-06-02 17:00:00", "2008-06-02 19:00:00", interval) ##dtimes = wradlib.util.from_to("2015-04-26 17:00:00", "2015-04-26 19:00:00", interval) ##dtimes = wradlib.util.from_to("2015-03-29 17:00:00", "2015-03-29 19:00:00", interval) #dtimes = wradlib.util.from_to("2016-05-29 16:00:00", "2016-05-29 19:00:00", interval) dtimes = wradlib.util.from_to("2016-05-23 04:00:00", "2016-05-23 08:00:00", interval) # + # Compute grid dimensions and grid coordinates after resampling dx2, dy2 = dx*downsizeby, dy*downsizeby nx2, ny2 = int(nx/downsizeby), int(ny/downsizeby) X2, Y2 = np.meshgrid( np.arange(0,nx2*dx2, dx2), np.arange(0,ny2*dy2, dy2) ) # Define container frames = np.zeros( (len(dtimes), nx2, ny2 ) ) # Read the data, convert back to dBZ, and downsize # (maybe also try with keeping mm/h instead of converting to dBZ?) for i, dtime in enumerate(dtimes): fname = dtime.strftime( os.path.join(datadir, "raa01-ry_10000-%y%m%d%H%M-dwd---bin") ) frames[i] = zoom( wradlib.io.read_RADOLAN_composite(fname, missing=0)[0], 1./downsizeby, order=1) frames[i] = wradlib.trafo.decibel( wradlib.zr.r2z(frames[i]) ) frames[i][frames[i]<0] = 0 # - # ## Use OpenCV's Optical Flow to detect and track features # # This example uses the Lucas-Kanade Optical Flow implementation in OpenCV (see [here](http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.html)). We take the first frame, detect some Shi-Tomasi corner points in it, then we iteratively track those points over the subsequent images. # # The parameter dictionaries are certainly something to experiment with. 
# + # FEATURE DETECTION: Parameters for ShiTomasi corner detection feature_params = dict( maxCorners = 200, qualityLevel = 0.2, minDistance = 7, blockSize = 21 ) # FEATURE TRACKING: Parameters for Lucas Kanade (lk) Optical Flow technique lk_params = dict( winSize = (20,20), maxLevel = 2, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0)) # Over which time steps (of the data we've read in) do you want to track trackstart = 0 trackend = len(frames)-1 # - len(frames) # Our approach requires 8 bit integers - so we need to normalize our radar data accordingly # (there might be a more elegant solution...) minval = 0 maxval = 59 # dBZ in this case iframes = frames.copy() iframes[iframes<minval] = minval iframes[iframes>maxval] = maxval iframes = ((iframes / maxval)*255).astype(np.uint8) # + # Find good features to track... old = cv2.goodFeaturesToTrack(iframes[trackstart], mask = None, **feature_params) print("Found %d good features to track." % len(old) ) # Set containers to collect results (time steps in rows, detected corners in columns) # Tracking status sts = np.zeros((trackend,len(old)), dtype=np.bool) # corner x coords x = np.zeros((trackend,len(old))) * np.nan # corner y coords y = np.zeros((trackend,len(old))) * np.nan # tracking error errs = np.zeros((trackend,len(old))) * np.nan # Assign persistent corner IDs ids = np.arange(len(old)) # + # Track good features for i in range(trackstart, trackend): # track current corners in next image new, st, err = cv2.calcOpticalFlowPyrLK(prevImg=iframes[i], nextImg=iframes[i+1], prevPts=old, nextPts=None, **lk_params) success = st.ravel()==1 ids = ids[success] sts[i, ids] = True x[i, ids] = old[success,0,0] y[i, ids] = old[success,0,1] errs[i, ids] = err.ravel()[success] # new corners will be old in the next loop old = new[success] # Incremental euclidic distance from starting point trackdist = np.diff( np.sqrt( (x-x[0].reshape((1,-1)))**2 + (y-y[0].reshape((1,-1)))**2 ), axis=0 ) trackdist = np.vstack( 
(np.zeros((1,trackdist.shape[1])), trackdist)) # Plot feature persistence fig = plt.figure(figsize=(12,6)) ax = fig.add_subplot(211) cb = plt.imshow(errs, interpolation="none", cmap="summer", vmax = 15) plt.xlabel("Feature ID") plt.ylabel("Tracking time step") plt.colorbar(cb, shrink=0.5) plt.title("Tracking error") # Plot consistence of movement ax = fig.add_subplot(212) cb = plt.imshow(trackdist, interpolation="none", cmap="bwr", vmin=-5, vmax=5) plt.xlabel("Feature ID") plt.ylabel("Tracking time step") plt.colorbar(cb, shrink=0.75) plt.title("Incremental euclidian distance from starting point") plt.tight_layout() # - i # Find good tracks (but what is a "good" track...?) # Certainly a lot of subjective criteria to play with... goodtrack = np.zeros(x.shape[1], dtype=np.bool) for i in range(len(goodtrack)): # persistence of the track if len(np.where(sts[:,i])[0]) < 2: continue # consistency of movement if len(np.where(trackdist[:,i]<0)[0]) > 0: continue # tracking error if len(np.where(errs[:,i]>15)[0]) > 5: continue goodtrack[i] = True print("Found %d good tracks and %d bad tracks." 
% \ (len(np.where(goodtrack)[0]), len(goodtrack)-len(np.where(goodtrack)[0])) ) # Visualize tracks: green=good track, red=bad track goodcolor = "limegreen" badcolor = "red" fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111, aspect="equal") # average reflectivity over entire tracking period as background image ax.imshow(np.mean(frames[trackstart:trackend], axis=0), origin="lower", cmap="gray", interpolation="none") plt.xlabel("Easting (# pixels)") plt.ylabel("Northing (# pixels)") plt.title("[Zoom in to inspect track properties (not in inline mode!)]") plt.grid(color="white") plt.xlim(0,nx/downsizeby) plt.ylim(0,nx/downsizeby) bad_line = plt.Line2D([], [], color=badcolor, label='Bad track') good_line = plt.Line2D([], [], color=goodcolor, label='Good track') plt.legend(handles=[bad_line, good_line], loc="upper left") for i, isgood in enumerate(goodtrack): ix = sts[:,i] color = badcolor if isgood: color = goodcolor ax.plot(x[ix,i], y[ix,i],marker="None", color=color, markersize=14, linestyle="-") #ax.arrow(x[ix,i][-2], y[ix,i][-2], # np.diff(x[ix,i][-2:])[0], np.diff(y[ix,i][-2:])[0], # head_width=2, head_length=2, fc=color, ec=color) x.shape y[ix,i] # + # Animate features #plt.rcParams['animation.ffmpeg_path'] = r"E:\install\ffmpeg\bin\ffmpeg.exe" # Prepare canvas fig = plt.figure(figsize=(5,5)) ax1 = plt.subplot(111,aspect="equal") im1 = ax1.imshow(iframes[trackstart], origin="lower", cmap="gray", interpolation="none") plt.xlabel("Easting (# pixels)") plt.ylabel("Northing (# pixels)") plt.title("[Zoom in to inspect track properties (not in inline mode!)]") plt.grid(color="white") plt.xlim(0,nx/downsizeby) plt.ylim(0,nx/downsizeby) ax1.plot(x[0,goodtrack], y[0,goodtrack], linestyle="None", marker="o", mfc="None", mec="limegreen") ax1.plot(x[0,~goodtrack], y[0,~goodtrack], linestyle="None", marker="o", mfc="None", mec="red") ax1.grid(color="white") tstamp1 = ax1.text(25, 850, dtimes[trackstart].isoformat(), color="white", fontsize=14) def animate(j): 
im1.set_array(iframes[trackstart+j]) for line in plt.gca().get_lines(): if not line.get_linestyle()=="None": line.remove() for i, isgood in enumerate(goodtrack): ix = np.where(sts[:j,i])[0] color = "red" if isgood: color = "limegreen" ax1.plot(x[ix,i], y[ix,i], marker="None", color=color, markersize=14, linestyle="-") tstamp1.set_text(dtimes[trackstart+j].isoformat()) return im1 # ATTENTION: THIS IS SLOW - Rendering each frame of the animation might take more time than the interval between the frames # This can cause the temporal sequence to be confused in the matplotlib interactive mode. # The animation thus looks better if saved as movie, or you have to increase the interval argument # Animation not shown in notebook if you use %pylab inline ani = animation.FuncAnimation(fig, animate, frames=np.arange(trackstart, trackend-1), interval=400, blit=False) ani.save("featurescv.gif", writer="imagemagick", dpi=150) #ani.save("features.mp4", writer=animation.FFMpegWriter(bitrate=2000)) # - # ### Update tracked corners for each time step of the considered tracking period # # Until now, we only tracked those corners which we detected in the initial time step. We now want to add new tracks with each addtional time step, and follow these as well. 
# Detect a fresh set of Shi-Tomasi corners in every time step, so that new
# features entering the domain can also be tracked (not just those of step 0).
init_crns = [cv2.goodFeaturesToTrack(iframes[i], mask = None, **feature_params)
             for i in range(trackstart, trackend)]
print("List of # corners in each time step:\n", [len(crn) for crn in init_crns ])

# this function wraps up everything which we already did above for a single set of corners
def tracker(old, frameset, lk_params):
    """Track one initial corner set `old` through the images in `frameset`.

    Parameters: `old` is an array of corner points as returned by
    cv2.goodFeaturesToTrack; `frameset` is a sequence of uint8 images;
    `lk_params` are keyword arguments for cv2.calcOpticalFlowPyrLK.

    Returns (sts, x, y, errs, goodtrack): boolean tracking status, corner x/y
    coordinates and tracking error per time step (rows) and corner (columns),
    plus a boolean mask marking the tracks judged "good".

    Note: containers are allocated with the module-level `trackend` rows, so
    len(frameset) must not exceed trackend+1.
    """
    # Set containers to collect results (time steps in rows, corners in columns)
    # Tracking status (np.bool was removed in NumPy 1.24; plain bool is the equivalent dtype)
    sts = np.zeros((trackend,len(old)), dtype=bool)
    # corner x coords
    x = np.zeros((trackend,len(old))) * np.nan
    # corner y coords
    y = np.zeros((trackend,len(old))) * np.nan
    # tracking error
    errs = np.zeros((trackend,len(old))) * np.nan
    # Assign persistent corner IDs
    ids = np.arange(len(old))
    # Track good features
    for i in range(len(frameset)-1):
        # track current corners in next image
        new, st, err = cv2.calcOpticalFlowPyrLK(prevImg=frameset[i], nextImg=frameset[i+1],
                                                prevPts=old, nextPts=None, **lk_params)
        success = st.ravel()==1
        ids = ids[success]
        sts[i, ids] = True
        # NOTE(review): unlike the first (inline) tracking pass above, this records
        # the NEW (tracked) positions, not the old ones — confirm which is intended.
        x[i, ids] = new[success,0,0]
        y[i, ids] = new[success,0,1]
        errs[i, ids] = err.ravel()[success]
        # new corners will be old in the next loop
        old = new[success]
    # Incremental euclidic distance from starting point
    trackdist = np.diff( np.sqrt( (x-x[0].reshape((1,-1)))**2 + (y-y[0].reshape((1,-1)))**2 ), axis=0 )
    trackdist = np.vstack( (np.zeros((1,trackdist.shape[1])), trackdist))
    # Find good tracks (but what is a "good" track...?)
    goodtrack = np.zeros(x.shape[1], dtype=bool)  # np.bool alias removed in NumPy 1.24
    for i in range(len(goodtrack)):
        # persistence of the track: must be tracked for at least 2 steps
        if len(np.where(sts[:,i])[0]) < 2:
            continue
        # consistency of movement: distance from start must never decrease
        if len(np.where(trackdist[:,i]<0)[0]) > 0:
            continue
        # tracking error: at most 5 steps may exceed an error of 15
        if len(np.where(errs[:,i]>15)[0]) > 5:
            continue
        goodtrack[i] = True
    return sts, x, y, errs, goodtrack

# Run the tracker once per starting time step and collect the results
sts_ls, x_ls, y_ls, errs_ls, goodtrack_ls = [], [], [], [], []
for i, crns in enumerate(init_crns):
    sts, x, y, errs, goodtrack = tracker(crns, iframes[i:], lk_params)
    sts_ls.append(sts)
    x_ls.append(x)
    y_ls.append(y)
    errs_ls.append(errs)
    goodtrack_ls.append(goodtrack)

# Visualize tracks:
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111, aspect="equal")
# average reflectivity as background image
ax.imshow(np.mean(frames[trackstart:trackend], axis=0), origin="lower", cmap="gray", interpolation="none")
plt.xlabel("Easting (# pixels)")
plt.ylabel("Northing (# pixels)")
plt.title("[Zoom in to inspect track properties (not in inline mode!)]")
plt.grid(color="white")
plt.xlim(0,nx/downsizeby)
plt.ylim(0,nx/downsizeby)
# one color per starting time step
colors = [ plt.cm.spring(i) for i in np.linspace(0,254, len(goodtrack_ls)).astype("i4") ]
# NOTE(review): the last two starting steps are excluded here ([:-2]) — presumably
# because their tracks are too short to be meaningful; confirm.
for j, goodtrack in enumerate(goodtrack_ls[:-2]):
    sts, x, y = sts_ls[j], x_ls[j], y_ls[j]
    for i, isgood in enumerate(goodtrack):
        ix = sts[:,i]
        # HERE WE DO NOT PLOT THE BAD TRACKS
        color = "none"
        if isgood:
            color = colors[j]
        ax.plot(x[ix,i], y[ix,i],marker="None", color=color, linestyle="-", alpha=0.4)
        #ax.arrow(x[ix,i][-2], y[ix,i][-2],
        #         np.diff(x[ix,i][-2:])[0], np.diff(y[ix,i][-2:])[0],
        #         head_width=2, head_length=2, fc=color, ec=color, alpha=0.4)

# +
# ATTENTION: THIS ANIMATION TAKES A LONG WHILE (SEVERAL MINUTES) AND MIGHT STILL BE BUGGY
# Prepare canvas
fig = plt.figure(figsize=(5,5))
ax1 = plt.subplot(111,aspect="equal")
im1 = ax1.imshow(iframes[trackstart], origin="lower", cmap="gray", interpolation="none", vmin=10, vmax=60)
plt.xlabel("Easting (km)")
plt.ylabel("Northing (km)")
plt.grid(color="white")
plt.xlim(0,nx/downsizeby)
plt.ylim(0,nx/downsizeby)
#ax1.plot(x[0,goodtrack], y[0,goodtrack], linestyle="None", marker="o", mfc="None", mec=colors[0])
ax1.grid(color="white")
# zoom into the region of interest
plt.xlim(150,450)
plt.ylim(550,900)
tstamp1 = ax1.text(160, 560, dtimes[trackstart].isoformat(), color="white", fontsize=14)
colors = [ plt.cm.rainbow(i) for i in np.linspace(0,254, len(goodtrack_ls)).astype("i4") ]

def animate(j):
    """Render animation frame j: update image, timestamp, and redraw all good tracks."""
    im1.set_array(iframes[trackstart+j])
    # BUGFIX: was dtimes[0+j]; use trackstart+j to stay consistent with the image
    # shown above (only differs when trackstart != 0).
    tstamp1.set_text(dtimes[trackstart+j].isoformat())
    # clear the previously drawn track lines before redrawing
    for line in plt.gca().get_lines():
        line.remove()
    #if not line.get_linestyle()=="None":
    #    line.remove()
    for k, goodtrack in enumerate(goodtrack_ls[:j]):
        sts, x, y = sts_ls[k], x_ls[k], y_ls[k]
        for i, isgood in enumerate(goodtrack):
            ix = np.where(sts[:j,i])[0]
            # HERE WE DO NOT PLOT THE BAD TRACKS
            color = "none"
            if isgood:
                color = colors[k]
            #ax1.plot(x[0,goodtrack], y[0,goodtrack], linestyle="None", marker="o", mfc="None", mec=color, alpha=0.4)
            ax1.plot(x[ix,i], y[ix,i],marker="None", color=color, linestyle="-", alpha=0.3)

# ATTENTION: THIS IS SLOW - Rendering each frame of the animation might take more time than the interval between the frames
# This can cause the temporal sequence to be confused.
# The animation thus looks better if saved as movie, or you have to increase the interval argument
ani = animation.FuncAnimation(fig, animate, frames=np.arange(trackstart, trackend), interval=400, blit=False)
ani.save("featurescv.gif", writer="imagemagick", dpi=150)
#ani.save("features2.avi", dpi=500, bitrate=2000)
# -

# ### Experiment with SIFT/SURF feature detection and description
#
# See [SIFT and SURF](http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_table_of_contents_feature2d/py_table_of_contents_feature2d.html) for feature detection. Right now, this does not seem to add value as compared to the Optical Flow approach above. Features seem to be much less persistent.
# + # SURF surf = cv2.xfeatures2d.SURF_create(3000) kplist = [] deslist= [] for i in range(trackstart, trackend): kp, des = surf.detectAndCompute(iframes[i],None) kplist.append(kp) deslist.append(des) print("Found %d keypoints in step %d." % (len(kp), i)) # - fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111, aspect="equal") # average reflectivity as background image ax.imshow(frames[0], origin="lower", cmap="gray", interpolation="none") plt.xlabel("Easting (# pixels)") plt.ylabel("Northing (# pixels)") plt.title("[Zoom in to inspect feature properties (not in inline mode)]") tstamp1 = ax1.text(25, 850, dtimes[0].isoformat(), color="white", fontsize=14) plt.grid(color="white") plt.xlim(0,nx/downsizeby) plt.ylim(0,nx/downsizeby) patches = [] for kp_ in kplist[0]: if kp_.size > 5: circle = mpatches.Circle(kp_.pt, kp_.size, fill=False, edgecolor="red") #ax.add_patch(circle) patches.append(circle) collection = PatchCollection(patches, facecolor="none", edgecolor="red") ax.add_collection(collection) # Make list of patch collections for all timesteps def collect(kp): patches = [] for kp_ in kp: if (kp_.size > 10) and (kp_.size < 50): circle = mpatches.Circle(kp_.pt, kp_.size, fill=False, edgecolor="red") patches.append(circle) return(PatchCollection(patches, facecolor="none", edgecolor="red")) # + # Animate features _plot_style = dict(markersize=12, markeredgewidth=2, markerfacecolor='none', markeredgecolor='r', marker='o', linestyle='none') _pcm_style = dict(cmap=plt.cm.spectral, vmin=0., vmax=30.) 
# Prepare canvas fig = plt.figure(figsize=(10,10)) ax1 = plt.subplot(111,aspect="equal") im1 = ax1.imshow(iframes[0], origin="lower", cmap="gray", interpolation="none") ax1.add_collection(collect(kplist[0])) ax1.grid(color="white") tstamp1 = ax1.text(25, 850, dtimes[0].isoformat(), color="white", fontsize=14) def animate(i): im1.set_array(iframes[trackstart+i]) ax1.collections = [] ax1.add_collection(collect(kplist[trackstart+i])) tstamp1.set_text(dtimes[trackstart+i].isoformat()) return im1 ani = animation.FuncAnimation(fig, animate, frames=np.arange(trackstart, trackend-1), interval=200, blit=False) #ani.save("features_surf.avi", dpi=400, bitrate=2000) # - # ### Match features (brute force) # According [Bowler et al. (2004)](http://www.sciencedirect.com/science/article/pii/S0022169403004591), maximum advection velocity of rainfall objects is about 130 km/h which is roughly 10 km (pixels) in 5 minutes. # + maxveloc = 10. # Detect initial feature set detector = cv2.xfeatures2d.SURF_create(3000) kp1, des1 = detector.detectAndCompute(iframes[trackstart],None) # create BFMatcher object bf = cv2.BFMatcher() kp1_ls = [] kp2_ls = [] for i in range(trackstart+1, trackend): kp2, des2 = detector.detectAndCompute(iframes[i],None) matches = bf.knnMatch(des1, des2, k=1) # Select matches to keep kp1_, des1_, kp2_, des2_ = [], [], [], [] for match in matches: match=match[0] xy = np.vstack( (kp1[match.queryIdx].pt, kp2[match.trainIdx].pt) ) eucdist = np.sqrt( (xy[0,0] - xy[1,0])**2 + (xy[0,1] - xy[1,1])**2 ) if eucdist < maxveloc: kp1_.append( kp1[match.queryIdx] ) des1_.append( np.array( des1[match.queryIdx] ) ) kp2_.append( kp2[match.trainIdx] ) des2_.append( np.array( des2[match.trainIdx] ) ) kp1_ls.append(kp1_) kp2_ls.append(kp2_) # Update initial feature set kp1, des1 = kp2_, np.array( des2_ ) # -
opencv_plus_ry.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.6.1
#     language: julia
#     name: julia-1.6
# ---

# Brute-force search for the cheapest way to sort four kinds of pieces
# ('A'..'D') into their target rooms -- consistent with the Advent of Code
# 2021 Day 23 "Amphipod" puzzle (this file sits under 23/23.ipynb).
#
# State encoding used throughout:
#   * a board cell is an (x, y) tuple: y == 1 is the hallway, y >= 2 is a
#     room cell below hallway column x (rooms at x = 3, 5, 7, 9);
#   * `positions` is a vector of (x, y, kind) triples, one per piece;
#   * a search state ("possibility") is the triple
#     (positions, empty_spaces, accumulated cost).

readlines("input")

# Try manual solution
9000 + 30 + 5 + 5 + 50 + 40 + 5000 + 500 + 500 + 3000 + 3000

# Not the right answer 😥
300 + 300 + 9000 + 30 + 5 + 5 + 50 + 40 + 6000 + 500 + 500

# Still not the right answer 😥
#
# Okay then, let's model it...

# Destination room column for each kind (compared against p[1] below).
target_x = Dict('A'=>3, 'B'=>5, 'C'=>7, 'D'=>9)
# Energy cost of a single step for each kind.
step_cost = Dict('A'=>1, 'B'=>10, 'C'=>100, 'D'=>1000)
# Hallway cells a piece may stand on (columns directly above rooms excluded).
empty_spaces = Set([(i,1) for i in [1,2,4,6,8,10,11]])
# Part I starting configuration (puzzle input hard-coded; rooms two deep).
positions = [(3,2,'D'),(3,3,'B'),(5,2,'A'),(5,3,'A'),(7,2,'B'),(7,3,'D'),(9,2,'C'),(9,3,'C')]
# Search frontier: one initial state with zero cost spent.
possibilities = [(positions, empty_spaces, 0)]
# Every cell of the board = free hallway cells plus the occupied room cells.
all_spaces = Set([empty_spaces..., [(x[1],x[2]) for x in positions]...])

# Cells NOT in `empty` (closes over the global `all_spaces`).
occupied(empty) = setdiff(all_spaces, empty)

occupied(empty_spaces)

# A state is solved when every piece sits inside its target room (y >= 2
# and in the column `target_x` assigns to its kind).
function is_finished(possibility)
    (pos, space, cost) = possibility
    for p in pos
        if p[2]<2 || p[1] != target_x[p[3]]
            return false
        end
    end
    return true
end

is_finished(possibilities[1])

# Part-I obstruction test: rooms are exactly two cells deep, so only the
# (x, 2) cell can block a move into or out of (x, 3); additionally, any
# occupied hallway cell strictly between the two columns blocks the way.
function way_blocked(from, to, space)
    occ = occupied(space)
    if from[2]==3 && (from[1],2) ∈ occ
        return true
    end
    if to[2]==3 && (to[1],2) ∈ occ
        return true
    end
    return length(filter(x->x[1] ∈ min(from[1],to[1])+1:max(from[1],to[1])-1 && x[2]==1,occ)) > 0
end

# Scratch cell.  NOTE(review): `empty_spaces[1]` indexes a Set; Base.Set
# does not support getindex, so this cell (and the two calls below that
# reuse the expression) presumably error when run -- left-over exploration.
positions[1],empty_spaces[1],empty_spaces

way_blocked(positions[1],empty_spaces[1],empty_spaces)

way_blocked(positions[1],positions[3],empty_spaces)

# Part-I movement rules: no hallway-to-hallway moves; a piece may only enter
# its own target room, the bottom room cell only when reachable, the upper
# room cell only when the matching piece already fills the bottom cell; a
# piece that is home (with the right kind below it) must not leave.
function move_possible(possibility, from, to)
    (pos, space, cost) = possibility
    if from[2]==1 && to[2]==1
        return false
    end
    if to[2]==3
        if to[1] != target_x[from[3]] || way_blocked(from, to, space)
            return false
        end
    elseif to[2]==2
        if from[1]==to[1] || to[1] != target_x[from[3]] || (to[1],3,from[3]) ∉ pos || way_blocked(from, to, space)
            return false
        end
    else
        if way_blocked(from, to, space) || (from[1] == target_x[from[3]] && (from[1],3,from[3]) ∈ pos)
            return false
        end
    end
    return true
end

# All currently-empty cells piece `i` may legally move to from this state.
function possible_moves(possibility, i)
    (pos, space, cost) = possibility
    moves = []
    from = pos[i]
    for to in space
        if move_possible(possibility, from, to)
            push!(moves, to)
        end
    end
    return moves
end

possible_moves(possibilities[1],1), possible_moves(possibilities[1],2)

# Path length in steps: pieces travel via the hallway, so a column change
# costs the horizontal distance plus both vertical legs (y-1 each side);
# a move within one column is just the vertical distance.
function way_length(from, to)
    if from[1] == to[1]
        return abs(from[2]-to[2])
    else
        return abs(from[1]-to[1]) + from[2]-1 + to[2]-1
    end
end

# Expand every frontier state by every legal single-piece move, pruning any
# successor whose accumulated cost already reaches the best known solution
# (`highscore`).  Part-I version: iterates over the 8 pieces.
function one_step(possibilities, highscore)
    new_possibilities = []
    for possibility in possibilities
        (pos, space, cost) = possibility
        for i in 1:8
            moves = possible_moves(possibility,i)
            for move in moves
                new_cost = cost + way_length(pos[i],move) * step_cost[pos[i][3]]
                if new_cost < highscore
                    # Moved-to cell becomes occupied, vacated cell becomes free.
                    new_empty = setdiff(space, [move])
                    push!(new_empty, (pos[i][1],pos[i][2]))
                    new_pos = deepcopy(pos)
                    new_pos[i] = (move[1], move[2], new_pos[i][3])
                    push!(new_possibilities, (new_pos, new_empty, new_cost))
                end
            end
        end
    end
    return new_possibilities
end

# NOTE(review): `highscore` is first assigned two cells below, so running the
# notebook top-to-bottom makes this cell fail with UndefVarError.
one_step(possibilities, highscore)

# Main part-I loop: repeatedly expand the frontier; whenever finished states
# appear, tighten the `highscore` bound to their cheapest cost and drop them
# from the frontier.  Duplicate states (same piece multiset, same cost) are
# removed each round to keep the frontier manageable.
highscore = 20000
remaining_possibilities = possibilities
while length(remaining_possibilities) > 0
#for i in 1:20
    remaining_possibilities = one_step(remaining_possibilities, highscore)
    unique!(x->(Set(x[1]),x[3]),remaining_possibilities)
    finished_indices = is_finished.(remaining_possibilities)
    if any(finished_indices)
        highscore = minimum([x[3] for x in remaining_possibilities[finished_indices]])
        remaining_possibilities = remaining_possibilities[finished_indices .== 0]
    end
    println(length(remaining_possibilities))
end

highscore

# ## Part II
# Add between 2 and 3:
# ```
# #D#C#B#A#
# #D#B#A#C#
# ```

# Part II setup: rooms are now four cells deep (y = 2..5), 16 pieces total.
# The globals and the depth-sensitive functions below are intentionally
# redefined; Part I cells must not be re-run after this point.
empty_spaces = Set([(i,1) for i in [1,2,4,6,8,10,11]])
positions = [(3,2,'D'),(3,5,'B'),(5,2,'A'),(5,5,'A'),(7,2,'B'),(7,5,'D'),(9,2,'C'),(9,5,'C'),
    (3,3,'D'),(3,4,'D'),(5,3,'C'),(5,4,'B'),(7,3,'B'),(7,4,'A'),(9,3,'A'),(9,4,'C')]
possibilities = [(positions, empty_spaces, 0)]
all_spaces = Set([empty_spaces..., [(x[1],x[2]) for x in positions]...])
occupied(empty) = setdiff(all_spaces, empty)

# Depth-general obstruction test: blocked if any cell above either endpoint
# inside its room is occupied, or any hallway cell between the columns is.
function way_blocked(from, to, space)
    occ = occupied(space)
    if from[2]>2 && any(x->x[1]==from[1] && x[2] ∈ 1:from[2]-1,occ)
        return true
    end
    if to[2]>2 && any(x->x[1]==to[1] && x[2] ∈ 1:to[2]-1,occ)
        return true
    end
    return any(x->x[1] ∈ min(from[1],to[1])+1:max(from[1],to[1])-1 && x[2]==1,occ)
end

# Movement rules for four-deep rooms: a piece may only enter its target room
# when every room cell below the destination already holds its own kind, and
# a piece already home with only its own kind beneath it must not leave.
function move_possible(possibility, from, to)
    (pos, space, cost) = possibility
    if from[2]==1 && to[2]==1
        return false
    end
    if way_blocked(from, to, space)
        return false
    end
    if from[1] == target_x[from[3]] && all(x->(from[1],x,from[3]) ∈ pos,from[2]:5)
        return false
    end
    if to[2]>1
        if to[1] != target_x[from[3]]
            return false
        end
        if to[2]<5 && any(x->(to[1],x,from[3]) ∉ pos,to[2]+1:5)
            return false
        end
    end
    return true
end

# Same frontier expansion as Part I, but over all 16 pieces.
function one_step(possibilities, highscore)
    new_possibilities = []
    for possibility in possibilities
        (pos, space, cost) = possibility
        for i in 1:16
            moves = possible_moves(possibility,i)
            for move in moves
                new_cost = cost + way_length(pos[i],move) * step_cost[pos[i][3]]
                if new_cost < highscore
                    new_empty = setdiff(space, [move])
                    push!(new_empty, (pos[i][1],pos[i][2]))
                    new_pos = deepcopy(pos)
                    new_pos[i] = (move[1], move[2], new_pos[i][3])
                    push!(new_possibilities, (new_pos, new_empty, new_cost))
                end
            end
        end
    end
    return new_possibilities
end

# Main part-II loop.  NOTE(review): the `while` form is commented out in
# favour of a fixed 30 iterations -- presumably enough rounds for this input
# to empty the frontier; verify before reusing on other inputs.
highscore = 50000
remaining_possibilities = possibilities
#while length(remaining_possibilities) > 0
for i in 1:30
    remaining_possibilities = one_step(remaining_possibilities, highscore)
    unique!(x->(Set(x[1]),x[3]),remaining_possibilities)
    finished_indices = is_finished.(remaining_possibilities)
    if any(finished_indices)
        highscore = minimum([x[3] for x in remaining_possibilities[finished_indices]])
        remaining_possibilities = remaining_possibilities[finished_indices .== 0]
    end
    println(length(remaining_possibilities))
end

highscore

# Wow 😮‍💨
23/23.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# End-to-end automation (comments translated from Portuguese): log in to
# Kaggle with Selenium, download the credit-card-customers dataset, unzip it,
# compute a few churn indicators with pandas, and e-mail a summary report
# (plus the raw CSV) through the local Outlook client via COM.
#
# NOTE(review): '<EMAIL>', '<KEY>' and '<NAME>' look like redacted
# credentials/placeholders -- they must be filled in before this runs.
# NOTE(review): the driver.find_element(s)_by_* methods used below were
# removed in Selenium 4; this script needs Selenium 3.x (or a port to
# driver.find_elements(By.CSS_SELECTOR, ...)).  The obfuscated class names
# ('sc-fzoYHE' etc.) are tied to a specific Kaggle front-end build and the
# fixed time.sleep() waits are fragile -- confirm against the live page.

# import selenium because it lets us control web browsers
from selenium import webdriver
from selenium.webdriver.common.keys import Keys  # imported but unused below
import time
import zipfile
import pandas as pd
import win32com.client as win32  # Windows-only: Outlook COM automation

# step 1 - open the browser and navigate to the dataset page
driver = webdriver.Chrome()
driver.get('https://www.kaggle.com/sakshigoyal7/credit-card-customers')

# click an element (first one in the list) by CSS selector
driver.find_elements_by_css_selector('a.sc-fzoYHE')[0].click()
# wait 1 second
time.sleep(1)
# click an element (second one in the list) by class name
driver.find_elements_by_class_name('sc-jWtpDG')[1].click()
time.sleep(1)
# type the login values with .send_keys
driver.find_elements_by_css_selector('input.mdc-text-field__input')[0].send_keys('<EMAIL>')
driver.find_elements_by_css_selector('input.mdc-text-field__input')[1].send_keys('<KEY>')
driver.find_element_by_css_selector('form.sc-kIGZdZ').submit()
time.sleep(2)
# click to download the file
driver.find_elements_by_css_selector('a.sc-fzoYHE')[0].click()
time.sleep(5)

# this shows how to extract a file from the downloaded zip
with zipfile.ZipFile(r'C:\Users\joaop\Downloads\archive.zip', 'r') as zip_ref:
    zip_ref.extractall(r"C:\Users\joaop\Downloads")

# read the csv file
clientes_df = pd.read_csv(r'C:\Users\joaop\Downloads\BankChurners.csv')
# display(clients_df)

# 1 - group by status and count how many existing / attrited customers there are
resumo_status = clientes_df.groupby('Attrition_Flag')['Attrition_Flag'].count()
#print(resumo_status.to_string())

# 2 - select rows and columns: card category of existing customers only
distribuicao_cartao = clientes_df.loc[clientes_df['Attrition_Flag']=='Existing Customer', ['Attrition_Flag', 'Card_Category']]
#display(distribuicao_cartao)
# group the card-category values and count each one
distribuicao_cartao = distribuicao_cartao.groupby('Card_Category')['Card_Category'].count()
# rename the index label to 'Card_Category - Existing Customer'
distribuicao_cartao.index.names = ["Card_Category - Existing Customer"]
#print(distribuicao_cartao.to_string())

# 3 - averages: tenure in months, overall credit limit, and the credit limit
# of churned ('Attrited Customer') clients for comparison
tempo_medio = clientes_df['Months_on_book'].mean()
limite_medio = clientes_df['Credit_Limit'].mean()
limite_medio_exclientes = clientes_df.loc[clientes_df['Attrition_Flag']=='Attrited Customer', 'Credit_Limit'].mean()
#print(limite_medio)
#print(limite_medio_exclientes)

# send an e-mail to the boss.  The body below is the user-facing message and
# is deliberately kept in the original Portuguese, byte-for-byte.
outlook = win32.Dispatch('outlook.application')

texto = f'''
Ol<NAME>, tudo bem?

Conforme solicitado, levantamos os principais indicadores dos nossos clientes para ver o impacto dos Attrited Customers.

Temos atualmente a seguinte divisão da base de clientes:
{resumo_status.to_string()}

Além disso, dos clientes ativos (Existing Customers) a divisão por categoria fica assim:
{distribuicao_cartao.to_string()}

Por fim, o tempo médio de permanência dos clientes é de {tempo_medio:.1f} meses.
Agora, quando comparamos o limite médio entre clientes ativos e não ativos, não percebemos muita diferenciação, sendo {limite_medio:.1f} para ativos e {limite_medio_exclientes:.1f} para inativos

Segue em anexo planilha completa para mais detalhes.

Att.,
João
'''

mail = outlook.CreateItem(0)  # 0 creates the item used as an e-mail below
mail.To = '<EMAIL>'
mail.Subject = 'Relatório de Clientes - Análise de Attrited Customers'
mail.Body = texto
attachment = r'C:\Users\joaop\Downloads\BankChurners.csv'
mail.Attachments.Add(attachment)
mail.Send()

print('Fim')
automacao-clientes-cartoes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] colab_type="text" id="FtcmYWJc-FBb" # # SIT742: Modern Data Science # **(Week 02: A Touch of Data Science)** # # --- # - Materials in this module include resources collected from various open-source online repositories. # - You are free to use, change and distribute this package. # - If you found any issue/bug for this document, please submit an issue at [tulip-lab/sit742](https://github.com/tulip-lab/sit742/issues) # # # Prepared by **SIT742 Teaching Team** # # # --- # # # # Session 2A - Python Packages and Your Data # # In this week, we will learn how to use Python Packages to manipulate the data and files. # + [markdown] colab_type="text" id="Fb5TqlLE-FBo" # ## Content # # # # ### Part 1 Python packages # # 1.1 [Standard Library](#standlib) # # 1.2 [Third Party Packages](#3rdparty) # # 1.3 [Importing a module](#importmod) # # # ### Part 2 Python Simple IO # # 2.1 [Input](#input) # # 2.2 [Output](#output) # # # ### Part 3 Datetime Module # # 3.1 [Time](#time) # # 3.2 [Date](#date) # # 3.3 [Timedelta](#timedelta) # # 3.4 [Formatting and Parsing](#parsing) # # ### Part 4 Numpy Module # # 4.1 [Importing Numpy](#importnp) # # 4.2 [Numpy arrays](#nparray) # # 4.3 [Manipulating arrays](#maninp) # # 4.4 [Array Operations](#arrayop) # # 4.5 [np.random](#random) # # 4.6 [Vectorizing Functions](#vecfunc) # # # # ### Part 5 Data Loading # # 5.1 [TXT](#txt) # # 5.2 [CSV](#csv) # # 5.3 [JSON](#json) # # # + [markdown] colab_type="text" id="nDEk8Gqz-FBx" # --- # ## <span style="color:#0b486b">1. Python packages</span> # + [markdown] colab_type="text" id="ybgB1hro-FB2" # After completing previous Python sessions, you should know about the syntax and semantics of the Python language. 
But apart from that, you should also learn about Python libraries and its packages to be able to code efficiently. Python’s standard library is very extensive, offering a wide range of facilities as indicated [here](https://docs.python.org/2/library/). The library contains built-in modules (written in C) that provide access to system functionality such as file I/O that would otherwise be inaccessible to Python programmers, as well as modules written in Python that provide standardized solutions for many problems that occur in everyday programming. Look at the [Python Standard Library Manual](https://docs.python.org/2/library/) to read more. # # In addition to the standard library, there is a growing collection of several thousand components (from individual programs and modules to packages and entire application development frameworks), available from the [Python Package Index](https://pypi.python.org/pypi). # + [markdown] colab_type="text" id="Ws56RKhy-FB7" # <a id = "standlib"></a> # # ### <span style="color:#0b486b">1.1 Standard libraries</span> # + [markdown] colab_type="text" id="YLU8yilr-FCa" # For a complete list of Python standard library and their documentation look at the [Python Manual.](https://docs.python.org/2/library/) A few to mention are: # # * ``math`` for numeric and math-related functions and data types # * ``urllib`` for fetching data across the web # * ``datetime`` for manipulating dates and times # * ``pickle`` and ``cPickle`` for serializing and deserializing data structures enabling us to save our variables on the disk and load them from the disk # * ``os`` for operating system dependent functions # + [markdown] colab_type="text" id="D0gTVNG3-FCd" # <a id = "3rdparty"></a> # # ### <span style="color:#0b486b">1.2 Third party packages</span> # + [markdown] colab_type="text" id="G_FHaLGL-FCh" # There are thousands of third party packages, each developed for a special task. 
Some of the useful libraries for data science are: # # * ``numpy`` is probably the most fundamental package for efficient scientific computing in Python # * ``scipy`` is one of the core packages for scientific computations # * ``pandas`` is a library for operating with table-like data structures called DataFrame object # * ``matplotlib`` is a comprehensive plotting library # * ``BeautifulSoup`` is an HTML and XML parser # * ``scikit-learn`` is the most general machine learning library for Python # * ``nltk`` is a toolkit for natural language processing # + [markdown] colab_type="text" id="n9-8E657-FCs" # --- # <a id = "importmod"></a> # ### <span style="color:#0b486b">1.3 Importing a module</span> # + [markdown] colab_type="text" id="dDvdJbCz-FCv" # To use a module, first you have to ``import`` it. There are different ways to import a module: # # * `import my_module` # * `from my_module import my_function` # * `from my_module import my_function as func` # * `from my_module import submodule` # * `from my_module import submodule as sub` # * `from my_module import *` # + [markdown] colab_type="text" id="-ZAPHJav-FCz" # **`'import my_module'`** imports the module `'my_module'` and creates a reference to it in the namespace. For example `'import math'` imports the module `'math'` into the namespace. After importing the module this way, you can use the dot operator `(.)` to refer to the objects defined in the module. For example `'math.exp()'` refers to function `'exp()'` in module `'math'`. # + colab={} colab_type="code" id="f40P8yu7-FC3" import math x = 2 y1 = math.exp(x) y2 = math.log(x) print("e^{} is {} and log({}) is {}".format(x, y1, x, y2)) # + [markdown] colab_type="text" id="umlFNTcm-FDG" # **`'from my_module import my_function'`** only imports the function `'my_function'` from the module `'my_module'` into the namespace. This way you won't have access to neither the module (since you have not imported the module), nor the other objects of the module. 
You can only have access to the object you have imported. # # You can use a comma to import multiple objects. # + colab={} colab_type="code" id="S68L0h56-FDK" from math import exp x = 2 y = exp(x) # no need to math.exp() print("e^{} is {}".format(x, y)) # + [markdown] colab_type="text" id="O7OwKRz0-FDV" # **`'from my_module import my_function as func'`** imports the function `'my_function'` from module `'my_module'` but its identifier in the namespace is changed into `'func'`. This syntax is used to import submodules of a module as well. For example later you will see that nowadays it is almost a convention to import matplotlib.pyplot as plt. # + colab={} colab_type="code" id="PkemZT1l-FDY" # you can change the name of the imported object from math import exp as myfun x = 2 y = myfun(x) print("e^{} is {}".format(x, y)) # + [markdown] colab_type="text" id="yPV8jMzF-FDj" # **`'from my_module import *'`** imports all the public objects defined in `'my_module'` into the namespace. Therefore after this statement you can simply use the plain name of the object to refer to it and there is no need to use the dot operator: # + colab={} colab_type="code" id="RRWQj8n9-FDq" from math import * x = 2 y1 = exp(x) y2 = log(x) print("e^{} is {} and log({}) is {}".format(x, y1, x, y2)) # + [markdown] colab_type="text" id="jFyAzGPm-FD0" # **Exercise 1:** # # 1. Import the library `math` from standard Python libraries # 2. Define a variable and assign an integer value to it (smaller than 20) # 3. Use `factorial()` function (an object in `math` library) to calculate the factorial of the variable # 4. 
Print its value # + colab={} colab_type="code" id="69FyXRpt-FD3" #Put your code here # + [markdown] colab_type="text" id="1F2W0yVs1Zej" # <details><summary><u><b><font color="Blue">Click here for the solution to Exercise 1</font></b></u></summary> # # ```python # import math # n = 10 # # factorial means that the result when you multiply a whole number by all the numbers below it # # Hence, factorial(n) = n * (n-1) * (n-2) * ...... * 1 # print(math.factorial(n)) # ``` # # + [markdown] colab_type="text" id="C-TbbbaO-FEA" # **Exercise 2:** # # 1. Write a function that takes an integer variable and returns its factorial. # If you do not know how to write a function, just review the week 1 lab materials"SIT742P01C-FunctionOO.ipynb". # 2. Use this function to find the factorial of the variable defined in Exercise 1 # 3. Do your answers match? # + colab={} colab_type="code" id="wrVdMU_u-FEC" #Put your code here # + [markdown] colab_type="text" id="eQXtvPYIn0Ss" # <details><summary><u><b><font color="Blue">Click here for the solution to Exercise 2</font></b></u></summary> # # ```python # def my_factorial(n): # if n==1: # return 1 # else: # return n * my_factorial(n-1) # print(my_factorial(10)) # ``` # + [markdown] colab_type="text" id="Y9wByq5c-FEM" # --- # # ## <span style="color:#0b486b">2. Python simple input/output</span> # # <a id = "input"></a> # # ### <span style="color:#0b486b">2.1 Input</span> # # `input()` asks the user for a string of data (ended with a newline), and simply returns the string. # + colab={} colab_type="code" id="TPVPb_62-FEO" x = input('What is your name? ') print("x is {}".format(type(x))) print("Your name is {}".format(x)) # + [markdown] colab_type="text" id="Dq3-hh28-FEY" # **Exercise 3:** # # 1. Use `input()` to take a float value between -1 and 1 from the user # 2. Use the function `acos()` from `math` to find the arc cosine of it # 3. 
Print the value of the variable and its arc cosine # + colab={} colab_type="code" id="I6C0rhxp-FEa" #Put your code here # + [markdown] colab_type="text" id="M3foK9h0ooKY" # <details><summary><u><b><font color="Blue">Click here for the solution to Exercise 3</font></b></u></summary> # # ```python # x = input('Enter a real number between -1 and 1: ') # y = math.acos(float(x)) # # acos function is used as the trigonometric function that is equal to the ratio of the side adjacent to an acute angle (in a right-angled triangle) to the hypotenuse. # print('acos({}) = {}'.format(x,y)) # ``` # # # + [markdown] colab_type="text" id="00UHlj58-FEj" # As we know the domain of [arc cosine function][acos] is [-1, 1]. So, what if the value entered by the user is not in the domain (the value is smaller than -1 or greater that 1)? What happens then? # # To avoid raising a ValueError exception, before passing the value to `acos()` function make sure it is in range and if not, display an appropriate message. # # [acos]: http://mathworld.wolfram.com/InverseCosine.html # + colab={} colab_type="code" id="bnDnPfOq-FEo" # Add if-else statement on the exercise 3 x = input('Enter a real number between -1 and 1: ') x = float(x) if x>=-1 and x<=1: y = math.acos(x) print('acos({}) = {}'.format(x,y)) else: print('Out of range') # + [markdown] colab_type="text" id="qXO1sN7o-FEw" # <a id = "output"></a> # # ### <span style="color:#0b486b">2.2 output</span> # + [markdown] colab_type="text" id="Sb9kk_Uj-FE1" # The basic way to do output is the print statement. To print multiple things on the same line separated by spaces, use commas between them. # + colab={} colab_type="code" id="CRTLNz8--FE4" name = "John" msg = "Hello" print(msg) print(msg, name) # + [markdown] colab_type="text" id="DC3XfzpT-FFB" # Objects can be printed on the same line using the 'end' arguments. You can read the [print()](https://docs.python.org/3/library/functions.html#print) syntax. 
# + colab={} colab_type="code" id="9KQpYWC7-FFF" print('Sample is using the end=\'\,\'') for i in range(10): print(i, end=',') print('\nSample is using the end=\' \'') for i in range(10): print(i, end=' ') print('\nSample is without the end arguments') for i in range(10): print(i) # + [markdown] colab_type="text" id="U6o-Mudx-FFS" # --- # ## <span style="color:#0b486b">3. datetime module</span> # # + [markdown] colab_type="text" id="ro05oOUt-FFV" # The datetime module includes functions and classes for date and time parsing, formatting, and arithmetic. # + [markdown] colab_type="text" id="gBxIzp5W-FFa" # <a id = "time"></a> # # ### <span style="color:#0b486b">3.1 Time</span> # # Time values are represented with the time class. Times have attributes for hour, minute, second, and microsecond. They can also include time zone information. # + colab={} colab_type="code" id="Up5dk2aY-FFd" import datetime t = datetime.time(11, 21, 33) print(t) print('hour :', t.hour) print('minute:', t.minute) print('second:', t.second) print('microsecond:', t.microsecond) print('tzinfo:', t.tzinfo) # + [markdown] colab_type="text" id="9mA0v-_s-FFq" # <a id = "date"></a> # # ### <span style="color:#0b486b">3.2 Date</span> # # Calendar date values are represented with the date class. Instances have attributes for year, month, and day. # + colab={} colab_type="code" id="hPdlr4qX-FFt" import datetime today = datetime.date.today() print(today) print('ctime:', today.ctime()) print('tuple:', today.timetuple()) print('ordinal:', today.toordinal()) print('Year:', today.year) print('Mon :', today.month) print('Day :', today.day) # + [markdown] colab_type="text" id="PTbYXTCj-FF0" # A way to create new date instances is using the `replace()` method of an existing date. For example, you can change the year, leaving the day and month alone. 
# + colab={} colab_type="code" id="2bkkUju6-FF3" import datetime d1 = datetime.date(2013, 3, 12) print('d1:', d1) d2 = d1.replace(year=2015) print('d2:', d2) # + [markdown] colab_type="text" id="S2KFmbfp-FF_" # **Exercise 4:** # # 1. Write a piece of code that gives you the day of the week that you were born. # 2. How about this year? Do you know what day of the week is it? # + colab={} colab_type="code" id="KCc7E-_Q-FGB" #Put your code here # + [markdown] colab_type="text" id="SjLqYwOXo-Pi" # <details><summary><u><b><font color="Blue">Click here for the solution to Exercise 4</font></b></u></summary> # # ```python # import datetime # day_of_week = {0 : 'Monday', # 1: 'Tuesday', # 2: 'Wednesday', # 3: 'Thursday', # 4: 'Friday', # 5: 'Saturday', # 6: 'Sunday'} # # you could also use a list to store the days o the week # # and it would work just fine. # # days_of_week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] # # print('Today is', day_of_week[datetime.date.today().weekday()]) # # # Assume my birthday value is 10/10/2008 # my_birthdate = datetime.date(2008,10,10) # print('I was born on', day_of_week[my_birthdate.weekday()]) # # # Change the previous birthday year value with 2019 # t2 = my_birthdate.replace(year=2019) # print('and my birthday this year is on a', day_of_week[t2.weekday()]) # ``` # # + [markdown] colab_type="text" id="ESNhrSou-FGJ" # <a id = "timedelta"></a> # # ### <span style="color:#0b486b">3.3 timedelta</span> # Using `replace()` is not the only way to calculate future/past dates. You can use datetime to perform basic arithmetic on date values via the timedelta class. # + colab={} colab_type="code" id="fr7X-Pnq-FGL" today = datetime.datetime.today() print(today) tomorrow = today + datetime.timedelta(days=1) print(tomorrow) # + [markdown] colab_type="text" id="D1pyHfid-FGU" # **Exercise 5:** # # Rewrite Exercise 4 using timedelta method. 
# + colab={} colab_type="code" id="xRr0jYZV-FGX" #Put your code here # + [markdown] colab_type="text" id="GOQNxj5zpdyE" # <details><summary><u><b><font color="Blue">Click here for the solution to Exercise 5</font></b></u></summary> # # ```python # import datetime # day_of_week = {0 : 'Monday', # 1: 'Tuesday', # 2: 'Wednesday', # 3: 'Thursday', # 4: 'Friday', # 5: 'Saturday', # 6: 'Sunday'} # # You could also use a list to store the days o the week # # and it would work just fine. # # days_of_week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] # # print('Today is', day_of_week[datetime.date.today().weekday()]) # # # Assume my birthday value is 10/10/2008 # my_birthdate = datetime.date(2008,10,10) # print('I was born on', day_of_week[my_birthdate.weekday()]) # # # Change the previous birthday year value with 2019 # #You need to note that both 2012 and 2016 are leap year, which are 366 days in one year not 365 days. # t2 = my_birthdate + datetime.timedelta(days=365)*9 + datetime.timedelta(days=366)*2 # print('and my birthday this year is on a', day_of_week[t2.weekday()]) # ``` # # + [markdown] colab_type="text" id="lCCVhKtCKewi" # You will find that the result of exercise 4 is same with the exercise 5. # + [markdown] colab_type="text" id="NlLyL9gz-FGd" # You can use comparison operators for datetime objects too. It makes sense right? # + colab={} colab_type="code" id="pN-yw9iA-FGg" tomorrow > today # + [markdown] colab_type="text" id="5wwW6zPU-FGn" # <a id = "parsing"></a> # # ### <span style="color:#0b486b">3.4 Formatting and Parsing</span> # # The default string representation of a datetime object uses the ISO 8601 format (YYYY-MM-DDTHH:MM:SS.mmmmmm). Alternate formats can be generated using `strftime()`. Similarly, if your input data includes timestamp values parsable with `time.strptime()`, then `datetime.strptime()` is a convenient way to convert them to datetime instances. 
# + colab={} colab_type="code" id="kRoDoulw-FGs" today = datetime.datetime.today() print('ISO :', today) # + [markdown] colab_type="text" id="nLpPny1k-FG1" # string from datetime object # + colab={} colab_type="code" id="0YNv0AR_-FG4" str_format = "%a %b %d %H:%M:%S %Y" s = today.strftime(str_format) print('strftime:', s) # + [markdown] colab_type="text" id="dC2aAHmT-FHB" # datetime object from string # + colab={} colab_type="code" id="qHTRTrSg-FHH" print(s) d = datetime.datetime.strptime(s, str_format) print(d) print('strptime:', d.strftime(str_format)) # + colab={} colab_type="code" id="UYPLivY6-FHO" # Define a string variable "s", its value is 07/03/2017 s = "07/03/2017" # Define the string format str_format = "%m/%d/%Y" d = datetime.datetime.strptime(s, str_format) print(d) # + [markdown] colab_type="text" id="Gm82ouDc-FHV" # **Exercise 6:** # # You have a string as "7/30/2017 - 12:13". How do you convert it into a datetime object? # + colab={} colab_type="code" id="r1V0948Q-FHX" #Put your code here # + [markdown] colab_type="text" id="MktknO-sqiPe" # <details><summary><u><b><font color="Blue">Click here for the solution to Exercise 6</font></b></u></summary> # # ```python # s = '7/30/2017 - 12:13' # str_format = "%m/%d/%Y - %H:%M" # t = datetime.datetime.strptime(s, str_format) # print(t) # ``` # # + [markdown] colab_type="text" id="fm6guAC1-FHe" # --- # ## <span style="color:#0b486b">4. Numpy module</span> # # + [markdown] colab_type="text" id="o1E02sGn-FHg" # Python lists are very flexible for storing any sequence of Python objects. But usually flexibility comes at the price of performance and therefore Python lists are not ideal for numerical calculations where we are interested in performance. Here is where **NumPy** comes in. It adds support for large, multi-dimensional arrays and matrices, along with high-level mathematical functions to operate on these arrays to Python. 
#
# Relying on `'BLAS'` and `'LAPACK'`, `'NumPy'` gives functionality comparable with `'MATLAB'` to Python. NumPy facilitates advanced mathematical and other types of operations on large amounts of data. Typically, such operations are executed more efficiently and with less code than is possible using Python’s built-in sequences. It has become one of the fundamental packages used for numerical computations.
#
# In this tutorial we will review its basics; to learn more about NumPy, visit the [NumPy User Guide](http://docs.scipy.org/doc/numpy/user/index.html)

# + [markdown] colab_type="text" id="Hc_bsO3l-FHj"
# <a id = "importnp"></a>
#
# ### <span style="color:#0b486b">4.1 Importing Numpy</span>

# + [markdown] colab_type="text" id="yWUvtzi8-FHm"
# As you have learnt in this session, first we have to import a package to be able to use it. NumPy is imported with:

# + colab={} colab_type="code" id="2-B8prOg-FHq"
import numpy

# + [markdown] colab_type="text" id="a39r4M3p-FH1"
# Although it is the convention to import it with an alias:

# + colab={} colab_type="code" id="SKj133sh-FH7"
import numpy as np

# + [markdown] colab_type="text" id="uAmsO-V8-FIR"
# <a id = "nparray"></a>
#
# ### <span style="color:#0b486b">4.2 Numpy arrays</span>

# + [markdown] colab_type="text" id="tlnX1eG8-FIT"
# The core of NumPy is its arrays. You can create an array from a Python list or tuple using the `'array'` function. They work similarly to lists apart from the fact that:
#
# * you can easily perform element-wise operations on them, and
# * unlike lists, they should be pre-allocated.
#
# The first point is further explained in [Array operations section](03-prac3.ipynb#Array-operations). The second point means that there is no equivalent to list append for arrays. The size of an array is known at the time it is defined.
# + [markdown] colab_type="text" id="ATvr-1ZS-FIW" # #### <span style="color:#0b486b">4.2.1 Create an array from a list</span> # + colab={} colab_type="code" id="xC6z9Xyo-FIY" x = [1, 7, 3, 4, 0, -5] # + colab={} colab_type="code" id="gdsnKRWa-FIi" y = np.array(x) type(y) # + [markdown] colab_type="text" id="nnBy-g5b-FIq" # #### <span style="color:#0b486b">4.2.2 Create an array using a range</span> # + colab={} colab_type="code" id="OBt00bhq-FIs" range(5) # + colab={} colab_type="code" id="e1KYDBLq-FI7" print(np.array(range(5))) # + colab={} colab_type="code" id="LhL0M03i-FJD" print(np.arange(2, 3, 0.2)) # Why is there no value 3.0 in the output? # + colab={} colab_type="code" id="_SQTHmfL-FJL" print(np.linspace(2, 3, 5)) # returns numbers spaced evenly on a linear scale, both endspoints are included # + [markdown] colab_type="text" id="JDpURgHfM8-D" # Just try to change the variable value 5 with 1, 2, 4 or 10? \ # # What pattern could you find ? # # Could you guess what is the function of **linspace**, if without the given comments? # # Then, you can try to use the same method to learn what is the function of **logspace**? # # # # + colab={} colab_type="code" id="Ol089FmC-FJS" print(np.logspace(2, 3, 5)) # returns numbers spaced evenly on a log scale # + [markdown] colab_type="text" id="hXqpHuub-FJZ" # **Note:** If you need any help on how to use a function or what it does, you can use IPython help. Just add a question mark (?) at the end of the function and execute the cell: # + colab={} colab_type="code" id="xFYTD-O--FJb" # np.logspace? 
# + [markdown] colab_type="text" id="PnwvGJX4-FJj" # #### <span style="color:#0b486b">4.2.3 Create a prefilled array</span> # + colab={} colab_type="code" id="zSvnqTuR-FJn" print(np.zeros(5)) # + colab={} colab_type="code" id="MuWwpVx2-FJ0" print("The 1st sample is",np.ones(5, dtype=int)) # you can specify the data type, default is float print("The 2nd sample is ",np.ones(5, dtype=float)) print("The 3rd sample is ",np.ones((5,5), dtype=int)) print("The 4th sample is ",np.ones((5,5,5), dtype=int)) # + colab={} colab_type="code" id="1crrWNwwO0BP" np.ones? # You can use this command to learn what is the function of np.ones # + [markdown] colab_type="text" id="g7FgDgFa-FJ7" # #### <span style="color:#0b486b">4.2.4 `'mgrid'`</span> # similar to meshgrid in MATLAB: # + colab={} colab_type="code" id="U5OdZS-L-FKB" x, y = np.mgrid[0:5, 0:3] print(x) print(y) # + colab={} colab_type="code" id="vI0XK1HEPpRb" # np.mgrid? # + [markdown] colab_type="text" id="BK24IR2b-FKJ" # #### <span style="color:#0b486b">4.2.5 Array attributes</span> # # NumPy arrays have multiple attributes and methods. The cell below shows a few of them. You can press tab after typing the dot operator `'(.)'` to use IPython auto-complete and see the rest of them. 
# + colab={} colab_type="code" id="MirmVfAm-FKN" y = np.array([3, 0, -4, 6, 12, 2]) # + colab={} colab_type="code" id="lPHoezhC-FKW" print("number of dimensions:\t", y.ndim) print("dimension of the array:", y.shape) print("numerical data type:\t", y.dtype) print("maximum of the array:\t", y.max()) print("index of the array max:", y.argmax()) print("mean of the array:\t", y.mean()) # + [markdown] colab_type="text" id="CrbiDDhr-FKk" # #### <span style="color:#0b486b">4.2.6 Multi-dimensional arrays</span> # # + [markdown] colab_type="text" id="qDBhj18y-FKn" # You can define arrays with 2 (or higher) dimensions in numpy: # + [markdown] colab_type="text" id="ltKaGI6K-FKo" # ##### from lists # + colab={} colab_type="code" id="d8M2wkbR-FKr" x = [[1, 2, 10, 20], [3, 4, 30, 40]] y = np.array(x) print(y) print() print(y.ndim, y.shape) # + [markdown] colab_type="text" id="XmXmLGk1-FKz" # ##### pre-filled # + colab={} colab_type="code" id="qiG5NGcB-FK2" x = np.ones((3, 5), dtype='int') # + colab={} colab_type="code" id="qSRQFimN-FK8" print(x) print() print(x.ndim, x.shape) # + [markdown] colab_type="text" id="foS348qb-FLB" # ##### `'diag()'` # diagonal matrix # + colab={} colab_type="code" id="M-ttVrqJ-FLN" np.diag([1, 2, 3]) # + [markdown] colab_type="text" id="3fRD43wu-FLY" # <a id = "maninp"></a> # # ### <span style="color:#0b486b">4.3 Manipulating arrays</span> # # + [markdown] colab_type="text" id="cgGZKWuW-FLa" # #### <span style="color:#0b486b">4.3.1 Indexing</span> # # + [markdown] colab_type="text" id="reg5bJIY-FLc" # Similar to lists, you can index elements in an array using `'[]'` and indices: # + [markdown] colab_type="text" id="47U6VVq--FLf" # If `'x'` is a 1-dimensional array, `'x[i]'` will index `'ith'` element of `'x'`: # + colab={} colab_type="code" id="WI3XEpO0-FLi" x = np.array([2, 8, -2, 4, 3]) print(x[3]) # + [markdown] colab_type="text" id="kOhiutVu-FLp" # If 'x' is a 2-dimensional arrray: # # * '`x[i, j]'` or `'x[i][j]'` will index the element in `'ith'` 
row and `'jth'` column # * '`x[i, :]'` will index the `'ith'` row # * `'x[:, j]'` will index `'jth'` column # + colab={} colab_type="code" id="5dZ8bMdA-FLr" x = np.array([[7, 6, 8, 6, 4], [4, 7, -2, 0, 9]]) print(x[1, 3]) # + colab={} colab_type="code" id="RYHDHduJ-FLw" print(x[1, :]) # or x[1] # + colab={} colab_type="code" id="WUyelaZV-FL1" print(x[:, 3]) # + [markdown] colab_type="text" id="YMP2cU6y-FMB" # Arrays can also be indexed with other arrays: # + colab={} colab_type="code" id="QgEan722-FMC" x = np.array([2, 8, -2, 4, 3, 9, 0]) idx1 = [1, 3, 4] # list idx2 = np.array(idx1) # array print(x[idx1], x[idx2]) x[idx2] = 0 print(x) # + [markdown] colab_type="text" id="0KBUG3Eg-FMI" # You can also index masks. The index mask should be a NumPy arrays of data type Bool. Then the element of the array is selected only if the index mask at the position of the element is True. # + colab={} colab_type="code" id="X1I8PH1Q-FMK" x = np.array([2, 8, -2, 4, 3, 9, 0]) # + colab={} colab_type="code" id="7ge96tsf-FMQ" mask = np.array([False, True, True, False, False, True, False]) # + colab={} colab_type="code" id="PEoatrjz-FMU" x[mask] # + [markdown] colab_type="text" id="rHObUufk-FMb" # Combining index masks with comparison operators enables you to conditionally select elements of the array. 
# + colab={} colab_type="code" id="ylAy1AHp-FMd" x = np.array([2, 8, -2, 4, 3, 9, 0]) mask = (x>=2) * (x<9) x[mask] # + [markdown] colab_type="text" id="HA-J3zZr-FMq" # #### <span style="color:#0b486b">4.3.2 Slicing</span> # # + [markdown] colab_type="text" id="blX9Xzbl-FMs" # Similar to Python lists, arrays can also be sliced: # + colab={} colab_type="code" id="QJ550Wak-FMu" x = np.array([2, 8, -2, 4, 3, 9, 0]) print(x[3:]) # slicing print(x[3:7:2]) # slicing with a specified step # + colab={} colab_type="code" id="w8V9DnNs-FMz" x = np.array([[7, 6, 8, 6, 4, 3], [4, 7, 0, 5, 9, 5], [7, 3, 6, 3, 5, 1]]) print(x[1, 1:4]) print() print(x[:2, 1::2]) # rows zero up to 2, cols 1 up to end with a step=2 # + [markdown] colab_type="text" id="hWTmUPd6-FM5" # #### <span style="color:#0b486b">4.3.3 Iteration over items</span> # # + [markdown] colab_type="text" id="Eu9gjTKo-FM7" # Since most of NumPy functions are capable of operating on arrays, in many cases iteration over items of an arrays can be (and should be) avoided. Otherwise it is pretty much similar to iterating over values of a list: # + colab={} colab_type="code" id="eayPM8on-FNA" a = np.arange(0, 50, 7) print(a) for item in a: print(item,) # + [markdown] colab_type="text" id="uEEZs2Pd-FNG" # Of course you could iterate over items using their indices too: # + colab={} colab_type="code" id="W35khYsr-FNJ" a = np.arange(0, 50, 7) for i in range(a.shape[0]): print(a[i],) # + [markdown] colab_type="text" id="E9efgG2f-FNQ" # There are also many functions for manipulating arrays. The most used ones are: # + [markdown] colab_type="text" id="Jq2e_fTx-FNS" # #### <span style="color:#0b486b">4.3.4 `copy()`</span> # # + [markdown] colab_type="text" id="Yqcj120w-FNU" # **Remember** that assignment operator is not an equivalent for copying arrays. In fact Python does not pass the values. It passess the references. 
# + colab={} colab_type="code" id="Wkx8l22X-FNa" x = [1, 2, 3] y = x print(x, y) # + colab={} colab_type="code" id="69hYgW58-FNf" y[0] = 0 # now we alter an element of y print(x, y) # note that x has changed as well # + [markdown] colab_type="text" id="Oof00tVc-FNq" # Same is true for numpy arrays. That's why if you need a copy of an array, you should use `'copy()'` function. # + colab={} colab_type="code" id="ir5585gc-FNs" x = np.array([1, 2, 3]) y = x y[0] = 0 # now we alter an element of y print(x, y) # note that x has changed as well # + colab={} colab_type="code" id="Uo-iZzvC-FN3" x = np.array([1, 2, 3]) y = x.copy() # or np.copy(x) y[0] = 0 print(x, y) # + [markdown] colab_type="text" id="tSrPOa8t-FN_" # #### <span style="color:#0b486b">4.3.5 `reshape()`</span> # # + colab={} colab_type="code" id="8tnEtiPp-FOC" x1 = np.arange(6) x2 = x1.reshape((2, 3)) # or np.reshape(x1, (2, 3)) print(x1) print() print(x2) # + [markdown] colab_type="text" id="7_aHU31V-FOL" # #### <span style="color:#0b486b">4.3.6 `astype()`</span> # # + [markdown] colab_type="text" id="76fjepzM-FON" # Used for type casting: # + colab={} colab_type="code" id="k1GoBiQV-FOO" x1 = np.arange(5) x2 = x1.astype(float) print(type(x1), x1) print(type(x2), x2) # + [markdown] colab_type="text" id="k6bOoR14-FOU" # #### <span style="color:#0b486b">4.3.7 `T`</span> # + [markdown] colab_type="text" id="oe1PUocl-FOc" # transpose method: # + colab={} colab_type="code" id="DXLlgFaT-FOe" x1 = np.random.randint(5, size=(2, 4)) x2 = x1.T print(x1) print() print(x2) # + [markdown] colab_type="text" id="axdyp7nJ-FOi" # <a id = "arrayop"></a> # # ### <span style="color:#0b486b">4.4 Array operations</span> # # + [markdown] colab_type="text" id="Pdnp48Ov-FOp" # #### <span style="color:#0b486b">4.4.1 Arithmetic operators</span> # # + [markdown] colab_type="text" id="tbF09PSC-FOr" # Arrays can be added, subtracted, multiplied and divided using +, -, \* and, /. Operations done by these operators are **element wise**. 
# + colab={} colab_type="code" id="Rrdn8VQE-FOt" x1 = np.array([[2, 3, 5, 7], [2, 4, 6, 8]], dtype=float) x2 = np.array([[6, 5, 4, 3], [9, 7, 5, 3]], dtype=float) # + colab={} colab_type="code" id="NsdLlPTm-FO1" print(x1) print() print(x2) # + colab={} colab_type="code" id="6NWyxuog-FO7" print(x1 + x2) # + colab={} colab_type="code" id="xb4lLmlt-FPG" print(x1 - x2) # + colab={} colab_type="code" id="7NM1P_u6-FPM" print(x1 * x2) # + colab={} colab_type="code" id="pBirpVSg-FPR" print(x1 / x2) # + colab={} colab_type="code" id="BWYmg3gc-FPZ" print(3 + x1) # + colab={} colab_type="code" id="bHFn1Mxt-FPd" print(3 * x1) # + colab={} colab_type="code" id="Piegik6m-FPl" print(3 / x1) # + [markdown] colab_type="text" id="2xqoJwUZ-FPr" # #### <span style="color:#0b486b">4.4.2 Boolean operators</span> # # Much like arithmetic operators discussed above, boolean (comparison) operators perform element-wise on arrays. # + colab={} colab_type="code" id="aM5g731W-FPs" x1 = np.array([2, 3, 5, 7]) x2 = np.array([2, 4, 6, 7]) y = x1<x2 print( y, y.dtype) # + [markdown] colab_type="text" id="cnIFJb0j-FPw" # use methods `'.any()'` and `'.all()'` to return a single boolean value indicating whether any or all values in the array are True respectively. This value in turn can be used as a condition for an `'if'` statement. # + colab={} colab_type="code" id="7NGdUs29-FPy" print (y.all()) print (y.any()) # + [markdown] colab_type="text" id="xy2gjc2T-FP3" # NumPy has many other functions that you can read about them in [NumPy User Guide](http://docs.scipy.org/doc/numpy/user/). 
Specially read about: # + [markdown] colab_type="text" id="0lc_o_C5-FP4" # * `np.unique`, returns unique elements of an array # * `np.flatten`, flattens a multi-dimensional array # * `np.mean`, `np.std`, `np.median` # * `np.min`, `np.max`, `np.argmin`, `np.argmax` # + [markdown] colab_type="text" id="xTcRRTbn-FP5" # <a id = "random"></a> # # ### <span style="color:#0b486b">4.5 np.random</span> # # + [markdown] colab_type="text" id="MPPLTWcv-FP7" # NumPy has a module called `random` to generate arrays of random numbers. There are different ways to generate a random number: # + colab={} colab_type="code" id="OlyjQz8z-FP8" print( np.random.rand()) # + colab={} colab_type="code" id="IxnFBbRv-FQD" # 2x5 random array drawn from standard normal distribution print( np.random.random([2, 5])) # + colab={} colab_type="code" id="YZ_OPkSs-FQI" # 2x5 random array drawn from standard normal distribution print (np.random.rand(2, 5)) # + colab={} colab_type="code" id="9GIPOTP_-FQM" # 2x5 random array drawn from a uniform distribution on {0, 1, 2, ..., 9} print (np.random.randint(10, size=[2, 5])) # + [markdown] colab_type="text" id="C9UFe6xH-FQQ" # ##### <span style="color:#0b486b">4.5.1 Random seed</span> # # + [markdown] colab_type="text" id="yqKfnar6-FQR" # Random numbers generated by computers are not really random. They are called pseudo-random. Thus we can set the random generator to generate the same set of random numbers every time. This is useful while testing the code. 
# + colab={} colab_type="code" id="d55atcWW-FQS" for i in range(5): print (np.random.random(),) # + colab={} colab_type="code" id="IEhNqhH2-FQX" for i in range(5): np.random.seed(100) print (np.random.random(),) # + [markdown] colab_type="text" id="AZWRunty-FQd" # <a id = "vecfunc"></a> # # ### <span style="color:#0b486b">4.6 Vectorizing functions</span> # # + [markdown] colab_type="text" id="cO7BR00T-FQf" # As mentioned earlier in operators, to get a good performance you should avoid looping over elements in an array and use vectorized algorithms. Many methods and functions of NumPy already support vectors, so keep this in mind while writing your own code. # # But for now, suppose you have written a step function which does not work with arrays, as the cell below: # + colab={} colab_type="code" id="TZLJoQZq-FQh" def step_func(x): """ scalar implementation of step function """ if x>=0: return 1 else: return 0 # + [markdown] colab_type="text" id="4PRIQauM-FQm" # Obviously it fails when dealing with an array, because it expects a scalar as its input. Execute the cell below and see that it raises an error: # + colab={} colab_type="code" id="pqcpMBr7-FQn" # since step_func expects a scalar and recieves an array instead, # it raises an error step_func(np.array([2, 7, -4, -9, 0, 4])) # + [markdown] colab_type="text" id="iSkI0M-G-FQr" # You can use the function `'np.vectorize()'` to obtain a vectorized version of `'step_func'` that can handle vector data: # + colab={} colab_type="code" id="VWZEN82_-FQt" step_func_vectorized = np.vectorize(step_func) step_func_vectorized(np.array([2, 7, -4, -9, 0, 4])) # + [markdown] colab_type="text" id="bnn0lc9P-FQy" # Although `'vectorize()'` can automatically derive a vectorized version of a scalar function, but it is always better to keep this in mind and write functions vector-compatible, from the beginning. For example we could write the step function as it is shown in the cell below, so it can handle scalar and vector data. 
# + colab={} colab_type="code" id="7qdNrJ3R-FQ1" def step_func2(x): """ vector and scalar implementation of step function """ return 1 * (x>=0) # + colab={} colab_type="code" id="pB2wcxK0-FQ5" step_func2(np.array([2, 7, -4, -9, 0, 4])) # + [markdown] colab_type="text" id="05sezgN2-FQ9" # --- # ## <span style="color:#0b486b">5. File I/O</span> # + [markdown] colab_type="text" id="Z6iEGtaO-FQ-" # For Online platforms such as IBM Cloud, it is important for you to get familiar with the provided data storage or cloud data storage function. Alternatively, you might want to directly access the file, and load into your Notebook. # + colab={} colab_type="code" id="DopvIhrG-FRA" # !pip install wget # + [markdown] colab_type="text" id="QXFY49LC-FRE" # Then you can download the file into GPFS file system. # + colab={} colab_type="code" id="ab6v2GxS-FRF" import wget link_to_data = 'https://github.com/tulip-lab/sit742/raw/master/Jupyter/data/csv_data1.csv' DataSet = wget.download(link_to_data) # - # If you want to download the generated file to your local computer, please refer to the following URL: # # https://colab.research.google.com/notebooks/io.ipynb#scrollTo=p2E4EKhCWEC5 # + [markdown] colab_type="text" id="uluOknMC-FRJ" # <a id = "txt"></a> # # ### <span style="color:#0b486b">5.1 TXT</span> # # + [markdown] colab_type="text" id="o1ZysbwZ-FRK" # TXT file format is the most simplistic way to store data. 
# # Load a TXT file with `'np.loadtxt()'`: # + colab={} colab_type="code" id="Q2zCykcT-FRL" import numpy as np # This code is for local PC # x = np.loadtxt("data/txt_data1.txt") # The following code for IBM Cloud link_to_data = 'https://github.com/tulip-lab/sit742/raw/master/Jupyter/data/txt_data1.txt' DataSet = wget.download(link_to_data) x = np.loadtxt("txt_data1.txt") x # + [markdown] colab_type="text" id="iZZfyuL1-FRP" # Save a TXT file with `'np.savetxt()'`: # + colab={} colab_type="code" id="-RkfeDje-FRR" y = np.random.randint(10, size=5) np.savetxt("txt_data2.txt", y) y # + [markdown] colab_type="text" id="OT2pfvIA-FRW" # <a id = "csv"></a> # # ### <span style="color:#0b486b">5.2 CSV</span> # # + [markdown] colab_type="text" id="XN8xU7cR-FRX" # Comma Separated Values format and its variations, are one the most used file format to store data. # # You can use `'np.genfromtxt()'` to read a CSV file: # # **NOTE:** The best way to read CSV and XLS files is using **pandas** package that will be introduced later. # + colab={} colab_type="code" id="gHHWnj8r-FRZ" import wget link_to_data = 'https://github.com/tulip-lab/sit742/raw/master/Jupyter/data/csv_data1.csv' DataSet = wget.download(link_to_data) print(DataSet) # + colab={} colab_type="code" id="PlB0GcDT-FRd" import numpy as np x = np.genfromtxt("csv_data1.csv", delimiter=",") x # + [markdown] colab_type="text" id="eMR_478x-FRk" # Use `'np.savetxt()'` to save a 2d-array in a CSV file. # + colab={} colab_type="code" id="BvkGTJF_-FRl" x = np.random.randint(10, size=(6,4)) np.savetxt("csv_data2.csv", x, delimiter=',') x # + [markdown] colab_type="text" id="jWORjUcv-FRp" # <a id = "json"></a> # # ### <span style="color:#0b486b">5.3 JSON</span> # # # JSON is the most used file format when dealing with web services. # # To read a JSON file, use `'json'` package and `'load()'` function, or `'loads()'` if the data is serialized. It reads the data and parses it into a dictionary. 
# + colab={} colab_type="code" id="yABGeta3-FRq" link_to_data = 'https://github.com/tulip-lab/sit742/raw/master/Jupyter/data/json_data1.json' DataSet = wget.download(link_to_data) print(DataSet) # + colab={} colab_type="code" id="7FAA_6uT-FRu" import json with open("json_data1.json", 'rb') as fp: fcontent = fp.read() # data = json.loads(fcontent) data = json.loads(fcontent.decode('utf-8')) data.keys() # + colab={} colab_type="code" id="DWTeTfFq-FRz" data # + colab={} colab_type="code" id="hc-aODdh-FR2" data['phoneNumbers'] # + [markdown] colab_type="text" id="dOuOtK50-FR5" # You can also write a python dictionary into a JSON file. To do this use `'dump()'` or `'dumps()'` functions. # + colab={} colab_type="code" id="GuglShkt-FR6" data = [{'Name': 'Zara', 'Age': 7, 'Class': 'First'}, {'Name': 'Lily', 'Age': 9, 'Class': 'Third'}]; data # + colab={} colab_type="code" id="Ql8yHsKw-FR9" with open("json_data_now.json", 'w') as fp: json.dump(data, fp)
Jupyter/M09-Optional/SIT742P03D-DataAcquisition.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: cs224n # language: python # name: cs224n # --- # %reload_ext autoreload # %autoreload 2 # %matplotlib inline import os os.chdir('../../') from musicautobot.numpy_encode import * from musicautobot.config import * from musicautobot.music_transformer import * from musicautobot.multitask_transformer import * from musicautobot.utils import midifile
notebooks/.ipynb_checkpoints/data_overview-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="D7YYMRSQ7r2-" colab_type="text" # # MNIST with PyTorch # # In this notebook example, we will walk through how to train a simple CNN to classify MNIST images. # # We will rely on the following modules, including torch and torchvision. # + id="PvyV5PtX_QLp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 446} outputId="6df18b85-5413-4ede-f914-d08cd2f80b8f" # install dependencies: (use cu101 because colab has CUDA 10.1) # !pip install -U torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html # + id="OtRmxP5M7r2_" colab_type="code" colab={} import torch import torchvision from torch import nn from torch.utils.data import DataLoader from torchvision import datasets from torchvision import transforms import matplotlib.pyplot as plt # + [markdown] id="BJZNOG6w7r3C" colab_type="text" # ## 1. Data Loader # # The first step is to create a data loader. # # A data loader can be treated as a list (or iterator, technically). Each time it will provide a minibatch of (img, label) pairs. 
# + id="cDoroXlA7r3C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 352, "referenced_widgets": ["f9f87e2fa8a44dba89ac06cb0ab1b926", "f32cb999e37b47ed910ebad66d8f1d3c", "01f148a195a34082bef4d03a68f5e469", "bf0a7209b54447e7846c56afa7e26be3", "<KEY>", "<KEY>", "028efaedeca94b72b2b66a7814208058", "a0cea30db76147d7b74d54d1e4ff572a", "<KEY>", "8f217980595e4341b984319c9b1e334c", "<KEY>", "ccca8301d25c4316b2f012ffc1a8ca99", "<KEY>", "<KEY>", "8d5efcf6fa034c10ab316d2250001c02", "8af883caf3744ea6948c5e64887a841e", "fb1adf787a144e16ad474c42514fef21", "<KEY>", "9cc81a5fb1aa488d9a129962b082f28f", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "429e4d1a0ba043deacb10d4a7aea3309", "<KEY>", "f9cac3efa3e04412b877be8145cd0d5f", "<KEY>", "3ddb5af8a4424b56a4e9ec054933af76", "<KEY>", "d6c961e272fe42a5801556b175b49b7d"]} outputId="4f2f1a1d-7aae-4bca-b3b5-de40062fa160" # Choose a dataset -- MNIST for example train_set = datasets.MNIST(root='./data', train=True, download=True) test_set = datasets.MNIST(root='./data', train=False, download=True) train_set.transform = transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]) test_set.transform = transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]) # Create a data loader train_loader = DataLoader(train_set, batch_size=64, shuffle=True) test_loader = DataLoader(test_set, batch_size=64, shuffle=False) # + [markdown] id="zcUyoucM7r3E" colab_type="text" # Let us show some of the training images, for fun. 
# + id="oqtIu1WJ7r3F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 232} outputId="951b80cb-ed64-420c-f2b6-bcfb2194227b" def imshow(img, mean=0, std=1): img = img * std + mean # unnormalize npimg = img.numpy().transpose(1, 2, 0).squeeze() # (c,h,w) -> (h,w,c) plt.imshow(npimg) plt.show() trainiter = iter(train_loader) img, label = trainiter.next() print(img.shape) # (n,c,h,w) # Visualization img = torchvision.utils.make_grid(img) imshow(img, 0.1307, 0.3081) print('GT\t:', ' '.join('%d' % label[j].item() for j in range(len(label)))) # + id="E3KZnVpGVZPS" colab_type="code" colab={} # To check a single random image trainiter = iter(train_loader) img, label = trainiter.next() plt.imshow(img[0].reshape(28,28), cmap="gray") # + [markdown] id="GdO1bC6f7r3I" colab_type="text" # ## 2. Model # # The second step is to define our model. # # We will use a simple CNN with conv(5x5) -> relu -> pool(2x2) -> conv(5x5) -> relu -> pool(2x2) -> fc(200) -> relu -> fc(10). # # In PyTorch, a model is defined by a subclass of nn.Module. It has two methods: # # - `__init__`: constructor. Create layers here. Note that we don't define the connections between layers in this function. # # - `forward(x)`: forward function. Receives an input variable `x`. Returns a output variable. Note that we actually connect the layers here dynamically. # # We no longer need to implement the backward function. The computational graph will be built implicitly based on the forward operations, and the gradients can be automatically computed. 
# + id="5kjUx5co7r3I" colab_type="code" colab={} class SimpleCNN(nn.Module): def __init__(self): super(SimpleCNN, self).__init__() # Call parent class's constructor self.conv1 = nn.Conv2d(1, 10, 5, 1) self.conv2 = nn.Conv2d(10, 20, 5, 1) self.pool = nn.MaxPool2d(2) self.fc1 = nn.Linear(4*4*20, 200) self.fc2 = nn.Linear(200, 10) self.relu = nn.ReLU() def forward(self, x): x = self.conv1(x) x = self.relu(x) # When a nn.Module is called, it will compute the result x = self.pool(x) x = self.conv2(x) x = self.relu(x) x = self.pool(x) x = x.view(-1, 4*4*20) # Flatten the data x = self.relu(self.fc1(x)) x = self.fc2(x) return x model = SimpleCNN() # + [markdown] id="NJJqU46wQDgp" colab_type="text" # You can double check your model specification. # + id="N3KjjXMWP-3_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="120e4f01-1ced-4af8-e943-106cb41337a7" print(model) # + [markdown] id="QNyhQc4b7r3K" colab_type="text" # Let us test with random inputs. # + id="9V3tpsvA7r3L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8c1f205b-deda-4d5d-bb57-f3b6a6074401" x = torch.rand([64,1,28,28]) y = model(x) print(y.shape) # + [markdown] id="nXye5PeN7r3N" colab_type="text" # ## 3. Loss and Optimizer # # The third step is to define the loss function and the optimization algorithm. Let’s use a Classification Cross-Entropy loss and SGD with momentum. # + id="PhruYNOP7r3O" colab_type="code" colab={} criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(model.parameters(), 0.01, momentum=0.9, weight_decay=5e-4) # + [markdown] id="k2gsLj737r3Q" colab_type="text" # ## 4. Training # # The next step is to start the training process. 
# + id="3yXAXWAk7r3Q" colab_type="code" colab={} def train(epoch): model.train() # Set the model to be in training mode for batch_index, (inputs, targets) in enumerate(train_loader): # Forward outputs = model(inputs) loss = criterion(outputs, targets) if batch_index % 100 == 0 or batch_index == len(train_loader)-1: print('epoch {} batch {}/{} loss {:.3f}'.format( epoch, batch_index, len(train_loader)-1, loss.item())) # Backward optimizer.zero_grad() # Set gradients to zero loss.backward() # From the loss we compute the new gradients optimizer.step() # Update the parameters/weights # + [markdown] id="gcg05Ycv7r3T" colab_type="text" # 'zero_grad' is to clear previous gradients. We need to set the gradients to zero before starting to do backpropragation because PyTorch accumulates the gradients on subsequent backward passes. # # Now let us train the network for 1 pass over the training dataset, and check if the network has learned anything at all. # + id="FkknLoON7r3T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="76dc8c53-7ccf-444f-d037-1bbb00bbd9cd" train(0) # + [markdown] id="Fh0o1bpVbHG8" colab_type="text" # ## 5. Plotting Training Curve # # How do we know when to stop training? How do we know what learning rate and what batch sizes are appropriate? Those are very important and practical questions to answer when training a neural network. We answer those questions by plotting a **training curve**. # # A **training curve** is a chart that shows: # 1. The **iterations** or **epochs** on the x-axis # 2. The **loss** or **accuracy** on the y-axis # # The idea is to track how the loss or accuracy changes as training progresses. # # Let's redefine the training process. 
# + id="ixwjT01AbeKf" colab_type="code" colab={} def train(epoch): model.train() # Set the model to be in training mode iters = [] # save the iteration counts here for plotting losses = [] # save the avg loss here for plotting for batch_index, (inputs, targets) in enumerate(train_loader): # Forward outputs = model(inputs) loss = criterion(outputs, targets) if batch_index % 100 == 0 or batch_index == len(train_loader)-1: print('epoch {} batch {}/{} loss {:.3f}'.format( epoch, batch_index, len(train_loader)-1, loss.item())) # Backward optimizer.zero_grad() # Set gradients to zero loss.backward() # From the loss we compute the new gradients optimizer.step() # Update the parameters/weights # Save the current training information iters.append(batch_index) losses.append(float(loss)) # Plotting plt.plot(iters, losses) plt.title("Training Curve") plt.xlabel("Iterations") plt.ylabel("Loss") plt.show() # + [markdown] id="jsCqKD3oc2sO" colab_type="text" # Let's try the training again for 1 epoch. # + id="p32rpvIsc-wZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="8f0d616d-07ac-42d9-f11a-b9ed084330bc" model = SimpleCNN() criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(model.parameters(), 0.01, momentum=0.9, weight_decay=5e-4) train(0) # + [markdown] id="yTJUPqaHdEIT" colab_type="text" # The first thing that you might notice is that the loss is a bit noisy. Some people choose to plot a running average of the loss to remove some of the noise. # # You can try how the training curve changes as we change the batch size and the learning rate. # # # + [markdown] id="NVw7ZwOWRACH" colab_type="text" # ##6. Visualizing Filters # # In this section, we will look into the practical aspects and code for visualizing filters. # # We first create a model_weights list to save the weights of all the convolutional layers. # # Next, we are getting all the model children as list and storing them in the model_children list. 
This will allow us to easily access the hidden layers. # + id="QWcRSqpsPLAS" colab_type="code" colab={} model_weights = [] # We will save the conv layer weights in this list # Get all the model children as list model_children = list(model.children()) # + [markdown] id="C0-nKeKGQ0At" colab_type="text" # We will have to traverse through all these nestings to retrieve the convolutional layers and their weights. # # The following code shows how to retrieve all the convolutional layers and their weights. # + id="CDX5JAJDQlls" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4c474101-cfad-4ac0-9a02-23210489a30e" # Counter to keep count of the conv layers counter = 0 # Append all the conv layers and their respective weights to the list for i in range(len(model_children)): if type(model_children[i]) == nn.Conv2d: counter += 1 model_weights.append(model_children[i].weight) elif type(model_children[i]) == nn.Sequential: for j in range(len(model_children[i])): for child in model_children[i][j].children(): if type(child) == nn.Conv2d: counter += 1 model_weights.append(child.weight) print(f"Total convolutional layers: {counter}") # + [markdown] id="Xac-p1gUR3UI" colab_type="text" # For the sake of simplicity, we will only visualize the filters of the first convolutional layer. # + id="1kjC--LSQvd9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="e691eae3-20f9-44a3-dc7f-954b4b204e57" # Visualize the first conv layer filters plt.figure(figsize=(10, 10)) for i, filter in enumerate(model_weights[0]): plt.subplot(6, 6, i+1) # (6, 6) because in conv1 we have 5x5 filters and total of 10 plt.imshow(filter[0, :, :].detach(), cmap='gray') plt.axis('off') plt.savefig('filter.png') plt.show() # + [markdown] id="qtnkU2ZGgvv3" colab_type="text" # ## 7. Testing # # Let's define the test and visualize functions now. 
# + id="lJVO2vMPb-H6" colab_type="code" colab={} def test(): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for inputs, targets in test_loader: outputs = model(inputs) test_loss += criterion(outputs, targets).item() * inputs.size(0) # Sum up batch loss preds = outputs.max(1, keepdim=True)[1] # Get the index of the max probability correct += preds.eq(targets.view_as(preds)).sum().item() test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) def visualize(): model.eval() testiter = iter(test_loader) inputs, targets = testiter.next() with torch.no_grad(): outputs = model(inputs) preds = outputs.max(1, keepdim=True)[1] print(preds.shape) img = torchvision.utils.make_grid(inputs) imshow(img, 0.1307, 0.3081) print('GT\t:', ' '.join('%d' % targets[j].item() for j in range(len(targets)))) print('Pred\t:', ' '.join('%d' % preds[j].item() for j in range(len(targets)))) # + [markdown] id="8VsqMHdNdRuF" colab_type="text" # Let's do the test and visualize the results on the model that we have just trained for 1 epoch. # + id="zF_3vksodV2H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 371} outputId="3cf1b0d2-7dae-42a8-9df5-426486be3121" test() visualize() # + [markdown] id="OQlreXkFchtH" colab_type="text" # It seems pretty good! Let us try more epochs. # + id="Eze7Vjqf7r3W" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="5607b0fc-6b77-4194-d854-984a0e22e48b" for epoch in range(1, 4): train(epoch) test() visualize() # + [markdown] id="xQKsEStLg_Vm" colab_type="text" # ## 8. Checkpointing # # Normally, we will train our neural network for not just one epoch, but many. Neural network training typically takes a long time, sometimes days, weeks, or even months. Our training code should therefore be robust to interruptions. 
That is, we should write our training code so that we can save and re-load weights. # # It is good to **checkpoint** training progress by saving the neural network parameter values and training curve data to disk, once every few epochs. The frequency of checkpointing depends on many factors, but I recommend checkpointing every 10-30 minutes for large projects, and every few minutes for smaller ones. # # Another advantage of checkpointing is that we now have one extra hyper-parameter we can tune for free: the **epoch** number! You may not wish to choose neural network parameter values at the end of training, and might opt to choose the parameter values at a different epoch of training. # # One reason you might opt to do so is to prevent **over-fitting**. If your training loss is decreasing (as training progresses), but your validation loss stays the same, then your network is beginning to learn idiosyncrasies of the training set that do not generalize. Most often, we choose the earliest epoch with the lowest validation loss or error. # # Saving and loading a model in PyTorch is very easy and straight forward. # It’s as simple as this: # + id="ntuT8Le8tggf" colab_type="code" colab={} checkpoint = {'model_state_dict': model.state_dict(), 'optimizer_state_dict' : optimizer.state_dict()} torch.save(checkpoint, 'checkpoint.pth') # + [markdown] id="KBsrgkdKMOie" colab_type="text" # Loading is as simple as saving # # 1. Reconstruct the model from the structure saved in the checkpoint. # 2. Load the state dict to the model. # 3. Use model.eval() to freeze the parameters and enter evaluation mode if you are loading the model for inference. If you wish to resuming training, call model.train() to ensure these layers are in training mode. 
# + id="DRL9BWQaKIr0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="2d8a9eea-1363-4966-f1c3-f840ec971fbe" model = SimpleCNN() optimizer = torch.optim.SGD(model.parameters(), 0.01, momentum=0.9, weight_decay=5e-4) checkpoint = torch.load('checkpoint.pth') model.load_state_dict(checkpoint['model_state_dict']) optimizer.load_state_dict(checkpoint['optimizer_state_dict']) model.eval() test() # + [markdown] id="kXdubeem7r3a" colab_type="text" # ## 9. What's Next? # # We have sketched a simple framework for training CNNs. There are a few more functions yet to be completed. # # - Adjust the learning rate and batch size and observe the training curve # - Plot a chart for the accuracy # - Average the loss during each epoch when plotting a chart # - Consider data augmentations # # Please check the official [Tutorials](https://pytorch.org/tutorials) and [Examples](https://github.com/pytorch/examples) on for more details.
Labs/tutorial_01_mnist_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + slideshow={"slide_type": "skip"}
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# + [markdown] slideshow={"slide_type": "slide"}
# # Feature Engineering
# ## How to select features that make a difference

# + [markdown] slideshow={"slide_type": "slide"}
# "At the end of the day, some machine learning projects succeed and some fail. What makes the difference? Easily, the most important factor is the features used. If you have many independent features that each correlate well with the class, learning is easy. On the other hand, if the class is a very complex function of the features, you may not be able to learn it. Often, the raw data is not in a form that is amenable to learning, but you can construct features from it that are. This is typically where most of the effort in a machine learning project goes. It is often also one of the most interesting parts, where intuition and creativity are as important as the technical stuff."
#
# -- <NAME>, *A Few Useful Things to Know about Machine Learning*

# + [markdown] slideshow={"slide_type": "slide"}
# ## What is feature engineering?
#
# - Feature Creation. Create better features by combining existing features (including taking the square, cube, etc. of existing features).
# - Feature Selection. Pick features that make a difference and drop the rest.

# + [markdown] slideshow={"slide_type": "slide"}
# ## Why select features?
#
# - Noise reduction
# - The *curse of dimensionality*
#   - too many features
#   - too few training examples

# + [markdown] slideshow={"slide_type": "slide"}
# ## The curse of dimensionality
#
# "High dimensional datasets are at the risk of being very sparse: most training instances are likely to be far away from each other. Of course, this also means that a new instance will likely be far away from any training instance, making predictions much less reliable than in lower dimensions, since they will be based on much larger extrapolations. In short, the more dimensions the training set has [i.e., the more attributes], the greater the risk of overfitting it."
#
# > -- <NAME>, *Hands-On Machine Learning with Scikit-Learn & TensorFlow*, p.207

# + slideshow={"slide_type": "skip"}
# Range of distances between 2 randomly chosen points in a unit hypercube.
def dist(n_points, n_dims):
    """Mean and spread of distances between paired random points.

    Draws two independent sets of ``n_points`` uniform random points inside
    the ``n_dims``-dimensional unit hypercube and measures the Euclidean
    distance between each pair.

    Returns a two-element list:
    ``[mean distance, max distance - min distance]``.
    """
    # Generate a random set of n_points to fill the hypercube
    rows1 = np.random.rand(n_points, n_dims)
    # Generate another set of random n_points to fill the hypercube
    rows2 = np.random.rand(n_points, n_dims)
    # One vectorised norm per row (axis=1) instead of a Python-level loop
    # over points -- identical values, far faster for large n_points.
    distances = np.linalg.norm(rows1 - rows2, axis=1)
    return [np.average(distances), np.max(distances) - np.min(distances)]


# + slideshow={"slide_type": "skip"}
# Try large numbers with caution
dims = [10, 20, 30, 40, 50, 100, 200, 300, 400, 500]
# %time d = [dist(10000, x) for x in dims]
d

# + slideshow={"slide_type": "skip"}
# Mean distance and (max - min) spread for each dimensionality.
# (Renamed from x_vals/y_vals: both series end up on the plot's y-axis,
# so the old names were misleading.)
avg_dists = [d[i][0] for i in range(len(d))]
range_dists = [d[i][1] for i in range(len(d))]

# + slideshow={"slide_type": "skip"}
avg_dists

# + slideshow={"slide_type": "skip"}
range_dists

# + slideshow={"slide_type": "slide"}
# Average distance of any two randomly chosen points
# as a function of the number of features
fig = plt.figure(1, figsize=(12, 8))

# Create an axes instance
ax = fig.add_subplot(111)

plt.title('Average Distance Between Any 2 Randomly Chosen Points in an N-Dimensional Hypercube')
plt.xlabel('Number of Features')
plt.ylabel('Average Distance')

# Create the plot
plt.plot(dims, avg_dists, marker='o');

# + [markdown] slideshow={"slide_type": "slide"}
# ## The curse of dimensionality -- another angle
#
# "Generalizing correctly becomes exponentially harder as the dimensionality (number of features) of the examples grows, because a fixed-size training set covers a dwindling fraction of the input space. Even with a moderate dimension of 100 and a huge training set of a trillion examples, the latter covers only a fraction of about $10^{-18}$ of the input space. This is what makes machine learning both necessary and hard."
#
# > -- <NAME>, *A Few Useful Things to Know about Machine Learning*
#
# To read more, go to [How is Learning Even Possible?](https://nbviewer.jupyter.org/github/jsub10/In-Progress/blob/master/How-is-Learning-Possible%3F.ipynb)

# + [markdown] slideshow={"slide_type": "slide"}
# ## ... and one more...
#
# "Consider an orange: a tasty ball of pulp surrounded by a thin shell of skin. Let's say 90% of the radius of an orange is occupied by pulp, and the remaining 10% by skin. That means 73% of the volume of the orange is pulp ($0.9^{3}$). Now consider a hyperorange: still with 90% of the radius occupied by pulp, but in a hundred dimensions, say. The pulp has shrunk to only about three thousandths of a percent of the hyperorange's volume ($0.9^{100}$). The hyperorange is all skin, and you'll never be done peeling it!"
#
# > -- <NAME>, *The Master Algorithm* page 187.

# + [markdown] slideshow={"slide_type": "slide"}
# ![The Curse 1](../Images/The-Curse-of-Dimensionality-1.png)

# + [markdown] slideshow={"slide_type": "slide"}
# ![The Curse 2](../Images/The-Curse-of-Dimensionality-2.png)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Ways to Select Features (Reduce Dimensionality)
#
# - Projection (e.g., Principal Component Analysis)
# - Manifold Learning
# - Sequential Backward Selection
# - Random Forest Ranking

# + [markdown] slideshow={"slide_type": "slide"}
# ## Principal Component Analysis (PCA)
#
# The idea: Find the axis that captures the highest amount of variance. Then find the axis that captures the second highest amount of variance, and so on. These axes are called *principal components*.
#
# There are as many principal components as there are features in the dataset.

# + [markdown] slideshow={"slide_type": "slide"}
# <img src='../Images/PCA-2d.jpeg' width='600' />
#
# (Image from <NAME>, *Hands-On Machine Learning with Scikit-Learn & TensorFlow*, p.212)

# + [markdown] slideshow={"slide_type": "slide"}
# In a space of $n$ dimensions, PCA will find $n$ principal components. This new set of principal components is another set of axes for the dataset. They are axes chosen to maximize the variance of data when it is projected onto that principal component axis.
#
# Variance is technically the statistical term -- but for simplicity, we can think of it simply as the difference betweeen the maximum and minimum value of the data points projected onto a principal component axis.
#
# Let's see how it's visualized in http://setosa.io/ev/principal-component-analysis/

# + [markdown] slideshow={"slide_type": "slide"}
# Caution: It's not always the case that reducing dimensions simplifies a problem.
#
# <img src='../Images/manifold-hypothesis.jpeg' width='600' />
#
# (Image from <NAME>, *Hands-On Machine Learning with Scikit-Learn & TensorFlow*, p.211)
#
# (For a simple introduction to the *manifold hypothesis*, see https://nbviewer.jupyter.org/github/jsub10/In-Progress/blob/master/How-is-Learning-Possible%3F.ipynb)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Sequential Backward Selection
#
# An algorithm for sequentially removing an attribute that makes the least difference to a model's prediction performance.
#
# <img src='../Images/sequential-backward-selection.png' width='400' />
#
# (You can do Sequential *Forward* Selection too)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Random Forest Ranking
#
# Random forests take a number of models that are the same (decision tree classifiers) but train each decision tree with a random set of attributes from the training data.
#
# <img src='../Images/random-forests.png' width='400' />

# + [markdown] slideshow={"slide_type": "slide"}
# Let's see how PCA and random forests look in Orange...

# + slideshow={"slide_type": "skip"}
Notebooks/Feature-Engineering.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 8</font>
#
# ## Download: http://github.com/dsacademybr

# Python language version used in this notebook
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())

# # Seaborn

# ### Installing Seaborn:
# If Seaborn is not installed, open a command prompt or terminal and run: pip install seaborn

import numpy as np
import pandas as pd
import random
import warnings
warnings.filterwarnings("ignore")
import matplotlib as mat
import matplotlib.pyplot as plt
# %matplotlib inline

np.__version__

pd.__version__

mat.__version__

import seaborn as sea
sea.__version__

# Load one of the datasets that ships with Seaborn
dados = sea.load_dataset("tips")
dados.head()

# ### Linear regression estimates the conditional (expected value) of a variable y given the values of some other variables x.

# jointplot() draws a two-variable plot with bivariate and univariate graphs.
# Explicit x=/y=/data= keywords: positional data arguments are deprecated in
# seaborn >= 0.11 and removed in 0.12; the keyword form works on all versions.
sea.jointplot(x="total_bill", y="tip", data=dados, kind='reg');

# lmplot() plots data together with regression model fits.
sea.lmplot(x="total_bill", y="tip", data=dados, col="smoker");

# Build a DataFrame with Pandas
df = pd.DataFrame()

# Fill the DataFrame with random values
df['a'] = random.sample(range(1, 100), 25)
df['b'] = random.sample(range(1, 100), 25)
df.head()

# Scatter Plot
sea.lmplot(x='a', y='b', data=df, fit_reg=True);

# Density Plot
sea.kdeplot(x=df.b);

sea.kdeplot(x=df.b, y=df.a);

# NOTE: distplot() is deprecated since seaborn 0.11 (use displot/histplot);
# kept here to preserve the original output.
sea.distplot(df.a);

# Histogram
plt.hist(df.a, alpha = .3)
sea.rugplot(x=df.a);

# Box Plot
sea.boxplot([df.b, df.a]);

# Violin Plot
sea.violinplot([df.a, df.b]);

# Heatmap (fmt="d" is valid here because random.sample yields integers)
sea.heatmap([df.b, df.a], annot = True, fmt = "d");

# Clustermap
sea.clustermap(df);

# ### Themes
# Global settings to control style, font size, colors, etc.

sea.set(context="notebook", style="darkgrid", palette="dark")

# Seaborn ships with a variety of color palettes
sea.palplot(sea.color_palette())

sea.palplot(sea.color_palette("husl", 8))

sea.palplot(sea.color_palette("hls", 8))

sea.palplot(sea.color_palette("coolwarm", 7))

sea.palplot(sea.cubehelix_palette(8))

# +
# The tsplot() function has been discontinued.
# tsplot() created plots from time series.
# gammas = sea.load_dataset("gammas")
# sea.tsplot(gammas, "timepoint", "subject", "ROI", "BOLD signal", color = "muted");
# -

# ### Other Plots

# +
# Histograms over subsets of the data
sea.set(style = "darkgrid")
dados = sea.load_dataset("tips")
g = sea.FacetGrid(dados, row = "sex", col = "time", margin_titles = True)
bins = np.linspace(0, 60, 13)
g.map(plt.hist, "total_bill", color = "steelblue", bins = bins, lw = 0);

# +
# Several plots at once
sea.set(style = "white", palette = "muted")
f, axes = plt.subplots(2, 2, figsize = (7, 7), sharex = True)
sea.despine(left = True)
rs = np.random.RandomState(10)
b, g, r, p = sea.color_palette("muted", 4)
d = rs.normal(size = 100)
sea.distplot(d, kde = False, color = b, ax = axes[0, 0])
sea.distplot(d, hist = False, rug = True, color = r, ax = axes[0, 1])
sea.distplot(d, hist = False, color = g, kde_kws = {"shade": True}, ax = axes[1, 0])
sea.distplot(d, color = p, ax = axes[1, 1])
plt.setp(axes, yticks = [])
plt.tight_layout()

# +
# Plot with marginal distributions
# NOTE: stat_func was removed in seaborn 0.11; kept for older versions.
from scipy.stats import kendalltau
sea.set(style="ticks")
rs = np.random.RandomState(11)
x = rs.gamma(2, size = 1000)
y = -.5 * x + rs.normal(size = 1000)
sea.jointplot(x=x, y=y, kind="hex", stat_func=kendalltau, color="#4CB391");

# +
# Logistic Regression
sea.set(style = "darkgrid")
df = sea.load_dataset("titanic")
pal = dict(male = "#6495ED", female = "#F08080")
g = sea.lmplot(x="age", y="survived", col="sex", hue="sex", data=df,
               palette=pal, y_jitter=.02, logistic=True)
g.set(xlim=(0, 80), ylim = (-.05, 1.05));
# -

# Linear Regression with Marginal Distributions
# NOTE: size= was renamed to height= in newer seaborn; kept for compatibility
# with the version this notebook was written against.
sea.set(style = "darkgrid")
tips = sea.load_dataset("tips")
color = sea.color_palette()[2]
g = sea.jointplot(x="total_bill", y="tip", data=tips, kind="reg",
                  xlim=(0, 60), ylim=(0, 12), color=color, size=7);

# Pair Plots
sea.set(style = "darkgrid")
df = sea.load_dataset("iris")
sea.pairplot(df, hue = "species", size = 2.5);

# Check out the Data Scientist Career Track, a complete program, 100% online and 100% in Portuguese, with 400 hours, more than 1,200 video lessons and 26 projects, which will help you become one of the most sought-after professionals in the data analysis market. Click the link below, enroll, start today and increase your employability:
#
# https://www.datascienceacademy.com.br/pages/formacao-cientista-de-dados

# # End

# ### Thank you - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
Data Science Academy/Cap08/Notebooks/DSA-Python-Cap08-08-Seaborn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + colab={} colab_type="code" id="6DBmteGVKO34"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pylab import rcParams
# -

# %matplotlib inline
rcParams['figure.figsize'] = 14,6

# + colab={} colab_type="code" id="UatdMrRlGjHx"
# Toy time series used for the smoothing demos below.
series = [3,10,12,13,12,10,12]


# + colab={} colab_type="code" id="1u4mLzOSVaAv"
def exponential_smoothing(series, alpha):
    """Simple (single) exponential smoothing of a time series.

    Parameters
    ----------
    series : sequence of numbers
        The observed time series.
    alpha : float in [0, 1]
        Smoothing factor: values near 1 track the raw series closely,
        values near 0 smooth aggressively.

    Returns
    -------
    list
        The smoothed series, same length as ``series`` (empty list for
        empty input).  The result is also printed so each demo cell
        below shows the values.
    """
    if not series:
        # Guard: the original indexed series[0] unconditionally, which
        # raised IndexError on an empty input.
        return []
    result = [series[0]]  # first value is same as series
    for n in range(1, len(series)):
        # s_t = alpha * x_t + (1 - alpha) * s_{t-1}
        result.append(alpha * series[n] + (1 - alpha) * result[n-1])
    print(result)
    return result


# + colab={} colab_type="code" id="q36Df31BWi-0"
# High alpha (0.9): the smoothed series follows the raw data closely.
exponential_smoothing(series, 0.9)

# + colab={} colab_type="code" id="aKHcqvI6W3ad"
# Low alpha (0.1): heavy smoothing, slow to react to level changes.
exponential_smoothing(series, 0.1)

# + colab={} colab_type="code" id="T_oE_MjxVtIE"
series_0_point_1 = exponential_smoothing(series, 0.1)
series_0_point_9 = exponential_smoothing(series, 0.9)

# + colab={} colab_type="code" id="aziQhutbVtSU"
plt.plot(series, color='black', linewidth=2, label='Original Series')
plt.plot(series_0_point_1, color='red', linewidth=2, label='0.1 Alpha Series')
plt.plot(series_0_point_9, color='blue', linewidth=2, label='0.9 Alpha Series')
plt.legend(loc='lower right')
Holt_Winters_Forecasting_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# In workspaces like this one, you will be able to practice visualization techniques you've seen in the course materials. In this particular workspace, you'll practice creating single-variable plots for categorical data.

# +
# prerequisite package imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

# %matplotlib inline

# solution script imports
from solutions_univ import bar_chart_solution_1, bar_chart_solution_2
# -

# In this workspace, you'll be working with this dataset comprised of attributes of creatures in the video game series Pokémon. The data was assembled from the database of information found in [this GitHub repository](https://github.com/veekun/pokedex/tree/master/pokedex/data/csv).

pokemon = pd.read_csv('./data/pokemon.csv')
pokemon.head()

# **Task 1**: There have been quite a few Pokémon introduced over the series' history. How many were introduced in each generation? Create a _bar chart_ of these frequencies using the 'generation_id' column.

# YOUR CODE HERE
sb.countplot(data=pokemon, x='generation_id', color=(0,1,0));

# Once you've created your chart, run the cell below to check the output from our solution. Your visualization does not need to be exactly the same as ours, but it should be able to come up with the same conclusions.

bar_chart_solution_1()

# **Task 2**: Each Pokémon species has one or two 'types' that play a part in its offensive and defensive capabilities. How frequent is each type? The code below creates a new dataframe that puts all of the type counts in a single column.

pkmn_types = pokemon.melt(id_vars = ['id','species'],
                          value_vars = ['type_1', 'type_2'],
                          var_name = 'type_level', value_name = 'type').dropna()
pkmn_types.head()

# Your task is to use this dataframe to create a _relative frequency_ plot of the proportion of Pokémon with each type, _sorted_ from most frequent to least. **Hint**: The sum across bars should be greater than 100%, since many Pokémon have two types. Keep this in mind when considering a denominator to compute relative frequencies.

# +
# YOUR CODE HERE
types_count = pkmn_types['type'].value_counts()
n_pokemon = pkmn_types['species'].unique().shape[0]

# value_counts() sorts descending, so the largest proportion is at position 0.
# Use .iloc[0] for explicit positional access: plain [0] is a *label* lookup
# on this string-indexed Series, and positional fallback via [] is deprecated
# in pandas (removed in 2.x).
max_prop = types_count.iloc[0] / n_pokemon

# Tick locations/labels expressed as proportions of the Pokémon count.
x_range = np.arange(0, max_prop, 0.02)
x_tick_names = ['{:0.2f}'.format(v) for v in x_range]

sb.countplot(data=pkmn_types, y='type', color='blue', order=types_count.index);
plt.xticks(x_range * n_pokemon, x_tick_names)
plt.xlabel('proportion');
# -

bar_chart_solution_2()

# If you're interested in seeing the code used to generate the solution plots, you can find it in the `solutions_univ.py` script in the workspace folder. You can navigate there by clicking on the Jupyter icon in the upper left corner of the workspace. Spoiler warning: the script contains solutions for all of the workspace exercises in this lesson, so take care not to spoil your practice!
Matplotlib/Bar_Chart_Practice.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Preprocessing for Social Anxiety Detection : Participant 11
#
# ***
# ## Participant Details
#
# __Gender:__ female <br/>
# __Ethnicity:__ asian <br/>
# __Age:__ 19 <br/>
# __Self-reported Liebowitz social anxiety score:__ 61 <br/>
# __Anxiety category:__ 1 <br/>
# ***
#
# ## Contents
#
# __1.Introduction <br/>__
# 1.1. Nature of the dataset <br/>
# 1.2. Description of the ML experiments <br/>
# __2.Import packages <br/>__
# __3.Import data <br/>__
# 3.1. Import HR data and resample <br/>
# 3.2. Import ST data <br/>
# 3.3. Import EDA data <br/>
# __4.Combine data <br/>__
# __5.Data labelling <br/>__
# 5.1. Labelling for experiment (1)<br/>
# 5.2. Labelling for experiment (2) <br/>
# 5.3. Labelling for experiment (3) <br/>
# __6.Data visualisation and export__
#
# ***
#
# ## 1. Introduction
#
# This notebook preprocesses the physiological data needed for the supervised machine learning (ML) experiments that investigate whether subclinical social anxiety in young adults can be detected using physiological data obtained from wearable sensors.
#
# ### 1.1. Nature of the dataset
#
# The dataset consists of Heart Rate (HR) data, Skin Temperature (ST) data and Electrodermal Activity (EDA) data, collected with an E4 Empatica wearable device. Using the default sampling rates of the E4, EDA was measured in microSiemens (μS) at 4 Hz, HR in Beats Per Minute (BPM) at 1 Hz, and ST in degrees Celsius (°C) at 4 Hz.
#
# ### 1.2. Description of the ML experiments
#
# __Experiment (1)__ classifies baseline vs. socially anxious states: data is labelled '0' during the baseline period and '1' during the anxiety period (anticipation plus reactive anxiety).
#
# __Experiment (2)__ differentiates baseline ('0'), anticipation anxiety ('1') and reactive anxiety ('2') states.
#
# __Experiment (3)__ classifies social anxiety severity across individuals: '0' for anxiety category 1 (LSAS-SR: 50-64), '1' for anxiety category 2 (LSAS-SR: 65-80). This participant is in category 1.
#
# ***
# ## 2.Import packages

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# ## 3.Import and combine data
#
# ### 3.1. Import HR data and upsample
#
# HR is imported and upsampled to 4 Hz to match ST and EDA, then cleaned with a moving average filter to remove noise and reduce the risk of overfitting.

# +
hr = pd.read_csv("HR.csv")
hr.index = pd.date_range('2020-03-04', periods=len(hr), freq='1S')

# Resample HR from 1 Hz to 4 Hz (forward-fill between samples)
hr_resample = hr.resample('0.25S').ffill()

# Apply moving average filter
rolling = hr_resample.rolling(window=9)
hr_filtered = rolling.mean()

# Plot the comparison
fig, (ax1, ax2) = plt.subplots(2, 1)

hr_resample[2200:2300].plot(ax=ax1, legend=False, color='indigo')
ax1.yaxis.set_label_text("HR (BPM)")
ax1.xaxis.set_label_text('Time(min)')
ax1.set_title("Resampled HR")
ax1.grid(which='both', alpha=2)

hr_filtered[2200:2300].plot(ax=ax2, legend=False, color='indigo')
ax2.yaxis.set_label_text("HR (BPM)")
ax2.xaxis.set_label_text('Time(min)')
ax2.set_title("Resampled HR After Filtering")
ax2.grid(which='both', alpha=2)

fig.set_size_inches(15, 5)
fig.subplots_adjust(hspace=0.7)
plt.show()
# -

# ### 3.2. Import ST data
#
# The ST data is imported and then cleaned using a moving average filter in order to remove noise to reduce the risk of overfitting.

# +
st = pd.read_csv("ST.csv")
st.index = pd.date_range('2020-03-04', periods=len(st), freq='0.25S')

# Apply moving average filter
rolling = st.rolling(window=15)
st_filtered = rolling.mean()

# Plot the comparison
fig, (ax1, ax2) = plt.subplots(2, 1)

st[2200:2300].plot(ax=ax1, legend=False, color='indigo')
ax1.yaxis.set_label_text("ST (°C)")
ax1.xaxis.set_label_text('Time(min)')
ax1.set_title("Raw ST")
ax1.grid(which='both', alpha=2)

st_filtered[2200:2300].plot(ax=ax2, legend=False, color='indigo')
ax2.yaxis.set_label_text("ST (°C)")
ax2.xaxis.set_label_text('Time(min)')
ax2.set_title("ST After Filtering")
ax2.grid(which='both', alpha=2)

fig.set_size_inches(15, 5)
fig.subplots_adjust(hspace=0.7)
plt.show()
# -

# ### 3.3. Import EDA data
#
# The EDA data is imported and then cleaned using a moving average filter in order to remove noise to reduce the risk of overfitting. The EDA data is also range corrected in order to remove inter-individual differences, more details about the range correction method can be found in the paper.

# +
eda = pd.read_csv("EDA.csv")
eda.index = pd.date_range('2020-03-04', periods=len(eda), freq='0.25S')

# Apply moving average filter
rolling = eda.rolling(window=15)
eda_filtered = rolling.mean()

# Range-corrected EDA: (value - min) / (max - min), using this
# participant's observed min/max over the experiment window.
eda_corrected = (eda_filtered - 0.0961) / (6.033 - 0.0961)

# Plot the comparison
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)

eda[2200:2400].plot(ax=ax1, legend=False, color='indigo')
ax1.yaxis.set_label_text("EDA (μS)")
ax1.xaxis.set_label_text('Time(min)')
ax1.set_title("Raw EDA")
ax1.grid(which='both', alpha=2)

eda_filtered[2200:2400].plot(ax=ax2, legend=False, color='indigo')
ax2.yaxis.set_label_text("EDA (μS)")
ax2.xaxis.set_label_text('Time(min)')
ax2.set_title("EDA After Filtering")
ax2.grid(which='both', alpha=2)

eda_corrected[2200:2400].plot(ax=ax3, legend=False, color='indigo')
ax3.yaxis.set_label_text("EDA (μS)")
ax3.xaxis.set_label_text('Time(min)')
ax3.set_title("Range corrected EDA")
ax3.grid(which='both', alpha=2)

fig.set_size_inches(15, 6)
fig.subplots_adjust(hspace=1.3)
plt.show()

#print(eda_filtered[480:5947].max())
#print(eda[480:5947].min())
# Filtered EDA was used to find a max here due to an outlier.
# Rebind so the range-corrected series is what gets combined below.
eda_filtered = eda_corrected
# -

# ## 4.Combine data

df = pd.concat([hr_filtered, st_filtered, eda_filtered], ignore_index=True, axis=1)
df = df.T.reset_index(drop=True).T
display(df.describe())

# ## 5.Data labelling
#
# The data was labelled for three different experiments. The anxiety duration in data cells etc. was calculated using a spreadsheet and the timestamps recorded during the experiments.

# Insert a dedicated column (position/name 3) for the labels
df.insert(3, 3, 0)
display(df.describe())

# ### 5.1. Labelling for experiment (1)
#
# For experiment (1) the data was labelled '1' (social anxiety class) from task announcement to task end. The first 2 minutes of the baseline period were discarded to account for acclimatisation, as was the data after the task.

# +
# .copy() so each experiment labels its own frame rather than mutating the
# shared df (the original aliased df three times, which only worked because
# later writes happened to overwrite the same row range).
experiment1_df = df.copy()

# Label the anxiety period (anticipation + reactive, labelled '1').
# .iloc avoids chained assignment (df[3][a:b] = v), which raises
# SettingWithCopyWarning and silently stops writing under pandas
# copy-on-write.
experiment1_df.iloc[2268:5947, 3] = 1
display(experiment1_df[3].value_counts())

# Remove the data after the task had ended
experiment1_df = experiment1_df.drop(experiment1_df.index[5947:])
# Remove the first 2 mins of the baseline period (acclimatisation)
experiment1_df = experiment1_df.drop(experiment1_df.index[:480])
display(experiment1_df[3].value_counts())

experiment1_df.to_csv("experiment_1.csv")
# -

# ### 5.2. Labelling for experiment (2)
#
# For experiment (2) the data was labelled '1' during anticipation anxiety (task announcement to task start) and '2' during reactive anxiety (task start to task end), with the same trimming as experiment (1).

# +
experiment2_df = df.copy()

# Anticipation anxiety duration (labelled '1')
experiment2_df.iloc[2268:4867, 3] = 1
# Reactive anxiety duration (labelled '2')
experiment2_df.iloc[4867:5947, 3] = 2
display(experiment2_df[3].value_counts())

# Remove the data after the task had ended
experiment2_df = experiment2_df.drop(experiment2_df.index[5947:])
# Remove the first 2 mins of the baseline period (acclimatisation)
experiment2_df = experiment2_df.drop(experiment2_df.index[:480])
display(experiment2_df[3].value_counts())

experiment2_df.to_csv("experiment_2.csv")
# -

# ### 5.3. Labelling for experiment (3)
#
# For experiment (3) only the anxious period (task announcement to task end) is extracted. This individual is in anxiety category 1 based on their LSAS-SR score, so their anxious data is labelled '0'. The data is then shuffled and a fixed number of samples is taken.

# +
experiment3_df = df.copy()

# Task announcement to task end, labelled '0' (anxiety category 1)
experiment3_df.iloc[2268:5947, 3] = 0
display(experiment3_df[3].value_counts())

# Remove the data after the task had ended
experiment3_df = experiment3_df.drop(experiment3_df.index[5947:])
# Remove the baseline period
experiment3_df = experiment3_df.drop(experiment3_df.index[:2268])
display(experiment3_df[3].value_counts())

# Shuffle and extract a set number of samples.
# NOTE(review): np.random.permutation is unseeded, so the sampled subset
# differs between runs -- seed here if reproducibility is required.
idx = np.random.permutation(experiment3_df.index)
shuffled = experiment3_df.reindex(idx, axis=0)
shuffled = shuffled.reset_index(drop=True)
shuffled = shuffled.drop(shuffled.index[2500:])

shuffled.to_csv("experiment_3.csv")
# -

# ## 6.Data visualisation
#
# The physiological data and experiment (1) and (2) labels were plotted. Pearson correlation matrices were also formulated for the dataset used in experiment (1) and (2).

# +
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1)
ax1.set_title('Combined Physiological Data and Experiment Labels (1 & 2)', fontsize=15)

experiment1_df[0].plot(ax=ax1, legend=False, color='indigo')
ax1.yaxis.set_label_text("HR (BPM)")
ax1.xaxis.set_label_text('Time(min)')
ax1.grid(which='both', alpha=2)

experiment1_df[1].plot(ax=ax2, legend=False, color='indigo')
ax2.yaxis.set_label_text("ST (°C)")
ax2.xaxis.set_label_text('Time(min)')
ax2.grid(which='both', alpha=2)

experiment1_df[2].plot(ax=ax3, legend=False, color='indigo')
ax3.yaxis.set_label_text("Range Corrected EDA (μS)")
ax3.xaxis.set_label_text('Time(min)')
ax3.grid(which='both', alpha=2)

experiment1_df[3].plot(ax=ax4, legend=False, color='indigo')
ax4.yaxis.set_label_text("Experiment (1) labels")
ax4.xaxis.set_label_text('Time(min)')
ax4.grid(which='both', alpha=2)

experiment2_df[3].plot(ax=ax5, legend=False, color='indigo')
ax5.yaxis.set_label_text("Experiment (2) labels")
ax5.xaxis.set_label_text('Time(min)')
ax5.grid(which='both', alpha=2)

fig.set_size_inches(15, 14)
fig.subplots_adjust(hspace=0.4)
plt.show()
# -

# Correlation matrix with Experiment 1 (binary labels)
labeldata = ['HR', 'ST', 'EDA', 'Labels']
sns.heatmap(experiment1_df.corr(method='pearson'), vmin=0, vmax=1, annot=True,
            cmap="YlGnBu", yticklabels=labeldata, xticklabels=labeldata)
fig = plt.gcf()

# Correlation matrix with Experiment 2 (multi-class labels)
sns.heatmap(experiment2_df.corr(method='pearson'), vmin=0, vmax=1, annot=True,
            cmap="YlGnBu", yticklabels=labeldata, xticklabels=labeldata)
fig = plt.gcf()
data_preprocessing/11/Data_Preprocessing_Notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dataset Preparation Tutorial # # Welcome to the dataset preparation tutorial! In this notebook, we will download the toy data set for the tutorial and prepare the necessary tables used for later analysis. Here are the steps we will review: # # 1. Verify prerequisites # 2. Create a new project workspace # 3. Review sample dataset # 4. Build the proxy table # 5. Run regional annotation ETL # # **NOTE**: All of the configuration files for this tutorial have been provided in the container, but you will have to download the input data and add it to the container's volume mount as shown in the steps below. # # **NOTE**: The current working directory is '~/vmount/notebooks'. All file and directory paths specified in the configuration files are relative to the current working directory. # ## 1. Verify prerequisites # Here are the software prerequisites for executing tasks with luna packages. These prerequiristes have already been baked into this docker container. Too view the setup, please see the corresponding dockerfile. # + tags=[] # !python3 --version # !java -version # %env JAVA_HOME=/usr # !echo PYSPARK_PYTHON: $PYSPARK_PYTHON # !echo PYSPARK_DRIVER_PYTHON: $PYSPARK_DRIVER_PYTHON # !echo JAVA_HOME: $JAVA_HOME # !echo LUNA_HOME: $LUNA_HOME # !which jupyter # !pip list | grep luna- import luna luna.__path__ import luna.pathology luna.pathology.__path__ # - # ## 2. Create a new project workspace # # # Next, we create a luna home space and place the configuration files there. Using a manifest file, we will create a project workspace for your configurations, data, models, and outputs to go for this tutorial. 
# + language="bash" # mkdir -p ~/luna # cp -R ~/vmount/conf ~/luna # cat ~/luna/conf/manifest.yaml # python3 -m luna.project.generate --manifest_file ~/luna/conf/manifest.yaml # tree ~/vmount/PRO_12-123 # - # You should now see a new directory called *PRO_12-123* with the manifest file in it. This will be your project workspace! # ## 3. Review sample dataset # # The data that you will be using for this tutorial is a set of 5 whole slide images of ovarian cancer H&E slides, available in the svs file format. Whole slide imaging refers to the scanning of conventional glass slides for research purposes; in this case, these are slides that oncologists have used to inspecting cancer samples! # # While bringing up the DSA container, we already ran a script to get the data, and set up DSA. The `vmount/provision.py` script ran these steps: # # - Set up admin user and default assetstore # # - Download sample data from [public kitware site](https://data.kitware.com/#user/61b9f3dc4acac99f42ca7678/folder/61b9f4564acac99f42ca7692). to `~/vmount/PRO_12-123/data/toy_data_set/` # # - Create a collection and add slides/annotations to your local DSA # # + language="bash" # tree ~/vmount/PRO_12-123/data/toy_data_set # - # If you want to import your own data, you can do so from your local filesystem as well as an object store. For more details, refer to the [girder user documentation](https://girder.readthedocs.io/en/latest/user-guide.html#assetstores) # # To import images from your local filesystem, # # - Login to DSA with admin/password # - Add images to your local computer at `vmount/assetstore` # - Navigate to **Admin Console** -> **Assetstores** # - From the default assetstore, click on **Import data** # - Specify the path to the images you wish to import. e.g. `/assetstore/yourimage` and click import # # As the `/assetstore` mount is available to DSA, this import should be much faster than uploading the image through the **Upload files** in the UI. # # ## 4. 
Build the proxy table # # Now, we will run the Whole Slide Image (WSI) ETL to build a meta-data catalog of the slides in a proxy table. # # For reference, ETL stands for extract-transform-load; it is the method that often involves cleaning data, transforming data types, and loading data into different systems. # !cat ~/luna/conf/wsi_config.yaml # + language="bash" # python3 -m luna.pathology.proxy_table.generate \ # -d ~/luna/conf/wsi_config.yaml \ # -a ~/luna/conf/app_config.yaml \ # -p delta # # - # This step may take a while. At the end, your proxy table should be generated! # # Before we view the table, we must first update it to associate patient ID's with the slides. This is necessary for correctly training and validating the machine learning model in the coming notebooks. Once the slides are divided into "tiles" in the next notebook, the tiles are split between the training and validation sets for the ML model. If the tiles do not have patient ID's associated with them, then it is possible for tiles from one individual to appear in both the training and validation of the model; this would cause researchers to have an exaggerated interpretation of the model's accuracy, since we would essentially be validating the model on information that is too near to what it has already seen. # # Note that we will not be using patient IDs associated with MSK. Instead, we will be using spoof IDs that will suffice for this tutorial. When running this workflow with real data, make sure to include the IDs safely and securely. Run the following block of code to add a 'patient_id' column to the table and store it using Spark. 
# + from pyspark.sql import SparkSession # setup spark session spark = SparkSession.builder \ .appName("test") \ .master('local[*]') \ .config("spark.driver.host", "127.0.0.1") \ .config("spark.jars.packages", "io.delta:delta-core_2.12:0.7.0") \ .config("spark.delta.logStore.class", "org.apache.spark.sql.delta.storage.HDFSLogStore") \ .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \ .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \ .config("spark.databricks.delta.retentionDurationCheck.enabled", "false") \ .config("spark.hadoop.dfs.client.use.datanode.hostname", "true") \ .config("spark.driver.memory", "6g") \ .config("spark.executor.memory", "6g") \ .getOrCreate() print(spark) # read WSI delta table wsi_table = spark.read.format("delta") .load("../PRO_12-123/tables/WSI_toy_data_set").toPandas() # insert spoof patient ids patient_id=[1,2,3,4,5] wsi_table['patient_id']=patient_id wsi_table # convert back to a spark table (update table) x = spark.createDataFrame(wsi_table) x.write.format("delta").mode("overwrite").option("mergeSchema", "true").save("../PRO_12-123/tables/WSI_toy_data_set") # - # Reduce the delta table down to a single layer so all data can be read as a parquet table. from delta.tables import * wsi_table = DeltaTable.forPath(spark, "../PRO_12-123/tables/WSI_toy_data_set") wsi_table.vacuum(0) # Next, we may view the WSI table! This table should have the metadata associated with the WSI slides that you just collected, including the patient IDs. # + # read WSI delta table wsi_table = spark.read.format("delta") \ .load("../PRO_12-123/tables/WSI_toy_data_set").toPandas() # view table wsi_table # - # If the table is depicted above, congratulations, you have successfully run the Whole Slide Image (WSI) ETL to database the slides! # # ## Run the regional annotation ETL # The whole slide images that you downloaded are images of ovarian cancer, but not every pixel on each slide is a tumor. 
In fact, the images show tumor cells, normal ovarian cells and more. A non-expert annotated this slide for demo purposes only. # # The regional annotation ETL performs the following steps # # - Downloads DSA json annotations # - Converts DSA jsons to GeoJSON format, which is compatible with downstream applications # - Saves configs in your `~/vmount/PRO_12-123/configs/REGIONAL_METADATA_RESULTS` # - Saves parquet table in your `~/vmount/PRO_12-123/tables/REGIONAL_METADATA_RESULTS ` # # To run the regional annotation ETL, we use the `dsa_annotation` CLI. For more details on the dsa_annotation, and the annotations we support, please checkout the `7_dsa-annotation.ipynb` notebook. # # **Note**: details of your DSA instance is specified as `DSA_URI` in `../conf/dsa_regional_annotation.yaml` and should be updated to reflect your DSA setup. If you are using the docker, replace the `localhost` with the IP you get from running: # # ```docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' luna_tutorial_girder_1``` # # !dsa_annotation \ # -d ../conf/dsa_regional_annotation.yaml \ # -a ../conf/dsa_app_config.yaml \ # -u admin -p password # To check that the regional annotation ETL was correctly run, after the Jupyter cell finishes, you may load the regional annotations table! This table contains the metadata saved from running the ETL. It includes paths to the bitmap files, numpy files, and geoJSON files that were mentioned before. To load the table, run the following code cell: # + from pyarrow.parquet import read_table regional_annotation_table = read_table("../PRO_12-123/tables/REGIONAL_METADATA_RESULTS").to_pandas() regional_annotation_table # - # At this point, you have successfully set up your workspace, dowloaded the data, and run both the pathology and regional annotation ETLs to prepare your data. You are ready to move on to the tiling notebook!
docker/luna_tutorial/vmount/notebooks/1_dataset-prep.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tweets Disaster Classification: LSTM, Attention and Transformers <br> # Author: <NAME><br> # Created: 18/2/2020<br> # Last update: 6/1/2021<br> # # <img src = 'https://bn1301files.storage.live.com/y4m-toxx6sX6SL9zvwtvAbEi9xPKLkgI6kdJ0PJ0uWjzQIR5GouWmvWfMBEuppVlUoFh3eZkKSrveb0QWnLNHPfHVwlBx55CtJMcmqurAYyBv-a2d1rSAmBUxU9CYHY7zZ50XIldgPJMkU7o18TcvrbPJatlu7ioKXMNV0qyev-Z1ise-zNPFjcYmbqz52FSyeW?width=5048&height=1838&cropmode=none' width="900"> # # <br><br> # # In the era of big data, text and sequential data are the most uniquitous, from social media to medical records to speech recordings. As such, **natural language processing** problems are present in most fields and industry, and deep neural networks that can learn and tackle these problems are becoming increasingly important. The purpose of this notebook is to use some of the most common and effective models to tackle a **sentence classification** problem, specifically, to classify whether a tweet is about a disaster or not. This is easy for a human, but a computer will find it difficult as languages contain multiple complexities. A model will thus will have to take into account the sequential nature of the tweet, the meaning and representation of each word in numbers, as well as the importance and contribution of other words in the same sequence, since two words can have completely different meanings in two different contexts. For example, take the word `kill`. Although it might seem to indicate a disaster, what if it was used in a different context, such as when referring to the book 'To kill a mockingbird'? Thus, NLP is not an easy problem for a computer to solve but recent advances has greatly advanced this process. 
# # The dataset contains 10,000 tweets that were classified as disaster or non-disaster. # # # ## Project Goals # 1. *Explore* using different sequence models **(LSTM, Attention, Transformers)** for NLP sentence classification problem # 2. *Preprocess/Clean* tweets data into appropriate format for inputting into neural network models # 3. *Understand* **word embeddings** and how they are used to represent words as inputs into NLP models # 4. *Engineer* new features from tweets data that can help to improve model classification # # # ### What's in this notebook: # 1. [Data Loading and Structure](#Data_loading_structure) # 2. [Exploratory Data Analysis of Tweets](#EDA) <br> # 2.1. [Distribution of Character, Word and Sentence Frequency](#Frequency_Distribution) <br> # 2.2. [Top Most Common Stopwords](#Top_Stopwords) <br> # 2.3. [Top Most Common Punctuations](#Top_Punc) <br> # 2.4. [Top Most Common Words](#Top_Words) <br> # 2.5. [Wordcloud for Hashtags](#Hashtags) <br> # 3. [Meta-Feature Engineering](#Feature_Engineer) # 4. [Text Data Cleaning](#Data_Clean) <br> # 4.1. [Ngrams](#Ngrams) <br> # 4.2. [WordCloud of Most Common Words after Cleaning](#WC_Cleaned) # 5. [Train Validation Data Split](#TrainValSplit) # 6. [Embedding Layer](#Embedding) <br> # 6.1. [Tokenization](#Tokenization) <br> # 6.2. [Padding](#Padding) <br> # 6.3. [Embedding Matrix – GloVe](#E_Matrix) <br> # 7. [Model Building & Training](#Model_Build) <br> # 7.1. [Long Short-Term Memory (LSTM)](#LSTM) <br> # 7.2. [Bidirectional LSTM with Attention](#Attention) <br> # 7.3. [BERT](#BERT) # 8. [Error Analysis](#Error) # 9. [Testing](#Test) # 10. [Conclusion](#Conclusion)<br><br> # <a id='Data_loading_structure'></a> # # 1. 
Data Loading and Structure # + import numpy as np import pandas as pd # import os # for dirname, _, filenames in os.walk('/kaggle/input'): # for filename in filenames: # print(os.path.join(dirname, filename)) tweets = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv') test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv') #tweets = pd.read_csv(r'C:\Users\TeYan\OneDrive\Work\Kaggle\Tweets_Disaster\Data\train.csv') #tweets = pd.read_csv('/Users/teyang/OneDrive/Work/Kaggle/Tweets_Disaster/Data/train.csv') # - tweets.head() tweets.isnull().sum().plot(kind='bar') # Location has lots of NaN values and would not be a good/useful feature, unless we have a priori knowledge of where a disaster occured. Furthermore, some of them are not in the correct format, so it will be quite time consuming to clean it. # # Keyword has NaNs as well, but can be imputed with 'None'. # + import seaborn as sns import matplotlib.pyplot as plt color = [sns.xkcd_rgb['medium blue'], sns.xkcd_rgb['pale red']] sns.countplot('target',data = tweets, palette = color) plt.gca().set_ylabel('Samples') # - # <a id='EDA'></a> # # 2. Exploratory Data Analysis of Tweets # # <a id='Frequency_Distribution'></a> # ## 2.1. 
Distribution of Character, Word and Sentence Frequency # + #import nltk #nltk.download('punkt') from nltk import word_tokenize, sent_tokenize # count number of characters in each tweet tweets['char_len'] = tweets.text.str.len() # count number of words in each tweet word_tokens = [len(word_tokenize(tweet)) for tweet in tweets.text] tweets['word_len'] = word_tokens # count number of sentence in each tweet sent_tokens = [len(sent_tokenize(tweet)) for tweet in tweets.text] tweets['sent_len'] = sent_tokens plot_cols = ['char_len','word_len','sent_len'] plot_titles = ['Character Length','Word Length','Sentence Length'] plt.figure(figsize=(20,4)) for counter, i in enumerate([0,1,2]): plt.subplot(1,3,counter+1) sns.distplot(tweets[tweets.target == 1][plot_cols[i]], label='Disaster', color=color[1]).set_title(plot_titles[i]) sns.distplot(tweets[tweets.target == 0][plot_cols[i]], label='Non-Disaster', color=color[0]) plt.legend() # + # Investigate the Outliers tweets[tweets.sent_len > 8] tweets[tweets.word_len > 50] # - # Some of the outliers such as sentence length > 10 consist of a lot of punctuations. I left it unchanged as I feel that a tweet with a many sentences, which is indicative of many punctuations, suggest that it is not a serious tweet (about a disaster). Of course there might be some instances where a disaster tweet consists of multiple punctuations (e.g. a volvano just erupted!!!!!!!!!!!!) but that is not very frequent. # # <a id='Top_Stopwords'></a> # ## 2.2. 
Top Most Common Stopwords # + ## Plot most common stopwords #nltk.download('stopwords') from nltk.corpus import stopwords stop = set(stopwords.words('english')) # Get all the word tokens in dataframe for Disaster and Non-Disaster corpus0 = [] # Non-Disaster [corpus0.append(word.lower()) for tweet in tweets[tweets.target == 0].text for word in word_tokenize(tweet)] corpus1 = [] # Disaster [corpus1.append(word.lower()) for tweet in tweets[tweets.target == 1].text for word in word_tokenize(tweet)] # Function for counting top stopwords in a corpus def count_top_stopwords(corpus): stopwords_freq = {} for word in corpus: if word in stop: if word in stopwords_freq: stopwords_freq[word] += 1 else: stopwords_freq[word] = 1 topwords = sorted(stopwords_freq.items(), key=lambda item: item[1], reverse=True)[:10] # get the top 10 stopwords x,y = zip(*topwords) # get key and values return x,y x0,y0 = count_top_stopwords(corpus0) x1,y1 = count_top_stopwords(corpus1) # Plot bar plot of top stopwords for each class plt.figure(figsize=(15,4)) plt.subplot(1,2,1) plt.bar(x0,y0, color=color[0]) plt.title('Top Stopwords for Non-Disaster Tweets') plt.subplot(1,2,2) plt.bar(x1,y1, color=color[1]) plt.title('Top Stopwords for Disaster Tweets') # - # There are lots of occurences of stopwords. These should be removed as they do not predict the target. # <a id='Top_Punc'></a> # ## 2.3. 
Top Most Common Punctuations # + ## Plot most common punctuations from string import punctuation # Get all the punctuations in dataframe for Disaster and Non-Disaster corpus0 = [] # Non-Disaster [corpus0.append(c) for tweet in tweets[tweets.target == 0].text for c in tweet] corpus0 = list(filter(lambda x: x in punctuation, corpus0)) # use filter to select only punctuations corpus1 = [] # Disaster [corpus1.append(c) for tweet in tweets[tweets.target == 1].text for c in tweet] corpus1 = list(filter(lambda x: x in punctuation, corpus1)) from collections import Counter x0,y0 = zip(*Counter(corpus0).most_common()) x1,y1 = zip(*Counter(corpus1).most_common()) # Plot bar plot of top punctuations for each class plt.figure(figsize=(15,4)) plt.subplot(1,2,1) plt.bar(x0,y0, color=color[0]) plt.title('Top Punctuations for Non-Disaster Tweets') plt.subplot(1,2,2) plt.bar(x1,y1, color=color[1]) plt.title('Top Punctuations for Disaster Tweets') # - # Most common punctuation is the slash, which usually comes from a link ('http://t.co/'). URLs should be removed, as well as most punctuations, with the exception of '!?', which signal some kind of intensity or tonality of the tweet. # # <a id='Top_Words'></a> # ## 2.4. 
Top Most Common Words # + ## Plot most common words import re from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS stop = ENGLISH_STOP_WORDS.union(stop) # combine stop words from different sources # function for removing url from text def remove_url(txt): return " ".join(re.sub("([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", txt).split()) # Get all the word tokens in dataframe for Disaster and Non-Disaster # - remove url, tokenize tweet into words, lowercase words corpus0 = [] # Non-Disaster [corpus0.append(word.lower()) for tweet in tweets[tweets.target == 0].text for word in word_tokenize(remove_url(tweet))] corpus0 = list(filter(lambda x: x not in stop, corpus0)) # use filter to unselect stopwords corpus1 = [] # Disaster [corpus1.append(word.lower()) for tweet in tweets[tweets.target == 1].text for word in word_tokenize(remove_url(tweet))] corpus1 = list(filter(lambda x: x not in stop, corpus1)) # use filter to unselect stopwords # Create df for word counts to use sns plots a = Counter(corpus0).most_common() df0 = pd.DataFrame(a, columns=['Word','Count']) a = Counter(corpus1).most_common() df1 = pd.DataFrame(a, columns=['Word','Count']) # Plot for Disaster and Non-Disaster plt.figure(figsize=(15,4)) plt.subplot(1,2,1) sns.barplot(x='Word',y='Count',data=df0.head(10), color=color[1]).set_title('Most Common Words for Non-Disasters') plt.xticks(rotation=45) plt.subplot(1,2,2) sns.barplot(x='Word',y='Count',data=df1.head(10), color=color[0]).set_title('Most Common Words for Disasters') plt.xticks(rotation=45) # - # Disaster tweets contain more words related to disasters. But still need more cleaning. And what is the word amp? Will need to expand contractions as well such as 'im'. # <a id='Hashtags'></a> # ## 2.5. 
Wordcloud for Hashtags # + def clean(word): for p in punctuation: word = word.replace(p, '') return word from wordcloud import WordCloud def wc_hash(target): hashtag = [clean(w[1:].lower()) for tweet in tweets[tweets.target == target].text for w in tweet.split() if '#' in w and w[0] == '#'] hashtag = ' '.join(hashtag) my_cloud = WordCloud(background_color='white', stopwords=stop).generate(hashtag) plt.subplot(1,2,target+1) plt.imshow(my_cloud, interpolation='bilinear') plt.axis("off") plt.figure(figsize=(15,4)) wc_hash(0) plt.title('Non-Disaster') wc_hash(1) plt.title('Disaster') # - # <a id='Feature_Engineer'></a> # # 3. Meta-Feature Engineering # # Here, we extract some features from the tweets that might give us some idea about whether it is a disaster or not. The purpose of this is to build a feature-based model and use it as part of an ensemble model to improve the predictions of the sequence model. Although it might not perform well on its own, it can actually boost the performance when combined with other models. 
# # * polarity - range of \[-1,1] where 1 denotes positivity and -1 denotes negativity # * subjectivity - range of \[0,1] where 1 denotes personal opinions and 0 denotes factual info # * exclaimation_num - number of exclamation marks in tweet # * questionmark_num - number of question marks in tweet # * url_num - number of urls in tweet # * hash_num - number of hashtags (#) in tweet # * mention_num - number of mentions (@) in tweet # * contraction_num - number of contractions (e.g I'm, we're, we've) # + from textblob import TextBlob # polarity and subjectivity tweets['polarity'] = [TextBlob(tweet).sentiment.polarity for tweet in tweets.text] tweets['subjectivity'] = [TextBlob(tweet).sentiment.subjectivity for tweet in tweets.text] ############################################################################################################################# # exclaimation and question marks tweets['exclaimation_num'] = [tweet.count('!') for tweet in tweets.text] tweets['questionmark_num'] = [tweet.count('?') for tweet in tweets.text] ############################################################################################################################# # count number of hashtags and mentions # Function for counting number of hashtags and mentions def count_url_hashtag_mention(text): urls_num = len(re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)) word_tokens = text.split() hash_num = len([word for word in word_tokens if word[0] == '#' and word.count('#') == 1]) # only appears once in front of word mention_num = len([word for word in word_tokens if word[0] == '@' and word.count('@') == 1]) # only appears once in front of word return urls_num, hash_num, mention_num url_num, hash_num, mention_num = zip(*[count_url_hashtag_mention(tweet) for tweet in tweets.text]) tweets = tweets.assign(url_num = url_num, hash_num = hash_num, mention_num = mention_num) 
############################################################################################################################# # count number of contractions contractions = ["'t", "'re", "'s", "'d", "'ll", "'ve", "'m"] tweets['contraction_num'] = [sum([tweet.count(cont) for cont in contractions]) for tweet in tweets.text] # - tweets.head() # <a id='Data_Clean'></a> # # 4. Text Data Cleaning # # This is the most important step of the entire project — text preprocessing/cleaning. This cleans the text into a more 'suitable' form as inputs into the NLP models. For example, URLs might make the text difficult to understand and should be removed when necessary. The choice of whether to remove/clean some words or parts-of-speech is an entire process on its own and sometimes this needs to be experimented. Different models are also able to deal with different kinds of parts-of-speech. # # * Replace NaNs with 'None' # * Expand Contractions # * Remove Emojis # * Remove URLs # * Remove Punctuations except '!?' 
as they convey intensity and tonality of tweet # * Replace 'amp' with 'and' # * Word Segmentaion - segment words such as 'iwould' into 'i' and 'would' # * Lemmatization - reduces inflected words into their root form; verb part-of-speech tag is used here) # * Ngrams Exploration # * Remove Stopwords # * WordCloud of most commmon words (Unigrams) # + ## Replace NaNs with 'None' tweets.keyword.fillna('None', inplace=True) ############################################################################################################################# ## Expand Contractions # Function for expanding most common contractions https://stackoverflow.com/questions/19790188/expanding-english-language-contractions-in-python def decontraction(phrase): # specific phrase = re.sub(r"won\'t", "will not", phrase) phrase = re.sub(r"can\'t", "can not", phrase) # general phrase = re.sub(r"n\'t", " not", phrase) phrase = re.sub(r"\'re", " are", phrase) phrase = re.sub(r"\'s", " is", phrase) phrase = re.sub(r"\'d", " would", phrase) phrase = re.sub(r"\'ll", " will", phrase) phrase = re.sub(r"\'t", " not", phrase) phrase = re.sub(r"\'ve", " have", phrase) phrase = re.sub(r"\'m", " am", phrase) return phrase tweets.text = [decontraction(tweet) for tweet in tweets.text] ############################################################################################################################# ## Remove Emojis # Reference : https://gist.github.com/slowkow/7a7f61f495e3dbb7e3d767f97bd7304b def remove_emoji(text): emoji_pattern = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) u"\U00002702-\U000027B0" u"\U000024C2-\U0001F251" "]+", flags=re.UNICODE) return emoji_pattern.sub(r'', text) print(remove_emoji("OMG there is a volcano eruption!!! 
😭😱😷")) tweets.text = tweets.text.apply(lambda x: remove_emoji(x)) # + ############################################################################################################################# ## Remove URLs tweets.text = tweets.text.apply(lambda x: remove_url(x)) ############################################################################################################################# ## Remove Punctuations except '!?' def remove_punct(text): new_punct = re.sub('\ |\!|\?', '', punctuation) table=str.maketrans('','',new_punct) return text.translate(table) tweets.text = tweets.text.apply(lambda x: remove_punct(x)) ############################################################################################################################# ## Replace amp def replace_amp(text): text = re.sub(r" amp ", " and ", text) return text tweets.text = tweets.text.apply(lambda x: replace_amp(x)) ############################################################################################################################# # - # Word segmentation takes a long time. So I have commented out the code and loaded the data that has already been segmented beforehand. 
# + # from wordsegment import load, segment # load() # tweets.text = tweets.text.apply(lambda x: ' '.join(segment(x))) tweets = pd.read_csv('../input/twitter-logo/tweets_segmented.csv') # + ## Lemmatization from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() def lemma(text): words = word_tokenize(text) return ' '.join([lemmatizer.lemmatize(w.lower(), pos='v') for w in words]) tweets.text = tweets.text.apply(lambda x: lemma(x)) # - # ### Ngrams # + ## Ngrams from nltk.util import ngrams def generate_ngrams(text, n): words = word_tokenize(text) return [' '.join(ngram) for ngram in list(get_data(ngrams(words, n))) if not all(w in stop for w in ngram)] # exclude if all are stopwords # in newer versions of python, raising StopIteration exception to end a generator, which is used in ngram, is deprecated def get_data(gen): try: for elem in gen: yield elem except (RuntimeError, StopIteration): return # + # Bigrams bigrams_disaster = tweets[tweets.target==1].text.apply(lambda x: generate_ngrams(x, 2)) bigrams_ndisaster = tweets[tweets.target==0].text.apply(lambda x: generate_ngrams(x, 2)) bigrams_d_dict = {} for bgs in bigrams_disaster: for bg in bgs: if bg in bigrams_d_dict: bigrams_d_dict[bg] += 1 else: bigrams_d_dict[bg] = 1 bigrams_d_df = pd.DataFrame(bigrams_d_dict.items(), columns=['Bigrams','Count']) bigrams_nd_dict = {} for bgs in bigrams_ndisaster: for bg in bgs: if bg in bigrams_nd_dict: bigrams_nd_dict[bg] += 1 else: bigrams_nd_dict[bg] = 1 bigrams_nd_df = pd.DataFrame(bigrams_nd_dict.items(), columns=['Bigrams','Count']) # + # Barplots for bigrams plt.figure(figsize=(15,10)) plt.subplot(1,2,1) sns.barplot(x='Count',y='Bigrams',data=bigrams_nd_df.sort_values('Count', ascending=False).head(40), color=color[0]).set_title('Most Common Bigrams for Non-Disasters') ax = plt.gca() ax.set_ylabel('') plt.subplot(1,2,2) sns.barplot(x='Count',y='Bigrams',data=bigrams_d_df.sort_values('Count', ascending=False).head(40), color=color[1]).set_title('Most 
Common Bigrams for Disasters') ax = plt.gca() ax.set_ylabel('') plt.tight_layout() plt.show() # + # Woudcloud for bigrams plt.figure(figsize=(15,10)) plt.subplot(1,2,1) my_cloud = WordCloud(background_color='white', stopwords=stop).generate_from_frequencies(bigrams_nd_dict) plt.imshow(my_cloud, interpolation='bilinear') plt.axis('off') plt.subplot(1,2,2) my_cloud = WordCloud(background_color='white', stopwords=stop).generate_from_frequencies(bigrams_d_dict) plt.imshow(my_cloud, interpolation='bilinear') plt.axis('off') plt.show() # + # Trigrams trigrams_disaster = tweets[tweets.target==1].text.apply(lambda x: generate_ngrams(x, 3)) trigrams_ndisaster = tweets[tweets.target==0].text.apply(lambda x: generate_ngrams(x, 3)) trigrams_d_dict = {} for tgs in trigrams_disaster: for tg in tgs: if tg in trigrams_d_dict: trigrams_d_dict[tg] += 1 else: trigrams_d_dict[tg] = 1 trigrams_d_df = pd.DataFrame(trigrams_d_dict.items(), columns=['Trigrams','Count']) trigrams_nd_dict = {} for tgs in trigrams_ndisaster: for tg in tgs: if tg in trigrams_nd_dict: trigrams_nd_dict[tg] += 1 else: trigrams_nd_dict[tg] = 1 trigrams_nd_df = pd.DataFrame(trigrams_nd_dict.items(), columns=['Trigrams','Count']) # + # Barplots for trigrams plt.figure(figsize=(15,10)) plt.subplot(1,2,1) sns.barplot(x='Count',y='Trigrams',data=trigrams_nd_df.sort_values('Count', ascending=False).head(40), color=color[0]).set_title('Most Common Trigrams for Non-Disasters') ax = plt.gca() ax.set_ylabel('') plt.subplot(1,2,2) sns.barplot(x='Count',y='Trigrams',data=trigrams_d_df.sort_values('Count', ascending=False).head(40), color=color[1]).set_title('Most Common Trigrams for Disasters') ax = plt.gca() ax.set_ylabel('') plt.tight_layout() plt.show() # + ## Remove Stopwords def remove_stopwords(text): word_tokens = word_tokenize(text) return ' '.join([w.lower() for w in word_tokens if not w.lower() in stop]) #tweets_tmp = tweets.copy() tweets['text_nostopwords'] = tweets.text.apply(lambda x: remove_stopwords(x)) # - # 
<a id='WC_Cleaned'></a> # ## 4.1. WordCloud of Most Common Words after Cleaning # # Removed some words such as 'new', 'like' and 'people' as they are common between both targets # + ## Plot word cloud for most common words after cleaning from PIL import Image mask = np.array(Image.open('../input/twitter-logo/Twitter-Logo_white.png')) reverse = mask[...,::-1,:] def wc_words(target, mask=mask): words = [word.lower() for tweet in tweets[tweets.target == target].text_nostopwords for word in tweet.split()] words = list(filter(lambda w: w != 'like', words)) words = list(filter(lambda w: w != 'new', words)) words = list(filter(lambda w: w != 'people', words)) dict = {} for w in words: if w in dict: dict[w] += 1 else: dict[w] = 1 # plot using frequencies my_cloud = WordCloud(background_color='white', stopwords=stop, mask=mask, random_state=0).generate_from_frequencies(dict) plt.subplot(1,2,target+1) plt.imshow(my_cloud, interpolation='bilinear') plt.axis("off") plt.figure(figsize=(15,10)) wc_words(0) plt.title('Non-Disaster') wc_words(1, reverse) plt.title('Disaster') plt.show() # - pd.options.display.max_colwidth = 200 for t in tweets['text'].sample(n=20, random_state=0): print(t) pd.reset_option('max_colwidth') pd.reset_option('max_colwidth') tweets.drop('text_nostopwords', axis=1, inplace=True) tweets.head() # <a id='TrainValSplit'></a> # # 5. 
Train Validation Data Split # # + from sklearn.model_selection import train_test_split X_train, X_val, y_train, y_val = train_test_split(tweets.drop(['id','keyword','location','target'],axis=1), tweets[['target']], test_size=0.2, stratify=tweets[['target']], random_state=0) X_train_text = X_train['text'] X_val_text = X_val['text'] print('X_train shape: ', X_train.shape) print('X_val shape: ', X_val.shape) print('y_train shape: ', y_train.shape) print('y_val shape: ', y_val.shape) # - print('Train Class Proportion:\n', y_train['target'].value_counts() / len(y_train) * 100) print('\nValidation Class Proportion:\n', y_val['target'].value_counts() / len(y_val) * 100) # <a id='Embedding'></a> # # 6. Embedding Layer # # ### Word Representation # # Word representation refers to representing words as numbers so that a computer can understand it. One way to represent words is to use a one-hot representation (bottom left), where each word in a corpus/dictionary is a vector of all 0s except the index which it is assigned to. For example, in a 10,000 word dictionary, `a` is usually the first word and so is given a vector of [1,0,0,0,...,0], `aaron` is a vector of [0,1,0,0,...,0] and `zulu`, which might be the last word, is a vector of [0,0,0,0,...,1], all with a shape of (10000, 1). However, this way of representing words have a major weakness — ***it treats each word as onto itself, so it does not generalize across words.*** For example, the relationship between `apple` and `orange` is not any closer than the relationship between `apple` and `king`. The inner product or Euclidean distance between any 2 words will be 0. Therefore, all word pairs will have a dissimilarity (Euclidean Distance) of 0. 
# <br><br> # # <img src = 'https://bn1301files.storage.live.com/y4mS1q2-u6bjSL9LZ317bVz57HUlCnt3l3du9-iVCE8GiUrMMM4YAuxWQ12iHTImvYXvnLJgCKWZFE7kiurFmRX7jMUINieWGPGLeP9rtszv3GlaEwvhWiDXo3wfS7tC-semwXswn3QOlKZi1Ddsz9VRS9YABa_6lugTftLC_ZLOfv77igv55y_E_3Lq5AgqFus?width=3676&height=1378&cropmode=none' width=800> # <br> # # ### Word Embeddings # # A better way to represent words is using word embeddings, which can be learned from large corpuses of texts, such as Wikipedia. It is a dense way, compared to the sparse way for word representation, of representing words as well as the relationships between them. A word embedding is a learned representation for text where words that have the **same meaning have a similar representation**. For example, as shown above in the right table, `apple` and `orange` have similar vector values (their euclidean distance is very small) compared to `apple` and `king`. Another way to compare two words is using **cosine similarity**. # # Each row of the matrix represents a **feature/dimension**, such as `gender` or `food` that are attributes of the words. Words that are highly attributed to the feature are given high positive and negative values, while words with no such attributes are given values close to 0s. If we take the vector difference between `man` and `woman`, or `king` and `queen`, both will give a vector close to [-2,0,0,...,0], indicating that each of the pair of words differ highly according to the `gender` attribute. In practice, the features/dimensions that are learned for word embeddings are more abstract, and sometimes it might not be intuitive as to what attributes they represent, and they might be a combination of different attributes. # # Word embeddings can be trained from scratch. 
Some of the most popular ways include [Word2Vec](http://jalammar.github.io/illustrated-word2vec/), [NegativeSampling](http://jalammar.github.io/illustrated-word2vec/), and [GloVe (Global vectors for word representation)](https://towardsdatascience.com/light-on-math-ml-intuitive-guide-to-understanding-glove-embeddings-b13b4f19c010). A pre-trained word embedding can also be downloaded and used. # # The graph below shows a simple RNN model for a *many-to-one* classification problem, such as tweet disaster classification or sentiment analysis, with the input words fed into the embedding layer. For each word, its vector representation (`e`) is obtained from the embedding matrix (`E`), and is then fed into the hidden layers. # <br><br> # # <img align=left src = 'https://bn1301files.storage.live.com/y4mkNjcljwqSA1Sb6vCIb8YMk8i5mcl-ViArevkMz6kqZVvbi8fW0lJFPwAprRt5DBN3YamG_ooLd_dRT85rIEinIHrPUTcdxLeBHuxLAYmfpxdDT6Hajvhrqmevt1C_XtXMWQnEe1z2-fouUj760K41kfVH2vbzBOr8JNZYWCNte-xVWHuBSFxGCrzTM7bumTs?width=3368&height=1448&cropmode=none' width=800> # # <br> # # <a id='Tokenization'></a> # ## 6.1. Tokenization # To feed the tweets into the model, first we need to split them up. Here we **tokenize** the sentences -- break them up into words and assign them an integer based on the vocabulary dictionary. The maximum vocabulary size is set to 5000, so only the most common `num_words`-1 words will be kept. `oov_token` is set to `<UNK>` so that out-of-vocabulary words will be given an index instead of being ignored during `text_to_sequence` call. # # Use `fit_on_texts` to create a word-to-index vocabulary dictionary based on the train texts. This creates the vocabulary index based on word frequency, with words that appear more often at the top of the vocabulary. # # `texts_to_sequences` transforms each text in texts to a sequence of integers. 
# + from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences tokenizer_1 = Tokenizer(num_words=5000, oov_token='<UNK>') tokenizer_1.fit_on_texts(X_train_text) # - X_train_text = tokenizer_1.texts_to_sequences(X_train_text) X_val_text = tokenizer_1.texts_to_sequences(X_val_text) print(X_train_text[:10]) print('') print(X_val_text[:10]) # Each list in the `X_train_text` and `X_val_text` is a list of integers, which corresponds to each tweets in the train and validation set respectively. The length of each list is also different as different tweets have different lengths. Therefore, we will need to apply **padding** to make all sequences the same length. # # We can use `tokenizer.word_index` to look at the vocabulary dictionary and `sequences_to_texts` to transform sequences back into texts. Note that words that are not in the vocabulary are now `<UNK>`. # # # **Note:** The Tokenizer stores everything in the `word_index` during `fit_on_texts`. Then, when calling the `texts_to_sequences` method, only the top `num_words` are considered. So `word_index` will actually contain more words than `num_words`. tokenizer_1.sequences_to_texts([X_train_text[1]]) # <a id='Padding'></a> # ## 6.2. Padding # # After tokenization, each tweet is represented as a list of tokens. Next, we need to **pad** all lists to the same size, so we can represent the input as one 2-d array, rather than a list of lists (of different lengths). Do this by adding 0s to the end of each sentence in the tokenized form so that each sentence is *now the same length as the longest tweet*. # # The max length for the train set tweets is 32. We will set the `maxlen` to be 50 as tweets from the validation or test set might be longer. This means that texts longer than 50 words will be truncated to the 1st 50 words while texts shorter than 50 will have 0s appended to make them of length 50. # # Below shows a quick example of padding sentences to a length of 5 sequences. 
#
# <br>
#
# <img src = 'https://bn1301files.storage.live.com/y4ma9N0t0Cjf_JcFdIj5J6W47lKDiMsXBwUwg5KXo6hUlH9PrpNv5b067TNxP7NFrtk1nbM8fxn5HXFs4rOLJ1QZK1omFFHB5Bl-jsoX5T4bZKJ3I76JwZazSPvquBb0aVem8MGLIP2CT8AsnRW1EOeMExc4w1AkzmfJ_p1oNRv506yRZEUEVlbtY780CnoAadD?width=4342&height=494&cropmode=none' width=700 align=left>

# +
# Longest tokenized train tweet (32 words); maxlen is set higher to accommodate
# possibly-longer validation/test tweets.
print('Train Set Max Length:', max(len(text) for text in X_train_text))

maxlen = 50

# 'post' padding appends zeros AFTER the tokens; longer sequences are truncated to maxlen.
X_train_text = pad_sequences(X_train_text, padding='post', maxlen=maxlen)
X_val_text = pad_sequences(X_val_text, padding='post', maxlen=maxlen)

print('X_train shape:', X_train_text.shape)
# Fixed label: this previously printed 'X_train shape:' for the validation array.
print('X_val shape:', X_val_text.shape)
# -

# <a id='E_Matrix'></a>
# ## 6.3. Embedding Matrix – GloVe
#
# We will use the [GloVe embeddings](https://nlp.stanford.edu/projects/glove/) that were pre-trained on 2 billion tweets to create our feature matrix. First, we will create a dictionary that will contain words as keys and their corresponding embedding list at values. The length of the embedding for each word will be 200, as the GloVe embedding we are using was trained to have 200 dimensions. Refer to [here](https://github.com/stanfordnlp/GloVe) also for more details.
#
#

# +
# Adding 1 because of reserved 0 index
vocab_size = len(tokenizer_1.word_index) + 1

# load the whole embedding into memory: {word -> 200-d float32 vector}
# `with` guarantees the file handle is closed even if a line fails to parse,
# and the encoding is pinned to UTF-8 so the load does not depend on the locale.
embeddings_index = dict()
with open('../input/glove-global-vectors-for-word-representation/glove.twitter.27B.200d.txt', encoding='utf-8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
print('Loaded %s word vectors.' % len(embeddings_index))
# -

# Next we will create an embedding matrix for our train vocab/corpus where each row number will correspond to the index of the word in our train vocab/corpus. The matrix will have 200 columns, each containing one of the GloVe feature/dimension.
# + # create a weight matrix for words in training set embedding_matrix = np.zeros((vocab_size, 200)) for word, i in tokenizer_1.word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector print('Embedding Matrix Shape:', embedding_matrix.shape) # - # <a id='Model_Build'></a> # # 7. Model Building & Training # # <a id='LSTM'></a> # ## 7.1. Long Short-Term Memory (LSTM) # # [Long Short-Term Memory (LSTM)](https://www.mitpressjournals.org/doi/abs/10.1162/neco.1997.9.8.1735) models are a type of recurrent neural network that allows for longer range dependencies, unlike traditional feed-forward RNNs. It has a few advantages: # # 1. Longer range dependence # 2. Selectively remember or forget things # 3. Get around exploding and vanishing gradients # # LSTMs have a few dependencies. Imagine that we are predicting whether it will rain today. This depends on several information: # # 1. The trend of the previous few days, such as many days with rain, or a very heavy downpour (the previous cell state) # 2. Information from the previous day, such as temperature, wind level (previous hidden state) # 3. Information from today (input at current time step) # # To decide whether or not to use these information, LSTMs contain different memory blocks called **cells**, which are responsible for remembering which information are important to use and which to discard. Manipulations or changes to this memory is done through **gates**. # # 1. The **forget gate** is responsible for removing memories from the cell state. From the figure and equations below, it takes inputs from the previous hidden state, `a` and current input `x`, and applies a sigmoid function to decide whether to keep the information or not. # 2. The **input gate** consists of the **update gate** and the **tanh** function. The process of adding new information to the memory cell is done through this gate. 
The **update gate** decides which information to be added through a sigmoid function (similar to the forget gate), and the tanh function creates the information to be added. The two outputs are then multiplied together and added to the memory cell.
# 3. The **output gate** selects useful information from the current cell state and outputs it. First, it creates a vector after applying **tanh** to the memory cell, then makes a filter using sigmoid function to regulate the information that needs to be used from the previous vector, and multiply them together, thus creating the output and also the hidden state to the next cell.
#
#
#
# <img src='https://bn1301files.storage.live.com/y4moAJV3tGM4StMGVxvRmKYHz14V8F5X2aC0T4WaJdO1M_9QAPti5-3hx69bd-KJRsASCCdYErxqDL9PeNoDkFRFCwJnzpnR3e9w24NFJoOCMj3h_7jG90QjADEDje9hXVGM4sg8ltWrcbi2vz8pCLVBYsCTAQchMBn-JRTsX5ArSXY2r8ah54G_SVTJD9oJQOA?width=1711&height=623&cropmode=none' width=1000>

## Hyperparameters
num_epochs=15
dropout=0.2             # "vertical" dropout on the LSTM inputs/outputs
recurrent_dropout=0.2   # dropout on the recurrent (timestep-to-timestep) connections
lr=0.0005
batch_size=128
# Inverse-frequency class weights: the minority class (disaster, label 1) gets the larger
# weight so it contributes more to the loss.
class_weight = {0: y_train['target'].value_counts()[1]/len(y_train), 1: y_train['target'].value_counts()[0]/len(y_train)}

# We will use dropout and recurrent dropout to add regularization to the model, which can help with overfitting. Regular dropout works in the vertical direction of the RNN, while recurrent dropout masks the connections between the recurrent units (horizontal direction). Refer to this [post](https://stackoverflow.com/questions/44924690/keras-the-difference-between-lstm-dropout-and-lstm-recurrent-dropout) for more information.
#
# A class weight will also be used. Without it, the model makes a lot more false negatives than false positives. The weighting for the minority class (`disaster`) will be given more weighting, meaning that it will be given more contribution to the loss computation. This is taken as `(total samples-samples of class) / total samples`.
# +
from keras.models import Sequential
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten, GlobalMaxPooling1D, LSTM
from keras.layers.embeddings import Embedding
from keras import optimizers
from keras.callbacks import ModelCheckpoint

# Stacked 2-layer LSTM over frozen (trainable=False) GloVe embeddings, with a single
# sigmoid unit for the binary disaster/non-disaster output.
lstm_model = Sequential()
embedding_layer = Embedding(vocab_size, 200, weights=[embedding_matrix], input_length=maxlen, trainable=False)
lstm_model.add(embedding_layer)

# return_sequences=True so the second LSTM receives the full per-timestep sequence.
lstm_model.add(LSTM(128, return_sequences=True, dropout=dropout, recurrent_dropout=recurrent_dropout)) # try adding dropout later
lstm_model.add(LSTM(128))
#model.add(Flatten())
lstm_model.add(Dense(1, activation='sigmoid'))

adam = optimizers.Adam(lr=lr)
lstm_model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['acc'])

print(lstm_model.summary())

# best hyperparameters
# num_epochs=15
# dropout=0.2
# recurrent_dropout=0.2
# lr=0.0005
# batch_size=128
# -

def plot_model_performance(history):
    # Plot train/val accuracy (left axis) and train/val loss (right twin axis) per epoch,
    # annotating the best validation accuracy and the lowest validation loss.
    plt.figure(figsize=(15,5))
    plt.plot(range(num_epochs), history.history['acc'],'-o', label='Train ACC',color='#ff7f0e')
    plt.plot(range(num_epochs),history.history['val_acc'],'-o', label='Val ACC',color='#1f77b4')
    # Mark the epoch with the highest validation accuracy.
    x = np.argmax( history.history['val_acc'] ); y = np.max( history.history['val_acc'] )
    xdist = plt.xlim()[1] - plt.xlim()[0]; ydist = plt.ylim()[1] - plt.ylim()[0]
    plt.scatter(x,y,s=200,color='#1f77b4')
    plt.text(x-0.03*xdist,y-0.13*ydist,'max acc\n%.2f'%y,size=14)
    plt.ylabel('Accuracy',size=14); plt.xlabel('Epoch',size=14)
    plt.legend(loc=(0.01,0.75))
    # Second y-axis for the losses so the two scales do not fight each other.
    plt2 = plt.gca().twinx()
    plt2.plot(range(num_epochs),history.history['loss'],'-o', label='Train Loss',color='#2ca02c')
    plt2.plot(range(num_epochs),history.history['val_loss'],'-o', label='Val Loss',color='#d62728')
    # Mark the epoch with the lowest validation loss.
    x = np.argmin( history.history['val_loss'] ); y = np.min( history.history['val_loss'] )
    ydist = plt.ylim()[1] - plt.ylim()[0]
    plt.scatter(x,y,s=200,color='#d62728')
    plt.text(x-0.03*xdist,y+0.05*ydist,'min loss',size=14)
    # plt.ylim([-0.2, 2])
    plt.ylabel('Loss',size=14)
    plt.xticks(ticks=list(range(num_epochs)),labels=list(range(1, num_epochs+1)))
    plt.legend(loc='lower left', bbox_to_anchor=(0.01, 0.1))
    plt.show()

# Keep only the weights of the epoch with the best validation accuracy.
checkpoint = ModelCheckpoint('lstm_model.h5', monitor='val_acc', save_best_only=True)

history = lstm_model.fit(X_train_text, y_train, batch_size=batch_size, callbacks=[checkpoint], epochs=num_epochs, class_weight=class_weight, validation_data=(X_val_text, y_val), verbose=1)

plot_model_performance(history)

# One thing to note is that when using **class weights** for **class imbalance**, the validation loss is consistently higher than the train loss, but this doesn't happen when `class_weight` is turned off. I am not sure what is happening here. Please let me know if anyone has any ideas!

# +
# from keras.models import Sequential
# from keras.layers.core import Activation, Dropout, Dense
# from keras.layers import Flatten, GlobalMaxPooling1D, LSTM, Bidirectional
# from keras.layers.embeddings import Embedding
# from keras import optimizers

# model = Sequential()
# embedding_layer = Embedding(vocab_size, 200, weights=[embedding_matrix], input_length=maxlen, trainable=False)
# model.add(embedding_layer)

# model.add(Bidirectional(LSTM(128, return_sequences=True, dropout=dropout, recurrent_dropout=recurrent_dropout))) # try adding dropout later
# model.add(Bidirectional(LSTM(128)))
# #model.add(Flatten())
# model.add(Dense(1, activation='sigmoid'))

# adam = optimizers.Adam(lr=lr)
# model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['acc'])

# print(model.summary())
# -

# <a id='Attention'></a>
# ## 7.2.
Bidirectional LSTM with Attention # # <img src = 'https://bn1301files.storage.live.com/y4mpYVhDp9W6iW73-HkwwbyvkDRtQBj8K6FIz4kb7-iQhcydjC0KzXrREYJy-Im10aox7hLJIetYLNhuusOdo6fBkgpSLnnZn2RCf2H-lfqw1CXfsXUv_wFiuf2QAK70HgeNo_Ayl3H4kIbT5FUgCLK0iS21B5uNIAgFXVKapAYwMdMYzmStGqSBkvQ_H4m_9A6?width=850&height=425&cropmode=none' width=800> # A vanilla LSTM only uses information from the previous timesteps and not from the future. In many NLP problems, words that come after the current timepoint also influences the current output, although this is less likely for other applications like weather forecasting. As such, a bidirectional LSTM takes into account information from both past and future to create the output at the current timepoint, as shown by the figure above in the LSTM layer. Note that a gated recurrent unit (GRU) can also be used instead of a LSTM. # # Also, another limitation with encoder-decoder architectures is that the encoder has to learn to encode input sequences into a *fixed-length internal representation*, which limits the performance of these networks, especially when considering very long input sequences. This means that the encoder has to compress all the information of a source input into a fixed-length vector and pass it to the encoder. The idea of **attention** aims to search for "a set of positions in a source sentence where the most relevant information is concentrated. The model then predicts a target word based on the context vectors associated with these source positions and all the previous generated target words." — [Bahdanau et al., 2015](https://arxiv.org/abs/1409.0473) # # As seen from the figure above, the attention layer takes the bidirection hidden layer states and multiply them to a set of attention weights, which tells how much attention the current input should be paying attention to other past and future inputs (the **context**). 
These outputs at each timepoint will then be concatenated, which is the context, and will be used to generate the output. There are 2 main kinds of attention: **Global** and **Local** Attention.
# * **Global Attention**: Considers all hidden states of encoder LSTM and all hidden states[(Luong et al., 2015)](https://arxiv.org/abs/1508.04025) / previous hidden states [(Bahdanau et al., 2015)](https://arxiv.org/abs/1409.0473) of the unidirectional encoder LSTM. Global attention requires lots of computation as all hidden states are considered.
# * **Local Attention**: Only a part of the encoder hidden states are considered for context vector generation.
#
#
#

# +
## Attention Class
from keras.layers import Layer
import keras.backend as K

class attention(Layer):
    # Simple additive attention pooling over the time axis: scores each timestep,
    # softmax-normalizes the scores, and returns the attention-weighted sum of the inputs,
    # collapsing (batch, timesteps, features) -> (batch, features).
    def __init__(self,**kwargs):
        super(attention,self).__init__(**kwargs)

    def build(self,input_shape):
        # W: (features, 1) scoring weights; b: (timesteps, 1) per-timestep bias.
        self.W=self.add_weight(name="att_weight",shape=(input_shape[-1],1),initializer="normal")
        self.b=self.add_weight(name="att_bias",shape=(input_shape[1],1),initializer="zeros")
        super(attention, self).build(input_shape)

    def call(self,x):
        # et: (batch, timesteps) raw scores; at: softmax attention weights over time.
        et=K.squeeze(K.tanh(K.dot(x,self.W)+self.b),axis=-1)
        at=K.softmax(et)
        at=K.expand_dims(at,axis=-1)
        # Weighted sum over the time axis.
        output=x*at
        return K.sum(output,axis=1)

    def compute_output_shape(self,input_shape):
        return (input_shape[0],input_shape[-1])

    def get_config(self):
        return super(attention,self).get_config()


# +
### Attention
## Hyperparameters
num_epochs=15
dropout=0.3
recurrent_dropout=0.3
lr=0.0005
batch_size=128

import tensorflow as tf
from keras.models import Sequential
from keras import Model
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten, Input, Layer, GlobalMaxPooling1D, LSTM, Bidirectional, Concatenate
from keras.layers.embeddings import Embedding
from keras import optimizers

## Embedding Layer
sequence_input = Input(shape=(maxlen,))
embedded_sequences = Embedding(vocab_size, 200, weights=[embedding_matrix], trainable=False)(sequence_input)

## RNN Layer
lstm = Bidirectional(LSTM(128, return_sequences = True, dropout=dropout, recurrent_dropout=recurrent_dropout))(embedded_sequences) # Getting our LSTM outputs (lstm, forward_h, forward_c, backward_h, backward_c) = Bidirectional(LSTM(128, return_sequences=True, return_state=True))(lstm) ## Attention Layer att_out=attention()(lstm) outputs=Dense(1,activation='sigmoid')(att_out) model_attn = Model(sequence_input, outputs) adam = optimizers.Adam(lr=lr) #sgd = optimizers.sgd(lr=lr) model_attn.compile(optimizer=adam, loss='binary_crossentropy', metrics=['acc']) print(model_attn.summary()) # - checkpoint = ModelCheckpoint('attn_model.h5', monitor='val_acc', save_best_only=True) history_attn = model_attn.fit(X_train_text, y_train, batch_size=batch_size, callbacks=[checkpoint], epochs=num_epochs, class_weight=class_weight, validation_data=(X_val_text, y_val), verbose=1) plot_model_performance(history_attn) # Again, the validation loss is consistently higher than the train loss when using **class weights**. # <a id='BERT'></a> # ## 7.3. BERT # # The Bidirectional Encoder Representations from Transformers (BERT) is a language model developed by Google which has achieved state-of-the-art results in a variety of NLP tasks. BERT's key innovation is applying bidirectional training (actually it is non-directional, as it reads the entire sequence of words at once) of the encoder part of a **Transformer**. # # To understand BERT, we need to first understand what is a Transformer. The Transformer was first introduced in the very influential paper "Attention is All You Need" by [Vaswani et al., 2017](https://arxiv.org/abs/1706.03762). Below is the architecture of the Transformer. It gets past the sequential nature of traditional RNNs, and instead considers all inputs at the same time using **multi-headed self-attention**. 
To get a better intuition of self-attention as well as a more detailed explanation of the Transformer, refer to this [post](https://www.analyticsvidhya.com/blog/2019/06/understanding-transformers-nlp-state-of-the-art-models/). Also, since the model is no longer sequential (contains no recurrence), it uses positional encodings to "inject some information about the relative or absolute position of the tokens in the sequence". These positional encodings use sine and cosine functions and are added to the input embeddings at the bottom of the encoder and decoder.
#
# <img src = 'https://cdn.analyticsvidhya.com/wp-content/uploads/2019/06/Screenshot-from-2019-06-17-19-53-10.png' align='left'>
# <img src = 'https://cdn.analyticsvidhya.com/wp-content/uploads/2019/06/Screenshot-from-2019-06-17-20-05-30.png'>
#
# The BERT model uses a multi-layer bidirectional Transformer encoder (stacks the encoder several times). Only the encoder is needed as its goal is to create a language model. It performs self-attention in both directions and is pre-trained using two unsupervised prediction tasks.
#
# **Masked Language Modelling** <br>
# 15% of the words in each sequence are masked at random, and the model was trained to predict these masked words based on the context provided by the other non-masked words in the sequence. The loss function only takes into consideration the prediction of the masked values and not the non-masked words.
#
# **Next Sentence Prediction** <br>
# BERT was also pre-trained to capture the relationships between consecutive sentences. It uses pairs of sentences as its training data. 50% of the inputs are a pair in which the second sentence is the subsequent sentence in the original document, while the other half is a random sentence from the corpus.
#
# The figure below (upper) shows how BERT takes in the input and applies masking and sentence separation. The goal of training BERT is to minimize the combined loss function of these 2 strategies.
Refer to this [post](https://towardsdatascience.com/bert-explained-state-of-the-art-language-model-for-nlp-f8b21a9b6270) for a more detailed explanation.
#
# For single sentence classification such as the current problem of classifying disaster tweet, the architecture of BERT will involve adding a classification layer (sigmoid) on top of the Transformer output for the [CLS] token (lower graph below).
#
# <br><br>
#
# <img src = 'https://bn1301files.storage.live.com/y4mGNBqhZEX0ARXkCSvNbAkqw5PNaxxm_STcxiBcYvZVJLhdhjaNWbmnxbhxZwxyhJzPG6B7mjWwCQEWdLKJTtM9e7Z_A2y58uKJi2HoNyaU9wB4y9L66TXdp8UVvUSNPpDJc3XBGEld4gESOXDwZRc4xSYWuG_T7a5t8lDYQ3veqOCeBgt9N3IO6tI_PxaXZJV?width=1425&height=451&cropmode=none' width=600>
# <img src = 'https://media.geeksforgeeks.org/wp-content/uploads/20200422012400/Single-Sentence-Classification-Task.png' width=500>

# Hyperparameters
# BERT uses its own, longer sequence length (160) and a much smaller learning rate than
# the LSTM models above, since the pre-trained encoder is being fine-tuned.
maxlen = 160
lr = 1e-5 # 1e-5
num_epochs = 3 # 5
batch_size=16 # batch size cannot be too big for bert

# The following code for building the BERT model was taken from [<NAME>'s notebook](https://www.kaggle.com/wrrosa/keras-bert-using-tfhub-modified-train-data). Credit goes to him for sharing it.
# + # We will use the official tokenization script created by the Google team # !wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py import tensorflow as tf import tensorflow_hub as hub from tensorflow.keras.layers import Dense, Input from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Model from tensorflow.keras.callbacks import ModelCheckpoint import tokenization def bert_encode(texts, tokenizer, max_len=512): all_tokens = [] all_masks = [] all_segments = [] for text in texts: text = tokenizer.tokenize(text) text = text[:max_len-2] input_sequence = ["[CLS]"] + text + ["[SEP]"] pad_len = max_len - len(input_sequence) tokens = tokenizer.convert_tokens_to_ids(input_sequence) tokens += [0] * pad_len pad_masks = [1] * len(input_sequence) + [0] * pad_len segment_ids = [0] * max_len all_tokens.append(tokens) all_masks.append(pad_masks) all_segments.append(segment_ids) return np.array(all_tokens), np.array(all_masks), np.array(all_segments) # - def build_model(bert_layer, max_len=512, lr=1e-5): input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids") input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask") segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids") _, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids]) clf_output = sequence_output[:, 0, :] out = Dense(1, activation='sigmoid')(clf_output) model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out) model.compile(Adam(lr=lr), loss='binary_crossentropy', metrics=['accuracy']) return model # + # %%time module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1" bert_layer = hub.KerasLayer(module_url, trainable=True) vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy() do_lower_case = bert_layer.resolved_object.do_lower_case.numpy() tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case) # - 
# Encode train/validation tweets into BERT's (token_ids, masks, segment_ids) triples.
train_input = bert_encode(X_train.text.values, tokenizer, max_len=maxlen)
val_input = bert_encode(X_val.text.values, tokenizer, max_len=maxlen)
train_labels = y_train.target.values
val_labels = y_val.target.values

bert_model = build_model(bert_layer, max_len=maxlen, lr=lr)
bert_model.summary()

# +
checkpoint = ModelCheckpoint('bertmodel.h5', monitor='val_accuracy', save_best_only=True)

bert_history = bert_model.fit(
    train_input, train_labels,
    validation_data=(val_input, val_labels),
    epochs=num_epochs,
    callbacks=[checkpoint],
    #class_weight=class_weight,
    batch_size=batch_size
)
# -

# ### RoBERTa
#
# https://colab.research.google.com/github/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb#scrollTo=c3Q9NDdmqEyo

# <a id='Meta-data'></a>
# ## 7.4. Feature-based Model
#
# Here, we will create a feature-based model using the meta-features that we created at the beginning. The idea is to ensemble this model and the sequence models together to get better predictions. When ensembling, the outputs of this model will be given less weight compared to the neural networks as the neural networks are more likely to be better learners.

X_train

# +
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Random forest on the numeric meta-features only (the raw 'text' column is dropped).
clf = RandomForestClassifier(n_estimators=500, max_depth=15, min_samples_split=20, min_samples_leaf=2, n_jobs=-1, random_state=0)
clf.fit(X_train.drop('text',axis=1), y_train.target.values)
clf_pred = clf.predict_proba(X_val.drop('text',axis=1))
# argmax over the two probability columns == predicted class label.
print('Validation Accuracy:', accuracy_score(y_val.target.values, clf_pred.argmax(axis=-1)))
# -

clf_pred.max(axis=-1)

clf_pred.max(axis=-1)*0.1

# <a id='Error'></a>
# # 8.
Error Analysis

# +
# val = X_val.copy()
# val = val[['text']]
# val['target'] = y_val
# val['pred'] = model.predict(X_val_text)
# val['pred'] = (val['pred']*0.8) + (clf_pred.max(axis=-1)*0.2)
# val['pred'] = val['pred'].apply(lambda x: 1 if x >=0.5 else 0)
# error = val[val['target'] != val['pred']]
# error.head()

# Reload the best checkpoint (fit() leaves the last-epoch weights in memory).
bert_model.load_weights('bertmodel.h5')

val = X_val.copy()
val = val[['text']]
val['target'] = y_val
# val['pred'] = lstm_model.predict_classes(X_val_text)
# Threshold the sigmoid probability at 0.5 to get hard labels.
val['pred'] = bert_model.predict(val_input)
val['pred'] = val['pred'].apply(lambda x: 1 if x >=0.5 else 0)
# Keep only misclassified tweets for inspection.
error = val[val['target'] != val['pred']]
error.head()

# +
from mlxtend.plotting import plot_confusion_matrix
from sklearn.metrics import confusion_matrix

# Plot confusion matrix
cm = confusion_matrix(val.target, val.pred)
plt.figure()
plot_confusion_matrix(cm,figsize=(12,8),cmap=plt.cm.Blues)
plt.xticks(range(2), ['Non-Disaster', 'Disaster'], fontsize=16)
plt.yticks(range(2), ['Non-Disaster', 'Disaster'], fontsize=16)
plt.xlabel('Predicted Label',fontsize=18)
plt.ylabel('True Label',fontsize=18)
plt.show()

print('Num False Negatives:',sum((val['target'] == 1) & (val['pred'] == 0)))
print('Num False Positives:',sum((val['target'] == 0) & (val['pred'] == 1)))
# -

# There appears to be more false negatives than false positives from the validation data, meaning that more tweets are being labelled as `not disaster` when in fact they are, even after using `class_weights` to adjust for the imbalance. Perhaps `disaster` tweets can be given even more weighting depending on the goal/purpose of the classification.

# Show a sample of false negatives (true disasters predicted as non-disaster).
for t in error[(error['target'] == 1) & (error['pred'] == 0)]['text'].sample(n=20, random_state=0):
    print(t)

# <a id='Test'></a>
# # 9.
Testing

# +
# The test set gets the SAME meta-feature engineering and cleaning pipeline that was
# applied to the train set earlier in the notebook.

# count number of characters in each tweet
test['char_len'] = test.text.str.len()

# count number of words in each tweet
word_tokens = [len(word_tokenize(tweet)) for tweet in test.text]
test['word_len'] = word_tokens

# count number of sentence in each tweet
sent_tokens = [len(sent_tokenize(tweet)) for tweet in test.text]
test['sent_len'] = sent_tokens

# +
# polarity and subjectivity
test['polarity'] = [TextBlob(tweet).sentiment.polarity for tweet in test.text]
test['subjectivity'] = [TextBlob(tweet).sentiment.subjectivity for tweet in test.text]

#############################################################################################################################
# exclaimation and question marks
test['exclaimation_num'] = [tweet.count('!') for tweet in test.text]
test['questionmark_num'] = [tweet.count('?') for tweet in test.text]

#############################################################################################################################
# count number of hashtags and mentions
# Function for counting number of hashtags and mentions
def count_url_hashtag_mention(text):
    # Returns (url_count, hashtag_count, mention_count) for one tweet.
    urls_num = len(re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text))
    word_tokens = text.split()
    hash_num = len([word for word in word_tokens if word[0] == '#' and word.count('#') == 1]) # only appears once in front of word
    mention_num = len([word for word in word_tokens if word[0] == '@' and word.count('@') == 1]) # only appears once in front of word
    return urls_num, hash_num, mention_num

url_num, hash_num, mention_num = zip(*[count_url_hashtag_mention(tweet) for tweet in test.text])
test = test.assign(url_num = url_num, hash_num = hash_num, mention_num = mention_num)

#############################################################################################################################
# count number of contractions
contractions = ["'t", "'re", "'s", "'d", "'ll", "'ve", "'m"]
test['contraction_num'] = [sum([tweet.count(cont) for cont in contractions]) for tweet in test.text]

# +
## Replace NaNs with 'None'
test.keyword.fillna('None', inplace=True)

#############################################################################################################################
## Expand Contractions
# Function for expanding most common contractions https://stackoverflow.com/questions/19790188/expanding-english-language-contractions-in-python
def decontraction(phrase):
    # specific
    phrase = re.sub(r"won\'t", "will not", phrase)
    phrase = re.sub(r"can\'t", "can not", phrase)

    # general
    phrase = re.sub(r"n\'t", " not", phrase)
    phrase = re.sub(r"\'re", " are", phrase)
    phrase = re.sub(r"\'s", " is", phrase)
    phrase = re.sub(r"\'d", " would", phrase)
    phrase = re.sub(r"\'ll", " will", phrase)
    phrase = re.sub(r"\'t", " not", phrase)
    phrase = re.sub(r"\'ve", " have", phrase)
    phrase = re.sub(r"\'m", " am", phrase)
    return phrase

test.text = [decontraction(tweet) for tweet in test.text]

#############################################################################################################################
## Remove Emojis
# Reference : https://gist.github.com/slowkow/7a7f61f495e3dbb7e3d767f97bd7304b
def remove_emoji(text):
    # Strips characters in the common emoji/pictograph Unicode ranges.
    emoji_pattern = re.compile("["
                           u"\U0001F600-\U0001F64F"  # emoticons
                           u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                           u"\U0001F680-\U0001F6FF"  # transport & map symbols
                           u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                           u"\U00002702-\U000027B0"
                           u"\U000024C2-\U0001F251"
                           "]+", flags=re.UNICODE)
    return emoji_pattern.sub(r'', text)

print(remove_emoji("OMG there is a volcano eruption!!! 😭😱😷"))
test.text = test.text.apply(lambda x: remove_emoji(x))

# +
#############################################################################################################################
## Remove URLs
# NOTE(review): remove_url is defined earlier in the notebook (outside this section).
test.text = test.text.apply(lambda x: remove_url(x))

#############################################################################################################################
## Remove Punctuations except '!?'
def remove_punct(text):
    # Drop all punctuation characters except '!' and '?' (kept as count features above).
    new_punct = re.sub('\ |\!|\?', '', punctuation)
    table=str.maketrans('','',new_punct)
    return text.translate(table)

test.text = test.text.apply(lambda x: remove_punct(x))

#############################################################################################################################
## Replace amp
def replace_amp(text):
    # HTML-escaped ampersands survive cleaning as the bare token ' amp '.
    text = re.sub(r" amp ", " and ", text)
    return text

test.text = test.text.apply(lambda x: replace_amp(x))

#############################################################################################################################

# +
# from wordsegment import load, segment
# load()

# test.text = test.text.apply(lambda x: ' '.join(segment(x)))

# Word segmentation is slow, so a pre-segmented copy of the test set is loaded instead.
test = pd.read_csv('../input/twitter-logo/tweets_test_segmented.csv')

# +
## Lemmatization
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()

def lemma(text):
    # Lowercase and lemmatize each token as a verb (pos='v'), matching the train pipeline.
    words = word_tokenize(text)
    return ' '.join([lemmatizer.lemmatize(w.lower(), pos='v') for w in words])

test.text = test.text.apply(lambda x: lemma(x))

# +
# tokenize
# Reuse the tokenizer fitted on the TRAIN set so indices are consistent.
test_text = test['text']
test_text = tokenizer_1.texts_to_sequences(test_text)

# padding
test_text = pad_sequences(test_text, padding='post', maxlen=50)

print('X_test shape:', test_text.shape)

# +
# lstm prediction
# model.predict(test_text)
lstm_model.load_weights('lstm_model.h5')
submission = test.copy()[['id']]
submission['target'] = lstm_model.predict_classes(test_text)
submission.to_csv('submission.csv', index=False)
display(submission.head())
# -

# bi-lstm attention prediction
model_attn.load_weights('attn_model.h5')
submission_attn = test.copy()[['id']] submission_attn['target'] = model_attn.predict(test_text) submission_attn['target'] = submission_attn['target'].apply(lambda x: 1 if x >=0.5 else 0) submission_attn.to_csv('submission_attn.csv', index=False) display(submission_attn.head()) # + # bert prediction test_input = bert_encode(test.text.values, tokenizer, max_len=160) bert_model.load_weights('bertmodel.h5') submission_bert = test.copy()[['id']] submission_bert['target'] = bert_model.predict(test_input) submission_bert['target'] = submission_bert['target'].apply(lambda x: 1 if x >=0.5 else 0) submission_bert.to_csv('submission_bert.csv', index=False) display(submission_bert.head()) # + # bert + meta-features prediction clf_testpred = clf.predict_proba(test.drop(['id','keyword','location','text'],axis=1)) submission_bert = test.copy()[['id']] submission_bert['target'] = (bert_model.predict(test_input)*0.8).ravel() + (clf_testpred.max(axis=1)*0.2) submission_bert['target'] = submission_bert['target'].apply(lambda x: 1 if x >=0.5 else 0) submission_bert.to_csv('submission_bert_ensemble.csv', index=False) display(submission_bert.head()) # - submission_bert['target'].plot(kind='hist') # <a id='Conclusion'></a> # # 10. Conclusion # https://stackabuse.com/python-for-nlp-movie-sentiment-analysis-using-deep-learning-in-keras/ # # https://www.analyticsvidhya.com/blog/2020/03/pretrained-word-embeddings-nlp/ # # ### attention # https://matthewmcateer.me/blog/getting-started-with-attention-for-classification/ # # ## TO DO # * Word and Char vectorizer # * Remove numbers? Convert numbers to words? # * Unigrams, Bigrams and Trigrams # * Glove; remove stopwords, clean before glove? # * Logistic Regression, BOW, TD IDF, GloVe, BERT? # * Check Duplicates # * Decaying LR
Tweet_Disaster_Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # STOCK PREDICTION AI # ## Author:<NAME> # ## Date:26-11-2021 # ## Program : # ### This Ai will predict closing price and of stock of a company using 100 and 200 days moving average using machine learning. # ### We will create RNN, STACKED LSTM (Long short term memory model) model to predict closing price of a stock. # # LICENSE AND COPYRIGHTS # MIT License # # Copyright (c) 2021 <NAME> # # Permission is here by granted to person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software including with limitation the rights to use. # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# # 1.Visualisation import numpy as np import pandas as pd import matplotlib.pyplot as plt import pandas_datareader as data start = '2010-01-01' end = '2019-12-31' df = pd.read_csv('APPL_STOCK.csv') df.head() df.tail() df = df.reset_index() df.head() df = df.drop(columns=['Date' , 'Adj Close'],axis=1) df.head() plt.plot(df.Close) df ma100 = df.Close.rolling(100).mean() ma100 plt.figure(figsize = (12,6)) plt.plot(df.Close) plt.plot(ma100,'r') ma200 = df.Close.rolling(200).mean() ma200 plt.figure(figsize = (12,6)) plt.plot(df.Close) plt.plot(ma100,'r') plt.plot(ma200,'g') df.shape # # Spliting Data into Training and Testing # # + data_training = pd.DataFrame(df['Close'][0:int(len(df)*0.70)]) data_testing = pd.DataFrame(df['Close'][int(len(df)*0.70): int(len(df))]) print(data_training.shape) print(data_testing) # - data_training.head() data_testing.head() from sklearn.preprocessing import MinMaxScaler Scalar = MinMaxScaler(feature_range=(0,1)) data_training_array = Scalar.fit_transform(data_training) data_training_array data_training_array.shape # + x_train = [] y_train = [] for i in range(100, data_training_array.shape[0]): x_train.append(data_training_array[i-100: i]) y_train.append(data_training_array[i,0]) x_train , y_train = np.array(x_train), np.array(y_train) # - x_train.shape # # LSTM Model(Long short term memory) from keras.layers import Dense, Dropout, LSTM from keras.models import Sequential # + model =Sequential() model.add(LSTM(units = 50, activation = 'relu', return_sequences =True , input_shape =(x_train.shape[1],1))) model.add(Dropout(0.2)) model.add(LSTM(units = 60, activation = 'relu', return_sequences =True )) model.add(Dropout(0.3)) model.add(LSTM(units = 80, activation = 'relu', return_sequences =True )) model.add(Dropout(0.4)) model.add(LSTM(units = 120, activation = 'relu' )) model.add(Dropout(0.5)) model.add(Dense(units= 1)) # - model.summary() # # model compilation or load model which you made before # # ### I'm loading my model which i 
allready made it before. # ### Optimizer which is used in this model is "adam".Use the below code to compile model.Make sure that you must have a good GPU in your local system. # #### model.compile(optimizer='adam', loss='mean_squared_error') # #### model.fit(x_train, y_train, epochs=50) # # # + model.load_weights('stock_price_prediction.h5') # - data_testing.head() data_training.tail(100) past_100_days = data_training.tail(100) final_df = past_100_days.append(data_testing, ignore_index=True) final_df.head() input_data = Scalar.fit_transform(final_df) input_data input_data.shape # + x_test = [] y_test =[] for i in range(100, input_data.shape[0]): x_test.append(input_data[i-100: i]) y_test.append(input_data[i, 0]) # - x_test, y_test = np.array(x_test), np.array(y_test) print(x_test.shape) print(y_test.shape) # # Make Predictions using our lstm model y_predict = model.predict(x_test) y_test y_predict Scalar.scale_ scale_factor =1/0.02099517 y_predict = y_predict*scale_factor y_test = y_test*scale_factor # # Visualising the original price and predected price plt.figure(figsize=(12,6)) plt.plot(y_test, 'b' , label = 'Original Price') plt.plot(y_predict, 'r' , label = 'Predicted price by ai') plt.xlabel('Time') plt.ylabel('Price') plt.legend() plt.show()
STOCK PREDICTION AI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (oled) # language: python # name: oled # --- import numpy as np import matplotlib.pyplot as plt import os # %matplotlib inline import win32com.client import py2origin as py2o fig,ax=plt.subplots(figsize=(4,4)) x = np.arange(-10,10.1,0.1) y = np.cos(x) plt.plot(x,y,label='lineplot') x = np.arange(-10,10.1,0.1) y = np.sin(x) plt.plot(x,y,'o',label='symplot',markerfacecolor='white') plt.plot(x,y**2,'-s',label='line+sym') plt.xlabel('Time (s)') plt.ylabel('Signal (a.u.)') plt.legend() plt.title('Example Plot') origin = py2o.matplotlib_to_origin( fig,ax, origin=None,origin_version=2018, worksheet_name='Sheet1',workbook_name='Book1', template_name='Spectra_Square.otp', template_path=os.path.abspath('OriginTemplates')) origin.Exit()
Matplotlib_to_OriginLab_Examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="-Tki_ZzoROTC" colab_type="text" # # COVID19 California Data + Facebook Prophet # # # # # + id="UFgfXnIHN0sg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 191} outputId="4da1cae8-d703-4a9e-8df2-acd54f40ea3f" import pandas as pd from fbprophet import Prophet df = pd.read_csv('https://raw.githubusercontent.com/danielcaraway/data/master/ca_train.csv') df.head() # + [markdown] id="zshiRr4FRMwk" colab_type="text" # ## STEP 1: Split into DS & Y # ### We're starting with `ConfirmedCases` as Y # + id="3IBiND7aOXdT" colab_type="code" colab={} sm = df[['Date', 'ConfirmedCases']] sm.columns = ['ds','y'] # + id="saV0NwRTOyR4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="23bd9807-7840-4fb3-af6a-5f4434cf5624" m = Prophet() m.fit(sm) # + id="qve1mFnGPQSN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 191} outputId="4f4ebd95-2ed6-44e9-b610-65afc41be5b8" future = m.make_future_dataframe(periods=365) future.tail() # + id="jkCriCqFPLNp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 441} outputId="3cd72d5d-b241-48f7-8ec7-89559a5f8429" forecast = m.predict(future) fig1 = m.plot(forecast) # + [markdown] id="OxpOT21gRpqY" colab_type="text" # ### * NOTE: Wow that is ridiculously unhelpful. Clearly 365 was too much. 
Turning it into a function for more rapid testing # + id="Wjf9xilmQhGE" colab_type="code" colab={} def get_prof_preds_for(df, n): m = Prophet() m.fit(df) future = m.make_future_dataframe(periods=n) forecast = m.predict(future) fig1 = m.plot(forecast) # + id="nHL2atEASE3Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 476} outputId="c025da9b-17fa-41e3-a8ad-14bad1ad2cf4" get_prof_preds_for(sm, 60) # + id="nuPPIZWrSHMw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 476} outputId="4445a844-f8d7-4abc-e59e-c7da2f8e4997" get_prof_preds_for(sm, 30) # + id="shex77NgSLIN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 476} outputId="7762c9a6-3902-46bb-fc85-2a7a54fd8e0f" get_prof_preds_for(sm, 10) # + [markdown] id="T78DBIloSQtk" colab_type="text" # #### Trimming the training data # + id="GQco8mTOVh8T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="ec827aca-666d-4278-f501-574811ba438d" sm # + id="950y8c8KSNIR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 476} outputId="87c800d7-1a0d-435c-8c2c-872d9a34c33c" get_prof_preds_for(sm[20:],10) # + id="FUEcBLv-SXvy" colab_type="code" colab={} def get_prof_preds_for(df, n): m = Prophet(daily_seasonality=True) m.fit(df) future = m.make_future_dataframe(periods=n) forecast = m.predict(future) fig1 = m.plot(forecast) # + id="EqbUff5jS7cm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 458} outputId="56d860ca-b5d3-4bf6-af0d-f748a78320af" get_prof_preds_for(sm[20:],10) # + id="eiLkPLhWS78K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 476} outputId="f8a3b489-e247-4f96-f800-183ce807f752" get_prof_preds_for(sm[40:],10) # + [markdown] id="eYxWeIkaTWac" colab_type="text" # #### OK looks like we are getting somewhere # + id="r_7MWtPATJ_D" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 476} 
outputId="58bca39c-278e-4edc-a941-4a653f438089" get_prof_preds_for(sm[40:],60) # + id="kR0xPo7pTM_X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 493} outputId="48c2cb16-be89-45c2-e69e-9b5b0e1bbb14" get_prof_preds_for(sm[47:],30] # + id="A2OCpF-lT8Y1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 493} outputId="a8b08ac6-b13a-45e2-d799-ee300181b0aa" get_prof_preds_for(sm[47:],20) # + [markdown] id="sPxceubjUM_k" colab_type="text" # # STEP 1B: Try with Fatalities # + id="ZaBlEmJ0U-QH" colab_type="code" colab={} sm = df[['Date', 'Fatalities']] sm.columns = ['ds','y'] # + id="rMtx5AckVBA2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 458} outputId="ad9897d3-1c4a-42ce-e0b1-36e24e613712" get_prof_preds_for(sm, 20) # + id="BDp4sSD2VJ2Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8277e661-98b9-474f-e194-f0afcae32cbd" sm # + id="xl3nbQRBVMFV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 493} outputId="68a3ebee-e637-4ce3-aff4-b1cfe83864f6" get_prof_preds_for(sm[47:], 20) # + id="iKLSZT5aVTpl" colab_type="code" colab={}
assets/covid19/PROJECT_COVID19_colab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/joaoassispadilha/topicosavancados/blob/master/Aula_02_colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Uv_uTtddB36Y" colab_type="text" # # Introdução # # Seja bem vindo a nossa aula de Ciência de Dados. # # Abaixo você pode ver que podemos escrever código Python e mesclar com informações textuais. # # + [markdown] id="ogmYBFYKCgoh" colab_type="text" # # Aula 1 # + id="KXFAtGrv8r-6" colab_type="code" outputId="72afa935-e3f6-4171-f038-63d97c87df92" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 51} print("<NAME>") print("Aula do dia 05-05-2020") # + [markdown] id="5pHuEREL-aDL" colab_type="text" # Nesta aula vamos trabalhar com informações sobre filmes, e para isto vamos utilizar os dados vindos do site Movie Lens. (Mais especificamente https://grouplens.org/datasets/movielens/) # # Abaixo um exemplo de variável em Python. # + id="bM10rgQa-jkm" colab_type="code" cellView="both" colab={} nome_do_filme = "A volta dos que não foram" # + [markdown] id="nP8ohCnQ_BOJ" colab_type="text" # Na linha acima, criamos uma variável, e podemos utilizá-la abaixo. 
# + id="cg6IOpcA-6hW" colab_type="code" outputId="28247c36-7f30-44b2-d69c-08e2977141d9" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} print(nome_do_filme) # + id="BZyQ6k8h_NId" colab_type="code" outputId="f4fed3a8-640b-4b6c-ca66-b0b8f66a956f" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} nome_do_filme # + [markdown] id="Rg9A4qjj_fiX" colab_type="text" # ## Lendo os dados do MovieLens # # Primeiramente, baixamos as informações do site (https://grouplens.org/datasets/movielens/) e em seguida adicionamos os arquivos no GitHub. # # Com os dados salvos no GitHub, utilizamos a biblioteca Pandas para importá-los para nossa aplicação. # + id="87SqulRa_oSi" colab_type="code" cellView="both" colab={} import pandas as pd # + [markdown] id="c88BSSx0Rr-k" colab_type="text" # A função `read_csv` faz o processo de baixar o arquivo e importar os dados para um `DataFrame`. # + id="CbasfLPABwuP" colab_type="code" outputId="1a37bdfc-9040-4292-b298-93750a7b5c1f" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} filmes = pd.read_csv("https://github.com/joaoassispadilha/topicosavancados/raw/master/movies.csv") # Ver o tipo de dado type(filmes) # + [markdown] id="MB-zmYATR6Ac" colab_type="text" # Como estamos utilizando um dataset em inglês, podemos alterar os nomes das colunas do nosso dataset para português. # # Também utilizamos a função `head` que serve para mostrar apenas os primeiros elementos do nosso dataset. # + id="69yrxR22Cqtw" colab_type="code" outputId="d7c1363b-57fc-4ea0-c9e3-7cd066f86b81" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 204} # Alterar nomes das colunas filmes.columns = ["filmeId", "titulo", "generos"] # Mostra os 5 primeiros itens filmes.head() # + [markdown] id="9ayEJo2lSODc" colab_type="text" # De forma similar ao `head`, a função `tail` mostra apenas os últimos elementos do nosso dataset. 
Note que podemos passar um número que indica a quantidade de elementos a serem mostrados. # + id="G_PHt1iZDLGf" colab_type="code" outputId="7cf6eba2-05d6-48c4-cd56-d7054522aee1" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 359} # Mostra os 10 últimos itens filmes.tail(10) # + [markdown] id="V8xrlGBqSiQe" colab_type="text" # Utilizando o ponto de interrogação `?` na frente de uma variável ou função, podemos ler a documentação da mesma dentro da ferramenta Colab. # + id="yjF4waJ5EG-R" colab_type="code" cellView="both" colab={} # lendo a documentação de um método/atributo # ?filmes.head # + id="W_ATzbinEZP1" colab_type="code" cellView="both" colab={} # lendo a documentação do tipo # ?filmes # + [markdown] id="21o8sXjpE3Ne" colab_type="text" # ## Lendo informações das avaliações # # Após trabalharmos um pouco com os dados básicos dos filmes, chegou a hora de baixar as avaliações dados pelos usuários para cada filme. # # Note que neste dataset, apenas informações numéricas estão presentes. Logo mais juntaremos os dois datasets (filmes e avaliações) em uma única variável. # + id="q6BlfwuAE9Cs" colab_type="code" outputId="442f307e-1a99-464e-dca6-eaec56276d7b" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 419} avaliacoes = pd.read_csv("https://github.com/joaoassispadilha/topicosavancados/raw/master/ratings.csv") avaliacoes # + [markdown] id="mJqcQwk4FxQg" colab_type="text" # Mas antes, vamos consultar algumas informações a respeito destes dados. # # É possível verificar o formato do nosso dataset a partir do atributo `shape`, conforme abaixo. 
# + id="2KlsJbPPFwBO" colab_type="code" outputId="ff20a0b8-a9ac-48b1-ed48-d81cf64393c0" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} # Os dados das avaliações estão organizados em 100836 linhas e 4 colunas avaliacoes.shape # + [markdown] id="GVUDsbc5TSIL" colab_type="text" # Você pode notar que o atributo `shape` retornou o número de linhas e o número de colunas do nosso dataset. # # Podemos obter informação similar utilizando a nossa conhecida função `len`, que apresenta o total de linhas disponível neste dataset. # + id="QUm49eDlGSYJ" colab_type="code" outputId="fefb962d-33ed-48bb-dea9-efdd8c2c3d08" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} len(avaliacoes) # + [markdown] id="tCsvpUnYGhpZ" colab_type="text" # Novamente vamos alterar os nomes das colunas (agora das avaliações) para português, para ficar mais fácil nossa interpretação. # + id="pDiX4rzxGmTm" colab_type="code" outputId="c8319d63-a2bf-47b4-c14f-cb416c997632" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 204} avaliacoes.columns = ["usuarioId", "filmeId", "nota", "momento"] avaliacoes.head() # + [markdown] id="aQJdZxmwG6BZ" colab_type="text" # Já visualizamos várias informações do nosso dataset, não é? # # Podemos aplicar um filtro neste dataset de avaliações, para visualizar as informações referentes a um único filme. Para isto, utilizamos a função `query`, passando como parâmetro a informação que queremos filtrar. # + id="xcWXdiiRHAKD" colab_type="code" outputId="e9c6b9b2-2476-4978-aec7-c37e1dbdd704" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 204} avaliacoes_do_filme_1 = avaliacoes.query("filmeId == 1") avaliacoes_do_filme_1.head() # + [markdown] id="21GUvrmeUEwe" colab_type="text" # Podemos também obter várias informações estatísticas da nossa base dados através da função `describe`. Ela nos traz as informações de contagem, média, mediana, desvio padrão, valor mínimo e máximo, etc. 
# + id="jkXj_YbYHcWc" colab_type="code" outputId="502d89e8-88e5-485c-8495-a7a96ac58ba1" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 297} avaliacoes_do_filme_1.describe() # + [markdown] id="Ry-yqVY1H-E3" colab_type="text" # Se quisermos, também é possível consultar apenas uma informação estatística por vez, conforme vemos a chamada da função `mean` abaixo, que traz a média das informações de um filme. # + id="HukKK4epICs1" colab_type="code" outputId="b56d9872-2424-42ae-a268-af89421302ee" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 102} avaliacoes_do_filme_1.mean() # + [markdown] id="menHWIWUUpDy" colab_type="text" # Como vimos, a média de todas as informações de um filme não faz muito sentido. O que queremos mesmo é saber a média das notas de um determinado filme. Para isto, vamos fazer a média apenas da coluna de notas. # + id="hhheIRm0IC7d" colab_type="code" outputId="8742fd5f-c655-4b66-bf37-f6728286d076" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 119} avaliacoes_do_filme_1["nota"].head() # + [markdown] id="SbI6xWtuU3eA" colab_type="text" # Aqui é onde se calcula a média das notas do filme 1. # + id="bOHmgR90IRj2" colab_type="code" outputId="cd0562cb-f26d-47b6-caab-1811610d6f80" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} avaliacoes_do_filme_1["nota"].mean() # + [markdown] id="wkadWvTMU8mF" colab_type="text" # Poderiamos também querer visualizar as médias das notas de todos os filmes do nosso dataset. # + id="EDPKtxctIpgX" colab_type="code" outputId="d91a6955-809d-4f6a-c8da-faf76c55762e" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} avaliacoes["nota"].mean() # + [markdown] id="pkOBemZKVDUb" colab_type="text" # Ou também, poderiamos querer visualizar as médias de cada filme separadamente. 
# # Ao invés de criar uma variável para cada filme, como fizemos acima, podemos utilizar a função `groupby`, que tem por objetivo agrupar informações iguais a partir de uma determinada coluna. Vejamos o exemplo abaixo. # + id="V8h1Z5TGI9Kx" colab_type="code" outputId="889e1a8c-8038-4194-d81f-f3030cbb8cfe" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 136} notas_medias_por_filme = avaliacoes.groupby("filmeId")["nota"].mean() notas_medias_por_filme.head() # + [markdown] id="QlYXfUdEVZDF" colab_type="text" # ## Juntar as informações de dois datasets # # Também é possível juntar as informações de dois datasets. # # Para isto, utilizamos a função `join`, passando qual informação queremos juntar, e qual é a coluna que deve ser utilizada como referência. Veja o exemplo abaixo. # + id="7yGQ8PL8KBnF" colab_type="code" outputId="fcd5a123-e198-4784-934b-f80173c0fcfb" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 204} filmes_com_media = filmes.join(notas_medias_por_filme, on="filmeId") filmes_com_media.head() # + [markdown] id="HRyGYS7QKoOW" colab_type="text" # As vezes é interessante ordenar as informações para uma melhor visualização. # # Para ordenar os dados, utilizamos a função `sort_values`, e passamos a coluna que queremos ordenar. O parâmetro `ascending` indica se é para ordenar de forma crescente ou decrescente. # + id="LaSawGP2KvIo" colab_type="code" outputId="fa67e3a4-b251-42e0-d7a6-9c8f01e9541c" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 514} filmes_com_media.sort_values("nota", ascending=False).head(15) # + [markdown] id="WhELf5coLYKr" colab_type="text" # ## Melhorando a apresentação das informações utilizando gráficos # # Já fizemos algumas avaliações interessantes, porém mostramos estas informações apenas em forma de texto. # # Seria interessante aprendermos a plotar gráficos também. 
# # A própria biblioteca `Pandas` possui diversmos mecanismos para apresentação de gráficos, de forma bem facilitada. # # Vamos criar um gráfico bem simples como exemplo usando a função `plot`. # + id="jR2ghyb6Ldmz" colab_type="code" outputId="a0ed8954-3179-4158-9aa0-eb201b67fa23" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 282} # Este gráfico não faz muito sentido com os nossos dados. avaliacoes_do_filme_1["nota"].plot() # + [markdown] id="czEH-4_zMAhW" colab_type="text" # Vejam que interessante. Com uma linha de código fomos capazes de desenhar um gráfico a partir das informações do nosso dataset. Porém, o gráfico escolhido não parece ser o mais adequado, não é? # # Vamos alterar o tipo de gráfico para visualizar melhor as informações. Para alterar o tipo de gráfico, podemos utilizar a opção `kind`, e passar o tipo do gráfico que queremos. Neste caso utilizamos o tipo de gráfico 'histograma'. Veja também que podemos utilizar a opção `title` para personalizar nosso gráfico. # + id="tIOQyBYnMEpH" colab_type="code" outputId="f49991f0-d6d7-4963-d93f-8c9feb48df07" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 298} avaliacoes_do_filme_1["nota"].plot(kind='hist', title='Avaliação do filme Toy Story') # + [markdown] id="aH10IVnlXS7c" colab_type="text" # O Python possui bibliotecas muito ricas para manipulação de gráficos. Uma delas é a `matplotlib`. Podemos utilizá-la para personalizar um pouco mais nosso gráfico. # + id="rEB_NGyNM0EJ" colab_type="code" outputId="d39dd36d-8fc2-491c-da9e-a4a274cf9886" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 281} import matplotlib.pyplot as plt avaliacoes_do_filme_1["nota"].plot(kind='hist') plt.title("Avaliações do filme Toy Story") plt.show() # + [markdown] id="2r3e4acoXfR0" colab_type="text" # Vamos plotar mais alguns gráficos agora para outros filmes. 
Vejam que estamos utilizando uma abordagem 'inline', onde aplicamos um filtro com a função `query`, pegamos apenas a coluna de "nota" e por fim plotamos um gráfico tipo 'histograma' utilizando a função `plot`. # + id="RAIvUMtaNR73" colab_type="code" outputId="d4096cb6-e9c9-49f1-c6ef-3616049c339b" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 282} avaliacoes.query("filmeId == 2")["nota"].plot(kind='hist') # + [markdown] id="zj9gKuJbXzzx" colab_type="text" # Apenas mais um exemplo de gráfico de um filme com apenas uma nota. Este gráfico não parece muito interessante, não é? # + id="GxqmJsM5Nfoc" colab_type="code" outputId="0a367ab7-52e4-42a8-955e-249606290a92" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 282} avaliacoes.query("filmeId == 102084")["nota"].plot(kind='hist') # + [markdown] id="6mPFUebgDMsR" colab_type="text" # ## Desafios # + [markdown] id="gde1QnUdX9iC" colab_type="text" # Chegou a hora de você praticar e aprender mais algumas coisas por conta própria. Para isto, temos que resolver os seguintes desafios. # + [markdown] id="dyrID-NlOeMV" colab_type="text" # **Desafio 1** # Existem 18 filmes sem avaliação. Determine se são mesmo 18 filmes, e qual o nome deles. # + id="PTT0VbZC0Vh6" colab_type="code" outputId="cb49aaa0-9bac-44bc-a5d0-fb4f87f3e638" colab={"base_uri": "https://localhost:8080/", "height": 624} filmes_sem_nota = filmes_com_media.query("nota.isnull()", engine="python") print("existem", len(filmes_sem_nota), "filmes sem nota. são eles: ") filmes_sem_nota # + [markdown] id="5H96avR-OxR1" colab_type="text" # **Desafio 2** # Alterar a coluna "nota" para "media". 
# + id="qDzLYg4xO3hc" colab_type="code" outputId="971b6832-da61-49da-af8c-fb1489905cbd" cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 204} filmes_com_media.columns = ["filmeId", "titulo", "generos", "media"] filmes_com_media.head() # + [markdown] id="Al21BZNRPDi1" colab_type="text" # **Desafio 3** # Colocar o número de avaliações por filme, não só a média, mas o TOTAL de votos por filme. # + id="rkqapfTI5Qpz" colab_type="code" outputId="daf37dc0-ed8a-4404-fa09-e26427177cf2" colab={"base_uri": "https://localhost:8080/", "height": 419} votos_por_filme = avaliacoes.groupby("filmeId")["nota"].count() filmes_com_votos = filmes_com_media.join(votos_por_filme, on = "filmeId") filmes_com_votos.columns = ["filmeId", "titulo", "generos", "media", "votos"] filmes_com_votos # + [markdown] id="SVXlQww1PUAJ" colab_type="text" # **Desafio 4** # Arredondar as médias (coluna de nota média) para duas casas decimais. # + id="4DH0Gs3k-SG9" colab_type="code" outputId="fd9cf054-f8d7-4feb-f4cc-960d98acb9ef" colab={"base_uri": "https://localhost:8080/", "height": 419} filmes_com_votos.round({"media": 2}) # + [markdown] id="ldcUxMBJH8uB" colab_type="text" # # Aula 2 # + [markdown] id="254gdIC9BAe-" colab_type="text" # Nesta aula vamos estudar com mais profundidade as técnicas de centralidade, conhecer algumas boas práticas de visualização de dados e o famoso **boxplot**. # # Para iniciar vamos precisar relembrar como os dados estavam configurados. # + [markdown] id="wIXXO9w_Bcsy" colab_type="text" # Temos os títulos e uma coluna com os respectivos gêneros, todos em uma única coluna, cada *label* é separado com um | (Adventure|Children|Fantasy) sendo do tipo *String*. 
# + id="lf8_5cklK-QB" colab_type="code" outputId="79c4c3db-65c8-4376-aca1-f233f8c48bb4" colab={"base_uri": "https://localhost:8080/", "height": 204} filmes.head() # + id="aefyKQiECJl8" colab_type="code" outputId="7a089727-578c-4e40-f1eb-370aee8b1f78" colab={"base_uri": "https://localhost:8080/", "height": 221} filmes["generos"] # + [markdown] id="mNZbmaMYB4Yb" colab_type="text" # Gostariamos de fazer uma contagem a partir dos gêneros, para saber, por exemplo, a quantidade de gêneros, quantidade de filmes por gênero, etc. # # Vamos aplicar a função `get_dummies` para nos ajudar no processo. # + id="rU8003iYBnmj" colab_type="code" outputId="4f913984-00aa-4b3a-86ef-a4e99e1a78fc" colab={"base_uri": "https://localhost:8080/", "height": 473} filmes["generos"].str.get_dummies('|') # + [markdown] id="yd7qIRyACfx7" colab_type="text" # Com apenas uma linha de código, foi gerada essa tabela, com várias linhas, colunas e números. # # Como pode ser visto, a saída é um `DataFrame`, cada coluna correspondendo a um determinado gênero. Os valores `1` indicam que o filme possui a categoria, e valores `0` indicam o contrário. # # Até aqui, resolvemos parte do problema, agora precisamos somar quantos `1` cada coluna tem. # + id="EZrYDzkqCiiZ" colab_type="code" outputId="0276e8d1-cb0a-4fc1-e6b9-3ed37e0a4ead" colab={"base_uri": "https://localhost:8080/", "height": 374} filmes["generos"].str.get_dummies('|').sum() # + [markdown] id="mgr6UL3pC4d0" colab_type="text" # Conseguimos verificar quantas vezes cada gênero aparece. Assim, fica fácil de responder perguntas como, qual o gênero com mais ou menos filmes produzidos? Qual o segundo? # # Se olharmos diretamente para esta tabela, não é tão fácil visualizar estas informações, pois elas não estão ordenadas. Vamos melhorar isso, ordenando estes dados. 
# + id="mWL6UAK0C6vm" colab_type="code" outputId="ea621a9b-3306-4502-f4b4-c29ffca24068" colab={"base_uri": "https://localhost:8080/", "height": 374} filmes["generos"].str.get_dummies('|').sum().sort_values(ascending=False) # + [markdown] id="J9QcWgObFgVQ" colab_type="text" # Para não precisar ficar repetindo toda vez este código todo, vamos atribuir estas informações em uma variável. # + id="7d616Ek5DN-R" colab_type="code" outputId="7365af2f-a4d5-475f-c118-238420e02a7d" colab={"base_uri": "https://localhost:8080/", "height": 374} filmes_por_genero = filmes["generos"].str.get_dummies('|').sum().sort_values(ascending=False) filmes_por_genero # + [markdown] id="-H-qF4_7Dio0" colab_type="text" # Ótimo, agora tudo ficou mais fácil. # # A partir dos dados, conseguimos responder a vários questionamentos. Mas ainda podemos melhorar a forma de expor nossas informações, certo? # # Vamos gerar um gráfico com estes dados. # + id="a6dmvQ_-Dmsl" colab_type="code" outputId="9d5803df-4bc4-4b59-bb7b-1fc031ef2989" colab={"base_uri": "https://localhost:8080/", "height": 282} filmes_por_genero.plot() # + [markdown] id="Whp0bdvtDtgl" colab_type="text" # Iniciamos usando o `plot` padrão do `Pandas`, e como percebemos, não adianta apenas gerar um gráfico, é preciso que ele faça sentido para a informação que queremos analisar. Neste caso, um gráfico de linhas não está fazendo muito sentido. # # Temos um gráfico muito conhecido que sempre vemos por aí, o famoso gráfico de pizza. # # Vamos gerar um gráfico deste para ver como fica. # + id="H126bA_rD8SW" colab_type="code" outputId="75c28e2f-92d7-4182-e58d-f025b0a1a1df" colab={"base_uri": "https://localhost:8080/", "height": 482} filmes_por_genero.plot(kind='pie', title='Categorias de filmes e suas presenças relativas', figsize=(8, 8)) plt.show() # + [markdown] id="XxktGU5aEwbk" colab_type="text" # E aí, o que achou? # # Conseguimos notar que Drama, Comedy, Thriller e Action tem proporções grandes, mas qualquer outra análise fica complicada. 
# # As cores se repetem, e fica difícil de mensurar os gêneros de filmes com as fatias melhores, além do gráfico sobrepor alguns textos dos labels. # # Para facilitar nossa análise, vamos tentar outro tipo de gráfico, que deixa bem claro a diferença entre um gênero e outro. Faremos então um gráfico de barras. # # Veja como é bem simples mudar o tipo de um gráfico. # # # # # + id="eW9OVaXME2Op" colab_type="code" outputId="572d220d-a0f9-4fb0-cc04-baffbfce3816" colab={"base_uri": "https://localhost:8080/", "height": 360} filmes_por_genero.plot(kind='bar') # + [markdown] id="M35tpIzYFzbF" colab_type="text" # Podemos ainda gerar este gráfico de diferentes formas, alterando cores e diversos elementos visuais. # # Para isto usaremos outra biblioteca para geração de gráficos do Python, chamada `seaborn`. # # Porém, antes vamos verificar duas outras funcionalidades da nossa estrutura de dados. # # Temos como visualizar apenas os índices (gêneros) a partir do comando `index`. # + id="wvwHPg_3GMQ4" colab_type="code" outputId="960cc225-35f5-4f42-8042-b35e320f72dc" colab={"base_uri": "https://localhost:8080/", "height": 102} filmes_por_genero.index # + [markdown] id="Fn75x0BGHkWe" colab_type="text" # E também podemos verificar só os valores, a partir do comando `values`. # # Isto será importante na hora de montarmos nosso gráfico. # + id="QmwLV34VGRX_" colab_type="code" outputId="23f4ad9e-e4ba-472a-bbcc-d5d41f91f84a" colab={"base_uri": "https://localhost:8080/", "height": 51} filmes_por_genero.values # + [markdown] id="Qe55ZDj4HtL8" colab_type="text" # Agora, através da utilização da biblioteca `seaborn` vamos tentar melhorar ainda mais nossa apresentação dos dados. # # Vamos tentar deixar mais evidente a diferença entre um gênero e outro utilizando uma espécie de *mapa de calor*, indicando quanto mais escura a cor, maior é a quantidade de filmes daquele gênero. 
# + id="-4LwAmyUF2oG" colab_type="code" outputId="5da5fbc4-e2b7-4d93-8853-aa694371d1c1" colab={"base_uri": "https://localhost:8080/", "height": 537} import seaborn as sns sns.set_style("whitegrid") plt.figure(figsize=(16,8)) sns.barplot(x=filmes_por_genero.index, y=filmes_por_genero.values, palette=sns.color_palette("BuGn_r", n_colors=len(filmes_por_genero) + 10)) plt.show() # + [markdown] id="-EwOWvNcIJMJ" colab_type="text" # Veja que utilizamos agora o `seaborn` para gerar o gráfico, e passamos um parâmetro indicando a paleta de cores que queremos utilizar (`color_palette`). Vocês podem pesquisar na internet sobre as paletas de cores do `seaborn` e descobrir diversas outras variações. # + [markdown] id="LQ90eG9IIZY4" colab_type="text" # Com o que vimos até agora, conseguimos tirar diversas conclusões trabalhando com a visualização de gêneros. Será que podemos utilizar visualizações para entender melhor as notas de um filme? # # Vamos relembrar alguns pontos que já discutimos e nos aprofundar nas análises de notas para tirar conclusões mais sofisticadas. # # Na primeira aula, calculamos as notas médias por filmes, vamos dar uma olhada no resultado. # + id="sf1cab0GIgix" colab_type="code" outputId="0480ebdf-33a1-42be-e74c-e8146d31a04a" colab={"base_uri": "https://localhost:8080/", "height": 419} filmes_com_media # + [markdown] id="McTWwmowIyLZ" colab_type="text" # Como vimos, olhar apenas para as médias pode ser um problema e para interpretar um pouco melhor os dados, vamos utilizar o gráfico de histograma para comparar as notas de alguns filmes. Por exemplo, **Toy Story** e **Jumanji**. 
# + id="wJRwRMn7I1dW" colab_type="code" outputId="c74dc0bd-ab5c-4bb9-8568-56fed7689a3a" colab={"base_uri": "https://localhost:8080/", "height": 299} notas_do_filme1 = avaliacoes.query("filmeId == 1")["nota"] print(notas_do_filme1.mean()) notas_do_filme1.plot(kind='hist') # + id="fa4TsqT6JPCa" colab_type="code" outputId="2a8be841-81c2-4810-995e-d649e5d60d64" colab={"base_uri": "https://localhost:8080/", "height": 299} notas_do_filme2 = avaliacoes.query("filmeId == 2")["nota"] print(notas_do_filme2.mean()) notas_do_filme2.plot(kind='hist') # + [markdown] id="a0IeY8z7Jhgb" colab_type="text" # Os dois filmes mostrados acima tem médias próximas, mas com comportamento de notas diferentes. # # Vamos discutir um pouco sobre o problema da média, com o exemplo abaixo. Olhando apenas para a média dos salários não conseguimos evidenciar a desigualdade de salários entre as pessoas das duas cidades. # + id="TEmgyvAnJnJQ" colab_type="code" outputId="15bb2e94-a804-446d-881b-33e005e00c1f" colab={"base_uri": "https://localhost:8080/", "height": 51} # Cidade A populacao = 1000 salario1000 = 1100 media = (salario1000 * 1000) / populacao print(media) # Cidade B populacao = 1000 salario1 = 1000000 salario999 = 100 media = (salario1 * 1 + salario999 * 999) / populacao print(media) # + [markdown] id="VVGZzkBsKhRu" colab_type="text" # Vamos buscar filmes com médias muito mais próximas para analisar outras métricas além das médias. # + id="xLKRi75jKk6r" colab_type="code" outputId="e97971b8-8225-4dc9-a161-2f7352f25295" colab={"base_uri": "https://localhost:8080/", "height": 1000} filmes_com_media.sort_values("media", ascending=False)[2450:2500] # + [markdown] id="rilDVWpmK6DA" colab_type="text" # No exemplo acima, ordenamos e fatiamos os dados entre 2450 e 2500, onde encontramos médias similares. Vamos comparar dois filmes: Wizard of Oz, **filmeId=919** e Little Miss Sunshine, **filmeId=46578**. 
#
# Para não precisar ficar copiando e colando toda hora, vamos criar nossa primeira função, assim podemos passar o código do filme e temos as informações desejadas.

# + id="bAgeWzYSLAk9" colab_type="code" colab={}
def plot_filme(n):
    """Plot a histogram of the ratings of movie ``n`` and return its stats.

    Parameters: n — the filmeId to look up in the module-level ``avaliacoes``
    DataFrame. Returns the ``describe()`` summary (count/mean/quartiles) of
    that movie's "nota" column.
    """
    notas_do_filme = avaliacoes.query(f"filmeId=={n}")["nota"]
    notas_do_filme.plot(kind='hist')
    return notas_do_filme.describe()

# + [markdown] id="zhxbhdKrLWsB" colab_type="text"
# Agora podemos fazer uso desta função:

# + id="OaPTHrQgLbp6" colab_type="code" outputId="b4c7570c-520a-463f-c551-4a36ed705bfb" colab={"base_uri": "https://localhost:8080/", "height": 418}
# Wizard of Oz (filmeId 919)
plot_filme(919)

# + [markdown] id="ZqHZqJ1wLbDu" colab_type="text"
# A função `plot_filme`, além de gerar o histograma também retorna algumas estatísticas. Vamos chamar a mesma função agora para o filme Little Miss Sunshine.

# + id="sl1NwNC_Lxl7" colab_type="code" outputId="0370b9f8-3f5c-417a-d78c-e1d8ba82df10" colab={"base_uri": "https://localhost:8080/", "height": 421}
# Little Miss Sunshine (filmeId 46578)
plot_filme(46578)

# + [markdown] id="iOroueX1L_l_" colab_type="text"
# Ótimo, agora com essas informações conseguimos comparar melhor ambos os filmes. Analisando os histogramas, vemos que muitas pessoas realmente amam **Wizard of Oz** (notas 5), mas também temos pessoas que odeiam (notas 1). Quando comparamos com o histograma do filme **Little Miss Sunshine**, percebemos que os resultados se concentram entre valores medianos (notas 2 até 4).
#
# O que confirma nossa análise aqui é comparar os **25%, 50% e 75%**. 50% representa o valor da mediana, e ambos dos filmes tem a mesma mediana, mas 25% e 75% diferentes. Se quiser mais detalhes sobre a estatística destes itens, consulte: [**1º, 2º e 3º quartis**](https://pt.wikipedia.org/wiki/Quartil)
#
# A ideia de mesclar os gráficos com as estatísticas ajuda a interpretar melhor os dados. Mas o que precisamos é uma imagem que nos ajude a interpretar os dados de forma ainda melhor. O gráfico que nos ajuda neste caso é o `boxplot`. Vamos adaptar nossa função anterior para adicionar a geração deste gráfico.

# + id="gZqf-VJDMNzi" colab_type="code" outputId="612662be-99e5-461b-9d27-ac4b5facd770" colab={"base_uri": "https://localhost:8080/", "height": 904}
def plot_filme(n):
    """Plot the histogram AND the boxplot of movie ``n``'s ratings.

    Redefines the earlier ``plot_filme`` to also render a boxplot, as the
    lesson text explains. Returns the ``describe()`` summary so the quartiles
    can be compared against the boxplot.
    """
    notas_do_filme = avaliacoes.query(f"filmeId=={n}")["nota"]
    notas_do_filme.plot(kind='hist')
    plt.show()
    print("\n")  # blank separator between the two charts
    notas_do_filme.plot.box()
    plt.show()
    # NOTE(review): a leftover debug `print(notas_do_filme)` that dumped the
    # entire raw ratings Series was removed here — the lesson only discusses
    # the plots and the describe() summary.
    return notas_do_filme.describe()

plot_filme(919)

# + [markdown] id="S_CRIIqbNAtQ" colab_type="text"
# Você pode notar como é simples criar um `boxplot` com a biblioteca `pandas`.
#
# Apenas chamamos o método `.plot.box()`. Vamos tentar interpretar este gráfico.
#
# Vamos focar primeiro na caixinha que aparece neste gráfico. A linha verde que divide a caixa em dois é a mediana (compare com as estatísticas geradas pelo `describe`), a parte superior da caixa é o 3º quartil (75%) e a parte inferior é o 1º quartil (25%).
#
# Agora repare nos limites inferior e superior, representados pelas extremidades em preto. Por coincidência, nesta imagem os limites inferior e superior são equivalentes ao ponto de máximo e mínimo, mas nem sempre será assim, pois esse limite superior e inferior são calculados e dependem do 1º e 3º quartil. Algumas vezes os limites podem sobrepor os extremos das "caixas" e isso geralmente ocorre quando temos uma quantidade pequena de dados.
#
# Como tivemos sobreposição do limite superior, vamos calcular o `boxplot` para outro filme.

# + id="eSb0j40vMzaX" colab_type="code" outputId="6320bc0a-ead5-4ec3-8de4-b0ea44a92128" colab={"base_uri": "https://localhost:8080/", "height": 907}
plot_filme(46578)

# + [markdown] id="BQAvA_wpOzi9" colab_type="text"
# Neste caso, os limites superiores não se sobrepõem e temos uma informação a mais. Aparece uma pequena bolinha localizada em `y=1`. A bolinha é chamada de valor discrepante ou *outlier*, por ficar muito fora dos limites inferior e superior.
#
# Ao olhar o histograma e o boxplot dos dois filmes, podemos notar melhor a diferença entre as avaliações entre eles.
#
# Embora melhoramos nossa qualidade de análise, ainda temos mais um ponto. Estamos comparando os boxplots dos filmes, mas eles estão em imagens separadas. Vamos juntar vários boxplots em uma única imagem. Veja como podemos fazer isso usando o `seaborn`, para aprendermos outra forma de plotar um boxplot.

# + id="nVghhx2yNFF1" colab_type="code" outputId="e2a750c1-90dc-4c88-fa3b-c6f868c10e84" colab={"base_uri": "https://localhost:8080/", "height": 296}
# One figure, one boxplot per movie: filter the four movies of interest and
# let seaborn group the ratings by filmeId.
sns.boxplot(data = avaliacoes.query("filmeId in [1, 2, 919, 46578]"), x="filmeId", y="nota")

# + [markdown] id="csYLtKn5O4Z2" colab_type="text"
# Chamamos o `sns.boxplot()` passando três parâmetros. Parâmetro `data` é um `dataframe` das notas dos filmes **Toy Story, Jumanji, Wizard of Oz e Little Miss Sunshine** (usamos o `.query()` para selecionar os dados). **x** indica o ID dos filmes e **y** as respectivas notas. Agora é possível comparar as notas dos filmes de forma muito mais clara.
#
# Com isto, finalizamos nossa segunda aula de Ciência de Dados, e ficam alguns desafios para vocês resolverem.

# + [markdown] id="doCpGlOcNtwP" colab_type="text"
# # Desafios

# + [markdown] id="Ymk1WfLINxfe" colab_type="text"
# **Desafio 1:** Rotacionar os ticks para 45 graus (os nomes dos gêneros) do gráfico de barras verdes (o último), de forma a deixar as legendas mais legíveis.

# + id="lz3V8hmEe5gd" colab_type="code" outputId="e101a332-8bff-4772-fbde-7591dea97d46" colab={"base_uri": "https://localhost:8080/", "height": 542}
# Same green bar chart as before, plus plt.xticks(rotation=45) so the genre
# labels no longer overlap.
sns.set_style("whitegrid")
plt.figure(figsize=(16,8))
sns.barplot(x=filmes_por_genero.index, y=filmes_por_genero.values, palette=sns.color_palette("BuGn_r", n_colors=len(filmes_por_genero) + 10))
plt.xticks(rotation=45)
plt.show()

# + [markdown] id="DgH1kMdwOM8m" colab_type="text"
# **Desafio 2:** Encontrar vários filmes com médias próximas e distribuições diferentes. Use a função
# **plot_filme** para plotar.

# + id="082edUebbvze" colab_type="code" outputId="e434d500-d892-4c42-81ec-7c7da4a26834" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Browse the middle of the mean-rating ranking to pick candidate movies.
filmes_com_media.sort_values("media", ascending=False)[2450:2500]

# + id="r57Sy3S7QxkV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 815} outputId="9291e317-13dd-4fca-adc9-8ec65de5c106"
plot_filme(1785)

# + id="r5DD3o7pRZoh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 815} outputId="479ae8c3-53ff-4e63-d767-ccaaf4f5538e"
plot_filme(1366)

# + [markdown] id="IUuYhTi9OeeU" colab_type="text"
# **Desafio 3:** Criar o boxplot dos 10 filmes com mais votos (não é com a maior média, é com mais votos).

# + id="RxmJ2KxnSB78" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="e1f3fe0e-298d-41b1-c49c-db9da011a241"
# Ten most-voted movie ids (filmes_com_votos is built earlier in the
# notebook — assumed to carry "votos" and "filmeId" columns).
top_10 = list(filmes_com_votos.sort_values("votos", ascending=False).head(10).filmeId)
sns.boxplot(data=avaliacoes.query(f"filmeId in {top_10}"), x = "filmeId", y = "nota")

# + [markdown] id="VYFA2Q0eOukS" colab_type="text"
# **Desafio 4:** Configurar a visualização do boxplot gerado pelo seaborn (último boxplot na aula), de modo a mostrar o nome do filme ao invés do ID.

# + id="Dyg1Rwnb6buH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 390} outputId="eb6b507a-5d8b-4109-fc5d-d899409ba6f6"
# Merge brings in the "titulo" column so the x-axis can show names.
# (The frame is already restricted to the four movies by the query before
# the merge; the redundant second query on the merged frame was removed.)
avaliacoes_com_titulo = avaliacoes.query("filmeId in [1, 2, 919, 46578]").merge(filmes, on = "filmeId")
sns.boxplot(data = avaliacoes_com_titulo, x="titulo", y="nota")
plt.xticks(rotation=45)
Aula_02_colab.ipynb