code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + '''Visualization of the filters of VGG16, via gradient ascent in input space. This script can run on CPU in a few minutes. Results example: http://i.imgur.com/4nj4KjN.jpg ''' from __future__ import print_function import numpy as np import time from keras.preprocessing.image import save_img from keras.applications import vgg16 from keras import backend as K # + # util function to convert a tensor into a valid image def deprocess_image(x): # normalize tensor: center on 0., ensure std is 0.1 x -= x.mean() x /= (x.std() + K.epsilon()) x *= 0.2 # clip to [0, 1] x += 0.5 x = np.clip(x, 0, 1) # convert to RGB array x *= 255 if K.image_data_format() == 'channels_first': x = x.transpose((1, 2, 0)) x = np.clip(x, 0, 255).astype('uint8') return x def normalize(x): # utility function to normalize a tensor by its L2 norm return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon()) # + # build the VGG16 network with ImageNet weights model = vgg16.VGG16(weights='imagenet', include_top=False) print('Model loaded.') # model.summary() # + # dimensions of the generated pictures for each filter. 
img_width = 128 img_height = 128 # the name of the layer we want to visualize # (see model definition at keras/applications/vgg16.py) layer_name = 'block5_pool' # + import pandas as pd coef = pd.read_csv('model.csv') coef = coef.drop(coef.columns[[0,1]], axis=1) coef = coef.as_matrix() # - fil = pd.read_csv('filter.csv') fil = fil.drop(fil.columns[0], axis=1) fil = fil.values # + import matplotlib.pyplot as plt import matplotlib.image as mpimg img_input = mpimg.imread('/Users/jiayun/Documents/coding/vgg/data/dog128_2.jpg') # print(img_input.shape) img_input = np.reshape(img_input, (1,img_width,img_height,3)) # + # this is the placeholder for the input images input_img = model.input # get the symbolic outputs of each "key" layer (we gave them unique names). layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]]) # store result kept_filters = [] id_filters = [] for model_index in range(4): # we visualize 4 models print('Processing model %d' % model_index) start_time = time.time() # read model thisModel = coef[model_index] # we build a loss function that maximizes the activation of the nth model layer_output = layer_dict[layer_name].output loss = 0 # initialize for filter_index in range(len(fil)): loss -= thisModel[filter_index] * K.mean(layer_output[:, :, :, filter_index]) # "-=" more like a cat and "+=" more like a dog # we compute the gradient of the input picture wrt this loss grads = K.gradients(loss, input_img)[0] # normalization trick: we normalize the gradient grads = normalize(grads) # this function returns the loss and grads given the input picture iterate = K.function([input_img], [loss, grads]) # step size for gradient ascent step = 5 # we start from a gray image with some random noise input_img_data = img_input input_img_data = (input_img_data-0.0) # we run gradient ascent for 40 steps for i in range(40): loss_value, grads_value = iterate([input_img_data]) input_img_data += grads_value * step if i % 10 == 0: print('Current loss value:', 
loss_value) print('Current loss value:', loss_value) img = deprocess_image(input_img_data[0]) kept_filters.append((img, loss_value)) id_filters.append(filter_index) end_time = time.time() print('model %d processed in %ds' % (model_index, end_time - start_time)) # - print(len(kept_filters)) # + # we will stich the best 64 filters on a 8 x 8 grid. n = 2 # the filters that have the highest loss are assumed to be better-looking. # we will only keep the top 64 filters. #kept_filters.sort(key=lambda x: x[1], reverse=True) #kept_filters = kept_filters[:n * n] # build a black picture with enough space for # our 8 x 8 filters of size 128 x 128, with a 5px margin in between margin = 5 width = n * img_width + (n - 1) * margin height = n * img_height + (n - 1) * margin stitched_filters = np.zeros((width, height, 3)) # fill the picture with our saved filters for i in range(n): for j in range(n): img, loss = kept_filters[i * n + j] width_margin = (img_width + margin) * i height_margin = (img_height + margin) * j stitched_filters[ width_margin: width_margin + img_width, height_margin: height_margin + img_height, :] = img # save the result to disk save_img('activation.png', stitched_filters) # - input_img_data = img_input input_img_data = (input_img_data-0.0) img_original = deprocess_image(input_img_data[0]) # + # we will stich the best 64 filters on a 8 x 8 grid. n = 2 # the filters that have the highest loss are assumed to be better-looking. # we will only keep the top 64 filters. 
#kept_filters.sort(key=lambda x: x[1], reverse=True) #kept_filters = kept_filters[:n * n] # build a black picture with enough space for # our 8 x 8 filters of size 128 x 128, with a 5px margin in between margin = 5 width = n * img_width + (n - 1) * margin height = n * img_height + (n - 1) * margin stitched_filters = np.zeros((width, height, 3)) # fill the picture with our saved filters for i in range(n): for j in range(n): img, loss = kept_filters[i * n + j] img = abs(img - img_original) # img = img/(img.max() + 0.001) # img *= 255 width_margin = (img_width + margin) * i height_margin = (img_height + margin) * j stitched_filters[ width_margin: width_margin + img_width, height_margin: height_margin + img_height, :] = img # save the result to disk save_img('activation2.png', stitched_filters) # - from PIL import Image img = Image.open('activation2.png').convert('LA') img.save('activation2.png') # + # # compare with input_img_data # # we will stich the best 64 filters on a 8 x 8 grid. # n = 2 # # the filters that have the highest loss are assumed to be better-looking. # # we will only keep the top 64 filters. 
# #kept_filters.sort(key=lambda x: x[1], reverse=True) # #kept_filters = kept_filters[:n * n] # # build a black picture with enough space for # # our 8 x 8 filters of size 128 x 128, with a 5px margin in between # margin = 5 # width = n * img_width + (n - 1) * margin # height = n * img_height + (n - 1) * margin # stitched_filters = np.zeros((width, height)) # # fill the picture with our saved filters # for i in range(n): # for j in range(n): # img, loss = kept_filters[i * n + j] # img = abs(img - img_original) # Y = 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2] # width_margin = (img_width + margin) * i # height_margin = (img_height + margin) * j # stitched_filters[ # width_margin: width_margin + img_width, # height_margin: height_margin + img_height] = Y # # save the result to disk # plt.imshow(stitched_filters, cmap = plt.get_cmap('gray')) # plt.show() # plt.savefig('activation3.jpg')
Activate Neuron.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="yGDhjuq-hfDH" import pandas as pd df = pd.read_csv("../../NewDataset.csv") # + colab={"base_uri": "https://localhost:8080/", "height": 622} colab_type="code" id="kjbriINchnP2" outputId="3dc3d1d6-f4e1-4fe1-8165-53a4bb9044d4" df # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="kEtORWamhtFB" outputId="e3eed1c1-50ec-40c3-bb00-104e961f7458" # Set X array, containing all the values to valuate a Fake Account X = df.iloc[:, 0:14].values X[0] # + colab={"base_uri": "https://localhost:8080/", "height": 835} colab_type="code" id="Fe5KbBI_hvUN" outputId="833755c8-db84-46e7-aa1f-d16e417e0178" # Assign y and print y = df.iloc[:, 14].values y # + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="LpLin1hQGlOL" outputId="71402f50-5c70-4188-dd24-1e8f6df7b5d1" from sklearn.ensemble import ExtraTreesClassifier from sklearn.feature_selection import SelectFromModel """ Tree-based estimators can be used to compute feature importances, which in turn can be used to discard irrelevant features """ clf = ExtraTreesClassifier(n_estimators=50) clf = clf.fit(X, y) clf.feature_importances_ # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="NNBSRSxoG_fG" outputId="0bdd1b0f-eb14-495d-d4cf-86fbf3a8dc48" model = SelectFromModel(clf, prefit=True) X_new = model.transform(X) X_new.shape # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="JDRt_xx9pkTp" outputId="7f22d65b-734c-4388-85a9-20b4c06aae76" X_new[0] # + colab={"base_uri": "https://localhost:8080/", "height": 352} colab_type="code" id="kdSiyrG-HgWa" outputId="079722ad-7736-441b-ef32-028b0d4a1446" """ Feature Importance Forest of Trees """ import numpy as np 
import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.ensemble import ExtraTreesClassifier # Build a forest and compute the feature importances forest = ExtraTreesClassifier(n_estimators=250, random_state=0) forest.fit(X_new, y) importances = forest.feature_importances_ std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Print the feature ranking print("Feature ranking:") def printColumn(number): switcher = { 0: "#Following", 1: "Last Post Recent", 2: "% Post Single Day" } return switcher.get(number, "Invalid Column") for f in range(X_new.shape[1]): print("%d. feature %d (%f) %s" % (f + 1, indices[f], importances[indices[f]], printColumn(indices[f]))) # Plot the feature importances of the forest plt.figure() plt.title("Feature importances") plt.bar(range(X_new.shape[1]), importances[indices], color="r", yerr=std[indices], align="center") plt.xticks(range(X_new.shape[1]), indices) plt.xlim([-1, X_new.shape[1]]) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 195} colab_type="code" id="DAtODMZZITxv" outputId="1733cffb-0fd1-48bc-e944-40ddd58336ee" """ Decision Tree Classifier """ from sklearn import tree clf = tree.DecisionTreeClassifier() clf = clf.fit(X_new, y) # Train and Test algorithms from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_new, y, test_size=0.33, random_state=42) print(len(X_train), len(y_train)) print(len(X_test), len(y_test)) print(clf.fit(X_train, y_train)) print("TRAIN SET", clf.score(X_train, y_train)) print("TEST SET", clf.score(X_test, y_test)) # + colab={} colab_type="code" id="J_rhuAaVpkTw"
Preprocessing/Features Selection/NewDataSet/Tree_based_feature_selection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Flag of China # %matplotlib notebook import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.markers import MarkerStyle def star(angle): m = mpl.markers.MarkerStyle(marker='*') m._transform = m.get_transform().rotate_deg(angle) return m fig = plt.figure(figsize=[9, 6]) ax = fig.add_subplot(111) ax.axis('tight') ax.tick_params(axis='both', which='both', top='False', bottom='False', left='False', right='False', labelbottom='False', labelleft='False') ax.set_xlim(0, 30) ax.set_ylim(0, 20) for loc in ['top', 'bottom', 'left', 'right']: ax.spines[loc].set_color('none') ax.set_facecolor((0.870588, 0.160784, 0.062745)) plt.plot(5, 15, marker='*', markersize=120, markerfacecolor=(1, 0.870588, 0), markeredgewidth=0) m = mpl.markers.MarkerStyle(marker='*') for crd in [[10, 11, 51.3402], [10, 18, 120.964], [12, 13, 74.0546], [12, 16, 98.1301]]: plt.scatter(crd[0], crd[1], marker=star(crd[2]), s=1400, c=[(1, 0.870588, 0)]) fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0) fig.savefig('china_flag.png', dpi=1000, bbox_inches=0)
src/Flag_of_China.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append('../') from maskmoment import maskmoment from astropy.io import fits from astropy.table import Table import numpy as np import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") # %matplotlib inline def quadplot(basename, extmask=None): fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12,12)) mom0 = fits.getdata(basename+'.mom0.fits.gz') ax1.imshow(mom0,origin='lower',cmap='CMRmap') ax1.set_title(basename+' - Moment 0',fontsize='x-large') mom1 = fits.getdata(basename+'.mom1.fits.gz') ax2.imshow(mom1,origin='lower',cmap='jet') ax2.set_title(basename+' - Moment 1',fontsize='x-large') mom2 = fits.getdata(basename+'.mom2.fits.gz') ax3.imshow(mom2,origin='lower',cmap='CMRmap') ax3.set_title(basename+' - Moment 2',fontsize='x-large') if extmask is None: mask = np.sum(fits.getdata(basename+'.mask.fits.gz'),axis=0) else: mask = np.sum(fits.getdata(extmask),axis=0) ax4.imshow(mask,origin='lower',cmap='CMRmap_r') ax4.set_title('Projected Mask',fontsize='x-large') plt.subplots_adjust(hspace=0.15,wspace=0.15) plt.show() return # ## Example 0: Dilated mask with no smoothing. Expand from 4$\sigma$ to 2$\sigma$ contour. Mask regions must span at least 2 beam areas and 2 channels at any pixel. maskmoment(img_fits='NGC4047.co.smo7msk.fits.gz', gain_fits='NGC4047.co.smo7gain.fits.gz', snr_hi=4, snr_lo=2, minbeam=2, snr_lo_minch=2, outname='NGC4047.dilmsk') quadplot('NGC4047.dilmsk') # ## Example 1: Dilated mask with 2 pixel padding in spatial dimensions. Start at 5$\sigma$ contour to isolate main galaxy. We speed up execution by using the rms cube generated by Example 0. 
maskmoment(img_fits='NGC4047.co.smo7msk.fits.gz', rms_fits='NGC4047.dilmsk.ecube.fits.gz', snr_hi=5, snr_lo=2, minbeam=2, nguard=[2,0], outname='NGC4047.dilmskpad') quadplot('NGC4047.dilmskpad') # ## Example 2: Smooth and mask method. Generate a mask using the 3$\sigma$ contour of a smoothed (to 10") cube. Mask regions must span at least 2 beam areas. maskmoment(img_fits='NGC4047.co.smo7msk.fits.gz', rms_fits='NGC4047.dilmsk.ecube.fits.gz', snr_hi=3, snr_lo=3, fwhm=10, vsm=None, minbeam=2, outname='NGC4047.smomsk') quadplot('NGC4047.smomsk') # ## Example 3: Dilated smooth-and-mask. Expand from the 4$\sigma$ to 2$\sigma$ contour of the smoothed cube. maskmoment(img_fits='NGC4047.co.smo7msk.fits.gz', rms_fits='NGC4047.dilmsk.ecube.fits.gz', snr_hi=4, snr_lo=2, fwhm=10, vsm=None, minbeam=2, outname='NGC4047.dilsmomsk', output_2d_mask=True) quadplot('NGC4047.dilsmomsk') # ## Example 4: Apply an existing mask. Here we apply the 2D version of the mask derived in Example 3. Since this includes a lot of noise the results are not as good. maskmoment(img_fits='NGC4047.co.smo7msk.fits.gz', rms_fits='NGC4047.dilmsk.ecube.fits.gz', mask_fits='NGC4047.dilsmomsk.mask2d.fits.gz', outname='NGC4047.msk2d') quadplot('NGC4047.msk2d', extmask='NGC4047.dilsmomsk.mask2d.fits.gz') # ## Compare integrated spectra from the 5 masks. 
ex0 = Table.read('NGC4047.dilmsk.flux.csv', format='ascii.ecsv') ex1 = Table.read('NGC4047.dilmskpad.flux.csv', format='ascii.ecsv') ex2 = Table.read('NGC4047.smomsk.flux.csv', format='ascii.ecsv') ex3 = Table.read('NGC4047.dilsmomsk.flux.csv', format='ascii.ecsv') ex4 = Table.read('NGC4047.msk2d.flux.csv', format='ascii.ecsv') fig = plt.figure(figsize=[8,5.5]) plt.step(ex0['Velocity'],ex0['Flux'],color='r',label='dilmsk') plt.step(ex1['Velocity'],ex1['Flux'],color='b',label='dilmskpad') plt.step(ex2['Velocity'],ex2['Flux'],color='g',label='smomsk') plt.step(ex3['Velocity'],ex3['Flux'],color='k',label='dilsmomsk') plt.step(ex4['Velocity'],ex4['Flux'],color='orange',label='msk2d') plt.legend(fontsize='large') plt.xlabel(ex0['Velocity'].description+' ['+str(ex0['Velocity'].unit)+']',fontsize='x-large') plt.ylabel(ex0['Flux'].description+' ['+str(ex0['Flux'].unit)+']',fontsize='x-large')
example/N4047_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # name: python3 # --- pip install PrettyTable # + import json from prettytable import PrettyTable from stringMatcher import levenshtein_ratio_and_distance from time import time table = PrettyTable(field_names=["course_id", "course_title", "is_paid", "price","num_subscribers","subject"]) # - # %store -r course_dic # %store -r df_name1 def getClosestMatch(queryString): matchRatios = [levenshtein_ratio_and_distance(queryString, KEY) for KEY, _ in df_name1.items()] bestMatchRatio = max(matchRatios) if (bestMatchRatio < 0.5): return { "No Match found as per your input" } bestMatchRatio_index = matchRatios.index(bestMatchRatio) response = f"Subject : {df_name1[bestMatchRatio_index]['subject']}" return response def request(course=""): course = str(course) if(course.replace(" ", "").strip() == ""): return f"Invalid State Name" response = getClosestMatch(queryString = course) print(response) request(course="Javascript for Beginners")
DataBaseSearch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/espinili/Linear-Algebra-58020/blob/main/Matrix_and_its_Operations.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="mUGLneQ_QYyC" outputId="2113ca39-b2ff-47fd-894a-53b41c9d8b8f" import numpy as np #Create a 1x3 matrix a = np.array([1,2,3]) print (a) # + colab={"base_uri": "https://localhost:8080/"} id="puVGxG6kNNkY" outputId="85d5ef88-abb5-4b22-8edd-cae49aa63505" #Addition of Matrix a = np.array ([[-5,0],[4,1]]) b = np.array ([[6,-3],[2,3]]) print(a+b) # + colab={"base_uri": "https://localhost:8080/"} id="PwX-i-gtR2GB" outputId="5d78cb32-92de-411f-81d8-298f2fe3bcad" #Subtraction of Matrices 1 a = np.array ([[-5,0],[4,1]]) b = np.array ([[6,-3],[2,3]]) print(a-b) # + colab={"base_uri": "https://localhost:8080/"} id="6ziyRGdYS_rh" outputId="c44549fa-1f53-4522-c0b3-32a720fe2713" #Subtraction of Matrices 2 b = np.array ([[6,-3],[2,3]]) print(b-b)
Matrix_and_its_Operations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Dependencies and Setup import pandas as pd # File to Load csv_path = "purchase_data.csv" # Read Purchasing File and store into Pandas data frame purchase_data = pd.read_csv(csv_path) purchase_data.head() # - #calculate total of players total_players=len(purchase_data["SN"].unique()) print("Total number of players:", total_players, "players") #Calculate items values uni_items= len(purchase_data["Item Name"].unique()) #Calculate average price ave_price= purchase_data["Price"].mean() #Calculate number of purchase num_purchase= len(purchase_data) #Calculate total revenue tot_revenue= purchase_data["Price"].sum() # Place all of the data found into a summary DataFrame summary_df = pd.DataFrame({"Number of Unique Items": [uni_items], "Average Price": ave_price, "Number of Purchases": num_purchase, "Total Revenue": tot_revenue}) #mapping values summary_df["Average Price"] = summary_df["Average Price"].map("${:.2f}".format) summary_df["Total Revenue"] = summary_df["Total Revenue"].map("${:,.2f}".format) summary_df # + #Calculate Percentage and Count of Male Players male_df = purchase_data.loc[purchase_data["Gender"]=="Male", :] num_male = len(male_df["SN"].unique()) per_male = (num_male*100)/total_players #Calculate Percentage and Count of Female Players female_df = purchase_data.loc[purchase_data["Gender"]=="Female", :] num_female = len(female_df["SN"].unique()) per_female = (num_female*100)/total_players #Calculate Percentage and Count of Other Players num_other= total_players-(num_male+num_female) per_other = (num_other*100)/total_players #create a Data frame with the values sum_gender_df = pd.DataFrame({"Gender": ["Male", "Female" , "Other/Non-Disclosed"], "Total Count": [num_male, num_female, num_other], "Percentage of Players":[per_male, 
per_female, per_other] }) #mapping values sum_gender_df["Percentage of Players"] = sum_gender_df["Percentage of Players"].map("{:.2f}%".format) sum_gender_df=sum_gender_df.set_index("Gender") sum_gender_df # + #calculate values per female num_pur_female= len(female_df) ave_pri_female= female_df["Price"].mean() tot_pur_female= female_df["Price"].sum() #calculate avg purchase per male gender by player group_female=female_df.groupby('SN')['Price'].sum() avg_perperson_female=group_female.mean() #calculate values per male num_pur_male= len(male_df) ave_pri_male= male_df["Price"].mean() tot_pur_male= male_df["Price"].sum() #calculate avg purchase per male gender by player group_male=male_df.groupby('SN')['Price'].sum() avg_perperson_male=group_male.mean() #calculate values per other gender other_df = purchase_data.loc[purchase_data["Gender"]=="Other / Non-Disclosed", :] num_pur_other= len(other_df) ave_pri_other= other_df["Price"].mean() tot_pur_other= other_df["Price"].sum() #calculate avg purchase per other gender by player group_other=other_df.groupby('SN')['Price'].sum() avg_perperson_other=group_other.mean() #create a Data frame with the values analysis_gender_df = pd.DataFrame({"Gender": ["Male", "Female" , "Other/Non-Disclosed"], "Purchase Count": [num_pur_female, num_pur_male, num_pur_other], "Average Purchase Price":[ave_pri_female, ave_pri_male, ave_pri_other], "Total Purchase Value": [tot_pur_female, tot_pur_male, tot_pur_other], "Avg Total Purchase per Person": [avg_perperson_female, avg_perperson_male, avg_perperson_other] }) #mapping values analysis_gender_df["Average Purchase Price"] = analysis_gender_df["Average Purchase Price"].map("${:.2f}".format) analysis_gender_df["Total Purchase Value"] = analysis_gender_df["Total Purchase Value"].map("${:,.2f}".format) analysis_gender_df["Avg Total Purchase per Person"] = analysis_gender_df["Avg Total Purchase per Person"].map("${:,.2f}".format) analysis_gender_df=analysis_gender_df.set_index("Gender") 
analysis_gender_df # - # Create bins in which to place values based upon TED Talk views #ages=purchase_data["Age"].unique() #bins = range(10,ages.max()+1,5) bins=[0,9,14,19,24,29,34,39,45] # Create labels for these bins group_labels = ["<10", "10-14","15-19","20-24","25-29","30-34","35-39",">=40"] # Slice the data and place it into bins # Place the data series into a new column inside of the DataFrame age_purchase_data=pd.read_csv(csv_path) age_purchase_data["Ages Group"] = pd.cut(age_purchase_data["Age"], bins, labels=group_labels) age_sn_purchase_data=age_purchase_data.drop_duplicates('SN') # Using GroupBy in order to separate the data into fields according to "Age Group" values grouped_age_df = age_sn_purchase_data.groupby('Ages Group') # + #calculate puchase count by age age_purchase = grouped_age_df["SN"].count() # - #calculate percent of puchase by age percent_purchase = (age_purchase*100)/total_players #create a Data frame with the values age_demographics_df = pd.DataFrame({"Purchase Count": age_purchase, "Percentage Players": percent_purchase.map("%{:.2f}".format) }) age_demographics_df # + grouped_age_df = age_purchase_data.groupby('Ages Group') #calculate puchase count by age age_purchase = grouped_age_df["SN"].count() #calculate price average by age ranges avg_purchase = grouped_age_df["Price"].mean() #calculate Total purchase by age ranges tot_purchase = grouped_age_df["Price"].sum() #calculate Avg per person by age ranges grouped_perperson_df = age_purchase_data.groupby(['Ages Group','SN'])['Price'].sum() grouped_perperson_df =grouped_perperson_df.dropna(how='any') grouped_perperson_df= grouped_perperson_df.groupby('Ages Group').mean() #create a Data frame with the values age_analysis_df = pd.DataFrame({"Purchase Count": age_purchase, "Avg Purchase Price": avg_purchase.map("${:.2f}".format), "Total Purchase Value": tot_purchase.map("${:,.2f}".format), "Avg Total Purchase per Person": grouped_perperson_df.map("${:,.2f}".format) }) age_analysis_df # - 
grouped_sn_df = purchase_data.groupby(['SN']) grouped_sn_df #calculate puchase count by SN sn_purchase = grouped_sn_df["Purchase ID"].count() sn_purchase #calculate average price by SN avg_sn_purchase = grouped_sn_df["Price"].mean() #calculate total puchase by SN tot_sn_purchase = grouped_sn_df["Price"].sum() #create a Data frame with the values top_spenders_df = pd.DataFrame({"Purchase Count": sn_purchase, "Avg Purchase Price": avg_sn_purchase.map("${:.2f}".format), "Total Purchase Value": tot_sn_purchase }) top_spenders_df= top_spenders_df.sort_values("Total Purchase Value", ascending=False) top_spenders_df["Total Purchase Value"] = top_spenders_df["Total Purchase Value"].map("${:.2f}".format) top_spenders_df.head() # + grouped_item_df = purchase_data.groupby(['Item ID']) #calculate purchase count by Item ID item_purchase = grouped_item_df["Purchase ID"].count() #calculate total puchase by Item ID tot_item_purchase = grouped_item_df["Price"].sum() #create a Data frame with the values most_popular_df = pd.DataFrame({"Purchase Count": item_purchase, "Total Purchase Value": tot_item_purchase }) #.astype(float).map("${:,.2f}".format) #get the names and prices of each item items_df = pd.DataFrame(purchase_data, columns=["Item ID", "Item Name", "Price"]) items_df=items_df.drop_duplicates('Item ID') # Merge merge_most_popular_df = pd.merge(most_popular_df, items_df, on="Item ID",how="left") merge_most_popular_df = merge_most_popular_df.set_index("Item ID") merge_most_popular_df= merge_most_popular_df[["Item Name", "Purchase Count","Price", "Total Purchase Value"]] merge_most_popular_df["Price"] = merge_most_popular_df["Price"].map("${:.2f}".format) merge_most_popular_df.rename(columns={'Price': 'Item Price'}, inplace=True) merge_most_popular_df= merge_most_popular_df.sort_values("Purchase Count", ascending=False) merge_most_popular_df.head() # - # To sort from highest to lowest. 
sort_totpurchase_df = merge_most_popular_df.sort_values("Total Purchase Value", ascending=False) sort_totpurchase_df["Total Purchase Value"] = sort_totpurchase_df["Total Purchase Value"].map("${:.2f}".format) sort_totpurchase_df.head() # + # Item Table (Sorted by Total Purchase Value) item_total_purchase = item_data_pd.sort_values("Total Purchase Value", ascending=False) # Minor Data Munging item_total_purchase["Item Price"] = item_total_purchase["Item Price"].map("${:,.2f}".format) item_total_purchase["Purchase Count"] = item_total_purchase["Purchase Count"].map("{:,}".format) item_total_purchase["Total Purchase Value"] = item_total_purchase["Total Purchase Value"].map("${:,.2f}".format) item_profitable = item_total_purchase.loc[:,["Purchase Count", "Item Price", "Total Purchase Value"]] item_profitable.head(5) # -
Heroes of Pymoli Revised.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Building our operators: the Face Divergence # The divergence is the integral of a flux through a closed surface as that enclosed volume shrinks to a point. Since we have discretized and no longer have continuous functions, we cannot fully take the limit to a point; instead, we approximate it around some (finite!) volume: *a cell*. The flux out of the surface ($\vec{j} \cdot \vec{n}$) is actually how we discretized $\vec{j}$ onto our mesh (i.e. $\bf{j}$) except that the face normal points out of the cell (rather than in the axes direction). After fixing the direction of the face normal (multiplying by $\pm 1$), we only need to calculate the face areas and cell volume to create the discrete divergence matrix. # # <img src="images/Divergence.png" width=80% align="center"> # # <h4 align="center">Figure 4. Geometrical definition of the divergence and the discretization.</h4> # ## Implementation # # Although this is a really helpful way to think about conceptually what is happening, the implementation of that would be a huge for loop over each cell. In practice, this would be slow, so instead, we will take advantage of linear algebra. Let's start by looking at this in 1 dimension using the SimPEG Mesh class. import numpy as np from SimPEG import Mesh import matplotlib.pyplot as plt # %matplotlib inline plt.set_cmap(plt.get_cmap('viridis')) # use a nice colormap! 
# + # define a 1D mesh mesh1D = Mesh.TensorMesh([5]) # with 5 cells fig, ax = plt.subplots(1,1, figsize=(12,2)) ax.plot(mesh1D.gridN, np.zeros(mesh1D.nN),'-k',marker='|',markeredgewidth=2, markersize=16) ax.plot(mesh1D.gridCC,np.zeros(mesh1D.nC),'o') ax.plot(mesh1D.gridFx,np.zeros(mesh1D.nFx),'>') ax.set_title('1D Mesh') # + # and define a vector of fluxes that live on the faces of the 1D mesh face_vec = np.r_[0., 1., 2., 2., 1., 0.] # vector of fluxes that live on the faces of the mesh print "The flux on the faces is {}".format(face_vec) plt.plot(mesh1D.gridFx, face_vec, '-o') plt.ylim([face_vec.min()-0.5, face_vec.max()+0.5]) plt.grid(which='both') plt.title('face_vec'); # - # Over a single cell, the divergence is # # $$ # \nabla \cdot \vec{j}(p) = \lim_{v \to \{p\}} = \int \int_{S(v)} \frac{\vec{j}\cdot \vec{n}}{v} dS # $$ # # in 1D, this collapses to taking a single difference - how much is going out of the cell vs coming in? # # $$ # \nabla \cdot \vec{j} \approx \frac{1}{v}(-j_{\text{left}} + j_{\text{right}}) # $$ # # Since the normal of the x-face on the left side of the cell points in the positive x-direction, we multiply by -1 to get the flux going out of the cell. On the right, the normal defining the x-face is point out of the cell, so it is positive. # + # We can take the divergence over the entire mesh by looping over each cell div_face_vec = np.zeros(mesh1D.nC) # allocate for each cell for i in range(mesh1D.nC): # loop over each cell and div_face_vec[i] = 1.0/mesh1D.vol[i] * (-face_vec[i] + face_vec[i+1]) print "The face div of the 1D flux is {}".format(div_face_vec) # - # Doing it as a for loop is easy to program for the first time, # but is difficult to see what is going on and could be slow! # Instead, we can build a faceDiv matrix (note: this is a silly way to do this!) 
# + faceDiv = np.zeros([mesh1D.nC, mesh1D.nF]) # allocate space for a face div matrix for i in range(mesh1D.nC): # loop over each cell faceDiv[i, [i, i+1]] = 1.0/mesh1D.vol[i] * np.r_[-1,+1] print("The 1D face div matrix for this mesh is \n{}".format(faceDiv)) assert np.all( faceDiv.dot(face_vec) == div_face_vec ) # make sure we get the same result! print "\nThe face div of the 1D flux is still {}!".format(div_face_vec) # - # the above is still a loop... (and python is not a fan of loops). # Also, if the mesh gets big, we are storing a lot of unnecessary zeros "There are {nnz} zeros (too many!) that we are storing".format(nnz = np.sum(faceDiv == 0)) # ### Working in Sparse # # We will use instead *sparse* matrices instead. These are in scipy and act almost the same as numpy arrays (except they default to matrix multiplication), and they don't store all of those pesky zeros! We use [scipy.sparse](http://docs.scipy.org/doc/scipy/reference/sparse.html) to build these matrices. import scipy.sparse as sp from SimPEG.Utils import sdiag # we are often building sparse diagonal matrices, so we made a functio in SimPEG! # + # construct differencing matrix with diagonals -1, +1 sparse_diff = sp.spdiags((np.ones((mesh1D.nC+1, 1))*[-1, 1]).T, [0, 1], mesh1D.nC, mesh1D.nC+1, format="csr") print "the sparse differencing matrix is \n{}".format(sparse_diff.todense()) # account for the volume faceDiv_sparse = sdiag(1./mesh1D.vol) * sparse_diff # account for volume print "\n and the face divergence is \n{}".format(faceDiv_sparse.todense()) print "\n but now we are only storing {nnz} nonzeros".format(nnz=faceDiv_sparse.nnz) assert np.all(faceDiv_sparse.dot(face_vec) == div_face_vec) print "\n and we get the same answer! {}".format(faceDiv_sparse * face_vec) # - # In SimPEG, this is stored as the `faceDiv` property on the mesh print mesh1D.faceDiv * face_vec # and still gives us the same answer! 
# ## Moving to 2D
# To move up in dimensionality, we build a 2D mesh which has both x and y faces

mesh2D = Mesh.TensorMesh([100, 80])
mesh2D.plotGrid()
plt.axis('tight');

# We define 2 face functions, one in the x-direction and one in the y-direction. Here, we choose to work with sine functions as the continuous divergence is easy to compute, meaning we can test it!

# +
# analytic face functions (sine waves, so the true divergence is known)
def jx_fct(x, y):
    return -np.sin(2.*np.pi*x)

def jy_fct(x, y):
    return -np.sin(2.*np.pi*y)

# evaluate on the x- and y-face grids and stack into a single face vector
jx_vec = jx_fct(mesh2D.gridFx[:, 0], mesh2D.gridFx[:, 1])
jy_vec = jy_fct(mesh2D.gridFy[:, 0], mesh2D.gridFy[:, 1])
j_vec = np.r_[jx_vec, jy_vec]

print("There are {nFx} x-faces and {nFy} y-faces, so the length of the "
      "face function, j, is {lenj}".format(
          nFx=mesh2D.nFx,
          nFy=mesh2D.nFy,
          lenj=len(j_vec)
      ))

plt.colorbar(mesh2D.plotImage(j_vec, 'F', view='vec')[0])
# -

# ### But first... what does the matrix look like?
#
# Now, we know that we do not want to loop over each of the cells and instead want to work with matrix-vector products. In this case, each row of the divergence matrix should pick out the two relevant faces in the x-direction and two in the y-direction (4 total).
#
# When we unwrap our face function, we unwrap using column major ordering, so all of the x-faces are adjacent to one another, while the y-faces are separated by the number of cells in the x-direction (see [mesh.ipynb](mesh.ipynb) for more details!).
#
# When we plot the divergence matrix, there will be 4 "diagonals",
# - 2 that are due to the x-contribution
# - 2 that are due to the y-contribution
#
# Here, we define a small 2D mesh so that it is easier to see the matrix structure.
# +
small_mesh2D = Mesh.TensorMesh([3, 4])

# NOTE: fixed Python-2 `print "..."` statements to `print(...)` calls;
# the notebook kernel is Python 3, where the old form is a SyntaxError.
print("Each y-face is {} entries apart".format(small_mesh2D.nCx))
print("and the total number of x-faces is {}".format(small_mesh2D.nFx))
print("So in the first row of the faceDiv, we have non-zero entries at \n{}".format(
    small_mesh2D.faceDiv[0, :]))
# -

# Now, lets look at the matrix structure

# +
fig, ax = plt.subplots(1, 2, figsize=(12, 4))

# plot the non-zero entries in the faceDiv
ax[0].spy(small_mesh2D.faceDiv, ms=2)
ax[0].set_xlabel('2D faceDiv')

small_mesh2D.plotGrid(ax=ax[1])

# Number the faces and plot. (We should really add this to SimPEG... pull request anyone!?)
xys = zip(
    small_mesh2D.gridFx[:, 0],
    small_mesh2D.gridFx[:, 1],
    range(small_mesh2D.nFx)
)
for x, y, ii in xys:
    ax[1].plot(x, y, 'r>')
    ax[1].text(x+0.01, y-0.02, ii, color='r')

xys = zip(
    small_mesh2D.gridFy[:, 0],
    small_mesh2D.gridFy[:, 1],
    range(small_mesh2D.nFy)
)
for x, y, ii in xys:
    ax[1].plot(x, y, 'g^')
    ax[1].text(x-0.02, y+0.02, ii+small_mesh2D.nFx, color='g')

ax[1].set_xlim((-0.1, 1.1));
ax[1].set_ylim((-0.1, 1.1));
# -

# How did we construct the matrix? - Kronecker products.
# There is a handy identity that relates the vectorized face function to its matrix form (<a href = "https://en.wikipedia.org/wiki/Vectorization_(mathematics)#Compatibility_with_Kronecker_products">wikipedia link!</a>)
# $$
# \text{vec}(AUB^\top) = (B \otimes A) \text{vec}(U)
# $$
#
# For the x-contribution:
# - A is our 1D differential operator ([-1, +1] on the diagonals)
# - U is $j_x$ (the x-face function as a matrix)
# - B is just an identity
# so
# $$
# \text{Div}_x \text{vec}(j_x) = (I \otimes Div_{1D}) \text{vec}(j_x)
# $$
#
# For the y-contribution:
# - A is just an identity!
# - U is $j_y$ (the y-face function as a matrix)
# - B is our 1D differential operator ([-1, +1] on the diagonals)
# so
# $$
# \text{Div}_y \text{vec}(j_y) = (\text{Div}_{1D} \otimes I) \text{vec}(j_y)
# $$
#
# $$
# \text{Div} \cdot j = \text{Div}_x \cdot j_x + \text{Div}_y \cdot j_y = [\text{Div}_x, \text{Div}_y] \cdot [j_x; j_y]
# $$
#
# And $j$ is just $[j_x; j_y]$, so we can horizontally stack $\text{Div}_x$, $\text{Div}_y$
#
# $$
# \text{Div} = [\text{Div}_x, \text{Div}_y]
# $$
#
# You can check this out in the SimPEG docs by running **small_mesh2D.faceDiv??**

# +
# check out the code!
# # small_mesh2D.faceDiv??
# -

# Now that we have a discrete divergence, lets check out the divergence of the face function we defined earlier.

# +
# apply the discrete divergence and show flux and divergence side by side
Div_j = mesh2D.faceDiv * j_vec

fig, ax = plt.subplots(1, 2, figsize=(8, 4))
plt.colorbar(mesh2D.plotImage(j_vec, 'F', view='vec', ax=ax[0])[0], ax=ax[0])
plt.colorbar(mesh2D.plotImage(Div_j, ax=ax[1])[0], ax=ax[1])
ax[0].set_title('j')
ax[1].set_title('Div j')
plt.tight_layout()
# -

# ### Are we right??
#
# Since we chose a simple function,
#
# $$
# \vec{j} = - \sin(2\pi x) \hat{x} - \sin(2\pi y) \hat{y}
# $$
#
# we know the continuous divergence...
#
# $$
# \nabla \cdot \vec{j} = -2\pi (\cos(2\pi x) + \cos(2\pi y))
# $$
#
# So lets plot it and take a look

# +
# from earlier
# jx_fct = lambda x, y: -np.sin(2*np.pi*x)
# jy_fct = lambda x, y: -np.sin(2*np.pi*y)

# the analytic divergence, evaluated at cell centers
sol = lambda x, y: -2*np.pi*(np.cos(2*np.pi*x)+np.cos(2*np.pi*y))
cont_div_j = sol(mesh2D.gridCC[:, 0], mesh2D.gridCC[:, 1])

# the discrete divergence of the same flux
Div_j = mesh2D.faceDiv * j_vec

# compare the two side by side
fig, ax = plt.subplots(1, 2, figsize=(8, 4))
plt.colorbar(mesh2D.plotImage(Div_j, ax=ax[0])[0], ax=ax[0])
plt.colorbar(mesh2D.plotImage(cont_div_j, ax=ax[1])[0], ax=ax[1])
ax[0].set_title('Discrete Div j')
ax[1].set_title('Continuous Div j')
plt.tight_layout()
# -

# Those look similar :)

# ### Order Test
#
# We can do better than just an eye-ball comparison - since we are using a staggered grid, with centered differences, the discretization should be second-order ($\mathcal{O}(h^2)$). That is, as we refine the mesh, our approximation of the divergence should improve by a factor of 2.
#
# SimPEG has a number of testing functions for
# [derivatives](http://docs.simpeg.xyz/content/api_core/api_Tests.html#SimPEG.Tests.checkDerivative)
# and
# [order of convergence](http://docs.simpeg.xyz/content/api_core/api_Tests.html#SimPEG.Tests.OrderTest)
# to make our lives easier!
# +
import unittest
from SimPEG.Tests import OrderTest

# analytic flux components and their known divergence
jx = lambda x, y: -np.sin(2*np.pi*x)
jy = lambda x, y: -np.sin(2*np.pi*y)
sol = lambda x, y: -2*np.pi*(np.cos(2*np.pi*x)+np.cos(2*np.pi*y))


class Testify(OrderTest):
    meshDimension = 2

    def getError(self):
        # build the discrete face flux on the current refinement level
        j = np.r_[jx(self.M.gridFx[:, 0], self.M.gridFx[:, 1]),
                  jy(self.M.gridFy[:, 0], self.M.gridFy[:, 1])]
        num = self.M.faceDiv * j  # numeric answer
        ans = sol(self.M.gridCC[:, 0], self.M.gridCC[:, 1])  # note M is a 2D mesh
        # look at the infinity norm
        # (as we refine the mesh, the number of cells
        #  changes, so need to be careful if using a 2-norm)
        return np.linalg.norm((num - ans), np.inf)

    def test_order(self):
        self.orderTest()


# This just runs the unittest:
suite = unittest.TestLoader().loadTestsFromTestCase(Testify)
unittest.TextTestRunner().run(suite);
# -

# Looks good - Second order convergence!

# ## Next up ...
#
# In the [next notebook](weakformulation.ipynb), we will explore how to use the weak formulation to discretize the DC equations.
1608_Finite_volume/divergence.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import math
import numpy as np
import copy
import itertools
import time


# simplification
def f(L, maxl, cost, k, B):
    """Recursive pipeline-partition cost (simplified model).

    Splits L layers into k contiguous stages; `maxl` is the largest stage
    length seen so far, `cost` holds per-cut communication costs, and B is
    the number of micro-batches. Returns (stage sizes, total cost).
    """
    if k == 1:
        return ([L], B*max(0, L-maxl))
    if k == L:
        cost_ = max(1, maxl) * B
        for i in range(k-1):
            # cost_ += cost[i][i]
            cost_ += cost[i]
        return ([1] * L, cost_)
    cost_best = float("inf")
    S_best = []
    for i in reversed(range(k, L)):
        S, cost_ = f(i, max(L-i, maxl), cost, k-1, B)
        cost_ += max(0, L-i-maxl)*B
        cost_ += cost[i-1]
        if cost_ < cost_best:
            cost_best = cost_
            S.append(L-i)
            S_best = S
    return S_best, cost_best


L = 12
k = 8
cost = [2, 1, 1, 3] * 12
f(L, 0, cost, k, 3)


# +
def pipe_dp(L, cost_e, cost_c, k, B):
    """Dynamic program over (layer, #cuts, bound on max stage cost).

    cost_e: per-layer execution costs; cost_c[cut][stage]: communication
    cost of cutting after `cut` at pipeline position `stage`; k: number of
    stages; B: number of micro-batches. Returns (stage sizes, minimum cost).
    """
    # Generate all possible max length (sums of contiguous layer runs)
    possible = [0]
    for i in range(1, L+1):
        ptr = 0
        while ptr + i <= L:
            possible.append(sum(cost_e[ptr:ptr+i]))
            ptr += 1
    possible = sorted(list(set(possible)))

    # trace will be a 3D list: trace[i][j][m] = (best split, best cost)
    # NOTE: np.infty was replaced with np.inf (np.infty is removed in NumPy 2.0).
    trace = []
    for i in range(L):
        outer = []
        for j in range(k):
            inner = []
            for m in range(len(possible)):
                inner.append(([], np.inf))
            outer.append(inner)
        trace.append(outer)

    # i: layer id, starting from 0
    # j: number of cut (=GPU-1)
    for i in range(L):
        for j in range(k):
            for m in range(len(possible)):
                if i+1 <= j:
                    # invalid: fewer layers than cuts
                    pass
                else:
                    if j == 0:
                        # base case: 0 cut
                        cur_sum = sum(cost_e[:i+1])
                        assert cur_sum in possible
                        trace[i][j][m] = ([i+1], (B-1) * max(0, cur_sum - possible[m]))
                    else:
                        cost_best = np.inf
                        S_best = []
                        for cut in range(j-1, i):
                            cur_sum = sum(cost_e[cut+1:i+1])
                            assert cur_sum in possible
                            S, cost_ = trace[cut][j-1][possible.index(max(cur_sum, possible[m]))]
                            cost_ += (B-1) * max(0, cur_sum - possible[m])
                            cost_ += cost_c[cut][j-1]
                            if cost_ < cost_best:
                                cost_best = cost_
                                S_ = copy.deepcopy(S)
                                S_.append(i-cut)
                                S_best = S_
                        trace[i][j][m] = (S_best, cost_best)
    # (debug print of the trace table removed; it was a no-op loop)
    return trace[L-1][k-1][0]


def brute_force(L, cost_e, cost_c, k, B):
    """Exhaustively try every (k-1)-cut placement; reference for pipe_dp."""
    best_S = []
    best_cost = np.inf  # was np.infty (removed in NumPy 2.0)
    possible = list(itertools.combinations(range(L-1), k-1))
    for p in possible:
        p = list(p)
        p.append(L-1)
        lens = [sum(cost_e[:p[0]+1])]
        s = [p[0]+1]
        for i in range(len(p)-1):
            lens.append(sum(cost_e[p[i]+1:p[i+1]+1]))
            s.append(p[i+1]-p[i])
        max_l = max(lens)
        cost = (B-1) * max_l
        for i in range(k-1):
            cost += cost_c[p[i]][i]
        if cost < best_cost:
            best_cost = cost
            best_S = s
    return best_S, best_cost


def uniform_split(L, cost_e, cost_c, k, B):
    """Baseline: equal-sized stages (remainder goes to the last stage)."""
    per_stage = L // k
    s = [per_stage] * (k-1)
    s.append(L-sum(s))
    p = [s[0]-1]
    for i in range(1, k):
        p.append(p[i-1] + s[i])
    lens = [sum(cost_e[:p[0]+1])]
    for i in range(len(s)-1):
        lens.append(sum(cost_e[p[i]+1:p[i+1]+1]))
    max_l = max(lens)
    cost = (B-1) * max_l
    for i in range(k-1):
        cost += cost_c[p[i]][i]
    return s, cost
# -

L = 4
k = 2
cost_e = [1, 3, 2, 5]
cost_c = np.ones((L-1, k-1)) * 2
pipe_dp(L, cost_e, cost_c, k, 3)

test_list = [(12, 4), (24, 4), (24, 8), (24, 12), (36, 8)]

# homogeneous test
for L, k in test_list:
    cost_e = np.ones(L)
    cost_c = np.ones((L-1, k-1)) * 2
    time_s = time.time()
    res = pipe_dp(L, cost_e, cost_c, k, 3)
    print(f"homo dp L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time.time() - time_s}")
    time_s = time.time()
    res = brute_force(L, cost_e, cost_c, k, 3)
    print(f"homo bf L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time.time() - time_s}")
    time_s = time.time()
    res = uniform_split(L, cost_e, cost_c, k, 3)
    print(f"homo us L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time.time() - time_s}")

# heterogeneous test
for L, k in test_list:
    cost_e = np.random.randint(low=5, high=10, size=L)
    cost_c = np.random.randint(low=5, high=10, size=(L-1, k-1))
    time_s = time.time()
    res = pipe_dp(L, cost_e, cost_c, k, 3)
    print(f"hete dp L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time.time() - time_s}")
    time_s = time.time()
    res = brute_force(L, cost_e, cost_c, k, 3)
    print(f"hete bf L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time.time() - time_s}")
    time_s = time.time()
    res = uniform_split(L, cost_e, cost_c, k, 3)
    print(f"hete us L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time.time() - time_s}")

test_list_large = [(12, 4), (24, 12), (36, 8), (36, 12), (48, 12), (48, 24),
                   (64, 12), (64, 16), (128, 32), (128, 12), (128, 50)]
for L, k in test_list_large:
    cost_e = np.random.randint(low=5, high=10, size=L)
    cost_c = np.random.randint(low=5, high=10, size=(L-1, k-1))
    time_s = time.time()
    res = pipe_dp(L, cost_e, cost_c, k, 3)
    print(f"hete dp L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time.time() - time_s}")
    time_s = time.time()
    res = uniform_split(L, cost_e, cost_c, k, 3)
    print(f"hete us L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time.time() - time_s}")

# +
from matplotlib import pyplot as plt

test_list = [(16, 8), (17, 8), (18, 8), (19, 8), (20, 8), (21, 8), (22, 8), (23, 8), (24, 8)]
dp_time = []
bf_time = []
# homogeneous test: time DP vs brute force as the layer count grows
for L, k in test_list:
    cost_e = np.ones(L)
    cost_c = np.ones((L-1, k-1)) * 2
    time_s = time.time()
    res = pipe_dp(L, cost_e, cost_c, k, 3)
    time_elapsed = time.time() - time_s
    dp_time.append(time_elapsed)
    print(f"homo dp L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time_elapsed}")
    time_s = time.time()
    res = brute_force(L, cost_e, cost_c, k, 3)
    time_elapsed = time.time() - time_s
    bf_time.append(time_elapsed)
    print(f"homo bf L={L} k={k} is {res[0]}, minimum cost {res[1]}. Took time {time_elapsed}")
# -

plt.plot([16, 17, 18, 19, 20, 21, 22, 23, 24], dp_time, label="ours")
plt.plot([16, 17, 18, 19, 20, 21, 22, 23, 24], bf_time, label="brute force")
plt.xlabel("number of layers")
plt.ylabel("runtime")
plt.legend(loc="best")


# +
def draw_fill(puzzle, patternLength, patternWidth, start, count, solList):
    """Recursively tile `puzzle` with patternLength x patternWidth rectangles.

    Marks placed tiles with an increasing `count`; complete tilings are
    appended to `solList`. Tries the tile in both orientations at `start`.
    """
    count += 1
    puzzleLength, puzzleWidth = puzzle.shape
    patternNum = (puzzleWidth*puzzleLength)/(patternWidth*patternLength)

    # try the pattern in its original (horizontal) orientation
    horizontal = False
    if start[0] + patternLength <= puzzleLength and start[1] + patternWidth <= puzzleWidth:
        horizontal = True
        #if (puzzle[start[0]:start[0]+patternLength, start[1]:start[1]+patternWidth] != 0).any():
        for i in range(start[0], start[0]+patternLength):
            for j in range(start[1], start[1]+patternWidth):
                if puzzle[i][j] != 0:
                    horizontal = False
    if horizontal:
        newPuzzle = copy.deepcopy(puzzle)
        for i in range(start[0], start[0]+patternLength):
            for j in range(start[1], start[1]+patternWidth):
                newPuzzle[i][j] = count
        if count == patternNum:
            solList.append(newPuzzle)
            return
        # find the next empty cell (row-major) and recurse
        for i in range(start[0], puzzleLength):
            for j in range(0, puzzleWidth):
                if newPuzzle[i][j] == 0:
                    newStart = (i, j)
                    break
            else:
                continue
            break
        draw_fill(newPuzzle, patternLength, patternWidth, newStart, count, solList)

    # try the pattern rotated 90 degrees (vertical orientation)
    vertical = False
    if patternLength != patternWidth and start[0]+patternWidth <= puzzleLength and start[1]+patternLength <= puzzleWidth:
        vertical = True
        for i in range(start[0], start[0]+patternWidth):
            for j in range(start[1], start[1]+patternLength):
                if puzzle[i][j] != 0:
                    vertical = False
    if vertical:
        newPuzzle = copy.deepcopy(puzzle)
        for i in range(start[0], start[0]+patternWidth):
            for j in range(start[1], start[1]+patternLength):
                newPuzzle[i][j] = count
        if count == patternNum:
            solList.append(newPuzzle)
            return
        for i in range(start[0], puzzleLength):
            for j in range(0, puzzleWidth):
                if newPuzzle[i][j] == 0:
                    newStart = (i, j)
                    break
            else:
                continue
            break
        draw_fill(newPuzzle, patternLength, patternWidth, newStart, count, solList)


def backtrack(puzzleLength, puzzleWidth, patternLength, patternWidth):
    """Return all tilings of the puzzle grid by the pattern rectangle."""
    patternNum = (puzzleWidth*puzzleLength)/(patternWidth*patternLength)
    solList = []
    if patternNum % 1 == 0:  # the pattern must divide the grid exactly
        inputPuzzle = np.zeros((puzzleLength, puzzleWidth))
        draw_fill(inputPuzzle, patternLength, patternWidth, (0, 0), 0, solList)
        #solList = np.asarray(solList).reshape((puzzleLength, puzzleWidth))
    return solList
# -


# +
def get_cost_c(conf, L, cluster_info=None):
    """Communication cost per (cut, stage) for a placement `conf`.

    Homogeneous setting; in a real setting, we access the cluster to get
    cost_c. Cross-node stage boundaries cost 1, intra-node cost 0.
    """
    num_stages = int(np.max(conf))
    stage_cost = []
    for i in range(1, num_stages):
        b = np.where(conf == i)
        c = np.where(conf == i+1)
        # All pairs of GPU in the same node
        if (b[1] == c[1]).all():
            stage_cost.append(0)
        else:
            stage_cost.append(1)
    stage_cost = np.asarray(stage_cost).reshape((1, -1))
    ret = copy.deepcopy(stage_cost)
    for i in range(L-1):
        ret = np.concatenate((ret, stage_cost), axis=0)
    return ret


def get_cost_e(conf, L, cluster_info=None):
    """Per-layer execution cost, scaled by GPUs per pipeline (homogeneous)."""
    # homogeneous setting; in real setting, we access cluster to get cost_e
    num_gpus_per_pipeline = conf.shape[0] * conf.shape[1] / np.max(conf)
    return np.ones(L) / num_gpus_per_pipeline


def generate_initial(M, N, threads=2):
    """Seed the search with the two degenerate tile shapes (M,1) and (1,N)."""
    h_w_list = [(M, 1), (1, N)]
    known = {}
    configs = []
    for (h, w) in h_w_list:
        solution = backtrack(M, N, h, w)
        assert len(solution) > 0
        config_idx = np.random.choice(len(solution), size=1)[0]
        config = solution[config_idx]
        configs.append(config)
        solution.pop(config_idx)
        known[(h, w)] = solution
    return h_w_list, configs, known


def cool_down(iter, max_iter, init_temp):
    """Linear cooling schedule for simulated annealing."""
    return init_temp * (1 - iter / max_iter)


def neighbor(cur, known, M, N, maximum_try=10):
    """Sample a neighboring tile shape/configuration for annealing.

    NOTE: `maximum_try` is currently unused; the loop is bounded by a
    10-second wall clock instead. Returns (h, w, config) or None on timeout.
    """
    h, w = cur
    time_s = time.time()
    while time.time() - time_s < 10:
        index = np.random.choice([0, 1], size=1)[0]
        if index == 0:
            # perturb the height, keeping the width fixed
            valid = []
            upper = min(M, N)
            upper = min((M*N) // w, upper) + 1
            for i in range(1, upper):
                if (i, w) in known.keys():
                    solution = known[(i, w)]
                else:
                    solution = backtrack(M, N, i, w)
                    known[(i, w)] = solution
                if len(solution) > 0:
                    valid.append(i)
            if len(valid) == 0:
                continue
            new_h = np.random.choice(valid, size=1)[0]
            # TODO
            new_config_idx = np.random.choice(len(known[(new_h, w)]), size=1)[0]
            ret = known[(new_h, w)].pop(new_config_idx)
            return new_h, w, ret
        else:
            # perturb the width, keeping the height fixed
            valid = []
            upper = min(M, N)
            upper = min((M*N) // h, upper) + 1
            for i in range(1, upper):
                if (h, i) in known.keys():
                    solution = known[(h, i)]
                else:
                    solution = backtrack(M, N, h, i)
                    known[(h, i)] = solution
                if len(solution) > 0:
                    valid.append(i)
            if len(valid) == 0:
                continue
            new_w = np.random.choice(valid, size=1)[0]
            new_config_idx = np.random.choice(len(known[(h, new_w)]), size=1)[0]
            ret = known[(h, new_w)].pop(new_config_idx)
            return h, new_w, ret
    return None


def predict(configs, L, B):
    """Evaluate each placement's optimal pipeline cost via pipe_dp."""
    costs = []
    for i in range(len(configs)):
        config = np.asarray(configs[i])
        #config = config.reshape((config.shape[0], config.shape[2]))
        cost_e = get_cost_e(config, L)
        cost_c = get_cost_c(config, L)
        k = int(np.max(config))
        cost = pipe_dp(L, cost_e, cost_c, k, B)[1]
        costs.append(cost)
    return np.asarray(costs)


# number of GPU per node
M = 8
N = 4
num_iter = 500
init_t = 1
# 16 layers network, 3 macro-batches
L = 16
B = 3

h_w, configs, known = generate_initial(M, N)
costs = predict(configs, L, B)

# simulated-annealing main loop over placement configurations
for i in range(num_iter):
    cur_t = cool_down(i, num_iter, init_t)
    new_configs = []
    new_h_w = []
    for (h, w) in h_w:
        step = neighbor((h, w), known, M, N)
        if step is None:
            new_h, new_w, new_config = (h, w, configs[h_w.index((h, w))])
        else:
            new_h, new_w, new_config = step
        if step is None:
            assert False
        else:
            pass
            #print(step)
        new_h_w.append((new_h, new_w))
        new_configs.append(new_config)
    new_costs = predict(new_configs, L, B)
    # Metropolis acceptance: always accept improvements, sometimes accept worse
    acc_prob = np.exp(np.minimum((costs - new_costs) / (cur_t+1e-5), 0))
    acc_index = (np.random.random(len(acc_prob)) < acc_prob)
    for j in range(len(configs)):
        if acc_index[j]:
            configs[j] = new_configs[j]
            costs[j] = new_costs[j]
configs, costs
# -

# +
# Scratch code below
# -

# +
def placement_reachable(M, N, m, n, s_joint):
    """Check whether the partial column stack `s_joint` can still be tiled.

    Scans the columns left-to-right trying to match a horizontal (m x n) or
    vertical (n x m) tile footprint. Returns (valid, terminate) where
    `terminate` means every matched tile was complete.
    """
    #horizontal_tile = np.asarray(list(range(m * n))).reshape((m, n))
    #vertical_tile = np.transpose(horizontal_tile)
    horizontal_tile = np.ones((m, n))
    vertical_tile = np.ones((n, m))
    vertical_tile[0] = 0
    t = True
    i = 0
    while i < N:
        match = False
        # Check whether horizontal
        if i <= N - n:
            for j in range(n-m, n):
                match_height = n-j
                if (s_joint[j:, i:i+n] == horizontal_tile[:match_height, :]).all():
                    i += n
                    if j != n-m:
                        t = False
                    match = True
                    break
        if i <= N - m:
            for j in range(n):
                match_height = n-j
                if (s_joint[j:, i:i+m] == vertical_tile[:match_height, :]).all():
                    i += m
                    if j != 0:
                        t = False
                    match = True
                    break
        if not match:
            # BUGFIX: was `return False, _` — `_` is undefined in a plain
            # script and raised NameError; return None as the placeholder.
            return False, None
    return True, t


# ! Always assume m < n
def init(M, N, m, n, s_array):
    """Validate the first rows of a candidate placement against tile shapes.

    Returns (valid, terminate): `valid` if every cell is covered by a
    (possibly clipped) horizontal or vertical tile, `terminate` if all
    matched tiles were full height.
    """
    h, w = s_array.shape
    checked = np.zeros((h, w))
    # horizontal_tile = np.asarray(list(range(m * n))).reshape((m, n))
    # vertical_tile = np.transpose(horizontal_tile)
    horizontal_tile = np.ones((m, n))
    vertical_tile = np.ones((n, m))
    vertical_tile[0] = 0
    terminate = True
    for i in range(h):
        for j in range(w):
            if checked[i][j] == 1:
                continue
            # Check horizontal
            if i <= M - m and j <= N - n:
                match_height = min(h-i, m)
                if (s_array[i:i+match_height, j:j+n] == horizontal_tile[:match_height, :]).all() and (checked[i:i+m, j:j+n] != 1).all():
                    checked[i:i + m, j: j + n] = 1
                    if match_height != m:
                        terminate = False
                    continue
            # Check vertical
            if i <= M - n and j <= N - m:
                match_height = min(h-i, n)
                if (s_array[i:i+match_height, j:j+m] == vertical_tile[:match_height, :]).all() and (checked[i:i+n, j:j+m] != 1).all():
                    checked[i:i + n, j: j + m] = 1
                    if match_height != n:
                        terminate = False
                    continue
            # BUGFIX: was `return False, _` — NameError; see placement_reachable.
            return False, None
    return True, terminate


# returns all possible pipe group configurations
def generate_placement(grid, len_1, len_2):
    """Enumerate placements of len_1 x len_2 pipe groups on `grid` by row DP."""
    tot_len = len_1 * len_2
    # possible configuration number for a row
    from itertools import product
    #possible_s = list(product(range(tot_len),repeat = grid.shape[1]*(len_2-1)))
    #single_possible_s = list(product(list(range(tot_len)),repeat = grid.shape[1]))
    possible_s = list(product(range(2), repeat=grid.shape[1]*(len_2-1)))
    single_possible_s = list(product(list(range(2)), repeat=grid.shape[1]))
    for i in range(len(possible_s)):
        possible_s[i] = np.asarray(list(possible_s[i])).reshape(1, -1)
    for i in range(len(single_possible_s)):
        single_possible_s[i] = np.asarray(list(single_possible_s[i])).reshape(1, -1)

    # the solution will be the union of all possible configurations in the last row
    dp = [[None for j in range(len(possible_s))] for i in range(grid.shape[0])]

    # initialize the first (len_2 - 1) rows
    for s_index in range(len(possible_s)):
        valid, terminate = init(grid.shape[0], grid.shape[1], len_1, len_2,
                                possible_s[s_index].reshape(-1, grid.shape[1]))
        if valid:
            dp[0][s_index] = [(possible_s[s_index].reshape(-1, grid.shape[1]), terminate)]
    print(dp[0])

    # dp by row index
    for i in range(len_2-1, grid.shape[0]):
        print(" ")
        print(dp[i-1], i)
        print(" ")
        for s_index_1 in range(len(possible_s)):
            for s_index_2 in range(len(single_possible_s)):
                s_1 = possible_s[s_index_1]
                s_2 = single_possible_s[s_index_2]
                s_joint = np.concatenate((s_1, s_2), axis=0)
                # early return if the last rows themselves are not possible
                if dp[i-1][s_index_1] is None:
                    print(i-1, s_index_1)
                    continue
                #valid, terminate = init(grid.shape[0], grid.shape[1], len_1, len_2, s_joint)
                valid, terminate = placement_reachable(grid.shape[0], grid.shape[1], len_1, len_2, s_joint)
                if valid:
                    if dp[i][s_index_2] is None:
                        dp[i][s_index_2] = []
                    for solution in dp[i-1][s_index_1]:
                        sol, _ = solution
                        s_joint_sol = np.concatenate((copy.deepcopy(sol), s_2), axis=0)
                        dp[i][s_index_2].append((s_joint_sol, terminate))

    # collect complete (terminated) solutions from the last row
    ret_sol = []
    for i in range(len(single_possible_s)):
        s = possible_s[i]
        if dp[grid.shape[0]-1][i] is None:
            continue
        for (sol, t) in dp[grid.shape[0]-1][i]:
            if t:
                ret_sol.append(sol)
    return ret_sol


# for len_1 in factors:
#     # Generate all possible configurations
#     remain = num_gpu / len_1
#     factors_2 = []
#     for i in range(1, min(cluster_shape) + 1):
#         if remain % i == 0:
#             factors_2.append(i)
#     for len_2 in factors_2:
#         num_cut = num_gpu / (len_1*len_2)
#         confs = generate_placement(grid, len_1, len_2)
#         for conf in confs:
#             cost_c = get_cost_c(conf)
#             cost_e = get_cost_e(conf)
#             opt_pipe = pipe_dp(L, cost_e, cost_c, num_cut, B)
#             cost = amp_simulator(conf, opt_pipe)
deepspeed/amp/pipeline_dp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from ipywidgets import *
import nglview as nv
import pytraj as pt
from IPython.display import display

# load two structures into the same NGL view
view = nv.show_pdbid('1tsu')
view.add_pdbid('1l2y')


class ComponentViewer:
    """Checkbox panel that toggles the visibility of each NGL component."""

    def __init__(self, view):
        self._view = view
        self.children = []
        for index in range(len(view._ngl_component_ids)):
            toggle = Checkbox(value=True)
            # remember which component this checkbox controls
            setattr(toggle, '_ngl_index', index)

            def on_change(change):
                component = self._view[change['owner']._ngl_index]
                if change['new']:
                    component.show()
                else:
                    component.hide()

            toggle.observe(on_change, ['value'])
            self.children.append(HBox([toggle, Label(value=str(index))]))

    def create_view(self):
        """Return the checkboxes stacked vertically."""
        return VBox(self.children)


cv = ComponentViewer(view)
cv

HBox([view, cv.create_view()])

view.layout.width = '400px'
nglview/tests/notebooks/extra/component_viewer.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 0.6.2
#     language: julia
#     name: julia-0.6
# ---

# ## Condensate growth in 2D projected GPE
# This notebook gives an example of usage for the 2D projected GPE.
#
# Starting from a random initial condition, the low energy c-field evolves in the presence of a high energy reservoir able to exchange particles and energy with the c-field. This exchange is modelled using the damped Projected Gross-Pitaevskii equation, an equation of motion with no noise terms.
#
# The final states after long time evolution are ground states of the Projected GPE with chemical potential $\mu$ matching that of the reservoir.
#
# Tempeature is not a meaningful exteral parameter of the model: in the absence of a thermal bath, the evolution internally thermalises the c-field.

using Revise, ProjectedGPE

# +
function timeevolution2()
    siminfo = Params()

    #== start parameters ==#
    # fundamental constants/units
    ħ   = 1.0545718e-34
    kB  = 1.38064852e-23
    amu = 1.660339040e-27
    a₀  = 5.29e-11

    # Rb87 mass and scattering length
    m  = 86.909180527*amu
    as = 100*a₀

    # trap frequencies
    ωx = 2π
    ωy = 4ωx
    ωz = 0.

    # choice of time, length, energy units
    t0 = 1.0/ωy
    x0 = sqrt(ħ*t0/m)
    E0 = ħ/t0

    # interactions
    #g = (4*pi*ħ^2*as/m)*x0^3/E0   # dimensionless 3D
    g = 0.1                        # test 2D

    # damping parameters (dimensionless)
    γ = 0.05
    ℳ = 0.0

    # chemical potential (dimensionless)
    μ = 12.0

    # time evolution parameters
    ti = 0.0
    tf = 1.0/γ              # evolve for 2 damping times
    Nt = 50
    t  = collect(linspace(ti,tf,Nt))
    dt = 0.01π/μ            # integrate step size [ - should have dt ≪ 2π/μ]
    #== end parameters ==#

    @pack siminfo = ωx,ωy,ωz,γ,ℳ,g,t0,x0,E0,μ,ti,tf,Nt,t,dt

    # Initialize CField (dimensionless)
    basis = "Hermite"
    ecut  = 30*ħ*ωy/E0
    Ω     = [ωx; ωy]*t0
    cinfo = makecinfo(ecut,Ω,basis)
    @unpack en,P,M = cinfo; Mx,My = M
    x,wx,Tx,y,wy,Ty = makealltrans(M,4,Ω)
    W = wx.*wy'

    # test transform
    c0 = randn(Mx,My)+im*randn(Mx,My); c0=P.*c0
    ψ0 = Tx*c0*Ty'   # initial condition
    ψ  = Tx*c0*Ty'   # a field to write to in place

    # PGPE time evolution
    # out of place nonlinear term
    function nlin(c)
        ψ = Tx*c*Ty'
        Tx'*(W.*abs2.(ψ).*ψ)*Ty
    end
    # in place nonlinear term
    function nlin!(dc,c)
        ψ = Tx*c*Ty'
        dc .= Tx'*(W.*abs2.(ψ).*ψ)*Ty
    end
    # dPGPE in reservoir "frame"
    # out of place
    function Lgp(c,p,t)
        return P.*(-im*(1-im*γ)*((en - μ).*c .+ g*nlin(c)))
    end
    # in place
    function Lgp!(dc,c,p,t)
        nlin!(dc,c)
        dc .= P.*(-im*(1-im*γ)*((en - μ).*c .+ g*dc))
    end

    c0 = P.*(randn(Mx,My) + im*randn(Mx,My))   # create random initial state
    tspan = (t[1],t[end])
    prob  = ODEProblem(Lgp!,c0,tspan)
    alg   = DP5()
    println("Started evolution ...")
    @time sol = solve(prob,alg,dt=dt,saveat=t);
    println("... Finished.")
    return siminfo,cinfo,sol
end
# -

siminfo,cinfo,sol = timeevolution2()

# plot solution for 2D
## Transform to cartesian grid
# NOTE(review): ħ and m are unpacked here but were not in the @pack list above —
# presumably Params() carries defaults for them; verify against ProjectedGPE.
@unpack ħ,m,ωx,ωy,ωz,γ,ℳ,g,x0,t0,E0,μ,ti,tf,Nt,t,dt = siminfo
@unpack M,Ω,ecut,P,en = cinfo; Mx,My = M;

# +
using Interact, PyPlot

# lets be explicit about units:
Rx = sqrt(2μ*E0/m/ωx^2)
Ry = sqrt(2μ*E0/m/ωy^2)
yMax = 1.5Ry
xMax = 1.5Rx
Nx = 400
Ny = Nx
x = collect(linspace(-xMax,xMax,Nx))
y = collect(linspace(-yMax,yMax,Ny))
Tx = eigmat(Mx,x/x0,ω=ωx/ωy)
Ty = eigmat(My,y/x0,ω=ωy/ωy);
#θ = unwrap(angle(ψ));

# Plot
f = figure(figsize=(12,3))
@manipulate for i in 1:length(t)
    withfig(f,clear=true) do
        ψ = Tx*sol[i]*Ty';
        pcolormesh(x/x0,y/x0,g*abs2.(ψ'))
        xlabel("x/x0")
        ylabel("y/x0")
        title("t = $(t[i])")
        colorbar()
    end
end
# -
examples/condensate growth.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="skzB79nE-Dco" # # Colab-pytorch-image-classification # Original repo: [bentrevett/pytorch-image-classification](https://github.com/bentrevett/pytorch-image-classification) # # EfficientNet repo: [lukemelas/EfficientNet-PyTorch](https://github.com/lukemelas/EfficientNet-PyTorch) # # My fork: [styler00dollar/Colab-image-classification](https://github.com/styler00dollar/Colab-image-classification) # # This colab is a combination of [this Colab](https://colab.research.google.com/github/bentrevett/pytorch-image-classification/blob/master/5_resnet.ipynb) and [my other Colab](https://colab.research.google.com/github/styler00dollar/Colab-image-classification/blob/master/5_(small)_ResNet.ipynb) to do EfficientNet training. # + id="5UhdgIADdX21" # !nvidia-smi # + id="NW901aKHByGI" # !pip install efficientnet_pytorch # + [markdown] id="gYHmST_xy3gY" # # DATASET CREATION # + id="XQUSvIJDXrUz" cellView="form" #@title Mount Google Drive from google.colab import drive drive.mount('/content/drive') print('Google Drive connected.') # + id="HT74ZbEfdqKM" # # copy data somehow # !mkdir '/content/classification' # !mkdir '/content/classification/images' # !cp "/content/drive/MyDrive/classification_v2.7z" "/content/classification/images/classification.7z" # %cd /content/classification/images !7z x "classification.7z" # !rm -rf /content/classification/images/classification.7z # + id="tnhJlc18BkXp" cellView="form" #@title dataset creation TRAIN_RATIO = 0.90 #@param {type:"number"} import os import shutil from tqdm import tqdm #data_dir = os.path.join(ROOT, 'CUB_200_2011') data_dir = '/content/classification' #@param {type:"string"} images_dir = os.path.join(data_dir, 'images') train_dir = os.path.join(data_dir, 'train') test_dir = 
os.path.join(data_dir, 'test') if os.path.exists(train_dir): shutil.rmtree(train_dir) if os.path.exists(test_dir): shutil.rmtree(test_dir) os.makedirs(train_dir) os.makedirs(test_dir) classes = os.listdir(images_dir) for c in classes: class_dir = os.path.join(images_dir, c) images = os.listdir(class_dir) n_train = int(len(images) * TRAIN_RATIO) train_images = images[:n_train] test_images = images[n_train:] os.makedirs(os.path.join(train_dir, c), exist_ok = True) os.makedirs(os.path.join(test_dir, c), exist_ok = True) for image in tqdm(train_images): image_src = os.path.join(class_dir, image) image_dst = os.path.join(train_dir, c, image) shutil.copyfile(image_src, image_dst) for image in tqdm(test_images): image_src = os.path.join(class_dir, image) image_dst = os.path.join(test_dir, c, image) shutil.copyfile(image_src, image_dst) # + [markdown] id="sENsL4f6y66s" # # CALC MEANS & STDS # + id="AORf1yn3Pw4H" cellView="form" #@title print means and stds import torch import torchvision.transforms as transforms import torchvision.datasets as datasets from tqdm import tqdm train_data = datasets.ImageFolder(root = train_dir, transform = transforms.ToTensor()) means = torch.zeros(3) stds = torch.zeros(3) for img, label in tqdm(train_data): means += torch.mean(img, dim = (1,2)) stds += torch.std(img, dim = (1,2)) means /= len(train_data) stds /= len(train_data) print("\n") print(f'Calculated means: {means}') print(f'Calculated stds: {stds}') # + [markdown] id="lFKOEBQazWJc" # # TRAIN # + id="NilSkBKhPthJ" cellView="form" #@title import, seed, transforms, dataloader, functions, plot, model, parameter # %cd /content/ from efficientnet_pytorch import EfficientNet from tqdm import tqdm import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler from torch.optim.lr_scheduler import _LRScheduler import torch.utils.data as data import torchvision.transforms as transforms import torchvision.datasets as 
datasets import torchvision.models as models from sklearn import decomposition from sklearn import manifold from sklearn.metrics import confusion_matrix from sklearn.metrics import ConfusionMatrixDisplay import matplotlib.pyplot as plt import numpy as np import copy from collections import namedtuple import os import random import shutil SEED = 1234 #@param {type:"number"} random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True train_dir = '/content/classification/train' #@param {type:"string"} test_dir = '/content/classification/test' #@param {type:"string"} pretrained_size = 256 #@param {type:"number"} pretrained_means = [0.6838, 0.6086, 0.6063] #@param {type:"raw"} pretrained_stds= [0.2411, 0.2403, 0.2306] #@param {type:"raw"} #https://github.com/mit-han-lab/data-efficient-gans/blob/master/DiffAugment_pytorch.py import torch import torch.nn.functional as F def DiffAugment(x, policy='', channels_first=True): if policy: if not channels_first: x = x.permute(0, 3, 1, 2) for p in policy.split(','): for f in AUGMENT_FNS[p]: x = f(x) if not channels_first: x = x.permute(0, 2, 3, 1) x = x.contiguous() return x def rand_brightness(x): x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5) return x def rand_saturation(x): x_mean = x.mean(dim=1, keepdim=True) x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean return x def rand_contrast(x): x_mean = x.mean(dim=[1, 2, 3], keepdim=True) x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean return x def rand_translation(x, ratio=0.125): shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5) translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device) translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device) grid_batch, grid_x, grid_y = 
torch.meshgrid( torch.arange(x.size(0), dtype=torch.long, device=x.device), torch.arange(x.size(2), dtype=torch.long, device=x.device), torch.arange(x.size(3), dtype=torch.long, device=x.device), ) grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1) grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1) x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0]) x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2) return x def rand_cutout(x, ratio=0.5): cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5) offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device) offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device) grid_batch, grid_x, grid_y = torch.meshgrid( torch.arange(x.size(0), dtype=torch.long, device=x.device), torch.arange(cutout_size[0], dtype=torch.long, device=x.device), torch.arange(cutout_size[1], dtype=torch.long, device=x.device), ) grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1) grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1) mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device) mask[grid_batch, grid_x, grid_y] = 0 x = x * mask.unsqueeze(1) return x AUGMENT_FNS = { 'color': [rand_brightness, rand_saturation, rand_contrast], 'translation': [rand_translation], 'cutout': [rand_cutout], } train_transforms = transforms.Compose([ transforms.Resize(pretrained_size), transforms.RandomRotation(5), transforms.RandomHorizontalFlip(0.5), transforms.RandomCrop(pretrained_size, padding = 10), transforms.ToTensor(), transforms.Normalize(mean = pretrained_means, std = pretrained_stds) ]) test_transforms = transforms.Compose([ transforms.Resize(pretrained_size), transforms.CenterCrop(pretrained_size), transforms.ToTensor(), transforms.Normalize(mean = pretrained_means, std = pretrained_stds) 
]) train_data = datasets.ImageFolder(root = train_dir, transform = train_transforms) test_data = datasets.ImageFolder(root = test_dir, transform = test_transforms) VALID_RATIO = 0.90 #@param {type:"number"} n_train_examples = int(len(train_data) * VALID_RATIO) n_valid_examples = len(train_data) - n_train_examples train_data, valid_data = data.random_split(train_data, [n_train_examples, n_valid_examples]) valid_data = copy.deepcopy(valid_data) valid_data.dataset.transform = test_transforms print(f'Number of training examples: {len(train_data)}') print(f'Number of validation examples: {len(valid_data)}') print(f'Number of testing examples: {len(test_data)}') BATCH_SIZE = 32 #@param {type:"number"} train_iterator = data.DataLoader(train_data, shuffle = True, batch_size = BATCH_SIZE) valid_iterator = data.DataLoader(valid_data, batch_size = BATCH_SIZE) test_iterator = data.DataLoader(test_data, batch_size = BATCH_SIZE) def normalize_image(image): image_min = image.min() image_max = image.max() image.clamp_(min = image_min, max = image_max) image.add_(-image_min).div_(image_max - image_min + 1e-5) return image def plot_images(images, labels, classes, normalize = True): n_images = len(images) rows = int(np.sqrt(n_images)) cols = int(np.sqrt(n_images)) fig = plt.figure(figsize = (15, 15)) for i in range(rows*cols): ax = fig.add_subplot(rows, cols, i+1) image = images[i] if normalize: image = normalize_image(image) ax.imshow(image.permute(1, 2, 0).cpu().numpy()) label = classes[labels[i]] ax.set_title(label) ax.axis('off') N_IMAGES = 25 #@param {type:"number"} images, labels = zip(*[(image, label) for image, label in [train_data[i] for i in range(N_IMAGES)]]) classes = test_data.classes plot_images(images, labels, classes) def format_label(label): label = label.split('.')[-1] label = label.replace('_', ' ') label = label.title() label = label.replace(' ', '') return label test_data.classes = [format_label(c) for c in test_data.classes] classes = test_data.classes 
plot_images(images, labels, classes) model_train = 'efficientnet-b4' #@param ["efficientnet-b0", "efficientnet-b1", "efficientnet-b2", "efficientnet-b3", "efficientnet-b4", "efficientnet-b5", "efficientnet-b6", "efficientnet-b7"] {type:"string"} if model_train == 'efficientnet-b0': model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=len(test_data.classes)) elif model_train == 'efficientnet-b1': model = EfficientNet.from_pretrained('efficientnet-b1', num_classes=len(test_data.classes)) elif model_train == 'efficientnet-b2': model = EfficientNet.from_pretrained('efficientnet-b2', num_classes=len(test_data.classes)) elif model_train == 'efficientnet-b3': model = EfficientNet.from_pretrained('efficientnet-b3', num_classes=len(test_data.classes)) elif model_train == 'efficientnet-b4': model = EfficientNet.from_pretrained('efficientnet-b4', num_classes=len(test_data.classes)) elif model_train == 'efficientnet-b5': model = EfficientNet.from_pretrained('efficientnet-b5', num_classes=len(test_data.classes)) elif model_train == 'efficientnet-b6': model = EfficientNet.from_pretrained('efficientnet-b6', num_classes=len(test_data.classes)) elif model_train == 'efficientnet-b7': model = EfficientNet.from_pretrained('efficientnet-b7', num_classes=len(test_data.classes)) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') START_LR = 1e-7 #@param {type:"number"} optimizer = optim.Adam(model.parameters(), lr=START_LR) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') criterion = nn.CrossEntropyLoss() model = model.to(device) criterion = criterion.to(device) class LRFinder: def __init__(self, model, optimizer, criterion, device): self.optimizer = optimizer self.model = model self.criterion = criterion self.device = device torch.save(model.state_dict(), 'init_params.pt') def range_test(self, iterator, end_lr = 10, num_iter = 100, 
smooth_f = 0.05, diverge_th = 5): lrs = [] losses = [] best_loss = float('inf') lr_scheduler = ExponentialLR(self.optimizer, end_lr, num_iter) iterator = IteratorWrapper(iterator) for iteration in tqdm(range(num_iter)): loss = self._train_batch(iterator) #update lr lr_scheduler.step() lrs.append(lr_scheduler.get_lr()[0]) if iteration > 0: loss = smooth_f * loss + (1 - smooth_f) * losses[-1] if loss < best_loss: best_loss = loss losses.append(loss) if loss > diverge_th * best_loss: print("Stopping early, the loss has diverged") break #reset model to initial parameters model.load_state_dict(torch.load('init_params.pt')) return lrs, losses def _train_batch(self, iterator): self.model.train() self.optimizer.zero_grad() x, y = iterator.get_batch() x = x.to(self.device) y = y.to(self.device) y_pred, _ = self.model(x) loss = self.criterion(y_pred, y) loss.backward() self.optimizer.step() return loss.item() class ExponentialLR(_LRScheduler): def __init__(self, optimizer, end_lr, num_iter, last_epoch=-1): self.end_lr = end_lr self.num_iter = num_iter super(ExponentialLR, self).__init__(optimizer, last_epoch) def get_lr(self): curr_iter = self.last_epoch + 1 r = curr_iter / self.num_iter return [base_lr * (self.end_lr / base_lr) ** r for base_lr in self.base_lrs] class IteratorWrapper: def __init__(self, iterator): self.iterator = iterator self._iterator = iter(iterator) def __next__(self): try: inputs, labels = next(self._iterator) except StopIteration: self._iterator = iter(self.iterator) inputs, labels, *_ = next(self._iterator) return inputs, labels def get_batch(self): return next(self) def calculate_topk_accuracy(y_pred, y, k = 5): with torch.no_grad(): batch_size = y.shape[0] _, top_pred = y_pred.topk(k=1) top_pred = top_pred.t() correct = top_pred.eq(y.view(1, -1).expand_as(top_pred)) correct_1 = correct[:1].view(-1).float().sum(0, keepdim = True) #correct_k = correct[:k].view(-1).float().sum(0, keepdim = True) acc_1 = correct_1 / batch_size #acc_k = correct_k / 
batch_size acc_k = 0 return acc_1, acc_k def train(model, iterator, optimizer, criterion, scheduler, device, current_epoch): epoch_loss = 0 epoch_acc_1 = 0 epoch_acc_5 = 0 model.train() policy = 'color,translation,cutout' #@param {type:"string"} diffaug_activate = True #@param ["False", "True"] {type:"raw"} #https://stackoverflow.com/questions/45465031/printing-text-below-tqdm-progress-bar with tqdm(iterator, position=1, bar_format='{desc}') as desc: for (x, y) in tqdm(iterator, position=0): x = x.to(device) y = y.to(device) optimizer.zero_grad() if diffaug_activate == False: y_pred = model(x) else: y_pred = model(DiffAugment(x, policy=policy)) loss = criterion(y_pred, y) acc_1, acc_5 = calculate_topk_accuracy(y_pred, y) loss.backward() optimizer.step() scheduler.step() epoch_loss += loss.item() epoch_acc_1 += acc_1.item() #epoch_acc_5 += acc_5.item() epoch_loss /= len(iterator) epoch_acc_1 /= len(iterator) desc.set_description(f'Epoch: {current_epoch+1}') desc.set_description(f'\tTrain Loss: {epoch_loss:.3f} | Train Acc @1: {epoch_acc_1*100:6.2f}% | ' \ f'Train Acc @5: {epoch_acc_5*100:6.2f}%') return epoch_loss, epoch_acc_1, epoch_acc_5 def evaluate(model, iterator, criterion, device): epoch_loss = 0 epoch_acc_1 = 0 epoch_acc_5 = 0 model.eval() with torch.no_grad(): with tqdm(iterator, position=0, bar_format='{desc}', leave=True) as desc: for (x, y) in iterator: x = x.to(device) y = y.to(device) y_pred = model(x) loss = criterion(y_pred, y) acc_1, acc_5 = calculate_topk_accuracy(y_pred, y) epoch_loss += loss.item() epoch_acc_1 += acc_1.item() #epoch_acc_5 += acc_5.item() epoch_loss /= len(iterator) epoch_acc_1 /= len(iterator) #epoch_acc_5 /= len(iterator) desc.set_description(f'\tValid Loss: {epoch_loss:.3f} | Valid Acc @1: {epoch_acc_1*100:6.2f}% | ' \ f'Valid Acc @5: {epoch_acc_5*100:6.2f}%') return epoch_loss, epoch_acc_1, epoch_acc_5 def epoch_time(start_time, end_time): elapsed_time = end_time - start_time elapsed_mins = int(elapsed_time / 60) elapsed_secs 
= int(elapsed_time - (elapsed_mins * 60)) return elapsed_mins, elapsed_secs # + id="fxtlrpfWZk8t" cellView="form" #@title lr_finder END_LR = 10 #@param {type:"number"} NUM_ITER = 100#@param {type:"number"} #100 lr_finder = LRFinder(model, optimizer, criterion, device) lrs, losses = lr_finder.range_test(train_iterator, END_LR, NUM_ITER) # + id="BBzT3twzZnAW" cellView="form" #@title plot_lr_finder def plot_lr_finder(lrs, losses, skip_start = 5, skip_end = 5): if skip_end == 0: lrs = lrs[skip_start:] losses = losses[skip_start:] else: lrs = lrs[skip_start:-skip_end] losses = losses[skip_start:-skip_end] fig = plt.figure(figsize = (16,8)) ax = fig.add_subplot(1,1,1) ax.plot(lrs, losses) ax.set_xscale('log') ax.set_xlabel('Learning rate') ax.set_ylabel('Loss') ax.grid(True, 'both', 'x') plt.show() plot_lr_finder(lrs, losses, skip_start = 30, skip_end = 30) # + id="fDjS7JvNBkZR" cellView="form" #@title config FOUND_LR = 2e-4 #@param {type:"number"} """ params = [ {'params': model.conv1.parameters(), 'lr': FOUND_LR / 10}, {'params': model.bn1.parameters(), 'lr': FOUND_LR / 10}, {'params': model.layer1.parameters(), 'lr': FOUND_LR / 8}, {'params': model.layer2.parameters(), 'lr': FOUND_LR / 6}, {'params': model.layer3.parameters(), 'lr': FOUND_LR / 4}, {'params': model.layer4.parameters(), 'lr': FOUND_LR / 2}, {'params': model.fc.parameters()} ] """ optimizer = optim.Adam(model.parameters(), lr = FOUND_LR) EPOCHS = 25 #@param {type:"number"} STEPS_PER_EPOCH = len(train_iterator) TOTAL_STEPS = EPOCHS * STEPS_PER_EPOCH MAX_LRS = [p['lr'] for p in optimizer.param_groups] scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr = MAX_LRS, total_steps = TOTAL_STEPS) # + id="lvoyX823apEN" cellView="form" #@title training without topk import time best_valid_loss = float('inf') best_valid_accuracy = 0 for epoch in range(EPOCHS): start_time = time.monotonic() train_loss, train_acc_1, train_acc_5 = train(model, train_iterator, optimizer, criterion, scheduler, device, epoch) valid_loss, 
valid_acc_1, valid_acc_5 = evaluate(model, valid_iterator, criterion, device) if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save(model.state_dict(), 'best-validation-loss.pt') if best_valid_accuracy < valid_acc_1: best_valid_accuracy = valid_acc_1 torch.save(model.state_dict(), 'best-validation-accuracy.pt') end_time = time.monotonic() epoch_mins, epoch_secs = epoch_time(start_time, end_time) # + [markdown] id="ccBDif9BzHRD" # ##################################################################################################### # + [markdown] id="qxb15eSW9VsB" # # TESTING # + id="fOla2axMBkZk" cellView="form" #@title Calc test loss model.load_state_dict(torch.load('best-validation-accuracy.pt')) print("best-validation-accuracy.pt") test_loss, test_acc_1, test_acc_5 = evaluate(model, test_iterator, criterion, device) print("-----------------------------") model.load_state_dict(torch.load('best-validation-loss.pt')) print("best-validation-loss.pt") test_loss, test_acc_1, test_acc_5 = evaluate(model, test_iterator, criterion, device) # + id="AlCBiYxQbi4s" cellView="form" #@title plot_confusion_matrix def get_predictions(model, iterator): model.eval() images = [] labels = [] probs = [] with torch.no_grad(): for (x, y) in iterator: x = x.to(device) y_pred = model(x) y_prob = F.softmax(y_pred, dim = -1) top_pred = y_prob.argmax(1, keepdim = True) images.append(x.cpu()) labels.append(y.cpu()) probs.append(y_prob.cpu()) images = torch.cat(images, dim = 0) labels = torch.cat(labels, dim = 0) probs = torch.cat(probs, dim = 0) return images, labels, probs images, labels, probs = get_predictions(model, test_iterator) pred_labels = torch.argmax(probs, 1) def plot_confusion_matrix(labels, pred_labels, classes): fig = plt.figure(figsize = (50, 50)); ax = fig.add_subplot(1, 1, 1); cm = confusion_matrix(labels, pred_labels); cm = ConfusionMatrixDisplay(cm, display_labels = classes); cm.plot(values_format = 'd', cmap = 'Blues', ax = ax) fig.delaxes(fig.axes[1]) 
#delete colorbar plt.xticks(rotation = 90) plt.xlabel('Predicted Label', fontsize = 50) plt.ylabel('True Label', fontsize = 50) plot_confusion_matrix(labels, pred_labels, classes) # + id="lgSkMA9dbzy8" cellView="form" #@title plot corrects = torch.eq(labels, pred_labels) incorrect_examples = [] for image, label, prob, correct in zip(images, labels, probs, corrects): if not correct: incorrect_examples.append((image, label, prob)) incorrect_examples.sort(reverse = True, key = lambda x: torch.max(x[2], dim = 0).values) def plot_most_incorrect(incorrect, classes, n_images, normalize = True): rows = int(np.sqrt(n_images)) cols = int(np.sqrt(n_images)) fig = plt.figure(figsize = (25, 20)) for i in range(rows*cols): ax = fig.add_subplot(rows, cols, i+1) image, true_label, probs = incorrect[i] image = image.permute(1, 2, 0) true_prob = probs[true_label] incorrect_prob, incorrect_label = torch.max(probs, dim = 0) true_class = classes[true_label] incorrect_class = classes[incorrect_label] if normalize: image = normalize_image(image) ax.imshow(image.cpu().numpy()) ax.set_title(f'true label: {true_class} ({true_prob:.3f})\n' \ f'pred label: {incorrect_class} ({incorrect_prob:.3f})') ax.axis('off') fig.subplots_adjust(hspace=0.4) N_IMAGES = 36 plot_most_incorrect(incorrect_examples, classes, N_IMAGES) # + id="r5UzPHw29QVi" cellView="form" #@title plot_representations def get_representations(model, iterator): model.eval() outputs = [] intermediates = [] labels = [] with torch.no_grad(): for (x, y) in iterator: x = x.to(device) y_pred, _ = model(x) outputs.append(y_pred.cpu()) labels.append(y) outputs = torch.cat(outputs, dim = 0) labels = torch.cat(labels, dim = 0) return outputs, labels outputs, labels = get_representations(model, train_iterator) def get_pca(data, n_components = 2): pca = decomposition.PCA() pca.n_components = n_components pca_data = pca.fit_transform(data) return pca_data def plot_representations(data, labels, classes, n_images = None): if n_images is not 
None: data = data[:n_images] labels = labels[:n_images] fig = plt.figure(figsize = (15, 15)) ax = fig.add_subplot(111) scatter = ax.scatter(data[:, 0], data[:, 1], c = labels, cmap = 'hsv') #handles, _ = scatter.legend_elements(num = None) #legend = plt.legend(handles = handles, labels = classes) output_pca_data = get_pca(outputs) plot_representations(output_pca_data, labels, classes) # + id="pz6yxrS29i9D" cellView="form" #@title get_tsne def get_tsne(data, n_components = 2, n_images = None): if n_images is not None: data = data[:n_images] tsne = manifold.TSNE(n_components = n_components, random_state = 0) tsne_data = tsne.fit_transform(data) return tsne_data output_tsne_data = get_tsne(outputs) plot_representations(output_tsne_data, labels, classes) # + id="VDqM5kXc9mU7" cellView="form" #@title plot_filtered_images def plot_filtered_images(images, filters, n_filters = None, normalize = True): images = torch.cat([i.unsqueeze(0) for i in images], dim = 0).cpu() filters = filters.cpu() if n_filters is not None: filters = filters[:n_filters] n_images = images.shape[0] n_filters = filters.shape[0] filtered_images = F.conv2d(images, filters) fig = plt.figure(figsize = (30, 30)) for i in range(n_images): image = images[i] if normalize: image = normalize_image(image) ax = fig.add_subplot(n_images, n_filters+1, i+1+(i*n_filters)) ax.imshow(image.permute(1,2,0).numpy()) ax.set_title('Original') ax.axis('off') for j in range(n_filters): image = filtered_images[i][j] if normalize: image = normalize_image(image) ax = fig.add_subplot(n_images, n_filters+1, i+1+(i*n_filters)+j+1) ax.imshow(image.numpy(), cmap = 'bone') ax.set_title(f'Filter {j+1}') ax.axis('off'); fig.subplots_adjust(hspace = -0.7) N_IMAGES = 5 N_FILTERS = 7 images = [image for image, label in [train_data[i] for i in range(N_IMAGES)]] filters = model.conv1.weight.data plot_filtered_images(images, filters, N_FILTERS) # + id="ZOGoonSkxVNk" cellView="form" #@title plot_filters #filters = model.conv1.weight.data def 
plot_filters(filters, normalize = True): filters = filters.cpu() n_filters = filters.shape[0] rows = int(np.sqrt(n_filters)) cols = int(np.sqrt(n_filters)) fig = plt.figure(figsize = (30, 15)) for i in range(rows*cols): image = filters[i] if normalize: image = normalize_image(image) ax = fig.add_subplot(rows, cols, i+1) ax.imshow(image.permute(1, 2, 0)) ax.axis('off') fig.subplots_adjust(wspace = -0.9) plot_filters(filters) # + id="FroXn0RSLehx" cellView="form" #@title sort files with predictions (configured for 2 classes, uses efficientnet-b0 as base) import torch import glob from efficientnet_pytorch import EfficientNet import cv2 import torch.nn.functional as F import shutil import os from tqdm import tqdm model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=2) model_name = 'model.pt' #@param {type:"string"} model.load_state_dict(torch.load(model_name, map_location=torch.device('cpu'))) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model.eval() rootdir = '/content/input' #@param {type:"string"} # probably depends on how categories are alphabetically sorted path0 = '/content/0' #@param {type:"string"} path1 = '/content/1' #@param {type:"string"} if not os.path.exists(path0): os.makedirs(path0) if not os.path.exists(path1): os.makedirs(path1) files = glob.glob(rootdir + '/**/*.png', recursive=True) files_jpg = glob.glob(rootdir + '/**/*.jpg', recursive=True) files.extend(files_jpg) model.to(device) height_min = 256 width_min = 256 with torch.no_grad(): for f in tqdm(files): image = cv2.imread(f) #image = cv2.resize(image, (256,256)) # resizing to match original training, or detections will be bad height = image.shape[0] width = image.shape[1] if height > height_min and width > width_min: height_resized = height_min if width < height: scale_x = width_min/width width_resized = width_min height_resized = scale_x * height else: scale_y = height_min/height height_resized = height_min width_resized = scale_y * width image = 
cv2.resize(image, (int(width_resized), int(height_resized))) #elif height <= height_min or width <= width_min: # break image = torch.from_numpy(image).unsqueeze(0).permute(0,3,1,2)/255 y_pred = model(image) y_prob = F.softmax(y_pred, dim = -1) top_pred = y_prob.argmax(1, keepdim = True) if top_pred == 0: shutil.move(f, os.path.join(path0, os.path.basename(f))) elif top_pred == 1: shutil.move(f, os.path.join(path1, os.path.basename(f)))
7_EfficientNet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # HIDDEN from datascience import * # %matplotlib inline import matplotlib.pyplot as plots plots.style.use('fivethirtyeight') import numpy as np # ## Lecture 25 ## # ## Average (Mean) ## values = make_array(2, 3, 3, 9) sum(values)/len(values) np.average(values) np.mean(values) (2 + 3 + 3 + 9)/4 2*(1/4) + 3*(2/4) + 9*(1/4) values_table = Table().with_column('value', values) values_table bins_for_display = np.arange(0.5, 10.6, 1) values_table.hist(0, bins = bins_for_display) # + ## Make array of 10 2s, 20 3s, and 10 9s new_vals = np.append(np.append(np.full(10, 2), np.full(20, 3)), np.full(10, 9)) # - Table().with_column('value', new_vals).hist(bins = bins_for_display) np.average(values) np.average(new_vals) # ### Discussion Question nba = Table.read_table('nba2013.csv') nba nba.hist('Height', bins=np.arange(65.5, 90.5)) heights = nba.column('Height') percentile(50, heights) np.average(heights) # ## Standard Deviation ## sd_table = Table().with_columns('Value', values) sd_table average_value = np.average(sd_table.column(0)) average_value deviations = values - average_value sd_table = sd_table.with_column('Deviation', deviations) sd_table sum(deviations) sd_table = sd_table.with_columns('Squared Deviation', deviations ** 2) sd_table # + # Variance of the data variance = np.mean(sd_table.column('Squared Deviation')) variance # + # Standard Deviation (SD) is the square root of the variance sd = variance ** 0.5 sd # - np.std(values)
lec/lec25.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Model exploration for NHL game-outcome prediction: load the shaped season
# data, split features/targets by season, and compare several classifiers
# by F1 score and accuracy.

# + ##import files

# +
import pandas as pd
import numpy as np
from time import time  # fixed: the timing helpers below call time(), but it was never imported


# +
## cleaning tool
def perc_null(X):
    """Summarize missing data per column of DataFrame X.

    Returns a DataFrame indexed by column name with the null count
    ('Total'), the column dtype ('Type'), and the null fraction
    ('Percent'), sorted by descending null count.
    """
    total = X.isnull().sum().sort_values(ascending=False)
    data_types = X.dtypes
    percent = (X.isnull().sum() / X.isnull().count()).sort_values(ascending=False)
    missing_data = pd.concat(
        [total, data_types, percent], axis=1, keys=['Total', 'Type', 'Percent']
    )
    return missing_data
# NOTE(review): the original defined this same helper twice with duplicated
# pandas/numpy imports; the duplicate cell has been removed.


# +
##note KNN or other clusters might be helpful group the teams in smart way ... but not now.
#models
##regression
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor
#classifiers (non-tree)
from sklearn.linear_model import RidgeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression, SGDRegressor, SGDClassifier
from sklearn.svm import SVC
#tree-based classifiers
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from xgboost import XGBRegressor

##regression models
lr = Ridge(alpha=0.001)
rfr = RandomForestRegressor(max_depth=3, random_state=0)
xgbr = XGBRegressor()

##classifier models
lrc = RidgeClassifier()
gnb = GaussianNB()
lgr = LogisticRegression(random_state=0)
svc = SVC()

#tree-based classifiers
rfc = RandomForestClassifier(max_depth=3, random_state=0)
bc = BaggingClassifier()
gbc = GradientBoostingClassifier()
xgbc = XGBClassifier()
# -

# ##TUNING INFO
#
# ##hyper_parameters from here
# ##https://machinelearningmastery.com/hyperparameters-for-classification-machine-learning-algorithms/
# ##for xgboost from here
# ##https://machinelearningmastery.com/extreme-gradient-boosting-ensemble-in-python/
#
# #xgb
# trees = [10, 50, 100, 500, 1000, 5000] #100 #num of trees
# max_depth = range(1,11) ##3-5
# rates = [0.0001, 0.001, 0.01, 0.1, 1.0] #0.1
# subsample in arange(0.1, 1.1, 0.1): #0.4, 0.5 ##this is 0.1, 0.2 ... 1.0 # % of features to sample
#
# #svc
# kernels in ['linear', 'poly', 'rbf', 'sigmoid'] #if you use poly, then adjust degree
# C in [100, 10, 1.0, 0.1, 0.001]
#
# #gb
# learning_rate in [0.001, 0.01, 0.1]
# n_estimators [10, 100, 1000]
# subsample in [0.5, 0.7, 1.0]
# max_depth in [3, 7, 9]
#
# #rfc
# max_features [1 to 20] #key
# max_features in ['sqrt', 'log2']
# n_estimators in [10, 100, 1000]
#
# #bc
# n_estimators in [10, 100, 1000]
#
# svm_dic = {'kernels': ['linear', 'poly', 'rbf', 'sigmoid']}
# lrc_dic = {'alpha': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}
# lgr_hp_dic = {'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
#               'penalty': ['none', 'l1', 'l2', 'elasticnet'],
#               'C': [100, 10, 1.0, 0.1, 0.01]}

# Load the shaped data once under its own name so the season filters below can
# use it.  Fixed: the original loaded the CSV into `X` and then immediately
# clobbered `X` with expressions referencing an undefined `data_L`.
data_L = pd.read_csv('/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Shaped_Data/data_LJ.csv')

# +
# Seasons other than 2018-2019 feed the train/test split; 2018-2019 is the
# final hold-out.  Fixed: the original filtered with
# `data_L[season != 20182019.iloc[:, 10:]`, which is unbalanced-bracket
# syntax — `season` is a column of data_L, so use a boolean mask.
past_seasons = data_L[data_L['season'] != 20182019]
X = past_seasons.iloc[:, 10:]  #features test-train
Y = past_seasons.iloc[:, :10]  #targets
y = Y['won']

final_season = data_L[data_L['season'] == 20182019]
X_18 = final_season.iloc[:, 10:]  #features final test
Y_18 = final_season.iloc[:, :10]  #targets
y_18 = Y_18['won']

# +
from sklearn.model_selection import train_test_split

# Stratify on the target so the win/loss balance is preserved in both splits.
x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

# +
##handy code from Leung
from sklearn.metrics import f1_score


def train_classifier(clf, X_train, y_train):
    '''Fit a classifier to the training data, reporting wall-clock time.'''
    # Start the clock, train the classifier, then stop the clock
    start = time()
    clf.fit(X_train, y_train)
    end = time()
    # Print the results
    print("Trained model in {:.4f} seconds".format(end - start))


def predict_labels(clf, features, target):
    '''Predict with a fitted classifier; return (F1 score, accuracy).'''
    # Start the clock, make predictions, then stop the clock
    start = time()
    y_pred = clf.predict(features)
    end = time()
    # Print and return results
    print("Made predictions in {:.4f} seconds.".format(end - start))
    return f1_score(target, y_pred), sum(target == y_pred) / float(len(y_pred))


def train_predict(clf, X_train, y_train, X_test, y_test):
    '''Train a classifier and report F1/accuracy on both train and test sets.'''
    # Indicate the classifier and the training set size
    print("Training a {} using a training set size of {}. . .".format(clf.__class__.__name__, len(X_train)))
    # Train the classifier
    train_classifier(clf, X_train, y_train)
    # Print the results of prediction for both training and testing
    f1, acc = predict_labels(clf, X_train, y_train)
    print(f1, acc)
    print("F1 score and accuracy score for training set: {:.4f} , {:.4f}.".format(f1, acc))
    f1, acc = predict_labels(clf, X_test, y_test)
    print("F1 score and accuracy score for test set: {:.4f} , {:.4f}.".format(f1, acc))


def train_predictproba(clf, X_train, y_train, X_test, y_test):
    '''Train a classifier, report train-set F1/accuracy, and display test-set
    class probabilities.'''
    # Indicate the classifier and the training set size
    print("Training a {} using a training set size of {}. . .".format(clf.__class__.__name__, len(X_train)))
    # Train the classifier
    train_classifier(clf, X_train, y_train)
    # Print the results of prediction for both training and testing
    f1, acc = predict_labels(clf, X_train, y_train)
    print(f1, acc)
    print("F1 score and accuracy score for training set: {:.4f} , {:.4f}.".format(f1, acc))
    # NOTE(review): `display` is an IPython notebook builtin; this cell only
    # works inside a notebook kernel.
    display(clf.predict_proba(X_test))


# +
clf_A = LogisticRegression(random_state=42)
clf_B = SVC(random_state=43, kernel='rbf')
# Fixed: the original wrote `xgb.XGBClassifier(seed=44)`, but `xgb` was never
# imported — only the class `XGBClassifier` was (from xgboost import ...).
clf_C = XGBClassifier(seed=44)
clf_D = SVC(probability=True)

train_predict(clf_A, x_train, y_train, x_test, y_test)
print('')
train_predict(clf_B, x_train, y_train, x_test, y_test)
print('')
train_predict(clf_C, x_train, y_train, x_test, y_test)
print('')
train_predict(clf_D, x_train, y_train, x_test, y_test)
print('')
Note_books/Explore_Models/.ipynb_checkpoints/Model3_badv1_Leung_v1-checkpoint.ipynb
# # Select fourth-order FOT in the parameter space of transversely isotropic FOT

import numpy as np
import sympy as sp
import vofotensors as vot
from vofotensors.abc import alpha1, rho1
import pandas as pd

# Sample alpha1 on [-1/3, 2/3] and trace the upper and lower rho1 boundary
# curves of the admissible transversely isotropic region.
alpha_grid = np.linspace(-1.0 / 3.0, 2.0 / 3.0, 10)
upper_curve = alpha_grid / 56.0 + 1.0 / 60.0
lower_curve = alpha_grid * alpha_grid / 8.0 - alpha_grid / 42.0 - 1.0 / 90.0

# Stack (alpha1, rho1) pairs: the full upper curve, then the lower curve with
# its endpoints dropped (they coincide with the upper curve's endpoints).
upper_points = np.stack([alpha_grid, upper_curve], axis=1)
lower_points = np.stack([alpha_grid, lower_curve], axis=1)[1:-1]
boundary_points = np.concatenate([upper_points, lower_points], axis=0)

# Collect the boundary points in a dataframe.
df = pd.DataFrame(boundary_points, columns=["alpha1", "rho1"])

# Look up the transversely isotropic N4 parameterization and turn the symbolic
# expression into a numeric callable of (alpha1, rho1).
symbolic_N4 = vot.fabric_tensors.N4s_parametric["transv_isotropic"]["alpha1_rho1"]
N4_func = sp.lambdify([alpha1, rho1], symbolic_N4)

# Evaluate the parameterization for every boundary point.
df["N4"] = df.apply(lambda r: N4_func(alpha1=r["alpha1"], rho1=r["rho1"]), axis=1)
print(df)
docs/source/notebooks/03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Import libraries

# %%time
from beem.account import Account
from beem.comment import Comment
from beem.exceptions import ContentDoesNotExistsException

# %%time
# Hive account whose root posts we want to list.
username = "farizal"
account = Account(username)
c_list = {}  # permlinks already processed (used as a seen-set)
count = 0    # running number for the markdown list entries

# %%time
# Write one numbered markdown link per unique root post; replies are skipped.
# Fixed: use a context manager so the output file is closed even if an
# iteration raises (the original used bare open()/close()).
with open("hivepostlist.md", 'w', encoding="utf-8") as f:
    for c in map(Comment, account.history(only_ops=["comment"])):
        # Account history can report the same permlink more than once (edits).
        if c.permlink in c_list:
            continue
        try:
            # Fetch the full content; the entry may have been deleted on-chain.
            c.refresh()
        except ContentDoesNotExistsException:
            continue
        c_list[c.permlink] = 1
        # Only root posts (not replies) go into the list.
        if not c.is_comment():
            title = ""
            title = title.join(c.title.splitlines())  # Settled the titles with newline problem
            count += 1
            f.write(str(count) + ". [" + title + "]" + "(" + "https://hive.blog/" + c.parent_permlink + "/@" + username + "/" + c.permlink + ")" + "\n")
CodeTesting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import package1 from package1 import package1_function0, package1_function1 def defined_function_1(var1, var2): return 0 # - # $\lambda = 2$ Hello world! # + pycharm={"name": "#%%\n"} print(2) defined_function_3() # + pycharm={"name": "#%%\n"} def defined_function_2(): a = defined_function_1() return 0 def defined_function_3(var1, var2, var3): return 0 defined_function_2()
test/notebooks/functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Download sample
#
# Download **PDF articles** for all articles obtained through a PubMed query.
#
# ----
#
# We download a sample of data from PubMed by running a query
#
# - *pubmed_result.xml*
# ```
# ("2010"[Date - Publication] : "3000"[Date - Publication]) AND "Drug Combinations"[MeSH Terms]
# ```
# - *amino_acid_substitution.xml*
# ```
# ("2010"[Date - Publication] : "3000"[Date - Publication]) AND "Amino Acid Substitution"[MAJR]
# ```
#
# and saving all results in XML and CSV formats.

# # Imports

# NOTE(review): os, op, pd, time, pmc_tables and download_pdf are presumably
# brought into scope by this shared-imports notebook — confirm.
# %run _imports.ipynb

# +
NOTEBOOK_NAME = 'download_sample'
os.makedirs(NOTEBOOK_NAME, exist_ok=True)

INPUT_FILE_NAME = 'amino_acid_substitution.xml'

# Fixed: the original assigned the literal string '{NOTEBOOK_NAME}/pdfs_0'
# (missing f-prefix), so a directory literally named '{NOTEBOOK_NAME}' was
# created instead of 'download_sample/pdfs_0'.
OUTPUT_DIR = f'{NOTEBOOK_NAME}/pdfs_0'
os.makedirs(OUTPUT_DIR, exist_ok=True)
# -

# # Load PubMed query results

INPUT_FILE_NAME

data = pmc_tables.xml_parser.parse_pubmed_xml_file(f"{NOTEBOOK_NAME}/{INPUT_FILE_NAME}")

df = pd.DataFrame(data)
# Keep only records that have a PubMed Central id.
df_pmc = df[df['pmc'].notnull()]
df_pmc.head(2)

# ## Save
#
# **Note**:
# - `pubmed_url` can be converted to an actual URL by prepending <https://www.ncbi.nlm.nih.gov>.
# - `doi` can be converted to a URL by prepending <https://doi.org/>.

os.makedirs('output', exist_ok=True)

# + run_control={"marked": false}
# Keep only records that have a DOI, which the download step below needs.
output_df = (
    df
    .loc[df['doi'].notnull(), :]
)
output_df.head(2)
# -

output_df.to_csv(f'output/{op.splitext(INPUT_FILE_NAME)[0]}.tsv', sep='\t', index=False)

# Download a small slice of PDFs, throttling requests slightly between calls.
download_results = []
for doi in output_df['doi'].values[100:120]:
    download_results.append(download_pdf(doi, OUTPUT_DIR))
    time.sleep(0.1)

output_df['doi'].values[100:110]
notebooks/xx-download_sample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="L_ZC0NdCiFHd" import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # + [markdown] id="0oT7NI7BiFIK" # # Import Data # + id="rEDk1VV4iFIN" df_raw = pd.read_csv("/content/Network Analysis_query3.csv", index_col=None, header=0) # + colab={"base_uri": "https://localhost:8080/", "height": 487} id="UJ4DC9RliFIO" outputId="0b49af2d-6c85-4a15-e01c-ce3384fd88bd" df = pd.DataFrame() df['food'] = df_raw['food'] df['food_name'] = df_raw['foodLabel'] df['ingredient_id'] = df_raw['mother'] df['ingredient_name'] = df_raw['label_clean'] df['origin_id'] = df_raw['father'] df['origin_name'] = df_raw['fatherLabel'] df['ingredient_category'] = df_raw['category'] df # + id="93KuUxdbiFIQ" import sqlite3 as sl # + id="0HG-mX7UiFIR" con = sl.connect('test_db.db') # + id="o3FluD-yiFIS" with con: con.execute(""" CREATE TABLE IF NOT EXISTS FOOD_INGREDIENT ( food TEXT NOT NULL, food_name TEXT, ingredient_id TEXT, ingredient_name TEXT, origin_id TEXT, origin_name TEXT, ingredient_category TEXT ); """) # + id="y7cok695iFIT" data = df.values.tolist() # + id="6Oe7JIgfiFIU" sql = 'INSERT INTO FOOD_INGREDIENT (food, food_name, ingredient_id, ingredient_name, origin_id, origin_name, ingredient_category) values(?, ?, ?, ?, ?, ?, ?)' # + id="02MY2ehIiFIW" with con: con.executemany(sql, data) # + colab={"base_uri": "https://localhost:8080/"} id="ZlkS_yPqiFIY" outputId="8fa53c44-e3ce-489f-8b22-c16462036569" with con: data = con.execute("SELECT * FROM FOOD_INGREDIENT WHERE food_name = 'cenil'") for row in data: print(row) # + colab={"base_uri": "https://localhost:8080/", "height": 423} id="BolZMW4OiFIZ" outputId="f8dcd5b6-6b03-4ae5-d5ee-9ea4f52de995" df = pd.read_sql_query("SELECT food_name, group_concat(replace(ingredient_name,' ','_'), ' 
') as ingredient_list from (select distinct food_name, ingredient_name from FOOD_INGREDIENT) group by 1", con) df = df.dropna() df # + colab={"base_uri": "https://localhost:8080/", "height": 423} id="MhATPtrNiFIa" outputId="a8c3ae77-75c3-4cc1-fef3-e539a700cb73" # df.to_csv("Downloads/query_food.csv") total_ingredient = pd.read_sql_query("SELECT ingredient_name, ingredient_category, count(distinct food_name) from FOOD_INGREDIENT where ingredient_name is not null group by 1,2 order by 3 desc", con) total_ingredient # + [markdown] id="CQl2C5GKiFIc" # # Create Ingredient Connection # + id="81E2t1uLiFId" # Create a sublist of lower case words for each tweet ingredient_in_list = [ingredient.split() for ingredient in df.ingredient_list] # + colab={"base_uri": "https://localhost:8080/"} id="Mf19ublxiFIe" outputId="dc2a2ac4-1f44-4e90-f80f-912e9ea97319" import nltk from nltk import bigrams # Create list of lists containing bigrams in tweets terms_bigram = [list(bigrams(tweet)) for tweet in ingredient_in_list] # View bigrams for the first tweet terms_bigram[0] # + colab={"base_uri": "https://localhost:8080/"} id="osy9oSgTiFIf" outputId="0d821522-531b-425a-90e9-d50b9839343c" import itertools import collections # Flatten list of bigrams in clean tweets bigrams = list(itertools.chain(*terms_bigram)) # Create counter of words in clean bigrams bigram_counts = collections.Counter(bigrams) bigram_counts.most_common(20) # + id="oeu1pKZAiFIg" # + id="v9fhdiIfiFIh" from collections import defaultdict def co_occurrence(sentences, window_size): d = defaultdict(int) vocab = set() for text in sentences: # preprocessing text = text.lower().split() # iterate over ingredient list for i in range(len(text)): token = text[i] vocab.add(token) # add to vocab next_token = text[i+1 : i+1+window_size] for t in next_token: key = tuple( sorted([t, token]) ) d[key] += 1 # formulate the dictionary into dataframe vocab = sorted(vocab) # sort vocab df = pd.DataFrame(data=np.zeros((len(vocab), len(vocab)), 
dtype=np.int16), index=vocab, columns=vocab) for key, value in d.items(): df.at[key[0], key[1]] = value df.at[key[1], key[0]] = value return df # + colab={"base_uri": "https://localhost:8080/", "height": 505} id="BHC4yZAciFIi" outputId="c091078e-e92d-4624-ce5a-33101550ed06" df_test = co_occurrence(df.ingredient_list, 2) df_test # + id="SZiuBoZsiFIj" df_temp = df_test.head(20) # + colab={"base_uri": "https://localhost:8080/"} id="4T6igN7fiFIs" outputId="fc181250-c3d0-41b6-e6d5-f41cafa2a256" columns = list(df_temp) columns # + colab={"base_uri": "https://localhost:8080/"} id="ZWwZ5RBiiFIt" outputId="7e77df91-542d-4564-da9a-dae679ad3649" df_compiled = pd.DataFrame(columns=['bigram','count']) for i,row in df_test.iterrows(): for j in columns: if row[j] > 0: bigram_name = (i,j) reverse_bigram_name = (j,i) bigram_value = row[j] df_t = pd.DataFrame(columns=['bigram','count']) df_t.loc[-1] = [bigram_name, bigram_value] if df_compiled[df_compiled['bigram']==reverse_bigram_name].empty == False: print('dup') else: df_compiled = df_compiled.append(df_t, ignore_index = True) # + colab={"base_uri": "https://localhost:8080/", "height": 423} id="bsZJPCy6iFIv" outputId="9b5a6659-7bd6-467c-f488-6b0c9fe70245" df_compiled # + colab={"base_uri": "https://localhost:8080/", "height": 423} id="iLhqpgSWiFIw" outputId="0e134453-7525-491e-a4bf-2342a136f352" df_compiled = df_compiled.sort_values(by=['count'], ascending=False) df_compiled # + [markdown] id="vW67WnRZiFIy" # # Create Graph # + id="1vf-teZ7iFIz" import networkx as nx # + id="1nQfgofiiFIz" # Create dictionary of edges and their weights d = df_compiled.head(100).set_index('bigram').T.to_dict('records') # + colab={"base_uri": "https://localhost:8080/"} id="5L0JAL4_iFI0" outputId="f1f2e173-5086-4159-be59-a3ed4e8ab8fb" d # + id="CoPxT0i1iFI1" # Create network plot G = nx.Graph() # Create connections between nodes for k, v in d[0].items(): G.add_edge(k[0], k[1], weight=(v * 10)) # + colab={"base_uri": "https://localhost:8080/"} 
id="xCzLtibCiFI1" outputId="25e27183-3a0f-44f9-9130-0ec4ca99f4a4" G # + [markdown] id="6MFTlRJ-iFI2" # ## All Ingredients Graph # + colab={"base_uri": "https://localhost:8080/", "height": 646} id="YFmc-TlmiFI2" outputId="f787fe59-8cf5-441b-90d4-0f2573964aff" fig, ax = plt.subplots(figsize=(16, 12)) pos = nx.spring_layout(G, k=2) # Plot networks nx.draw_networkx(G, pos, font_size=8, width=3, edge_color='grey', node_color='#00b4d9', with_labels = True, ax=ax) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="BcEsln-XiFI2" outputId="ef8a16fc-5361-4d41-b172-6646068fc2c4" weights = nx.get_edge_attributes(G,'weight').values() weights # + [markdown] id="B6CxBrDniFI3" # ## Modify Graph # + colab={"base_uri": "https://localhost:8080/", "height": 646} id="uZQogEG8iFI3" outputId="38f041fa-bc03-4e9c-c808-0346739c503c" # Use node degree as the node size fig, ax = plt.subplots(figsize=(16, 12)) pos = nx.spring_layout(G, k=2) d = dict(G.degree) nx.draw(G, pos, font_size=8, width=1, edge_color='grey', node_color='#00b4d9', with_labels = True, nodelist=d.keys(), node_size=[v * 12 for v in d.values()]) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 423} id="4FkW-VaOiFI4" outputId="4c45795f-24f7-44fa-b416-f831f287bc85" carac = pd.DataFrame() carac = total_ingredient[['ingredient_name','ingredient_category']] carac # + colab={"base_uri": "https://localhost:8080/", "height": 646} id="0CfUyekgiFI5" outputId="0737f8dd-cbe1-4dbc-85b5-9742a8a5ae1d" import matplotlib d = df_compiled.head(100).set_index('bigram').T.to_dict('records') # Create network plot G_mod = nx.Graph() # Create connections between nodes for k, v in d[0].items(): G_mod.add_edge(k[0], k[1], weight=(v * 10)) # Get the dataframe of ingredient name and it's category carac = pd.DataFrame() carac = total_ingredient[['ingredient_name','ingredient_category']] # Reindex the dataframe to align with graph's nodes carac = carac.set_index('ingredient_name') carac = carac.reindex(G_mod.nodes()) 
carac['ingredient_category'] = pd.Categorical(carac['ingredient_category']) carac['ingredient_category'].cat.codes # Specify colors, number of colors listed should align with the number of categories cmap = matplotlib.colors.ListedColormap(['C0', 'darkorange', 'lightgreen', 'lightyellow', 'darkgreen', 'darkblue', 'purple', 'red', 'pink', 'brown']) # Get node degree in a dict for node size parameter d = dict(G.degree) # Draw graph fig, ax = plt.subplots(figsize=(16, 12)) pos = nx.spring_layout(G, k=2) nx.draw(G, pos, font_size=10, width=1, edge_color='grey', node_color=carac['ingredient_category'].cat.codes, cmap=cmap, with_labels = True, nodelist=d.keys(), node_size=[v * 12 for v in d.values()]) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="qXNAKHWaiFI5" outputId="beabf5f5-c439-4878-88c0-9344b214ac78" # Compute the degree centralities of G: deg_cent deg_cent = nx.degree_centrality(G) # Compute the maximum degree centrality: max_dc max_dc = max(deg_cent.values()) # Find the item(s) that have highest co-occurrence: prolific_collaborators prolific_collaborators = [n for n, dc in deg_cent.items() if dc == max_dc] print(prolific_collaborators) # + [markdown] id="lL0qCpq7iFI6" # ## Subset Cliques # + colab={"base_uri": "https://localhost:8080/"} id="HFXPCs7biFI7" outputId="6de67fc2-f321-4159-f418-ffff4cea1d8d" list(nx.find_cliques(G)) # + id="Ej8xECu6iFI7" # Define get_nodes_and_nbrs() def get_nodes_and_nbrs(G, nodes_of_interest): """ Returns a subgraph of the graph `G` with only the `nodes_of_interest` and their neighbors. 
""" nodes_to_draw = [] # Iterate over the nodes of interest for n in nodes_of_interest: # Append the nodes of interest to nodes_to_draw nodes_to_draw.append(n) # Iterate over all the neighbors of node n for nbr in G.neighbors(n): # Append the neighbors of n to nodes_to_draw nodes_to_draw.append(nbr) return G.subgraph(nodes_to_draw) # + colab={"base_uri": "https://localhost:8080/", "height": 319} id="xMnxbGZCiFI7" outputId="c5cc393e-be6e-4ec6-a79a-2e30461415ab" # Extract the subgraph with the nodes of interest: T_draw T_draw = get_nodes_and_nbrs(G, ['coconut_milk']) # Draw the subgraph to the screen nx.draw(T_draw, with_labels=True) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 319} id="zEem7QU6iFI8" outputId="5e00f23b-8828-4469-dbfd-0a24fafdd59e" # Extract the subgraph with the nodes of interest: T_draw T_draw = get_nodes_and_nbrs(G, ['palm_sugar']) # Draw the subgraph to the screen nx.draw(T_draw, with_labels=True) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 319} id="FZ3__ECniFI8" outputId="19dc6f04-837e-4972-efeb-629d36c46ed4" # Extract the subgraph with the nodes of interest: T_draw T_draw = get_nodes_and_nbrs(G, ['lemongrass']) # Draw the subgraph to the screen nx.draw(T_draw, with_labels=True) plt.show() # + id="vKJY6dhTiFI8"  # NOTE(review): builds an ingredient co-occurrence network of Indonesian foods (pandas + sqlite3 + networkx); review flags: (1) the "Modify Graph" cell constructs G_mod and aligns the carac category colours to G_mod.nodes(), but then computes degrees from and draws G — presumably G_mod was intended, confirm; (2) DataFrame.append was removed in pandas 2.x, use pd.concat; (3) the list named bigrams shadows nltk's bigrams function after the flatten step; (4) the "tweet" wording in comments is left over from a tweet-analysis tutorial — each "tweet" here is one food's ingredient list
H20 ML Notebooks/Network Analysis/Network_Analysis_Indonesian_Food_Ingredients.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://colab.research.google.com/github/adasegroup/ML2022_seminars/blob/master/seminar4/seminar_SVM_solutions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] slideshow={"slide_type": "notes"} # # Seminar: SVM, Kernel trick # Machine Learning by prof. <NAME> # <br> # Author: <NAME> # + [markdown] slideshow={"slide_type": "notes"} # Given training data $x_i \in \mathbb{R}^p$, $y_i\in\{-1, 1\}$, $i=1,\ldots, n$, we have the following problem: # # Primal problem: # # \begin{equation} # \frac{1}{2} ||w||^2 + C \sum_{i=1}^{n} \xi_i \longrightarrow \min_{w, b, \xi},\\ # \begin{split} # \textrm {subject to }\ & y_i (w^T \phi(x_i) + b) \geq 1 - \xi_i,\\ # & \xi_i \geq 0. # \end{split} # \end{equation} # # # Dual problem: # # \begin{equation} # \frac{1}{2} \sum_{i=1}^n\sum_{j=1}^n \alpha_i\alpha_j y_i y_j \underbrace{\phi(x_i)^{T} \phi(x_j)}_{K(x_i, x_j)} - \sum_{i=1}^n \alpha_i \longrightarrow \min_{\alpha}, \\ # \begin{split} # \textrm {subject to }\ & \sum_{i=1}^n y_i \alpha_i = 0,\\ # & 0 \leq \alpha_i \leq C. # \end{split} # \end{equation} # # $\alpha_i = 0$ for non-support vectors, # # $\alpha_i < C$ if $x_i$ lies on the margin, # # $\alpha_i = C$ if $x_i$ lies inside a margin or misclassified. # + [markdown] slideshow={"slide_type": "notes"} # The decision function is a sum over only SUPPORT data points $S$: # # # \begin{equation} # y(x)= w^T\phi(x) + b = \sum_{x_i \in S} \alpha_i y_i K(x_i, x) + b \qquad \qquad (1) # \end{equation} # # The sum is over only the support vectors! # # Less support vectors (sparsity) - faster classifications! 
# + slideshow={"slide_type": "slide"} import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # + [markdown] slideshow={"slide_type": "slide"} # ## Topic 1: Separation by a hyperplane # + from sklearn import svm, datasets, metrics X, y = datasets.make_blobs(n_samples=100, centers=2, cluster_std=1.0, center_box = (-15, 15), random_state=0) X = np.concatenate((X, [[4, 8], [0, 0]])) y = np.concatenate((y, [1, 0])) plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired) plt.xlim(-1, 6) plt.ylim(-1, 8) clf = svm.SVC(kernel='linear') clf.fit(X, y) # create grid to evaluate model xx = np.linspace(X[:, 0].min(), X[:, 0].max(), 30) yy = np.linspace(X[:, 1].min(), X[:, 1].max(), 30) YY, XX = np.meshgrid(yy, xx) xy = np.vstack([XX.ravel(), YY.ravel()]).T Z = clf.decision_function(xy).reshape(XX.shape) # plot decision boundary and margins plt.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=100, linewidth=1, facecolors='none', edgecolors='k') print('accuracy on the training set:', metrics.accuracy_score(y, clf.predict(X))) # + [markdown] slideshow={"slide_type": "notes"} # ### Question 1.1: Change the code above to make accuracy of 1. Do not tune any hyperparameters of SVM. What prevents ideal classification in this case? # + [markdown] slideshow={"slide_type": "notes"} # ### Solution 1.1: # Simply remove strings # # ```python # X = np.concatenate((X, [[4, 8], [0, 0]])) # y = np.concatenate((y, [1, 0])) # ``` # # that add outliers to the dataset and classes will become separable # + [markdown] slideshow={"slide_type": "notes"} # ### Question 1.2: How to calculate the value of accuracy without any function call, just looking on the code and on the picture? 
# + [markdown] slideshow={"slide_type": "notes"} # ### Solution 1.2: # # The accuracy is 100/102, because only 2 outliers among 102 points were missclassified # + [markdown] slideshow={"slide_type": "notes"} # ## Topic 2: Primal SVM and explicit transforms # + slideshow={"slide_type": "fragment"} df = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2022_seminars/master/seminar4/data/dataset0.csv') X = df[['X1', 'X2']] y = df['y'] plt.scatter(X['X1'], X['X2'], c=['brown' if c == 1 else 'b' for c in y]) plt.title('labeled dataset') plt.show() clf = svm.SVC(kernel='linear') clf.fit(X, y) plt.scatter(X['X1'], X['X2'], c=['brown' if c == 1 else 'b' for c in clf.predict(X)]) plt.title('predicted class labels: all blue class was missclassified!') plt.show() print('Linear SVM is not good if neither basic nor kernel is used:') print('accuracy on the training set:', metrics.accuracy_score(y, clf.predict(X))) # + [markdown] slideshow={"slide_type": "notes"} # ### Question 2.1: One of the ideas of SVM is that to transform the initial features to some other space where the classes are better separated by a hyperplane. What about using the polar coordinates in the example above? Apply it and show how SVM works with such a new basis using the following template. # + df = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2022_seminars/master/seminar4/data/dataset0.csv') X = df[['X1', 'X2']] y = df['y'] def to_polar(x, y): r = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) return phi, r # --- HERE IS YOUR CODE!!! # change this block so that it gives new pandas DataFrame called X_transform # with columns 'phi' and 'r' and X and X_transform have the corresponding rows of data. X_transform = pd.DataFrame() X_transform['phi'] = X['X1'] # its wrong X_transform['r'] = X['X2'] # its wrong # --- HERE IS YOUR CODE!!! 
clf = svm.SVC(kernel='linear') clf.fit(X_transform, y) print('accuracy on the training set:', metrics.accuracy_score(y, clf.predict(X_transform))) plt.scatter(X_transform['phi'], X_transform['r'], c=['brown' if c == 1 else 'b' for c in y]) plt.xlabel('phi') plt.ylabel('r') pp = np.linspace(X_transform['phi'].min(), X_transform['phi'].max(), 30) rr = np.linspace(X_transform['r'].min(), X_transform['r'].max(), 30) RR, PP = np.meshgrid(rr, pp) pr = np.vstack([PP.ravel(), RR.ravel()]).T Z = clf.decision_function(pr).reshape(PP.shape) plt.contour(PP, RR, Z, colors='k', levels=[-1, 0, 1], linestyles=['--', '-', '--']) plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=100, linewidth=1, facecolors='none', edgecolors='k') plt.show() # + [markdown] slideshow={"slide_type": "notes"} # ### Solution 2.1: # One can use this code # ```python # phi, r = to_polar(X['X1'], X['X2']) # X_transform = pd.DataFrame({'phi': phi, 'r': r}) # ``` # + [markdown] slideshow={"slide_type": "notes"} # ### Question 2.2: The separating hyperplane (line in 2D) in polar basis above looks strange: why shouldn't it be a little higher because many blue points are missclassified - they are on the other side of the line! We need to tune the hyperparameters of the model - constant $C$ in SVM. Try different values of $C$, using the code below and then find the optimal value of $C$, which provides the maximum accuracy. # # + df = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2022_seminars/master/seminar4/data/dataset0.csv') X = df[['X1', 'X2']] y = df['y'] phi, r = to_polar(X['X1'], X['X2']) X_transform = pd.DataFrame({'phi': phi, 'r': r}) # # --- YOUR CODE HERE!!! 
# select the optimal C among some set of values (say from 10**-5 to 10**5) # providing the best model 'clf' and print its accuracy metric on the training dataset clf = svm.SVC(kernel='linear', C=1).fit(X_transform, y) print('accuracy on the training set:', metrics.accuracy_score(y, clf.predict(X_transform))) # # --- YOUR CODE HERE!!! plt.scatter(X_transform['phi'], X_transform['r'], c=['brown' if c == 1 else 'b' for c in y]) plt.xlabel('phi') plt.ylabel('r') pp = np.linspace(X_transform['phi'].min(), X_transform['phi'].max(), 30) rr = np.linspace(X_transform['r'].min(), X_transform['r'].max(), 30) RR, PP = np.meshgrid(rr, pp) pr = np.vstack([PP.ravel(), RR.ravel()]).T Z = clf.decision_function(pr).reshape(PP.shape) plt.contour(PP, RR, Z, colors='k', levels=[-1, 0, 1], linestyles=['--', '-', '--']) plt.show() # - # ### Solution 2.2: # You can use the code # ```python # # tuning of C # best_C = None # best_acc = 0 # for c in [0.1, 1, 10, 100, 1000, 10**4, 10**5]: # clf = svm.SVC(kernel='linear', C=c).fit(X_transform, y) # acc = metrics.accuracy_score(y, clf.predict(X_transform)) # if (acc > best_acc): # best_acc = acc # best_C = c # clf = svm.SVC(kernel='linear', C=best_C) # clf.fit(X_transform, y) # print('best C:', best_C) # print('accuracy on the training set:', metrics.accuracy_score(y, clf.predict(X_transform))) # ``` # or using `GridsearchCV()`: # ```python # # tuning of C # from sklearn.model_selection import GridSearchCV # from sklearn.metrics import accuracy_score, make_scorer # clf = svm.SVC(kernel='linear') # clf = GridSearchCV(clf, # {'C': [0.1, 1, 10, 100, 1000, 10**4, 10**5]}, # scoring=make_scorer(accuracy_score), # we can skip this line because accuracy_score is by default # cv = [(range(len(y)), range(len(y)))] # only 1 split and training and testing sets are the same # ) # clf.fit(X_transform, y) # print('best C:', clf.best_params_['C']) # print('accuracy on the training set:', clf.best_score_) # ``` # We obtain: # ``` # best C: 10000 # accuracy on 
the training set: 1.0 # ``` # ### Question 2.3: Usually, SVM transforms into a space with the dimension higher than number of original features because it makes easier to separate classes by a hyperplane there. Unlike to polar coordinates, one can try the transform # ### $$y_1=x_1, \quad y_2=x_2, \quad y_3=|x_1|+|x_2|.$$ # ### Why classes are better separable in that 3D space? Imagine the picture in 3D. Since we have previously tuned the model even reaching the accuracy of 1, improve this transform and the range for hyperparameter $C$ for better fitting. # + from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score, make_scorer df = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2022_seminars/master/seminar4/data/dataset0.csv') X = df[['X1', 'X2']] y = df['y'] # --- YOUR CODE HERE !!! improve the transform def to_y1y2y3(x1, x2): y1 = x1 y2 = x2 y3 = abs(x1) + abs(x2) return y1, y2, y3 # --- YOUR CODE HERE !!! improve the transform # improve the range for hyperparameter C y1, y2, y3 = to_y1y2y3(X['X1'], X['X2']) X_transform = pd.DataFrame({'Y1': y1, 'Y2': y2, 'Y3': y3}) clf = GridSearchCV(svm.SVC(kernel='linear'), {'C': [1]}, # YOUR CODE HERE, IMPROVE THE RANGE !!! 
scoring=make_scorer(accuracy_score), # we can skip this line because accuracy_score is by default cv = [(np.array(range(len(y))), np.array(range(len(y))))] # only 1 split and training and testing sets are the same ) clf.fit(X_transform, y) print('best C:', clf.best_params_['C']) print('accuracy on the training set:', clf.best_score_) xx1 = np.linspace(X['X1'].min(), X['X1'].max(), 50) xx2 = np.linspace(X['X2'].min(), X['X2'].max(), 50) XX1, XX2 = np.meshgrid(xx1, xx2) YY1, YY2, YY3 = to_y1y2y3(XX1, XX2) xy = np.vstack([YY1.ravel(), YY2.ravel(), YY3.ravel()]).T Z = clf.decision_function(xy).reshape(XX1.shape) plt.scatter(X['X1'], X['X2'], c=['brown' if c == 1 else 'b' for c in y]) plt.contour(XX1, XX2, Z, colors='k', levels=[-1, 0, 1], linestyles=['--', '-', '--']) plt.show() # - X # ### Solution 2.3: # You can use the transform # ``` # def to_y1y2y3(x1, x2): # y1 = x1 # y2 = x2 # y3 = x1**2 + x2**2 # return y1, y2, y3 # ``` # and select the best $C$ from the set of values (for example) # ``` # {'C': [1, 10, 100, 1000, 10**5, 10**6]} # ``` # It provides # ``` # best C: 1000000 # accuracy on the training set: 1.0 # ``` # ### Question 2.4: Execute the code below. There is a solution for SVM classification by some decision curve that looks like a circle and the ROC-curve that have 1 point between $(0, 0)$ and $(1, 1)$. Why this point is only one? Obtain its coordinates by another way - using the confusion matrix. What part of blue points are missclassified? # ### Question 2.5: Unlike to explicit transform (like polar coordinates $(x_1, x_2) \to (\phi, r)$ or $(x_1, x_2) \to (y_1, y_2, y_3)$) in primal SVM problem it can be implied implicitly by the corresponding kernel in the equivalent dual SVM problem. Apply RBF (Radial Basis Function) kernel to this example with initial features $x_1$ and $x_2$ and tune the hyperparameters $C$ and $\gamma$. 
# + from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score, make_scorer df = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2022_seminars/master/seminar4/data/dataset0.csv') X = df[['X1', 'X2']] y = df['y'] # improve the ranges for hyperparameters C and gamma clf = GridSearchCV( svm.SVC(kernel='rbf'), # kernel is changed to non-linear 'rbf' {'C': [1], 'gamma': [1]}, # YOUR IMPROVEMENT HERE !!! scoring=make_scorer(accuracy_score), # we can skip this line because accuracy_score is by default cv = [(np.array(range(len(y))), np.array(range(len(y))))] # only 1 split and training and testing sets are the same ) clf.fit(X, y) # note that we use the initial X, not X_transform ! print('best params:', clf.best_params_) print('accuracy on the training set:', clf.best_score_) # ROC - curve plt.figure() from sklearn.metrics import accuracy_score, make_scorer, roc_curve fpr, tpr, thresh = roc_curve(y, clf.decision_function(X)) plt.title('ROC curve') plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.plot(fpr, tpr, '--o') fpr, tpr, _ = roc_curve(y, clf.predict(X)) plt.title('ROC curve') plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.plot(fpr, tpr, '--o') plt.show() print('points: ', list(zip(fpr, tpr, thresh))) # Plot classification xx1 = np.linspace(X['X1'].min(), X['X1'].max(), 30) xx2 = np.linspace(X['X2'].min(), X['X2'].max(), 30) XX1, XX2 = np.meshgrid(xx1, xx2) xy = np.vstack([XX1.ravel(), XX2.ravel()]).T Z = clf.decision_function(xy).reshape(XX1.shape) plt.scatter(X['X1'], X['X2'], c=['brown' if c == 1 else 'b' for c in y]) plt.contour(XX1, XX2, Z, colors='k', levels=[0], linestyles=['-']) plt.title('many blue points were missclassified!') # YOUR IMPROVEMENT HERE :) plt.show() # - # ### Solution 2.4: # It gets the values of the point: # ``` # from sklearn.metrics import confusion_matrix # C = confusion_matrix(y, clf.predict(X)) # TN = C[0, 0] # FN = C[1, 0] # FP = C[0, 1] # TP = 
C[1, 1] # tpr = TP/(TP + FN) # fpr = FP/(FP + TN) # # print('False positive rate:', fpr) # print('True positive rate:', tpr) # ``` # Blue points are "negative" or 0-class, browns are "positive" or 1-class. Missclassified blue points are "negative" points that were classified as "positive" but it is False (outside the curve), so their number is FP. The percentage of them among all blue (negative) is exactly fpr. # ### Solution 2.5: # We can use for example this grid of hyperparameters: # ``` # {'C': [1, 100], 'gamma': [1, 100]} # ``` # It provides # ``` # best params: {'C': 100, 'gamma': 100} # accuracy on the training set: 1.0 # ``` # ### Question 2.6: Here is the example with polar coordinates from above shown in initial features: # + df = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2022_seminars/master/seminar4/data/dataset0.csv') X = df[['X1', 'X2']] y = df['y'] def to_polar(x, y): r = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) return phi, r phi, r = to_polar(X['X1'], X['X2']) X_transform = pd.DataFrame({'phi': phi, 'r': r}) clf = svm.SVC(kernel='linear', C=1) clf.fit(X_transform, y) print('accuracy on the training set:', metrics.accuracy_score(y, clf.predict(X_transform))) plt.scatter(X['X1'], X['X2'], c=['brown' if c == 1 else 'b' for c in y]) plt.xlabel('X1') plt.ylabel('X2') xx1 = np.linspace(X['X1'].min(), X['X1'].max(), 30) xx2 = np.linspace(X['X2'].min(), X['X2'].max(), 30) XX2, XX1 = np.meshgrid(xx2, xx1) X1X2 = np.vstack([XX1.ravel(), XX2.ravel()]).T phi_rho = np.array([to_polar(x1, x2) for x1, x2 in X1X2]) Z = clf.decision_function(phi_rho).reshape(PP.shape) plt.contour(XX1, XX2, Z, colors='k', levels=[-1, 0, 1], linestyles=['--', '-', '--']) plt.scatter(X['X1'][clf.support_], X['X2'][clf.support_], s=100, linewidth=1, facecolors='none', edgecolors='k') plt.show() # - # ### Find the expression for the equivalent kernel analytically. 
Train a model and show the same picture of classification but using the kernel instead of explicit transform (modify the code below): # + df = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2022_seminars/master/seminar4/data/dataset0.csv') X = df[['X1', 'X2']] y = df['y'] def to_polar(x1, x2): r = np.sqrt(x1**2 + x2**2) varphi = np.arctan2(x2, x1) return varphi, r # modify this function of kernel calculation def my_kernel(X, Y): # here phi(x1, x2) is the basis function of the transformation (varphi, r) = phi(x1, x2) # now it is just a linear kernel, make it to be 'kernel of polar coordinates': phi_x = np.array([(x1, x2) for x1, x2 in X]) phi_y = np.array([(x1, x2) for x1, x2 in Y]) return np.matmul(phi_x, phi_y.T) # all values of inner product phi(x) * phi^T(y) clf = svm.SVC(kernel=my_kernel, C=1) # kernel is used instead of explicit transform (varphi, r) = phi(x1, x2), dual problem clf.fit(np.asarray(X), np.asarray(y)) # initial features X are used, but no explicit polar coordinates! 
print('accuracy on the training set:', metrics.accuracy_score(y, clf.predict(np.asarray(X)))) plt.scatter(X['X1'], X['X2'], c=['brown' if c == 1 else 'b' for c in y]) plt.xlabel('X1') plt.ylabel('X2') xx1 = np.linspace(X['X1'].min(), X['X1'].max(), 30) xx2 = np.linspace(X['X2'].min(), X['X2'].max(), 30) XX2, XX1 = np.meshgrid(xx2, xx1) X1X2 = np.vstack([XX1.ravel(), XX2.ravel()]).T Z = clf.decision_function(X1X2).reshape(PP.shape) plt.contour(XX1, XX2, Z, colors='k', levels=[-1, 0, 1], linestyles=['--', '-', '--']) plt.scatter(X['X1'][clf.support_], X['X2'][clf.support_], s=100, linewidth=1, facecolors='none', edgecolors='k') plt.show() # - # ### Solution 2.6 # # For $\phi(x_1, x_2) = \left(\sqrt{x_1^2 + x_2^2}, \mathrm{arctg} \frac{x_2}{x_1}\right)$ we have # # $$K(x, y) = \phi(x)^T \phi(y) = \sqrt{(x_1^2 + x_2^2)(y_1^2 + y_2^2)} + \textrm{arctg} \frac{x_2}{x_1} \mathrm{arctg} \frac{y_2}{y_1}.$$ # # Construct your kernel $K(x, y)=\phi(x)^T\phi(y)$ using `to_polar(x1, x2)` as $\phi(x_1, x_2)$: # ``` # phi_x = np.array([to_polar(x1, x2) for x1, x2 in X]) # phi_y = np.array([to_polar(x1, x2) for x1, x2 in Y]) # ``` # + [markdown] slideshow={"slide_type": "notes"} # # Topic 3: Training and testing sets # + from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score, make_scorer from sklearn.model_selection import train_test_split df = pd.read_csv('https://raw.githubusercontent.com/adasegroup/ML2022_seminars/master/seminar4/data/dataset1.csv') X = df[['X1', 'X2']] y = df['y'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state=0) clf = GridSearchCV(svm.SVC(kernel='rbf'), # kernel is 'rbf'! {'C': [1], 'gamma': [1.5]}, # Underfitting # {'C': [1000], 'gamma': [100]}, # Overfitting # ... YOUR IMPROVEMENT HERE !!! scoring=make_scorer(accuracy_score), cv=3) # we can skip this line because accuracy is by default clf.fit(X_train, y_train) # note that we use X_train, no explicit X_transform ! 
print('best params:', clf.best_params_) print('accuracy on the training set:', clf.score(X_train, y_train)) print('accuracy on the testing set:', clf.score(X_test, y_test)) # plots results xx1 = np.linspace(X['X1'].min(), X['X1'].max(), 50) xx2 = np.linspace(X['X2'].min(), X['X2'].max(), 50) XX1, XX2 = np.meshgrid(xx1, xx2) xy = np.vstack([XX1.ravel(), XX2.ravel()]).T Z = clf.decision_function(xy).reshape(XX1.shape) def flter(vv, mask): return [v for v, m in zip(vv, mask) if m] plt.scatter(flter(X_train['X1'], y_train), flter(X_train['X2'], y_train), c='brown', alpha = 0.5, label='y_train == 1') plt.scatter(flter(X_train['X1'], 1-y_train), flter(X_train['X2'], 1-y_train), c='b', alpha = 0.5, label='y_train == 0') plt.legend(bbox_to_anchor=(0.999, 1)) plt.contour(XX1, XX2, Z, colors='k', levels=[0], linestyles=['-']) plt.title('$\mathbf{Training}$ set and $\mathbf{trained}$ decision curve') plt.show() plt.scatter(flter(X_test['X1'], y_test), flter(X_test['X2'], y_test), c='brown', label='y_test == 1') plt.scatter(flter(X_test['X1'], 1-y_test), flter(X_test['X2'], 1-y_test), c='b', label='y_test == 0') plt.legend(bbox_to_anchor=(0.999, 1)) plt.contour(XX1, XX2, Z, colors='k', levels=[0], linestyles=['-']) plt.title('$\mathbf{Testing}$ set and $\mathbf{trained}$ decision curve') plt.show() # - # ### Question 3.1: Try different sets of hyperparameters including: # #### Underfitting: # ``` # accuracy on the training set: 0.707317073171 - poor :( # accuracy on the testing set: 0.780487804878 - poor :( # ``` # #### Overfitting: # ``` # accuracy on the training set: 1.0 - even 1.0! but it is useless :| # accuracy on the testing set: 0.841463414634 - poor :( # ``` # ### See the decision curve in both cases (it is the hyperplane in the space of RBF kernel!). Explain them in terms of bias-variance tradeoff. Tune hyperparameters $C$ and $\gamma$ aiming to increase the score on the testing set. 
# # ### Solution 3.1: # We can use for example this grid of hyperparameters: # ``` # {'C': [1, 10, 100, 10**3, 10**4, 10**5, 10**6], # 'gamma': [1, 1.5, 2, 2.5, 3, 5, 10, 100]}, # # ``` # or even shorter ranges. The scores are # ``` # best params: {'C': 10, 'gamma': 2} # accuracy on the training set: 0.841463414634 # accuracy on the testing set: 0.878048780488 # ``` # + [markdown] slideshow={"slide_type": "notes"} # # Topic 4: Dual SVM # + [markdown] slideshow={"slide_type": "notes"} # ### Question 4.1: In the following example with RBF kernel 4 points are given by their coordinates. Explain why $y([0, 0])=b$. Use formula (1) in the problem formulation in the beginning. # + slideshow={"slide_type": "notes"} from sklearn import svm, datasets, metrics # data of 4 points: X = np.array([[-1, 0], [0, 1], [0, -1], [0, -1.5]]) y = np.array([1, 1, -1, -1]) plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired) clf = svm.SVC(kernel='rbf', C=2, gamma='auto') clf.fit(X, y) # create grid to evaluate model xx = np.linspace(X[:, 0].min(), X[:, 0].max(), 30) yy = np.linspace(X[:, 1].min(), X[:, 1].max(), 30) YY, XX = np.meshgrid(yy, xx) xy = np.vstack([XX.ravel(), YY.ravel()]).T Z = clf.decision_function(xy).reshape(XX.shape) # plot decision boundary and margins plt.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=100, linewidth=1, facecolors='none', edgecolors='k') plt.plot([0], [0], 'dr'); # Why y(x_0) == b, x_0=[0, 0]? print('why are they the same:', clf.decision_function(np.array([[0, 0]])), '==', clf.intercept_, '?') # + [markdown] slideshow={"slide_type": "notes"} # ### Solution 4.1: # RBF kernel depends on between-point distance, $K(x, y) = \exp\left(-\frac{||x-y||^2}{2\sigma^2}\right)$, and, since $x_0=(0, 0)$ is equidistant from 3 support vectors, then $K(x_1, x_0)=K(x_2, x_0)=K(x_3, x_0)$. 
Using the constraint $\sum_{i=1}^n y_i \alpha_i = 0$ and the fact that $x_4=[0, -1.5]$ is not a support vector ($\alpha_4=0$), we obtain
#
# $$
# y(x_0)= \sum_{x_i \in S} \alpha_i y_i K(x_i, x_0) + b = K(x_1, x_0) \sum_{x_i \in S} \alpha_i y_i +b = b.
# $$

# + [markdown] slideshow={"slide_type": "notes"}
# ### Question 4.2: Let, in the example above, $x_1$ and $x_2$ be the brown points, $x_3$ the cyan support vector, and $x_4$ the cyan non-support vector. Check which statements are true:
# \begin{align*}
# 1. \qquad & \alpha_2 = C, \\
# 2. \qquad & \alpha_1 > \alpha_3,\\
# 3. \qquad & \alpha_3 - \alpha_1 = \alpha_2,\\
# 4. \qquad & \alpha_2 - \alpha_3 < 2C?
# \end{align*}

# + [markdown] slideshow={"slide_type": "notes"}
# ### Solution 4.2:
#
# \begin{align*}
# 1. \qquad & \text{No}, x_2 \ \text{lies on the margin and thus}\ \alpha_2 < C, \\
# 2. \qquad & \text{No}, \text{because}\ \alpha_1 + \alpha_2 - \alpha_3 - \alpha_4 = 0,\ \alpha_2 > 0\ (\text{because}\ x_2\ \text{is a support vector}), \alpha_4 = 0\ (\text{because}\ x_4\ \text{is not a support vector}),\\
# 3. \qquad & \text{Yes}, \alpha_1 + \alpha_2 - \alpha_3 = 0,\\
# 4. \qquad & \text{Yes}, \alpha_2 - \alpha_3 \leq |\alpha_2 - \alpha_3| \leq |\alpha_2| + |\alpha_3| = \alpha_2 + \alpha_3 < 2C,\ \text{since}\ x_2\ \text{and}\ x_3\ \text{are on the margin}.
# \end{align*}
# -
seminar4/seminar_SVM_solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # This notebook reproduces the spectrum plots in # https://arxiv.org/pdf/1811.06844.pdf. Note that # the solar and DSNB neutrino flux spectra in # CalcSpectra are slightly different than those used # in the code which produced these plots, giving # rise to small difference in the neutrino spectra below # - import numpy as np import matplotlib.pyplot as plt import sys sys.path.append("../../") from paleoSpec import CalcSpectra # + # Set up some plotting settings fs = 14 plt.rc('text', usetex=True) plt.rc('font', family='serif') plt.rcParams.update({'legend.fontsize': fs, 'axes.labelsize': fs, 'axes.titlesize': fs, 'xtick.labelsize': fs, 'ytick.labelsize': fs}) plt.rcParams["figure.figsize"] = (9,7) plt_colors = ['k', '#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e', '#e6ab02', '#a6761d', '#666666'] # + # Reproduce Fig 1 from from https://arxiv.org/pdf/1811.06844.pdf mineral = "Nchwaningite" SpecCalculator = CalcSpectra.CalcSpectra(mineral, switch_keep_H=True) ranges = SpecCalculator.Trange element_names = SpecCalculator.TargetList.nameT_list plt.close('all') for i, name in enumerate(element_names): plt.plot(ranges[i][:,0], 0.1*ranges[i][:,2], label=name) plt.xscale('log') plt.yscale('log') plt.xlim(3e-2,3e2) plt.ylim(3e-1,3e3) plt.xlabel(r'$E_R$ [keV]') plt.ylabel(r'$x_T$ [nm]') plt.legend() plt.tick_params(right=True,top=True) plt.tick_params(which='minor',right=True,top=True) plt.title(mineral+"; Ranges", y=1.02) plt.tight_layout() plt.show() # + # Reproduce Fig 2a from https://arxiv.org/pdf/1811.06844.pdf mineral = "Halite" SpecCalculator = CalcSpectra.CalcSpectra(mineral) C238 = 1e-11 bkg_nu_solar = SpecCalculator.calc_dRdx_BkgNeu_solar() bkg_nu_DSNB = SpecCalculator.calc_dRdx_BkgNeu_DSNB() bkg_nu_atm = 
SpecCalculator.calc_dRdx_BkgNeu_atm() bkg_rad_neutrons = SpecCalculator.calc_dRdx_Bkgn(C238) sig_5 = SpecCalculator.calc_dRdx_MW(5., 1e-45) sig_50 = SpecCalculator.calc_dRdx_MW(50., 1e-45) sig_500 = SpecCalculator.calc_dRdx_MW(500., 1e-45) plt.close('all') plt.plot(0.1*bkg_nu_solar[0], 10.*(bkg_nu_solar[1]+bkg_nu_DSNB[1]+bkg_nu_atm[1]), color=plt_colors[1], linestyle=':', label=r'$\nu$') plt.plot(0.1*bkg_rad_neutrons[0], 10.*bkg_rad_neutrons[1], color=plt_colors[2], linestyle='--', label=r'$n$') plt.plot(0.1*sig_5[0], 10.*sig_5[1], color=plt_colors[3], linestyle='-', label=r'$5\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.plot(0.1*sig_50[0], 10.*sig_50[1], color=plt_colors[4], linestyle='-.', label=r'$50\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.plot(0.1*sig_500[0], 10.*sig_500[1], color=plt_colors[0], linestyle='-', label=r'$500\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.xscale('log') plt.yscale('log') plt.xlim(1e0,1e3) plt.ylim(1e-4,1e8) plt.xlabel(r'$x_T$ [nm]') plt.ylabel(r'$dR/dx_T$ [nm$^{-1}$\,kg$^{-1}$\,Myr$^{-1}$]') plt.legend() plt.tick_params(right=True,top=True) plt.tick_params(which='minor',right=True,top=True) plt.title(mineral+r"; $C^{238}/$[g/g] = "+str(C238), y=1.02) plt.tight_layout() plt.show() # + # Reproduce Fig 2b from https://arxiv.org/pdf/1811.06844.pdf mineral = "Epsomite" SpecCalculator = CalcSpectra.CalcSpectra(mineral) C238 = 1e-11 bkg_nu_solar = SpecCalculator.calc_dRdx_BkgNeu_solar() bkg_nu_DSNB = SpecCalculator.calc_dRdx_BkgNeu_DSNB() bkg_nu_atm = SpecCalculator.calc_dRdx_BkgNeu_atm() bkg_rad_neutrons = SpecCalculator.calc_dRdx_Bkgn(C238) sig_5 = SpecCalculator.calc_dRdx_MW(5., 1e-45) sig_50 = SpecCalculator.calc_dRdx_MW(50., 1e-45) sig_500 = SpecCalculator.calc_dRdx_MW(500., 1e-45) plt.close('all') plt.plot(0.1*bkg_nu_solar[0], 10.*(bkg_nu_solar[1]+bkg_nu_DSNB[1]+bkg_nu_atm[1]), color=plt_colors[1], linestyle=':', label=r'$\nu$') plt.plot(0.1*bkg_rad_neutrons[0], 10.*bkg_rad_neutrons[1], color=plt_colors[2], linestyle='--', label=r'$n$') 
plt.plot(0.1*sig_5[0], 10.*sig_5[1], color=plt_colors[3], linestyle='-', label=r'$5\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.plot(0.1*sig_50[0], 10.*sig_50[1], color=plt_colors[4], linestyle='-.', label=r'$50\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.plot(0.1*sig_500[0], 10.*sig_500[1], color=plt_colors[0], linestyle='-', label=r'$500\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.xscale('log') plt.yscale('log') plt.xlim(1e0,1e3) plt.ylim(1e-4,1e8) plt.xlabel(r'$x_T$ [nm]') plt.ylabel(r'$dR/dx_T$ [nm$^{-1}$\,kg$^{-1}$\,Myr$^{-1}$]') plt.legend() plt.tick_params(right=True,top=True) plt.tick_params(which='minor',right=True,top=True) plt.title(mineral+r"; $C^{238}/$[g/g] = "+str(C238), y=1.02) plt.tight_layout() plt.show() # + # Reproduce Fig 2c from https://arxiv.org/pdf/1811.06844.pdf mineral = "Olivine" SpecCalculator = CalcSpectra.CalcSpectra(mineral) C238 = 1e-10 bkg_nu_solar = SpecCalculator.calc_dRdx_BkgNeu_solar() bkg_nu_DSNB = SpecCalculator.calc_dRdx_BkgNeu_DSNB() bkg_nu_atm = SpecCalculator.calc_dRdx_BkgNeu_atm() bkg_rad_neutrons = SpecCalculator.calc_dRdx_Bkgn(C238) sig_5 = SpecCalculator.calc_dRdx_MW(5., 1e-45) sig_50 = SpecCalculator.calc_dRdx_MW(50., 1e-45) sig_500 = SpecCalculator.calc_dRdx_MW(500., 1e-45) plt.close('all') plt.plot(0.1*bkg_nu_solar[0], 10.*(bkg_nu_solar[1]+bkg_nu_DSNB[1]+bkg_nu_atm[1]), color=plt_colors[1], linestyle=':', label=r'$\nu$') plt.plot(0.1*bkg_rad_neutrons[0], 10.*bkg_rad_neutrons[1], color=plt_colors[2], linestyle='--', label=r'$n$') plt.plot(0.1*sig_5[0], 10.*sig_5[1], color=plt_colors[3], linestyle='-', label=r'$5\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.plot(0.1*sig_50[0], 10.*sig_50[1], color=plt_colors[4], linestyle='-.', label=r'$50\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.plot(0.1*sig_500[0], 10.*sig_500[1], color=plt_colors[0], linestyle='-', label=r'$500\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.xscale('log') plt.yscale('log') plt.xlim(1e0,1e3) plt.ylim(1e-4,1e8) plt.xlabel(r'$x_T$ [nm]') plt.ylabel(r'$dR/dx_T$ [nm$^{-1}$\,kg$^{-1}$\,Myr$^{-1}$]') 
plt.legend() plt.tick_params(right=True,top=True) plt.tick_params(which='minor',right=True,top=True) plt.title(mineral+r"; $C^{238}/$[g/g] = "+str(C238), y=1.02) plt.tight_layout() plt.show() # + # Reproduce Fig 2c from https://arxiv.org/pdf/1811.06844.pdf mineral = "Nickelbischofite" SpecCalculator = CalcSpectra.CalcSpectra(mineral) C238 = 1e-10 bkg_nu_solar = SpecCalculator.calc_dRdx_BkgNeu_solar() bkg_nu_DSNB = SpecCalculator.calc_dRdx_BkgNeu_DSNB() bkg_nu_atm = SpecCalculator.calc_dRdx_BkgNeu_atm() bkg_rad_neutrons = SpecCalculator.calc_dRdx_Bkgn(C238) sig_5 = SpecCalculator.calc_dRdx_MW(5., 1e-45) sig_50 = SpecCalculator.calc_dRdx_MW(50., 1e-45) sig_500 = SpecCalculator.calc_dRdx_MW(500., 1e-45) plt.close('all') plt.plot(0.1*bkg_nu_solar[0], 10.*(bkg_nu_solar[1]+bkg_nu_DSNB[1]+bkg_nu_atm[1]), color=plt_colors[1], linestyle=':', label=r'$\nu$') plt.plot(0.1*bkg_rad_neutrons[0], 10.*bkg_rad_neutrons[1], color=plt_colors[2], linestyle='--', label=r'$n$') plt.plot(0.1*sig_5[0], 10.*sig_5[1], color=plt_colors[3], linestyle='-', label=r'$5\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.plot(0.1*sig_50[0], 10.*sig_50[1], color=plt_colors[4], linestyle='-.', label=r'$50\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.plot(0.1*sig_500[0], 10.*sig_500[1], color=plt_colors[0], linestyle='-', label=r'$500\,$GeV, $10^{-45}\,{\rm cm}^2$') plt.xscale('log') plt.yscale('log') plt.xlim(1e0,1e3) plt.ylim(1e-4,1e8) plt.xlabel(r'$x_T$ [nm]') plt.ylabel(r'$dR/dx_T$ [nm$^{-1}$\,kg$^{-1}$\,Myr$^{-1}$]') plt.legend() plt.tick_params(right=True,top=True) plt.tick_params(which='minor',right=True,top=True) plt.title(mineral+r"; $C^{238}/$[g/g] = "+str(C238), y=1.02) plt.tight_layout() plt.show() # + # Reproduce Fig 3a from https://arxiv.org/pdf/1811.06844.pdf # These smeared-and-binned spectra have some minor differences # to those in https://arxiv.org/pdf/1811.06844.pdf. 
There are at # least two reasons for this: # 1) in https://arxiv.org/pdf/1811.06844.pdf, the histograms were, # as below, computed and then plotted with plt.step(). # In https://arxiv.org/pdf/1811.06844.pdf, the bin-edges where # aligned incorrectly with the bin entries. The effect can be # reproduced by setting where_step = 'pre' below. where_step # controls the "where" kwarg of plt.step # 2) The MC-based smearing used in https://arxiv.org/pdf/1811.06844.pdf # seems to insufficiently sample the spectrum in regions where # the spectrum is quickly changing (on scales of the track # length resolution). This leads to some differences in the # neutrino induced spectra below. mineral = "Epsomite" sample_mass = 1e-5 # sample mass [kg] sample_age = 1e3 # sample age [Myr] C238 = 1e-11 res = 10. # readout resolution [Å] xmin = 10. # lower edge of the lowest track length bin [Å] xmax = 1e4 # upper edge of the hightest track length in [Å] nbins = int((xmax-xmin)/res) SpecCalculator = CalcSpectra.CalcSpectra(mineral, switch_keep_H=True) xbin_vec = CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_solar(), res, xmin = xmin, xmax = xmax, nbins = nbins )[0] Spec_nu_solar = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_solar(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_nu_DSNB = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_DSNB(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_nu_atm = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_atm(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_rad_1a = ( SpecCalculator.smear_and_bin_1a( C238, res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass ) Spec_rad_n = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_Bkgn(C238), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM1 = ( CalcSpectra.smear_and_bin( 
SpecCalculator.calc_dRdx_MW(1., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM5 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(5., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM10 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(10., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM15 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(15., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) where_step = 'post' plt.close('all') plt.step(xbin_vec[:-1]/10., (Spec_nu_solar+Spec_nu_DSNB+Spec_nu_atm), where=where_step, color=plt_colors[0], linestyle='--', label=r'$\nu$') #plt.step(xbin_vec[:-1]/10., Spec_rad_1a, where=where_step, color=plt_colors[1], linestyle='--', label=r'$1\alpha$') plt.step(xbin_vec[:-1]/10., Spec_rad_n, where=where_step, color=plt_colors[2], linestyle='--', label=r'$n$') plt.step(xbin_vec[:-1]/10., Spec_DM1, where=where_step, color=plt_colors[3], linestyle='-', label=r'1\,GeV, $10^{-42}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM5, where=where_step, color=plt_colors[4], linestyle='-', label=r'5\,GeV, $10^{-42}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM10, where=where_step, color=plt_colors[5], linestyle='-', label=r'10\,GeV, $10^{-42}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM15, where=where_step, color=plt_colors[6], linestyle='-', label=r'15\,GeV, $10^{-42}\,{\rm cm}^2$') plt.xscale('log') plt.yscale('log') plt.xlim(1.,1e3) plt.ylim(3e-2,3e5) plt.xlabel(r'$x_T$ [nm]') plt.ylabel(r'tracks/'+str(int((xbin_vec[1]-xbin_vec[0])/10.))+r'\,nm') plt.title(mineral +r'; $\varepsilon/$[kg.Myr] = '+str(sample_mass*sample_age) +r'; $C^{238}$/[g/g] = '+str(C238) +r'; $\sigma_{x_T}$/nm = '+str(res/10.), y=1.02) plt.legend(loc=3) plt.tick_params(right=True,top=True) plt.tick_params(which='minor',right=True,top=True) plt.text(200., 1e5, 
"w/ H tracks", fontsize=fs) plt.tight_layout() plt.show() # + # Reproduce Fig 3b from https://arxiv.org/pdf/1811.06844.pdf mineral = "Epsomite" SpecCalculator = CalcSpectra.CalcSpectra(mineral) xbin_vec = CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_solar(), res, xmin = xmin, xmax = xmax, nbins = nbins )[0] Spec_nu_solar = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_solar(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_nu_DSNB = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_DSNB(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_nu_atm = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_atm(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_rad_1a = ( SpecCalculator.smear_and_bin_1a( C238, res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass ) Spec_rad_n = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_Bkgn(C238), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM1 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(1., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM5 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(5., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM10 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(10., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM15 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(15., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) where_step='post' plt.close('all') plt.step(xbin_vec[:-1]/10., (Spec_nu_solar+Spec_nu_DSNB+Spec_nu_atm), where=where_step, color=plt_colors[0], linestyle='--', label=r'$\nu$') plt.step(xbin_vec[:-1]/10., Spec_rad_1a, where=where_step, color=plt_colors[1], 
linestyle='--', label=r'$1\alpha$') plt.step(xbin_vec[:-1]/10., Spec_rad_n, where=where_step, color=plt_colors[2], linestyle='--', label=r'$n$') plt.step(xbin_vec[:-1]/10., Spec_DM1, where=where_step, color=plt_colors[3], linestyle='-', label=r'1\,GeV, $10^{-42}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM5, where=where_step, color=plt_colors[4], linestyle='-', label=r'5\,GeV, $10^{-42}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM10, where=where_step, color=plt_colors[5], linestyle='-', label=r'10\,GeV, $10^{-42}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM15, where=where_step, color=plt_colors[6], linestyle='-', label=r'15\,GeV, $10^{-42}\,{\rm cm}^2$') plt.xscale('log') plt.yscale('log') plt.xlim(1.,1e3) plt.ylim(3e-2,3e5) plt.xlabel(r'$x_T$ [nm]') plt.ylabel(r'tracks/'+str(int((xbin_vec[1]-xbin_vec[0])/10.))+r'\,nm') plt.title(mineral +r'; $\varepsilon/$[kg.Myr] = '+str(sample_mass*sample_age) +r'; $C^{238}$/[g/g] = '+str(C238) +r'; $\sigma_{x_T}$/nm = '+str(res/10.), y=1.02) plt.legend(loc=3) plt.tick_params(right=True,top=True) plt.tick_params(which='minor',right=True,top=True) plt.text(200., 1e5, "w/o H tracks", fontsize=fs) plt.tight_layout() plt.show() # + # Reproduce Fig 4a from https://arxiv.org/pdf/1811.06844.pdf mineral = "Epsomite" sample_mass = 0.1 # sample mass [kg] sample_age = 1e3 # sample age [Myr] C238 = 1e-11 res = 150. # readout resolution [Å] xmin = 100. # lower edge of the lowest track length bin [Å] xmax = 1e4 # upper edge of the hightest track length in [Å] nbins = int((xmax-xmin)/100.) 
SpecCalculator = CalcSpectra.CalcSpectra(mineral, switch_keep_H=True) xbin_vec = CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_solar(), res, xmin = xmin, xmax = xmax, nbins = nbins )[0] Spec_nu_solar = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_solar(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_nu_DSNB = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_DSNB(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_nu_atm = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_atm(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_rad_1a = ( SpecCalculator.smear_and_bin_1a( C238, res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass ) Spec_rad_n = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_Bkgn(C238), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM5 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(5., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM50 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(50., 1e-47), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM500 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(500., 1e-46), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM5000 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(5000., 1e-45), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) where_step = 'post' plt.close('all') plt.step(xbin_vec[:-1]/10., (Spec_nu_solar+Spec_nu_DSNB+Spec_nu_atm), where=where_step, color=plt_colors[0], linestyle='--', label=r'$\nu$') #plt.step(xbin_vec[:-1]/10., Spec_rad_1a, where=where_step, color=plt_colors[1], linestyle='--', label=r'$1\alpha$') plt.step(xbin_vec[:-1]/10., Spec_rad_n, where=where_step, color=plt_colors[2], 
linestyle='--', label=r'$n$') plt.step(xbin_vec[:-1]/10., Spec_DM5, where=where_step, color=plt_colors[3], linestyle='-', label=r'5\,GeV, $10^{-42}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM50, where=where_step, color=plt_colors[4], linestyle='-', label=r'50\,GeV, $10^{-47}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM500, where=where_step, color=plt_colors[5], linestyle='-', label=r'500\,GeV, $10^{-46}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM5000, where=where_step, color=plt_colors[6], linestyle='-', label=r'5\,TeV, $10^{-45}\,{\rm cm}^2$') plt.xscale('log') plt.yscale('log') plt.xlim(10.,1e3) plt.ylim(3e0,1e9) plt.xlabel(r'$x_T$ [nm]') plt.ylabel(r'tracks/'+str(int((xbin_vec[1]-xbin_vec[0])/10.))+r'\,nm') plt.title(mineral +r'; $\varepsilon/$[kg.Myr] = '+str(sample_mass*sample_age) +r'; $C^{238}$/[g/g] = '+str(C238) +r'; $\sigma_{x_T}$/nm = '+str(res/10.), y=1.02) plt.legend(loc=3) plt.tick_params(right=True,top=True) plt.tick_params(which='minor',right=True,top=True) plt.text(400., 1e8, "w/ H tracks", fontsize=fs) plt.tight_layout() plt.show() # + # Reproduce Fig 4b from https://arxiv.org/pdf/1811.06844.pdf mineral = "Epsomite" SpecCalculator = CalcSpectra.CalcSpectra(mineral) xbin_vec = CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_solar(), res, xmin = xmin, xmax = xmax, nbins = nbins )[0] Spec_nu_solar = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_solar(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_nu_DSNB = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_DSNB(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_nu_atm = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_BkgNeu_atm(), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_rad_1a = ( SpecCalculator.smear_and_bin_1a( C238, res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass ) Spec_rad_n = ( 
CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_Bkgn(C238), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM5 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(5., 1e-42), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM50 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(50., 1e-47), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM500 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(500., 1e-46), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) Spec_DM5000 = ( CalcSpectra.smear_and_bin( SpecCalculator.calc_dRdx_MW(5000., 1e-45), res, xmin = xmin, xmax = xmax, nbins = nbins )[1] * sample_mass * sample_age ) where_step = 'post' plt.close('all') plt.step(xbin_vec[:-1]/10., (Spec_nu_solar+Spec_nu_DSNB+Spec_nu_atm), where=where_step, color=plt_colors[0], linestyle='--', label=r'$\nu$') plt.step(xbin_vec[:-1]/10., Spec_rad_1a, where=where_step, color=plt_colors[1], linestyle='--', label=r'$1\alpha$') plt.step(xbin_vec[:-1]/10., Spec_rad_n, where=where_step, color=plt_colors[2], linestyle='--', label=r'$n$') plt.step(xbin_vec[:-1]/10., Spec_DM5, where=where_step, color=plt_colors[3], linestyle='-', label=r'5\,GeV, $10^{-42}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM50, where=where_step, color=plt_colors[4], linestyle='-', label=r'50\,GeV, $10^{-47}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM500, where=where_step, color=plt_colors[5], linestyle='-', label=r'500\,GeV, $10^{-46}\,{\rm cm}^2$') plt.step(xbin_vec[:-1]/10., Spec_DM5000, where=where_step, color=plt_colors[6], linestyle='-', label=r'5\,TeV, $10^{-45}\,{\rm cm}^2$') plt.xscale('log') plt.yscale('log') plt.xlim(10.,1e3) plt.ylim(3e0,1e9) plt.xlabel(r'$x_T$ [nm]') plt.ylabel(r'tracks/'+str(int((xbin_vec[1]-xbin_vec[0])/10.))+r'\,nm') plt.title(mineral +r'; $\varepsilon/$[kg.Myr] = '+str(sample_mass*sample_age) +r'; 
$C^{238}$/[g/g] = '+str(C238) +r'; $\sigma_{x_T}$/nm = '+str(res/10.), y=1.02) plt.legend(loc=3) plt.tick_params(right=True,top=True) plt.tick_params(which='minor',right=True,top=True) plt.text(400., 1e8, "w/o H tracks", fontsize=fs) plt.tight_layout() plt.show() # -
ExamplesAndTests/Test_Plots_1811.06844.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
def get_parent_idx(self, idx):
    """Return the parent index of the node at ``idx`` (1-based heap layout)."""
    return idx // 2


def get_left_child_idx(self, idx):
    """Return the left-child index of the node at ``idx``."""
    return idx * 2


def get_right_child_idx(self, idx):
    """Return the right-child index of the node at ``idx``."""
    return idx * 2 + 1


def push(self, item):
    """Insert ``item`` (an object with a ``key`` attribute) into the max-heap.

    Sift-up strategy: only indices move while walking up the tree; ``item``
    itself is written into ``self.arr`` exactly once, at its final slot.

    Raises:
        IndexError: if the heap is already full.
    """
    if self.is_full():
        raise IndexError("the heap is full")
    self.heapsize += 1
    # Start at the new last slot and bubble up while the parent is smaller.
    cur_idx = self.heapsize
    # FIX: compare keys on both sides. The original compared ``item.key``
    # against the stored parent *object* (``self.arr[parent]``) instead of
    # its ``.key``, raising TypeError whenever the new key had to move up
    # (``pop`` below consistently compares ``.key`` values).
    while cur_idx != 1 and item.key > self.arr[self.get_parent_idx(cur_idx)].key:
        # Parent is smaller: pull it down into the current slot.
        self.arr[cur_idx] = self.arr[self.get_parent_idx(cur_idx)]
        cur_idx = self.get_parent_idx(cur_idx)
    # Finally place the new item into the hole we opened up.
    self.arr[cur_idx] = item


def pop(self):
    """Remove and return the maximum item, or ``None`` if the heap is empty.

    Sift-down strategy: the last element is tentatively promoted to the
    root and pushed down past any larger child until the heap property
    is restored.
    """
    if self.is_empty():
        return None
    # The element to return is always the root.
    rem_elem = self.arr[1]
    # Tentative new root: the last element of the heap.
    temp = self.arr[self.heapsize]
    cur_idx = 1
    bigger_child_idx = self.get_bigger_child_idx(cur_idx)
    # While a child exists and is bigger than temp, move that child up.
    while bigger_child_idx and temp.key < self.arr[bigger_child_idx].key:
        self.arr[cur_idx] = self.arr[bigger_child_idx]
        cur_idx = bigger_child_idx
        bigger_child_idx = self.get_bigger_child_idx(cur_idx)
    # Either we reached the bottom or temp dominates both children:
    # place temp at the current position and shrink the heap.
    self.arr[cur_idx] = temp
    self.heapsize -= 1
    return rem_elem
01.Algorithm/max_heap.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.0.3 # language: julia # name: julia-1.0 # --- HTML("$styl") # # Chapter 2 # We need these as some functions have moved from Base to Stdlib # using Printf, SpecialFunctions, LinearAlgebra # ### Arithmetic data types x = 2; typeof(x) x = 2.0; typeof(x) x0 = 2^65 x1 = big(2)^65 @assert x0 == x1 for T = Any[Int8,Int16,Int32,Int64,Int128,UInt8,UInt16,UInt32,UInt64,UInt128] println("$(lpad(T,7)): [$(typemin(T)),$(typemax(T))]") end # ### Logical and Bit datatypes x = 0xbb31; y = 0xaa5f; xor(x,y) x = 0xbb31; x << 8 x = 0xbb31; p = (2 < 3); x + p # ### Arrays # + # v1.0 requires the Statistics module for mean(), std() etc. # using Statistics # Mean of 15 random numbers in range 0:100 # A = rand(0:100,15) mean(A) # + # Create an empty array, note new syntax # 'undef' will not initialise the elements # A = Array{Int64,1}(undef, 15) # Verify: Tuple of the element type and the dimension sizes # (eltype(A),size(A)) # - # Fill array A with the first 15 Fibonnaci series # A[1] = 1 A[2] = 1 [A[i] = A[i-1] + A[i-2] for i = 3:length(A)] # ### Factorials # The 'recursive' definition of factorial function # A simple loop is much quicker # function fac(n::Integer) @assert n > 0 (n == 1) ? 1 : n*fac(n-1) end # This has difficulties with integer overflow # We now need the Printf module to use the @printf macro # using Printf for i = 1:30 @printf "%3d : %d \n" i fac(i) end # But since a BigInt <: Integer if we pass a BigInt the reoutine returns one # fac(big(30)) # + # Find stdlib, location is O/S dependent cd(Sys.BINDIR) cd("../share/julia/stdlib/v1.0") pwd() # - # We can check this using the gamma function # Again we need a module (SpecialFunctions) # using SpecialFunctions gamma(31) # Γ(n+1) <=> n! # + # This non-recursive one liner works! 
# Note that this returns a BigInt regardless of the input # fac(N::Integer) = (N < 1) ? throw(ArgumentError("N must be positive")) : reduce(*,big.(collect(1:N))) @time(fac(402)) # - gamma(big(403.0)) # --- # ### Other methods for generating Fibonacci numbers # + # The 'standard' recursive definition function fib(k::Integer) @assert k > 0 (k < 3) ? 1 : fib(k-1) + fib(k-2) end @time fib(15) # + # A better version function fib(n::Integer) @assert n > 0 a = Array{typeof(n),1}(undef,n) a[1] = 0 a[2] = 1 for i = 3:n a[i] = a[i-1] + a[i-2] end return a[n] end @time(fib(big(402))) # + # A still better version # This requires no array storage function fib(n::Integer) @assert n > 0 (a, b) = (big(0), big(1)) while n > 0 (a, b) = (b, a+b) n -= 1 end return a end # + # Golden ratio @printf "%.15f" fib(101)/fib(100) # + # Check with the actual value γ = (1.0 + sqrt(5.0))/2.0 # - # ### Bulls and Cows # # - This takes input from Standard Input (stdin) # - It does not run well within a Jupyter cell # - Better to cut-n-paste it into a terminal running a Julia REPL using Random # stdlib module needed for srand() => seed!() # tm = round(time()); seed = convert(Int64,tm); Random.seed!(seed); function bacs() bulls = cows = turns = 0 a = Any[] while length(unique(a)) < 4 push!(a,rand('0':'9')) end my_guess = unique(a) println("Bulls and Cows") while (bulls != 4) print("Guess? 
> ") s = chomp(readline(stdin)) if (s == "q") print("My guess was "); [print(my_guess[i]) for i=1:4] return end guess = collect(s) if !(length(unique(guess)) == length(guess) == 4 && all(isdigit,guess)) print("\nEnter four distinct digits or q to quit: ") continue end bulls = sum(map(==, guess, my_guess)) cows = length(intersect(guess,my_guess)) - bulls println("$bulls bulls and $cows cows!") turns += 1 end println("\nYou guessed my number in $turns turns.") end bacs() # --- # ### Cat and Mouse # The matrix file is in Files subdirectory # Check we are at the correct location # cd() # Change as needed pwd() #http://en.wikipedia.org/wiki/Stochastic_matrix # # Create stochastic matrix and write to disk # open("./cm3.txt","w") do f write(f,"0.0,0.0,0.5,0.0\n") write(f,"0.0,0.0,1.0,0.0\n") write(f,"0.25,0.25,0.0,0.25\n") write(f,"0.0,0.0,0.5,0.0\n") end # + using DelimitedFiles I = zeros(4,4); [I[i,i] = 1 for i in 1:4]; # + f = open("./cm3.txt","r") T = readdlm(f,','); close(f); T # + Ep = [0 1 0 0]*inv(I - T)*[1,1,1,1]; println("Expected lifetime for the mouse is $(Ep[1]) hops.") # - # --- # ### Norms # # _There is more than one way to skin a cat_ # + # Look at different definitions of the norm function # For a Gaussian distribution of size N we should expect the answer ~= √N # The first call f1(1) is to run in the function and not affect the timing # This version uses the function in the stdlib LinearAlgebra module using LinearAlgebra f1(n) = norm(randn(n)) f1(10); @time f1(100_000_000) # + # We can get the same result using a mapreduce procedure # Note that it is a new set of random number, so the answer is slightly different # The time is about the same f2(n) = sqrt(mapreduce(x -> x*x, +, randn(n))) f2(10); @time f2(100_000_000) # + # Using a conventional mapping we need to pipe the result to sum it # and then take the square root # This takes a little longer tha the previous 2 f3(n) = map(x -> x*x,randn(n)) |> sum |> sqrt f3(10); @time f3(100_000_000) # + # Finally 
we can non-vectorize the code, which is much quicker,
# In Julia non-vectorized (i.e. loopy) code is invariably faster
# than the vectorized equivalent.
function f4(n)
    t = 0.0
    for i = 1:n
        t += randn()^2
    end
    return sqrt(t)
end
f4(10);
@time f4(100_000_000)
# -

# ### Pointy Norms

# +
# Define a very simple type to represent a 2-D point
struct Point
    x::Real
    y::Real
end

# +
# We can define how to add and scale points
# This needs importing the + and * functions from Base
import Base: +,*

+(u::Point,v::Point) = Point(u.x + v.x, u.y + v.y )
*(a::Real,u::Point) = Point(a*u.x, a*u.y)
*(u::Point,a::Real) = a*u

# +
# Just test the type structure
u1 = Point(1.0,2.3)
u1*(17//13)
# -

# Using Julia's aggregate object model this type "knows" all about arrays
# Note: I'll deal with the object model in greater detail the next two chapters
#
aa = [Point(randn(),randn()) for i = 1:100];
ab = reshape(aa,10,10)

# +
# It is useful to define a zero function (not sure about a one())
zero(Point) = Point(0.0,0.0)

# +
# The dot product is the sum of the products of the coordinates
# FIX: the original used '*' between the two terms, which computed a
# product of the coordinate products rather than a dot product; the
# comment above (and the definition of a dot product) requires '+'.
dot(u::Point)::Real = u.x^2 + u.y^2
dot(u::Point,v::Point)::Real = u.x*v.x + u.y*v.y
dot(ab[1])

# +
# The distance between two points is determined by Pythagoras's rule
dist(u::Point,v::Point)::Real = sqrt((u.x - v.x)^2 + (u.y - v.y)^2)
dist(ab[4,1],ab[2,7])

# +
# The distance of the point from the origin is equivalent to its norm
# We need to import the 'norm' function
import LinearAlgebra.norm
norm(u::Point)::Real = sqrt(u.x^2 + u.y^2)
norm(aa[17])

# +
# It is also possible to define this as:
dist(u::Point)::Real = dist(u::Point,zero(Point))
# Although this requires slightly more work to compute and it may be
# better just to define it as dist(u::Point) = norm(u)
@assert(dist(aa[17]) == norm(aa[17])) ## Should produce NO output if it is TRUE
# -

typeof(aa) # Note that this is an array of points

# So norm will not work on this yet

# +
# One way is to overload the norm function using the mapreduce function
above # (or write a non-vectorized one) norm(a::Array{Point,1}) = sqrt(mapreduce(x -> dist(x), +, a)) # + # And now we can estimate PI (as in the previous chapter) N = 1000000; ac = [Point(rand(),rand()) for i = 1:N]; # We need the let/end because of the scoping rules let count = 0 for i = 1:N (dist(ac[i]) < 1.0) && (count += 1) end 4.0*(count/N) end # + # Note as yet norm(ab) will not work so we need to be a little more imaginative # with the function definition. # In the next chapter we will generalise this type definition to 3-Vector and N-vector # and revisit the quiestion then norm(ab) # - # --- # ### Generate a *Julia* set function juliaset(z, z0, nmax::Int64) for n = 1:nmax if abs(z) > 2 (return n-1) end z = z^2 + z0 end return nmax end function create_pgmfile(img, outf::String) s = open(outf, "w") write(s, "P5\n") n, m = size(img) write(s, "$m $n 255\n") for i=1:n, j=1:m p = min(img[i,j],255) write(s, UInt8(p)) end close(s) end # + h = 400; w = 800; m = Array{Int64,2}(undef,h,w); c0 = -0.8 + 0.16im; pgm_name = "jset.pgm"; t0 = time(); for y=1:h, x=1:w c = complex((x-w/2)/(w/2), (y-h/2)/(w/2)) m[y,x] = juliaset(c, c0, 256) end t1 = time(); # + # You should find the file in the same chapter as the notebook create_pgmfile(m, pgm_name); @printf "Written %s\nFinished in %.4f seconds.\n" pgm_name (t1-t0); # + # Display the image using Imagemagick's display command # Clicking the file may display it (OSX/XQuartz and Centos/Gnome certainly does) # On OSX use: brew reinstall imagemagick --with-x11 # # Otherwise you make be able to click onit (OSX or Linux) # or on WIndows use an image processing program such as Irfanview # We will be looking at how to do this in Julia later. here = pwd() run(`display $here/$pgm_name`) # - # ---
Chp02/Chp02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import patsy as pt import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn import preprocessing import pymc3 as pm import matplotlib.ticker as tk import re import pickle from scipy.stats import norm # + # !mkdir outputs/bayes_opt_gp_m52 out_dir = 'outputs/bayes_opt_gp_m52/' # - root_dir = 'outputs/mml_gp_m52/' # Notes: # # We need to do this on smaller amounts of data or it won't be much use. # # First calculate the Expected improvement on all the data, then go back and create predictions using subsets of training data. # # All data calculation # ## Import data # This takes the predictions from the MML model calculated previously. pred = pd.read_csv(root_dir+'predictions.csv') pred.head() pred.loc[pred['basis']=='phipsi', :] # Note: scaling of n_log was done on the experimental data and the minimum was > 10. This means some values of n_s will be < 0. This shouldn't be a problem. 
# ## Expected improvement

# +
def exp_imp(f, sigma, xsi, mu_max):
    """Expected improvement (EI) acquisition function for maximization.

    f: mean of response surface (array-like / Series)
    sigma: sd of response surface (array-like / Series)
    xsi: explore/exploit tradeoff parameter
    mu_max: the incumbent (best observed mean so far)

    Returns EI = (f - mu_max - xsi)*Phi(Z) + sigma*phi(Z) with
    Z = (f - mu_max - xsi)/sigma, and EI = 0 wherever sigma is
    (numerically) zero, which is the standard convention.
    """
    # Allow the division to produce inf/nan at sigma == 0; those entries
    # are overwritten with 0 below.
    with np.errstate(divide='ignore', invalid='ignore'):
        Z = (f - mu_max - xsi)/sigma
    pdf = norm.pdf(Z)
    cdf = norm.cdf(Z)
    ei = (f - mu_max - xsi)*cdf + sigma*pdf
    # BUG FIX: the previous code clipped |Z| > 1e8 to Z = 0, which makes
    # EI = 0.5*(f - mu_max - xsi) at sigma ~= 0 (possibly negative)
    # instead of the correct EI = 0 at a point with no posterior variance.
    zero_sd = np.abs(sigma) < 1e-12
    ei[zero_sd] = 0.0
    return ei

def plot_ei_rs(*args, **kwargs):
    """FacetGrid helper: response surface (left axis) + EI (right axis).

    Expects a dataframe with columns n, f_pred, lb, ub, ei; 'ylim' caps
    the EI axis so panels are comparable.
    """
    data=kwargs.pop('data')
    color=kwargs.pop('color')
    ylim = kwargs.pop('ylim')
    # plot response surface
    ax = plt.gca()
    ax2 = ax.twinx()
    ax.plot(data['n'], data['f_pred'], color=cols[0], label=r'$f(\chi, n)$')
    ax.fill_between(data['n'], data['lb'], data['ub'], color=cols[0], alpha=0.5, label=r'$2*\sigma$')
    # plot acquisition function
    ax2.plot(data['n'], data['ei'], color=color, label='Expected Improvement')
    ax2.set_ylim(0, ylim)
# -

# +
cols = sns.color_palette('colorblind')

# Incumbent: the best predicted mean over the whole surface.
mu_max = pred['f_pred'].max()
pred['ei'] = exp_imp(pred['f_pred'], sigma=pred['sd_f'], xsi=0, mu_max=mu_max)
pred.to_csv(root_dir+'predictions_with_ei.csv', index=False)

ei_max = np.ceil(100*pred['ei'].max()*1.1)/100

with sns.plotting_context('paper', font_scale=1.25):
    g = sns.FacetGrid(data=pred, col='basis', col_wrap=3)
    g.map_dataframe(plot_ei_rs, ylim=0.1)
    g.set(xscale='log')
# -

# # Variable amount of data

# ## Import data

root_dir = 'outputs/mml_gp_m52/'
df = pd.read_csv(root_dir+'data.csv')

# ## Model fitting functions

# +
def gamma(alpha, beta):
    # Factory: returns a function that declares a named Gamma prior.
    def g(x):
        return pm.Gamma(x, alpha=alpha, beta=beta)
    return g

def hcauchy(beta):
    # Factory: returns a function that declares a named HalfCauchy prior.
    def g(x):
        return pm.HalfCauchy(x, beta=beta)
    return g


def fit_model_1(y, X, kernel_type='rbf'):
    """
    function to return a pymc3 model
    y : dependent variable
    X : independent variables (one product-kernel factor per column)
    kernel_type : one of 'rbf', 'exponential', 'm52', 'm32'

    X, y are dataframes. We'll use the column names.
    Returns (gp, MAP point, model).
    """
    with pm.Model() as model:
        # Convert to arrays
        X_a = X.values
        y_a = y.values
        X_cols = list(X.columns)

        # Globals
        prop_Xu = 0.1  # proportion of observations to use as inducing variables
        l_prior = gamma(1, 0.05)
        eta_prior = hcauchy(2)
        sigma_prior = hcauchy(2)

        # Kernels: product over dimensions -> full interaction kernel
        eta = eta_prior('eta')
        cov = eta**2
        for i in range(X_a.shape[1]):
            var_lab = 'l_'+X_cols[i]
            if kernel_type.lower()=='rbf':
                cov = cov*pm.gp.cov.ExpQuad(X_a.shape[1], ls=l_prior(var_lab), active_dims=[i])
            if kernel_type.lower()=='exponential':
                cov = cov*pm.gp.cov.Exponential(X_a.shape[1], ls=l_prior(var_lab), active_dims=[i])
            if kernel_type.lower()=='m52':
                cov = cov*pm.gp.cov.Matern52(X_a.shape[1], ls=l_prior(var_lab), active_dims=[i])
            if kernel_type.lower()=='m32':
                cov = cov*pm.gp.cov.Matern32(X_a.shape[1], ls=l_prior(var_lab), active_dims=[i])

        # Covariance model
        cov_tot = cov

        # Sparse GP to keep fitting tractable
        gp = pm.gp.MarginalSparse(cov_func=cov_tot, approx="FITC")

        # Noise model
        sigma_n = sigma_prior('sigma_n')

        # Inducing variables via k-means on the inputs
        num_Xu = int(X_a.shape[0]*prop_Xu)
        Xu = pm.gp.util.kmeans_inducing_points(num_Xu, X_a)

        # Marginal likelihood + MAP estimate of the hyperparameters
        y_ = gp.marginal_likelihood('y_', X=X_a, y=y_a, Xu=Xu, noise=sigma_n)
        mp = pm.find_MAP()

    return gp, mp, model

def get_dmatrix(df):
    """Build the patsy design matrix (~ 0 + n_s + C(basis)); returns (y, X)."""
    if 'y' in df.columns:
        y = df.loc[:, 'y']
    else:
        y = None
    # Strip patsy's C()/[] decoration from the generated column names.
    X_c = pt.dmatrix('~ 0 + n_s + C(basis)', data=df, return_type='dataframe')
    X_c = X_c.rename(columns=lambda x: re.sub('C|\\(|\\)|\\[|\\]','',x))
    return y, X_c
# -

# ## Create models with different amounts of training data

# +
n_obs = [10, 25, 50]
mods = []
for n in n_obs:
    print(n)
    # Sample n observations per basis so each model sees a balanced subset.
    tmp = df.groupby('basis').apply(lambda x: x.sample(n=n))
    y, X = get_dmatrix(tmp)
    gp, mp, model = fit_model_1(y, X, kernel_type='m52')
    mods.append({'n':n, 'gp': gp, 'mp': mp, 'model': model})
# -

# FIX: use a context manager so the file handle is always closed
# (previously `open(...)` was passed directly to pickle.dump and leaked).
with open(out_dir+'models.p', 'wb') as fh:
    pickle.dump(mods, fh)

# ## Create predictions for each model

# The order of the basis functions should be determined (reproducibly) by patsy (it appears to be alphabetical)

_, X_new_c = get_dmatrix(pred)
X_new = pred.loc[:, ['n', 'basis']]
X_new.head()

# +
all_preds = []
for mod in mods:
    model = mod['model']
    gp = mod['gp']
    mp = mod['mp']
    n = mod['n']  # over used 'n'!
    with model:
        # predict latent function (no observation noise)
        mu, var = gp.predict(X_new_c.values, point=mp, diag=True, pred_noise=False)
        sd_f = np.sqrt(var)
    # record results
    tmp = pd.DataFrame({'f_pred': mu, 'sd_f': sd_f})
    tmp['n_obs'] = n
    tmp = tmp.join(X_new)
    all_preds.append(tmp)
all_preds = pd.concat(all_preds)
# -

all_preds.head()

# ## Expected improvement

# +
# Per-model incumbent and +/- 2 sd band for plotting.
all_preds['mu_max'] = all_preds.groupby('n_obs')['f_pred'].transform(lambda x: x.max())
all_preds['lb'] = all_preds['f_pred']-2*all_preds['sd_f']
all_preds['ub'] = all_preds['f_pred']+2*all_preds['sd_f']
# +
# all_preds['plt_ei_max'] = all_preds.groupby('n_obs')['ei'].transform(lambda x: np.ceil(100*x.max()*1.1)/100)
# +
all_preds['ei'] = exp_imp(all_preds['f_pred'], all_preds['sd_f'], xsi=0, mu_max=all_preds['mu_max'])

with sns.plotting_context('paper', font_scale=1.25):
    g = sns.FacetGrid(data=all_preds, col='basis', row='n_obs')
    g.map_dataframe(plot_ei_rs, ylim=0.1)
    g.set(xscale='log')
6_acquisition_function.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/paulanavarretec/WineRec/blob/master/wineRec_FM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab_type="code" id="92ZVGyqkoh_q" colab={} #from google.colab import files #uploaded = files.upload() # + [markdown] id="9V6g2dga_THy" colab_type="text" # # Get reviews into data structures # + colab_type="code" id="MUJhr9WOsdZ4" outputId="02f49088-dd62-4f1b-e1d6-1ab881786dda" colab={"base_uri": "https://localhost:8080/", "height": 505} import numpy as np import pandas as pd # !pip install gensim import gensim ### Read data file _reviews = pd.read_csv('Reviews_no_nan.csv', sep=';', names = ['userID','review', 'review_URL','score','sentiment','date_string', 'vintage', 'type', 'producer', 'variety', 'designation', 'vineyard', 'country', 'region', 'subregion', 'appellation', 'price_level'], header=0) ### Check for Descriptive Information print("Matrix size",_reviews.shape) # view dataframe size print("Column Names:",_reviews.columns) # view columns using df.columns ### Data overview #_reviews.head() _reviews.tail() ### No null Reviews allowed print("NaNs:",_reviews['review'].isna().sum()) ### Get null's source df = _reviews[_reviews['review'].notnull()] _df = _reviews[_reviews['review'].isna()] reviews = _reviews[_reviews['review'].notnull()] print("Nulls:",_df) ### Preprocess review's texts for later encoding documents = [] for i in range(len(df)): documents.append(gensim.utils.simple_preprocess(df['review'].values[i])) print(documents[0:3]) # + [markdown] colab_type="text" id="Y-jKawCsrYrp" # # Reviews text Embedding # # Hacemos el embedding de cada review con doc2vec provisto por gensim. 
# + id="b4o0i2wp_TIO" colab_type="code" outputId="7b79718a-0f38-4287-fd7c-0be54b490c7d" colab={"base_uri": "https://localhost:8080/", "height": 33} #from gensim.test.utils import common_texts # Use when real data IS NOT available common_texts = documents # Use when real data IS available len(common_texts) # + id="8QY-aDSd_TIc" colab_type="code" colab={} from gensim.models.doc2vec import Doc2Vec, TaggedDocument # Learn corpus to vectorize reviews tagged_documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(common_texts)] model = Doc2Vec(tagged_documents, vector_size=10, window=2, min_count=1, workers=4) # + id="oIjpsE8F_TIp" colab_type="code" outputId="b3d312d9-cb92-41f4-9649-5db5dc6118e2" colab={"base_uri": "https://localhost:8080/", "height": 87} ### Simple Example print(documents[0]) vector = model.infer_vector(documents[0]) print(vector) # + id="ShLZDzUq_TI2" colab_type="code" outputId="ed43d419-4b0c-4610-a168-1d8edd0479f9" colab={"base_uri": "https://localhost:8080/", "height": 134} ### Now for all reviews documents_vectors = [] for i in range(len(documents)): documents_vectors.append(model.infer_vector(documents[i])) documents_vectors = np.asarray(documents_vectors) ### Vectorized reviews overlook (documents_vectors[0:3]) # + id="U0dKLyTc_TJD" colab_type="code" outputId="2b6a2394-0b98-4f3c-b46c-c79b0e50e2ce" colab={"base_uri": "https://localhost:8080/", "height": 33} ### Sanity check for dimensions documents_vectors.shape # + [markdown] colab_type="text" id="L6D3VjZOcKpg" # #Users, items y features representation # + colab_type="code" id="L8gyNQsY83so" outputId="2d99fe77-fb88-4dfd-c146-2e35b9eacd92" colab={"base_uri": "https://localhost:8080/", "height": 484} print("Matrix size",reviews.shape) #reviews.head() reviews.tail() # + colab_type="code" id="qCdqi881uWJ5" colab={} ### Fill in the rare values with supported ones reviews['vintage'] = reviews['vintage'].fillna(0).astype('int') reviews['designation'] = reviews['designation'].fillna(0).astype('str') 
reviews['vineyard'] = reviews['vineyard'].fillna(0).astype('str') reviews['subregion'] = reviews['subregion'].fillna(0).astype('str') reviews['appellation'] = reviews['appellation'].fillna(0).astype('str') # + [markdown] colab_type="text" id="5LnslpraNWwD" # Nuestro review df variable contiene un pandas dataframe con XXXX reviews rows y 17 columnas con wines features. Todas excepto score, year y vintage son categorical features que deben ser codificadas para poder usarlas como input en los algoritmos de aprendizaje. # + colab_type="code" id="idcx42qVPOer" outputId="133bfb17-9dcb-43a5-ef91-d67242fe4398" colab={"base_uri": "https://localhost:8080/", "height": 630} # import preprocessing from sklearn from sklearn import preprocessing from sklearn.preprocessing import LabelEncoder label_encoder = LabelEncoder() ### Encode all categorical data to integers to be able to do some math on it reviews['user_id'] = label_encoder.fit_transform(reviews.userID) reviews['item_id'] = label_encoder.fit_transform(reviews.review_URL) reviews['type_encoded'] = label_encoder.fit_transform(reviews.type) reviews['producer_encoded'] = label_encoder.fit_transform(reviews.producer) reviews['variety_encoded'] = label_encoder.fit_transform(reviews.variety) reviews['designation_encoded'] = label_encoder.fit_transform(reviews.designation) reviews['vineyard_encoded'] = label_encoder.fit_transform(reviews.vineyard) reviews['country_encoded'] = label_encoder.fit_transform(reviews.country) reviews['region_encoded'] = label_encoder.fit_transform(reviews.region) reviews['subregion_encoded'] = label_encoder.fit_transform(reviews.subregion) reviews['appellation_encoded'] = label_encoder.fit_transform(reviews.appellation) print("Column Names:",reviews.columns) # view columns using df.columns reviews.tail(5) # + [markdown] id="drH2FVkU_TJ-" colab_type="text" # ## Item representation # + colab_type="code" id="HnjOua6gSbXx" outputId="9ab5aa50-dc70-4d1b-f738-9a5e8e6055b2" colab={"base_uri": 
"https://localhost:8080/", "height": 568} from sklearn.preprocessing import OneHotEncoder onehotencoder = OneHotEncoder() ### Create dummies for items and users user_id_ = onehotencoder.fit_transform(reviews.user_id.values.reshape(-1,1)).toarray() item_id_ = onehotencoder.fit_transform(reviews.item_id.values.reshape(-1,1)).toarray() vintage_ = onehotencoder.fit_transform(reviews.vintage.values.reshape(-1,1)).toarray() type_encoded_ = onehotencoder.fit_transform(reviews.type_encoded.values.reshape(-1,1)).toarray() producer_encoded_ = onehotencoder.fit_transform(reviews.producer_encoded.values.reshape(-1,1)).toarray() variety_encoded_ = onehotencoder.fit_transform(reviews.variety_encoded.values.reshape(-1,1)).toarray() designation_encoded_ = onehotencoder.fit_transform(reviews.designation_encoded.values.reshape(-1,1)).toarray() vineyard_encoded_ = onehotencoder.fit_transform(reviews.vineyard_encoded.values.reshape(-1,1)).toarray() country_encoded_ = onehotencoder.fit_transform(reviews.country_encoded.values.reshape(-1,1)).toarray() region_encoded_ = onehotencoder.fit_transform(reviews.region_encoded.values.reshape(-1,1)).toarray() subregion_encoded_ = onehotencoder.fit_transform(reviews.subregion_encoded.values.reshape(-1,1)).toarray() appellation_encoded_ = onehotencoder.fit_transform(reviews.appellation_encoded.values.reshape(-1,1)).toarray() print(user_id_[0:3]) print("Users dimensions:",user_id_.shape) item_id_OneHot = pd.DataFrame(item_id_, columns = ["item_"+str((i)) for i in range(item_id_.shape[1])]) user_id_OneHot = pd.DataFrame(user_id_, columns = ["user_"+str((i)) for i in range(user_id_.shape[1])]) vintage_OneHot = pd.DataFrame(vintage_, columns = ["vintage_"+str((i)) for i in range(vintage_.shape[1])]) type_encoded_OneHot = pd.DataFrame(type_encoded_, columns = ["type_"+str((i)) for i in range(type_encoded_.shape[1])]) producer_encoded_OneHot = pd.DataFrame(producer_encoded_, columns = ["type_"+str((i)) for i in range(producer_encoded_.shape[1])]) 
# FIX: every frame below previously labelled its columns "type_N" (copy-paste
# from type_encoded_OneHot), which duplicated column names across the
# concatenated items/features frames. Each frame now uses its own prefix.
variety_encoded_OneHot = pd.DataFrame(variety_encoded_, columns = ["variety_"+str(i) for i in range(variety_encoded_.shape[1])])
designation_encoded_OneHot = pd.DataFrame(designation_encoded_, columns = ["designation_"+str(i) for i in range(designation_encoded_.shape[1])])
vineyard_encoded_OneHot = pd.DataFrame(vineyard_encoded_, columns = ["vineyard_"+str(i) for i in range(vineyard_encoded_.shape[1])])
country_encoded_OneHot = pd.DataFrame(country_encoded_, columns = ["country_"+str(i) for i in range(country_encoded_.shape[1])])
region_encoded_OneHot = pd.DataFrame(region_encoded_, columns = ["region_"+str(i) for i in range(region_encoded_.shape[1])])
subregion_encoded_OneHot = pd.DataFrame(subregion_encoded_, columns = ["subregion_"+str(i) for i in range(subregion_encoded_.shape[1])])
appellation_encoded_OneHot = pd.DataFrame(appellation_encoded_, columns = ["appellation_"+str(i) for i in range(appellation_encoded_.shape[1])])

# Item feature matrix: selected one-hot blocks; high-cardinality blocks are
# commented out to keep dimensionality manageable.
items = pd.concat([item_id_OneHot,
                   vintage_OneHot,
                   type_encoded_OneHot,
                   #producer_encoded_OneHot,
                   variety_encoded_OneHot,
                   #designation_encoded_OneHot,
                   #vineyard_encoded_OneHot,
                   country_encoded_OneHot,
                   region_encoded_OneHot,
                   #subregion_encoded_OneHot,
                   #appellation_encoded_OneHot
                   ], axis=1)
print("Item dimensions:",items.shape)
items.tail(13)

# + [markdown] id="RokBq9Hp_TKN" colab_type="text"
# ## User representation

# + id="G9Tte1gm_TKS" colab_type="code" outputId="0bb7a981-42e3-4185-953d-7c70e3c6e3c7" colab={"base_uri": "https://localhost:8080/", "height": 196}
### Get date data in date format to be able to sort by review date
# NOTE(review): positional key reviews['date_string'][i] assumes a clean
# RangeIndex on `reviews` -- verify, since rows with NaN reviews were dropped.
date = []
for i in range(len(reviews)):
    date.append(reviews['date_string'][i].split(' -')[0])
date = pd.DataFrame(date)
date.columns = ['date']
date['date'] = pd.to_datetime(date.date)
date.head()

# + id="sKe55zz__TKg" colab_type="code" outputId="f422815f-a726-4ce7-81d2-c979643283e1" colab={"base_uri": "https://localhost:8080/", "height": 735}
### Set new column with posts quantity set to zero and add date column to reviews df
reviews['posts_qty'] = 0   # experience: number of posts made so far
# FIX: initialize as float -- this column receives float timestamp deltas below
reviews['tenure'] = 0.0    # experience: time in the system since first post

users = pd.concat([user_id_OneHot, pd.DataFrame(documents_vectors), date, reviews.posts_qty],axis=1)
users['ts'] = users.date.values.astype(np.float64)/float(pow(10,16))
reviews = pd.concat([reviews, users.date], axis=1)

print("Cantidad de usuarios unicos:",len(reviews.userID.unique()))
print(users.shape)
users.tail(13)

# + id="3nPorEgK_TKu" colab_type="code" outputId="6efbbd53-d06c-42ee-bc17-566a1b87ac7e" colab={"base_uri": "https://localhost:8080/", "height": 104}
### loop to increment posts_qty by one in ascending date order
# For each user, walk their reviews oldest-first and record posts_qty
# (1, 2, 3, ...) and tenure (elapsed 'ts' since that user's first post).
for user in reviews.userID.unique():
    subset = reviews.loc[reviews['userID'] == user]
    subset = subset.sort_values(by='date')
    idx = subset.index.tolist()
    start = float(users.iloc[idx[0]]['ts'])
    # enumerate from 1: a user's first post has posts_qty == 1, tenure == 0.
    # (The original kept a manual counter `i` that shadowed the outer loop
    # variable and indexed users via idx[i-1], which is just `index`.)
    for post_number, index in enumerate(idx, start=1):
        tenure = float(users.iloc[index]['ts']) - start
        # FIX: DataFrame.set_value was deprecated in 0.21 and removed in
        # pandas 1.0 -- .at is the supported scalar setter.
        reviews.at[index, 'tenure'] = tenure
        reviews.at[index, 'posts_qty'] = post_number

# + id="QpX9PkpZ_TK4" colab_type="code" outputId="55a23ddc-4aa2-47a5-c79c-e6ba80d1e09b" colab={"base_uri": "https://localhost:8080/", "height": 546}
### Sanity Check: posts_qty column should be in ascending order incremented by one in each row
subset = reviews.loc[reviews['userID'] == 'Anonymous']
subset = subset.sort_values(by='posts_qty')
subset.head()

# + [markdown] id="kjDxW-SU_TLA" colab_type="text"
# ## Feature Vectors

# + id="sJav6El4R2PT" colab_type="code" colab={}
tenure_ = onehotencoder.fit_transform(reviews.tenure.values.reshape(-1,1)).toarray()
posts_qty_ = onehotencoder.fit_transform(reviews.posts_qty.values.reshape(-1,1)).toarray()

# FIX: column prefixes previously said "item_" (copy-paste slip), colliding
# with the real item_id one-hot column names.
tenure_OneHot = pd.DataFrame(tenure_, columns = ["tenure_"+str(i) for i in range(tenure_.shape[1])])
posts_qty_OneHot = pd.DataFrame(posts_qty_, columns = ["posts_qty_"+str(i) for i in range(posts_qty_.shape[1])])
# + colab_type="code" id="Mdz6p5IXPZYV" outputId="b41ea5ec-da3c-424b-ab2a-773b9b0a7d5c" colab={"base_uri": "https://localhost:8080/", "height": 411}
from sklearn.preprocessing import StandardScaler

# Standardize the two numeric experience features before joining them to the
# one-hot user block.
scaler = StandardScaler()
scaled = pd.concat([reviews.tenure, reviews.posts_qty], axis=1)
scaled_data = pd.DataFrame(scaler.fit_transform(scaled))

users = pd.concat([user_id_OneHot,
                   #pd.DataFrame(documents_vectors),
                   scaled_data,
                   #tenure_OneHot,
                   #posts_qty_OneHot
                   ], axis=1)

features = pd.concat([items, users], axis=1)
print(features.shape)
features.tail(10)

# + id="x9OV9M9V_TLJ" colab_type="code" colab={}
scores = reviews['score']
scores.to_csv("scores.csv", sep='\n', index=False)

# + [markdown] id="epNZFwu3g4hW" colab_type="text"
# #Experiments

# + [markdown] id="g5NTOW-zIRzP" colab_type="text"
# ##FM

# + id="GeCkLXcaIgov" colab_type="code" outputId="9991fc8d-3b39-4694-97c1-7165bb32cfa9" colab={"base_uri": "https://localhost:8080/", "height": 100}
# !pip install fastFM
from fastFM.datasets import make_user_item_regression
from sklearn.model_selection import train_test_split
import scipy as sc

# + id="Zqbuszami9BV" colab_type="code" colab={}
features.fillna(0, inplace=True)
scores.fillna(0, inplace=True)

# fastFM expects a sparse design matrix.
X = sc.sparse.csr_matrix(features.values).tocsr()
y = scores.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# + id="LNN6kgPqL2qp" colab_type="code" outputId="0cbbd91f-824b-49d3-c22e-52b88c6a51e3" colab={"base_uri": "https://localhost:8080/", "height": 67}
print("Set size:",features.shape,"\nTraining set size:", X_train.shape,"\nTest set size:", X_test.shape )

# + id="cuPqZadqhWXG" colab_type="code" colab={}
from fastFM import als
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import time

# Measure time
start_time = time.time()

n_iter = 500
step_size = 1
l2_reg_w = 0
l2_reg_V = 0
#fm = als.FMRegression(n_iter=0, l2_reg_w=0.1, l2_reg_V=0.1, rank=4, random_state=42)
fm = als.FMRegression(n_iter=0, init_stdev=0.1, rank=8, random_state=123,
                      l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0)
# Allocates and initalizes the model parameter.
fm.fit(X_train, y_train)
lapse = time.time()-start_time

# + id="h1BXWpGopOU0" colab_type="code" outputId="327ec94b-9151-4d34-d3c6-8e7148efa35f" colab={"base_uri": "https://localhost:8080/", "height": 285}
rmse_train = []
rmse_test = []
mae_train = []
mae_test = []
r2_train = []
r2_test = []

# Warm-start training: add step_size ALS iterations at a time and record
# train/test metrics after each step.
for i in range(1, n_iter):
    fm.fit(X_train, y_train, n_more_iter=step_size)
    y_pred = fm.predict(X_test)
    rmse_train.append(np.sqrt(mean_squared_error(fm.predict(X_train), y_train)))
    rmse_test.append(np.sqrt(mean_squared_error(fm.predict(X_test), y_test)))
    mae_train.append(mean_absolute_error(fm.predict(X_train), y_train))
    mae_test.append(mean_absolute_error(fm.predict(X_test), y_test))
    r2_train.append(r2_score(fm.predict(X_train), y_train, multioutput='variance_weighted'))
    r2_test.append(r2_score(fm.predict(X_test), y_test, multioutput='variance_weighted'))

from matplotlib import pyplot as plt

fig, axes = plt.subplots(ncols=3, figsize=(15, 4))
x = np.arange(1, n_iter) * step_size
with plt.style.context('fivethirtyeight'):
    axes[0].plot(x, rmse_test, label='RMSE-test', color='b')
    axes[1].plot(x, mae_test, label='MAE-test', color='r')
    axes[2].plot(x, r2_test, label='$R^2-test$', color='g')
    axes[0].set_ylabel('RMSE', color='b')
    axes[1].set_ylabel('MAE', color='r')
    axes[2].set_ylabel('$R^2$', color='g')
    axes[0].set_title('RMSE FM')
    axes[1].set_title('MAE FM')
    axes[2].set_title('$R^2$ FM')
    axes[0].legend()
    axes[1].legend()
    axes[2].legend()
fig.savefig('R2_RMSE_MAE_FM.png')

# FIX: these two assignments were swapped (min_mae_fm took min(rmse_test)
# and vice versa).
min_rmse_fm = min(rmse_test)
min_mae_fm = min(mae_test)
#from google.colab import files
#files.download('RMSE_MAE_FM.png')

# + [markdown] id="VnCkcNrdPGSF" colab_type="text"
# ### Zoom

# + id="sQsQN_l7PJJP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 330} outputId="bac07e56-621c-4cf4-81d9-16cc2557a895"
# Re-train from scratch over fewer iterations to zoom in on early convergence.
n_iter = 100
fm = als.FMRegression(n_iter=0, init_stdev=0.1, rank=8, random_state=123,
                      l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0)
# Allocates and initalizes the model parameter.
fm.fit(X_train, y_train)

rmse_train = []
rmse_test = []
mae_train = []
mae_test = []
r2_train = []
r2_test = []

for i in range(1, n_iter):
    fm.fit(X_train, y_train, n_more_iter=step_size)
    y_pred = fm.predict(X_test)
    rmse_train.append(np.sqrt(mean_squared_error(fm.predict(X_train), y_train)))
    rmse_test.append(np.sqrt(mean_squared_error(fm.predict(X_test), y_test)))
    mae_train.append(mean_absolute_error(fm.predict(X_train), y_train))
    mae_test.append(mean_absolute_error(fm.predict(X_test), y_test))
    r2_train.append(r2_score(fm.predict(X_train), y_train, multioutput='variance_weighted'))
    r2_test.append(r2_score(fm.predict(X_test), y_test, multioutput='variance_weighted'))

from matplotlib import pyplot as plt

fig, axes = plt.subplots(ncols=2, figsize=(15, 4))
x = np.arange(1, n_iter) * step_size
with plt.style.context('fivethirtyeight'):
    axes[0].plot(x, rmse_test, label='RMSE-test', color='b')
    axes[1].plot(x, mae_test, label='MAE-test', color='r')
    axes[0].set_ylabel('RMSE', color='b')
    axes[1].set_ylabel('MAE', color='r')
    axes[0].set_title('RMSE FM')
    axes[1].set_title('MAE FM')
    axes[0].legend()
    axes[1].legend()
fig.savefig('R2_RMSE_MAE_FM_zoom.png')

min_rmse_fm = min(rmse_test)
min_mae_fm = min(mae_test)

print('min_mae_fm:',min_mae_fm)
print('min_rmse_fm:',min_rmse_fm)
# FIX: lapse is a difference of time.time() values, which is already in
# seconds; the old lapse/1000 under-reported the training time by 1000x.
print('Lapse:',lapse,'segs')

# + [markdown] id="RbQRweugu8d_" colab_type="text"
# ## Data representation for baseline Experiments

# + id="Q8OuYu5RACnO" colab_type="code" colab={}
# FIX: take the 1-D column values; `date.values` is a (n, 1) 2-D array,
# which is not a valid column assignment.
reviews['ts'] = date['date'].values.astype(np.int64)

df_X_train, df_X_test, df_y_train, df_y_test = train_test_split(reviews, scores, test_size=0.3, random_state=42)

# for later use
df_y_train = df_y_train.to_frame()
df_y_train.columns = ['rating']
df_y_test = df_y_test.to_frame()
df_y_test.columns = ['rating']

# + id="8Y7pmjs09U6P" colab_type="code" outputId="c56b77a6-ce60-48c9-bdc4-4fb0b8c85634" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(len(df_X_train), len(df_X_test), type(df_y_train), type(df_y_test))

selected_columns = ['user_id','item_id','ts',
                    'vintage',
                    'type_encoded',
                    'producer_encoded',
                    'variety_encoded',
                    'designation_encoded',
                    'vineyard_encoded',
                    'country_encoded',
                    'region_encoded',
                    'subregion_encoded',
                    'appellation_encoded'
                   ]
df_X_train = df_X_train[selected_columns]
df_X_test = df_X_test[selected_columns]
#df_X_test.head()

# + id="i7FW5hB6FXgh" colab_type="code" outputId="54be4ac5-59cd-4962-f69f-7af3bb1817c5" colab={"base_uri": "https://localhost:8080/", "height": 216}
train_70 = pd.concat([df_X_train, df_y_train], axis=1)
train_30 = pd.concat([df_X_test, df_y_test], axis=1)
train_70.to_csv('train_70.csv', encoding='utf-8', index=False)
train_30.to_csv('train_30.csv', encoding='utf-8', index=False)
train_70.head()
wineRec_FM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Assignment 2 - Evapotranspiration
#
# - toc: false
# - badges: true
# - comments: false
# - categories: [jupyter]

# ## 📒 instructions
# This is where learning happens, not during a lecture. You'll learn a ton of things by doing them yourself. Much success! 😄
#
# Create a Jupyter Notebook called `assignment-02-IDNUMBER`, where `IDNUMBER` is your 9-digit ID. This is the only file we will check.
#
# ## 📌 locations and data
#
# **Choose two stations with different climates.**
#
# Go to NOAA's [Climate Reference Network Data](https://www.ncdc.noaa.gov/crn/qcdatasets.html) website. The sub-hourly (5-min) data contains information on
# * air temperature,
# * precipitation,
# * global solar radiation,
# * surface infrared temperature,
# * relative humidity,
# * soil moisture and temperature,
# * wetness, and
# * 1.5 meter wind speed.
#
# There is no data on air pressure, so one needs to use the station's coordinates (lat, lon) to find its height above sea level, and from that infer the air pressure. You can use Google Earth or any other means to find the station's height.
#
# In the Data Access link, choose a year and a station you would like to analyze. If you are not sure where the stations are, find them using the 2-letter state abbreviation and the station name.
#
# Download the following files:
# 1. One full year of data for each station. Make sure the important data we need to calculate Penman's ET estimation is available.
# 2. The headers file
# 3. The documentation file
#
# Make sure you understand what units are provided for each measurement (see documentation).
#
# ## 🛠 tasks
#
# Produce potential ET estimates using Thornthwaite's equation and Penman's equation.
# Produce plots of ET as a function of time for each station, comparing the two methods you used. # Also, using Penman's ET estimates, compare the two stations and discuss about their differences/similarities. # # You might find interesting things in the data, such as periods of unusually high/low temperatures, radiation, etc. Discuss how these factors might have affected the ET estimates that you calculated. # # You will have **two weeks** to deliver your assignment. You should **not** hand in a dry document with only figures and code, I'm expecting text before and after each code/graph cell, explaining what you did, why you did it, and how it fits the story you are telling. Don't forget to put labels on your plot axes, title, legend, etc. # # Your Jupyter Notebook should be **fully functional**: if we press `Kernel > Restart & Run All`, all the code must work without any errors. # # ## 🌅 presentation # All the assignment must be in **one single** Jupyter Notebook. Use markdown cells to discuss the analysis and results, and in code cells show **all the code** you used to produce the figures and data analysis. Leave only the code necessary for your analysis, delete unnecessary lines your wrote while analyzing your data. Don't forget to comment your code, just like we did during exercise sessions. # # You can write in English or in Hebrew, but the text in the figures must be in English. If you choose to write the discussion in Hebrew, be aware that Jupyter Notebooks don't have native right-to-left language support: # # ניתן לכתוב בעברית, למרות שזה לא נראה כ״כ טוב... # # You can use some HTML code to achieve best results in Hebrew. Type the following # ```html # <p dir="rtl" style="text-align: right;"> # עכשיו הרבה יותר טוב! # </p> # ``` # to get # <p dir="rtl" style="text-align: right;"> # עכשיו הרבה יותר טוב! # </p> # # If you have many paragraphs in hebrew, do the following: # <p dir="rtl" style="text-align: right;"> # פסקה מספר 1. 
# </p> # # <p dir="rtl" style="text-align: right;"> # פסקה מספר 2. # </p> # # # <p dir="rtl" style="text-align: right;"> # אם יש לכם כמה פסקאות, כל אחת מהן תהיה בתוך "dir" משלה # </p> # # In my opinion it is too complicated to write in Hebrew in Jupyter Notebooks, just write in English, your grade will not be affected by typos nor less-than-perfect English proficiency. # # ## 💯 evaluation # # Your assignment will be evaluated according to the following criteria: # * 40% Presentation. How the graphs look, labels, general organization, markdown, clean code. # * 30% Discussion. This is where you explain what you did, what you found out, etc. # * 15% Depth of analysis. You can analyze/explore the data with different levels of complexity, this is where we take that into consideration. # * 10% Replicability: Your code runs flawlessly. # * 5%: Code commenting. Explain in your code what you are doing, this is good for everyone, especially for yourself! # # # ## 🚚 importing the data # # Below you can find an example of how to import the data file provided by NOAA's Climate Reference Network Data website. You might have to make some adjustments to it. 
# # ```python # data_file = "CRNS0101-05-2020-CO_Boulder_14_W.txt" # df = pd.read_csv(data_file, # header=None, # no headers needed, we'll do that later # delim_whitespace=True, # blank spaces separate between columns # na_values=["-99.000", "-9999.0"] # substitute these values for missing (NaN) values # ) # headers = pd.read_csv("HEADERS_sub_hourly.txt", # load headers file # header=1, # skip the first [0] line # delim_whitespace=True # ) # df.columns = headers.columns # rename df columns with headers columns # # LST = local standard time # df["LST_TIME"] = [f"{x:04d}" for x in df["LST_TIME"]] # time needs padding of zeros, then convert to string # df['LST_DATE'] = df['LST_DATE'].astype(str) # convert date into string # df['datetime'] = df['LST_DATE'] + ' ' + df['LST_TIME'] # combine date+time into datetime # df['datetime'] = pd.to_datetime(df['datetime']) # interpret datetime # df = df.set_index('datetime') # make datetime the index # df # ```
_notebooks/2020-02-03-assignment-02-ET.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # K- Means Clustering # by *<NAME>* # ## Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline from sklearn import datasets # ## Load the iris dataset iris = datasets.load_iris() iris_df = pd.DataFrame(iris.data, columns = iris.feature_names) iris_df.head() iris_df.describe() # ## Finding the optimum number of clusters for k-means classification # > ### Elbow method # # > The basic idea behind partitioning methods, such as k-means clustering, is to define clusters such that the total intra-cluster variation or total within-cluster sum of square WSS is minimized. The total WSS measures the compactness of the clustering and we want it to be as small as possible. # # The Elbow method looks at the total WSS as a function of the number of clusters: One should choose a number of clusters so that adding another cluster doesn’t improve much better the total WSS. # # The optimal number of clusters can be defined as follow: # # 1. Compute clustering algorithm (e.g., k-means clustering) for different values of k. For instance, by varying k from 1 to 10 clusters. # 2. For each k, calculate the total within-cluster sum of square (wss). # 3. Plot the curve of wss according to the number of clusters k. # 4. The location of a bend (knee) in the plot is generally considered as an indicator of the appropriate number of clusters. # Now we will implement 'The elbow method' on the Iris dataset. The elbow method allows us to pick the optimum amount of clusters for classification. although we already know the answer is 3 it is still interesting to run. 
# + x = iris_df.iloc[:, [0, 1, 2, 3]].values from sklearn.cluster import KMeans wcss = [] #this loop will fit the k-means algorithm to our data and #second we will compute the within cluster sum of squares and #appended to our wcss list. for i in range(1, 11): kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) #i above is between 1-10 numbers. init parameter is the random #initialization method #we select kmeans++ method. max_iter parameter the maximum number of iterations there can be to #find the final clusters when the K-meands algorithm is running. we #enter the default value of 300 #the next parameter is n_init which is the number of times the #K_means algorithm will be run with #different initial centroid. kmeans.fit(x) #kmeans algorithm fits to the X dataset wcss.append(kmeans.inertia_) #kmeans inertia_ attribute is: Sum of squared distances of samples #to their closest cluster center. # - # Plotting the results onto a line graph, # `allowing us to observe 'The elbow' plt.plot(range(1, 11), wcss) plt.title('The elbow method') plt.xlabel('Number of clusters') plt.ylabel('WCSS') # Within cluster sum of squares plt.show() # You can clearly see why it is called 'The elbow method' from the above graph, the optimum clusters is where the elbow occurs. This is when the within cluster sum of squares (WCSS) doesn't decrease significantly with every iteration. Now that we have the optimum amount of clusters, we can move on to applying K-means clustering to the Iris dataset. # ## Applying kmeans to the dataset / Creating the kmeans classifier kmeans = KMeans(n_clusters = 3, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) y_kmeans = kmeans.fit_predict(x) # > We are going to use the fit predict method that returns for each observation which cluster it belongs to. 
The cluster to which client belongs and it will return this cluster numbers into a single vector that is called y K-means # ## Visualising the clusters - On the first two columns # + plt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Iris-setosa') plt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1], s = 100, c = 'green', label = 'Iris-versicolour') plt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1], s = 100, c = 'blue', label = 'Iris-virginica') # Plotting the centroids of the clusters plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1], s = 100, c = 'yellow', label = 'Centroids') plt.legend() # - # > ## Average Silhouette Analysis # > The average silhouette approach we’ll be described comprehensively in the chapter cluster validation statistics. Briefly, it measures the quality of a clustering. That is, it determines how well each object lies within its cluster. A high average silhouette width indicates a good clustering. # # > Average silhouette method computes the average silhouette of observations for different values of k. The optimal number of clusters k is the one that maximize the average silhouette over a range of possible values for k (Kaufman and Rousseeuw 1990). # # The algorithm is similar to the elbow method and can be computed as follow: # # 1. Compute clustering algorithm (e.g., k-means clustering) for different values of k. For instance, by varying k from 1 to 10 clusters. # 2. For each k, calculate the average silhouette of observations (avg.sil). # 3. Plot the curve of avg.sil according to the number of clusters k. # 4. The location of the maximum is considered as the appropriate number of clusters. 
from sklearn.datasets import load_iris iris = load_iris() X = iris['data'][:, 1:3] # + from __future__ import print_function from sklearn.datasets import make_blobs from sklearn.cluster import KMeans from sklearn.metrics import silhouette_samples, silhouette_score import matplotlib.pyplot as plt import matplotlib.cm as cm import numpy as np print(__doc__) # Generating the sample data from make_blobs # This particular setting has one distinct cluster and 3 clusters placed close # together. X, y = make_blobs(n_samples=500, n_features=2, centers=4, cluster_std=1, center_box=(-10.0, 10.0), shuffle=True, random_state=1) # For reproducibility range_n_clusters = [2, 3, 4, 5, 6, 7, 8] for n_clusters in range_n_clusters: # Create a subplot with 1 row and 2 columns fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 7) # The 1st subplot is the silhouette plot # The silhouette coefficient can range from -1, 1 but in this example all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) # Initialize the clusterer with n_clusters value and a random generator # seed of 10 for reproducibility. clusterer = KMeans(n_clusters=n_clusters, random_state=10) cluster_labels = clusterer.fit_predict(X) # The silhouette_score gives the average value for all the samples. 
# This gives a perspective into the density and separation of the formed # clusters silhouette_avg = silhouette_score(X, cluster_labels) print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(X, cluster_labels) y_lower = 10 for i in range(n_clusters): # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them ith_cluster_silhouette_values = \ sample_silhouette_values[cluster_labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.nipy_spectral(float(i) / n_clusters) ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=0.7) # Label the silhouette plots with their cluster numbers at the middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i)) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples ax1.set_title("The silhouette plot for the various clusters.") ax1.set_xlabel("The silhouette coefficient values") ax1.set_ylabel("Cluster label") # The vertical line for average silhouette score of all the values ax1.axvline(x=silhouette_avg, color="red", linestyle="--") ax1.set_yticks([]) # Clear the yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) # 2nd Plot showing the actual clusters formed colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters) ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=colors, edgecolor='k') # Labeling the clusters centers = clusterer.cluster_centers_ # Draw white circles at cluster centers ax2.scatter(centers[:, 0], centers[:, 1], marker='o', c="white", alpha=1, s=200, edgecolor='k') for i, c in enumerate(centers): ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50, edgecolor='k') ax2.set_title("The visualization of the clustered 
data.") ax2.set_xlabel("Feature space for the 1st feature") ax2.set_ylabel("Feature space for the 2nd feature") plt.suptitle(("Silhouette analysis for KMeans clustering on sample data " "with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold') plt.show() # - # Silhouette coefficients (as these values are referred to as) near +1 indicate that the sample is far away from the neighboring clusters. A value of 0 indicates that the sample is on or very close to the decision boundary between two neighboring clusters and negative values indicate that those samples might have been assigned to the wrong cluster. # #
Task-3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Scikit learn, supervised learning, part 1 # + import numpy as np import scipy import sklearn import matplotlib.pyplot as plt # %matplotlib inline # - mnist = np.loadtxt("../data/mnist_train.csv", delimiter=",", skiprows=1) # + X = mnist[:10000, 1:] y = mnist[:10000, 0] print X.shape # - def plot_roc_auc(y_score, y_test): from sklearn.metrics import roc_curve, auc # Compute micro-average ROC curve and ROC area fpr = dict() tpr = dict() roc_auc = dict() for i in range(10): fpr[i], tpr[i] , _ = roc_curve(np.where(y_test == i, 1, 0).ravel(), y_score[:, i].ravel()) roc_auc[i] = auc(fpr[i], tpr[i]) ############################################################################## # Plot of a ROC curve for a specific class plt.figure(num=None, figsize=(14, 7), dpi=400) for i in range(10): plt.plot(fpr[i], tpr[i], label='ROC curve for %d (area = %0.2f)' % (i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") return plt.show() # ## Some utilities # + from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=333) # - from sklearn.metrics import auc, accuracy_score # ## Naive Bayes # + from sklearn.naive_bayes import GaussianNB gnb = GaussianNB() # + # %%time gnb.fit(X_train, y_train) y_predicted = gnb.predict(X_test) # - accuracy_score(y_predicted, y_test) # + y_score = gnb.predict_proba(X_test) plot_roc_auc(y_score, y_test) # - # Note how NB is making prediction only in {0, 1}. Why does that happen? 
# ## kNN from sklearn.neighbors import KNeighborsClassifier # + # %%time knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train) y_predicted = knn.predict_proba(X_test) # - plot_roc_auc(y_predicted, y_test) y_predicted = knn.predict(X_test) accuracy_score(y_predicted, y_test) # ### Searching for optimal k # + X_train_, X_validation, y_train_, y_validation = train_test_split(X_train, y_train, test_size=0.8) ks = np.arange(1, 10) acc = np.zeros(ks.shape) for i in range(ks.shape[0]): y_predicted = KNeighborsClassifier(n_neighbors=ks[i]).fit(X_train_, y_train_).predict(X_validation) acc[i] = accuracy_score(y_predicted, y_validation) # - plt.plot(ks, 1.0 - acc) plt.title("kNN error depending on k") # ### Nearest Neighbor algoritms # - KDTree # - BallTree # - brute force # ## Metrics # Possible metrics are closely related to Nearest Neghbor algorithms, for tree-like algorithms it should satisfy triangle inequality: # $$|x + y| \leq |x| + |y|$$ # ## Regression N = 20 xs = np.linspace(0, 1, num=N).reshape((N, 1)) ys = xs * 0.75 + np.random.standard_normal(size=N).reshape((N, 1)) * 0.1 plt.scatter(xs, ys) # ### Ordinary Least Squares from sklearn.linear_model import LinearRegression lsr = LinearRegression().fit(xs, ys) lsr.coef_ test_xs = np.linspace(0, 1.0, num=100).reshape((100, 1)) test_ys = lsr.predict(test_xs) plt.scatter(xs, ys) plt.plot(test_xs, test_ys) # ### Ridge regression # + import scipy.special xs = np.linspace(0, 1, 50) signal = scipy.special.jv(0.0, xs * 20) ys = signal + np.random.standard_normal(size=xs.shape[0]) * 0.1 # - plt.scatter(xs, ys) plt.plot(xs, signal) plt.title("Bessel function $J_0(x)$") def make_cosine_basis(xs, k = 5): js = np.arange(k) A = np.cos(np.outer(xs, js)) return A # + k = 30 X = make_cosine_basis(xs, k = k) xs_test = np.linspace(0, 1, num=100) X_test = make_cosine_basis(xs_test, k = k) # + from sklearn.linear_model import LinearRegression lsr = LinearRegression(fit_intercept=False).fit(X, ys) # - ys_lsr = 
lsr.predict(X_test) plt.figure(None, figsize=(12, 6)) plt.plot(xs, signal, "--") plt.scatter(xs, ys) plt.plot(xs_test, ys_lsr) # + from sklearn.linear_model import Ridge alphas = np.linspace(0, 2, num=5) ridge = [Ridge(alpha=alpha, fit_intercept=False).fit(X, ys) for alpha in alphas] # + test_ys = [ reg.predict(X_test) for reg in ridge ] plt.figure(figsize=(16, 8)) for test_y, alpha in zip(test_ys, alphas): plt.plot(xs_test, test_y, label="alpha=%f" % alpha) plt.scatter(xs, ys) plt.plot(xs, signal, "--") plt.legend(loc="lower left") # - # ### LASSO # + from sklearn.linear_model import Lasso alphas = np.linspace(0.001, 0.02, num=5) ridge = [Lasso(alpha=alpha, fit_intercept=False).fit(X, ys) for alpha in alphas] # + test_ys = [ reg.predict(X_test) for reg in ridge ] plt.figure(figsize=(16, 8)) for test_y, alpha in zip(test_ys, alphas): plt.plot(xs_test, test_y, label="alpha=%f" % alpha) plt.scatter(xs, ys) plt.plot(xs, signal, "--") plt.legend(loc="lower left") # - # ### Elastic Net # + from sklearn.linear_model import ElasticNet alpha = 0.05 en = ElasticNet(alpha=alpha, fit_intercept=False).fit(X, ys) ridge = Ridge(alpha=alpha, fit_intercept=False).fit(X, ys) lasso = Lasso(alpha=alpha, fit_intercept=False).fit(X, ys) # - plt.plot(xs_test, en.predict(X_test), label="elastic net") plt.plot(xs_test, lasso.predict(X_test), label="lasso") plt.plot(xs_test, ridge.predict(X_test), label="ridge") plt.plot(xs, signal, "--", label="true") plt.legend(loc="upper right") # + coef_x = np.arange(en.coef_.shape[0]) * 5 plt.figure(figsize=(12, 8)) plt.bar(coef_x, ridge.coef_, color="blue", label="Ridge") plt.bar(coef_x + 1, en.coef_, color="red", label="Elastic net") plt.bar(coef_x + 2, lasso.coef_, color="green", label="Lasso") plt.legend() # - np.where(lasso.coef_ != 0.0) # ### Linear Discrete Analysis and Quadratic Discrete Analysis # + from sklearn.lda import LDA from sklearn.qda import QDA X = mnist[:10000, 1:] y = mnist[:10000, 0] # + from sklearn.cross_validation import 
train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=333) # + lda = LDA(n_components=2) X_2d = lda.fit_transform(X_train, y_train) # + plt.figure(figsize=(16, 8)) colors = 'bgrcmykbgrcmykbgrcmykbgrcmyk' for i, color in zip(range(10), colors): plt.scatter(X_2d[y_train == i, 0], X_2d[y_train == i, 1], color=color, label="%d" % i) plt.legend() # + lda8 = LDA(n_components=8) X_train8 = lda8.fit_transform(X_train, y_train) X_test8 = lda8.transform(X_test) y_score_lda = lda8.predict(X_test) # - y_score_nb8 = GaussianNB().fit(X_train8, y_train).predict(X_test8) y_score_nb = GaussianNB().fit(X_train, y_train).predict(X_test) y_score_knn = KNeighborsClassifier(n_neighbors=3).fit(X_train, y_train).predict(X_test) y_score_knn8 = KNeighborsClassifier(n_neighbors=2).fit(X_train8, y_train).predict(X_test8) print "LDA:", accuracy_score(y_score_lda, y_test) print "NB:", accuracy_score(y_score_nb, y_test) print "LDA PCA + NB:", accuracy_score(y_score_nb, y_test) print "kNN:", accuracy_score(y_score_knn, y_test) print "LDA PCA + kNN:", accuracy_score(y_score_knn8, y_test) np.sum(y_score_nb - y_score_nb8) # ## Logistic regression # + from sklearn.linear_model import LogisticRegression lr = LogisticRegression(penalty="l2", C=1.0) lr.fit(X_train8, y_train) # - y_score = lr.predict(X_test8) accuracy_score(y_score, y_test) # ## SVM and RKHS # + from sklearn.svm import LinearSVC linear_svc = LinearSVC().fit(X_train, y_train) # - y_predicted = linear_svc.predict(X_test) accuracy_score(y_predicted, y_test) # + from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_predicted, y_test) plt.figure(figsize=(16, 8)) plt.imshow(cm, interpolation="none") plt.colorbar() # + from sklearn.svm import SVC svc = SVC(kernel="rbf", gamma=0.001).fit(X_train, y_train) # - y_predicted = svc.predict(X_test) accuracy_score(y_predicted, y_test) # + cm = confusion_matrix(y_predicted, y_test) plt.figure(figsize=(16, 8)) plt.imshow(cm, 
interpolation="none") plt.colorbar() # - # # Exersices # - compare time of different neighbor algorithms in kNN for MNIST dataset. Can we say something about the structure of the data? # # - perform non-linear regression on the example ($J_0(x)$) using additional phase parameter: # $f_k = \cos(k x + \theta)$ # - make a sparse Fourier transformation of MNIST data set # - compare the results of kNN and Naive Bayes on transformed dataset with the same obtained by PCA (see previous lecture) # - How does spectrum change after applying PCA? # - use Mahalanobis distance for kNN: $\rho^2(x, y) = x \Sigma y^T$, optimize it with respect to kNN score; how to make $\Sigma$ sparse? # - visualize different SVM kernels for one of previously found 2D projections of MNIST.
lecture2/Sklearn_supervised_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1>Boston Using Neural</h1> # <h6><NAME>ari</h6> # Date: 29 July 2019 # + colab={} colab_type="code" id="d9f76ouSuVFF" import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # + colab={} colab_type="code" id="lyIbul6furTC" from sklearn.datasets import load_boston # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="OSeLqG7puxTz" outputId="b39e6398-67cf-48bd-8cde-77068365a5c9" dataset=load_boston() dataset.keys() # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="mp2SeAf5u3ef" outputId="a6b16935-797d-40a4-cdfa-3f735d25630c" dataset['data'].shape # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="CICgMZTju7Eq" outputId="2b6e21b1-11ab-46a0-d25d-9310f55d09c7" dataset['target'].shape # + colab={} colab_type="code" id="eMDsMy3IvGFf" import keras from keras.layers import Dense,Activation from keras.models import Sequential # + colab={"base_uri": "https://localhost:8080/", "height": 106} colab_type="code" id="gz1OkENLviH0" outputId="dc2a1100-e04c-4c08-87e6-32e32135b0cb" import pandas as pd df=pd.DataFrame(dataset['data']) df.head(2) # + colab={} colab_type="code" id="E_UqsKp7vnOE" from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test= train_test_split(dataset['data'], dataset['target']) print(X_train.shape,X_test.shape) print(y_train.shape,y_test.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="_-ZjOWCZwOL3" outputId="0c241ea9-256b-4d51-b140-fc3707d45067" model=Sequential() model.add(Dense(10000, input_shape=(13,))) model.add(Activation('linear')) model.add(Dense(1)) model.add(Activation('linear')) model.summary() # + colab={} colab_type="code" id="AmtiT9tBwrEi" 
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mse']) # + colab={"base_uri": "https://localhost:8080/", "height": 722} colab_type="code" id="BIWu0oxxwuvM" outputId="025458dc-f8b4-4fc2-8245-f0b1d2f71a9b" model.fit(X_train,y_train,batch_size= 1 ,epochs=20, verbose=2, validation_data = (X_test,y_test)) # + colab={} colab_type="code" id="KnZnTzcxyCpe"
Basic Projects/Boston Using Neural.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.6 64-bit (''venv'': venv)' # language: python # name: python3 # --- import sys sys.path.insert(0, '..') import pandas as pd from bcb import Expectativas em = Expectativas() em.describe() em.describe('ExpectativasMercadoAnuais') ep = em.get_endpoint('ExpectativasMercadoAnuais') df_ipca_2021 = (ep.query() .filter(ep.Indicador == 'IPCA') .filter(ep.Data >= '2021-01-01', ep.Data <= '2021-12-31') .filter(ep.DataReferencia == '2021', ep.baseCalculo == 1) .orderby(ep.Data.asc()) .select(ep.Data, ep.Media, ep.Mediana) .collect()) df_ipca_2021.head() df_ipca_2021['Data'] = pd.to_datetime(df_ipca_2021['Data']) df_ipca_2021 = df_ipca_2021.set_index('Data') df_ipca_2021.plot(figsize=(16,6));
notebooks/expectativas IPCA anual.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="bs5dRVOjZ6pT" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="2801a080-a77a-4e62-d58c-732d318416b3" from google.colab import drive drive.mount('/data/') data_dir = '/data/My Drive/Colab Notebooks/FEC dataset' # !ls '/data/My Drive/Colab Notebooks/FEC dataset' # !pip install matplotlib # + id="uivLBlKyuC2V" import zipfile zip = zipfile.ZipFile(data_dir+'/indiv20.zip') #zip.namelist() # + id="m-0PQq0Oufje" colab={"base_uri": "https://localhost:8080/", "height": 428} outputId="b79a482b-6444-49f8-f88a-d79975a0442c" import pandas as pd header = pd.read_csv(data_dir+'/indiv_header_file.csv') data=pd.read_csv(zip.open('by_date/itcont_2020_20200630_20300630.txt'), sep='|', names=header.columns) data.head() # + id="6PB4UgTa1Bih" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5339e7e6-3336-494c-edb9-cfd45fa8cee4" print(data['TRANSACTION_AMT'].max()) # + id="owm7xZS11HEB" colab={"base_uri": "https://localhost:8080/", "height": 326} outputId="6769a41a-fe29-4f5b-acab-71ab9a292fd8" sort_amt = data.sort_values(by='TRANSACTION_AMT', ascending=False) sort_amt.head() # + id="WuJLWjlA29OT" df = pd.DataFrame(data, columns=['CMTE_ID', 'NAME', 'CITY', 'STATE', 'ZIP_CODE', 'EMPLOYER', 'OCCUPATION', 'TRANSACTION_DT', 'TRANSACTION_AMT']) # + id="I9CI-mdn29Fq" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="5c059bc9-6a43-4365-db85-a24049a7568b" from zipfile import ZipFile import pandas as pd header = pd.read_csv(data_dir+'/cn_header_file.csv') with ZipFile(data_dir+'/cn20.zip') as zip: candidates = pd.read_csv(zip.open('cn.txt'), sep='|', names=header.columns) candidates.head() # + id="uF9YJ-SQ6psu" candidates_final = pd.DataFrame(candidates, columns=['CAND_ID', 'CAND_PTY_AFFILIATION']) # + id="H4_26uJ23RXX" 
colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="543f7301-8524-4da0-85da-b3f3c2a4e6a2" header = pd.read_csv(data_dir+'/ccl_header_file.csv') with ZipFile(data_dir+'/ccl20.zip') as zip: #print(zip.namelist()) linkage = pd.read_csv(zip.open('ccl.txt'), sep='|', names=header.columns) linkage.head() # + id="wyK3OZ3y7Srb" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="632f3820-90b7-4759-99e7-089714c8243b" df_merge = pd.merge(candidates_final, linkage, on='CAND_ID') df_merge.head() # + id="8i2m3TRG3QWd" # + id="8ICMtX8B3TZi" sort_amt.dropna(subset = ["EMPLOYER", "OCCUPATION"], inplace=True) # + id="jNiWeB9J3TN7" colab={"base_uri": "https://localhost:8080/", "height": 343} outputId="7d11d265-7680-4c8f-ccba-dc19ec310ec3" sort_amt.head() # + id="ZpDCZF044orD" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="43935d99-c06b-4fcd-e723-4a5c5affe85d" sort_amt[sort_amt['OCCUPATION']=='EXECUTIVE']['EMPLOYER'].describe() # + id="hyiY1HCz4oaE" # + id="k0qWZ1iFAlCE" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="490d0372-c2ff-4de9-8ad7-77783c72dd63" df_newdup = df[(df['EMPLOYER'].duplicated()) & (df['EMPLOYER']!='NOT EMPLOYED') & (df['EMPLOYER']!='RETIRED')] df_newdup.head() # + id="0OznZbEdSjB1" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="91335d8e-6ce1-44c8-c9ba-1a7856cacdcb" set(df_newdup['EMPLOYER']) # + id="UTIiuGxW938o" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7ef9a292-4ce5-4a57-c2c1-73d96de700e9" len(set(df_newdup['EMPLOYER'])) # + id="BTZ4nqIiaA55" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="7bf205a2-b579-4044-b982-c41ec5790f23" df_newdup.dropna(subset = ["EMPLOYER"], inplace=True) # + id="yUEct7Y5XyPf" colab={"base_uri": "https://localhost:8080/", "height": 979} outputId="b2bd54a6-52f1-43d2-cb48-71dd48bd9c81" df_aero = df_newdup[df_newdup['EMPLOYER'].str.contains('AEROSPACE CORPORATION')] df_aero # + 
id="n1Em1PItYDEH" colab={"base_uri": "https://localhost:8080/", "height": 758} outputId="6fc5d917-59ed-4b2b-8b0e-aebf68ebcfb9" df_aero_merge = pd.merge(df_bob, df_merge, on='CMTE_ID') df_aero_merge # + id="4qQlV6tnlega" CD = ['CA-37', 'CA-47', 'CA-47', 'CA-33', 'MD-03', 'FL-08', 'VA-05', 'VA-05', 'CO-05', 'VA-05', 'CO-05', 'CO-05', 'AZ-07'] # + id="PPqAAylhx0mj" df_aero_merge['CD'] = CD # + id="Ls7gegLCyafO" df_aero_merge = df_aero_merge.drop(columns=['CAND_ELECTION_YR', 'FEC_ELECTION_YR', 'CMTE_TP', 'CMTE_DSGN']) # + id="Sn-H_tkYzBcc" colab={"base_uri": "https://localhost:8080/", "height": 673} outputId="b9c7ea40-2dcf-4b44-a00b-f4b7caf32df4" df_aero_final = pd.merge(df_aero_merge, trends, on ='CD', how ='inner') df_aero_final # + id="R1wEhe8Rzaa_" df_aero_final['INDEX']= [1 if x =='DEM' else 0 for x in df_aero_final['CAND_PTY_AFFILIATION']] # + id="Ix6G1cxzzuUA" df_aero_final['INDEX_BOSS']=1 # + id="dQqF_pwqldjH" colab={"base_uri": "https://localhost:8080/", "height": 673} outputId="4a60d88a-2393-487c-f12e-9f420683be30" df_aero_final # + id="JZdHyMWo0Pbl" subset2 = df_aero_final[['INDEX','INDEX_BOSS']] # + id="4-m6o5ek0Pup" from sklearn.linear_model import LinearRegression # + id="UQeN6gFQ0CoX" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b5f639bc-9878-4fd2-bfec-445ecc3aba55" linear_regressor = LinearRegression() from sklearn.preprocessing import MinMaxScaler scaler1 = MinMaxScaler() scaler1.fit(subset2) inner_join_scaled=scaler1.transform(subset2) x = inner_join_scaled[:,0].reshape(-1,1) y = inner_join_scaled[:,1].reshape(-1,1) linear_regressor.fit(x, y) # + id="kdgNzW9Q0k7v" # + id="vqZW4AL43o0r" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="76681360-3984-4485-f175-2fa4f4713437" df4 = df_newdup[df_newdup['EMPLOYER'].str.contains('AT&T')] df4 # + id="YvlPckei7uEA" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="757de2e5-e546-4bd7-d851-5a318d8008a0" df5 = pd.merge(df4, df_merge, on='CMTE_ID') 
df5.tail() # + id="ptU7ClSQEgW3" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="add4cc5d-fe11-4bd7-ecab-c93ceb212d1a" df_biogen = df_newdup[df_newdup['EMPLOYER'].str.contains('BIOGEN')] df_biogen # + id="gZz7kaXIEfwZ" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="18a45932-d5ca-4591-824b-41403a07cdf3" df6 = pd.merge(df_biogen, df_merge, on='CMTE_ID') df6 # + id="tYTdUPrlFKJn" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="d5fbed25-b24a-452c-8a0f-caa683c07d94" df6[df6['OCCUPATION'].str.contains('DIRECTOR')]['TRANSACTION_DT'] # + id="4nct8sKi6ZRo" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="531fd6c2-e159-4311-98b3-a6c5766583dc" df6[df6['OCCUPATION'].str.contains('VP')]['TRANSACTION_DT'] # + id="TM9ctAKSHBGV" CD = ['MA-05', 'MA-03', 'MA-03', 'MA-03', 'MA-03', 'MA-05', 'MA-05', 'MA-05', 'MA-05', 'MA-07', 'TN-09', 'MA-05', 'MA-05', 'WA-10', 'TX-20', 'MA-08', 'MA-05', 'TN-09', 'MA-07', 'MA-07', 'MA-05', 'MA-04', 'MA-04', 'MA-07', 'MA-05', 'MI-03'] # + id="uqfKk_S8Hgr_" df6['CD'] = CD # + id="eJkDmYpJ6ZBr" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="deaf95cb-90c6-4a6e-86bd-6f9a89bd20e4" df7 = df6.drop(columns=['CAND_ELECTION_YR', 'FEC_ELECTION_YR', 'CMTE_TP', 'CMTE_DSGN']) df7 # + id="Mw4bIeWOOoaE" trends = pd.read_excel(data_dir+'/CD_trends.xlsx') # + id="VMqR9ED6OoOW" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="6b45fe0d-9ae7-49d0-e8fc-0febc8350755" trends.head() # + id="qg5DNqhaGyzR" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="d37d3813-eaeb-4d10-b202-c12a21ec5f6e" inner_join = pd.merge(df7, trends, on ='CD', how ='inner') inner_join # + id="a2v3lRkYTjag" inner_join['INDEX']= [1 if x =='DEM' else 0 for x in inner_join['CAND_PTY_AFFILIATION']] # + id="Objszz9NSHe1" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a33bc488-4ac4-42af-c4a1-bfeeda4caad7" inner_join # + 
id="XL7glHAoSHM3" inner_join['INDEX_BOSS'] = 1 # + id="u2BIIZl3RYg1" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a0f10c04-3eb4-4294-a054-bb8f4275471e" inner_join # + id="rW1ZZvZwYZ2s" subset2 = inner_join[['INDEX','INDEX_BOSS']] # + id="M7vLCa_-dmhg" from sklearn.linear_model import LinearRegression # + id="apb76xJfYP-w" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bd90bb55-85ee-4d15-b585-3c3b1406d16c" linear_regressor = LinearRegression() from sklearn.preprocessing import MinMaxScaler scaler1 = MinMaxScaler() scaler1.fit(subset2) inner_join_scaled=scaler1.transform(subset2) x = inner_join_scaled[:,0].reshape(-1,1) y = inner_join_scaled[:,1].reshape(-1,1) linear_regressor.fit(x, y) # + id="Q33krPq74eVs"
assets/EMSE6574/Week3_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="xu2SVpFJjmJr" # # DeepDreaming with TensorFlow # + [markdown] colab_type="text" id="eALNdPq7iH9a" # This example has moved. # # * [TensorFlow 2.0 version](https://tensorflow.org/en/beta/tutorials/generative/deepdream.ipynb) # * [The Original](https://github.com/tensorflow/examples/tree/master/community/en/r1/deepdream.ipynb)
tensorflow/examples/tutorials/deepdream/deepdream.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Django Shell-Plus # language: python # name: django_extensions # --- from cast.forms import PostForm form = PostForm() lines = "foo bar baz blub".split("\n") for line in lines: start, *parts = line.split()
notebooks/chaptermarks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- import numpy as np import pandas as pd from pandas import Series, DataFrame dframe = pd.read_csv('lect25.csv') dframe dframe = pd.read_csv('lect25.csv', header= None) dframe dframe = pd.read_table('lect25.csv', sep=',', header = None) dframe pd.read_csv('lect25.csv', header=None, nrows=2) dframe.to_csv('mytextdata_out.csv') pd.read_csv('mytextdata_out.csv') import sys dframe.to_csv(sys.stdout) dframe.to_csv(sys.stdout, sep='_') dframe.to_csv(sys.stdout, sep='?') dframe.to_csv(sys.stdout, columns=[0,1,2])
l5/Lecture 25 - Reading and Writing Text Files.ipynb
# --- jupytext notebook header ---
# jupyter: jupytext: text_representation: extension .py, format_name light,
#   format_version '1.5', jupytext_version 1.14.4
# kernelspec: display_name "Python 3", language python, name python3
# ---

# PySpark DataFrame operations walkthrough: build Rows, union DataFrames,
# round-trip through Parquet, explode nested columns, filter/where, fillna,
# aggregate, and query via a registered temp table.

# +
# import pyspark class Row from module sql
from pyspark.sql import *

# NOTE(review): "spark.some.config.option" is the placeholder key from the
# Spark docs; it has no effect here.
spark = SparkSession \
    .builder \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()

# Create Example Data - Departments and Employees

# Create the Departments (keyword-style Rows: fields are named directly).
department1 = Row(id='123456', name='Computer Science')
department2 = Row(id='789012', name='Mechanical Engineering')
department3 = Row(id='345678', name='Theater and Drama')
department4 = Row(id='901234', name='Indoor Recreation')

# Create the Employees.  Row(...) with positional field names acts as a
# reusable record constructor ("Employee" behaves like a 4-field schema).
Employee = Row("firstName", "lastName", "email", "salary")
employee1 = Employee('michael', 'armbrust', '<EMAIL>', 100000)
employee2 = Employee('xiangrui', 'meng', '<EMAIL>', 120000)
employee3 = Employee('matei', None, '<EMAIL>', 140000)
employee4 = Employee(None, 'wendell', '<EMAIL>', 160000)

# Create the DepartmentWithEmployees instances from Departments and Employees
# (nested Rows: a department plus a list of employee Rows).
departmentWithEmployees1 = Row(department=department1, employees=[employee1, employee2])
departmentWithEmployees2 = Row(department=department2, employees=[employee3, employee4])
departmentWithEmployees3 = Row(department=department3, employees=[employee1, employee4])
departmentWithEmployees4 = Row(department=department4, employees=[employee2, employee3])

print('department1: ', department1)
print('employee2: ', employee2)
# Nested access: Row attributes chain through the employees list.
print('email-id of first employee in departmentWithEmployees1: ', departmentWithEmployees1.employees[0].email)

departmentsWithEmployeesSeq1 = [departmentWithEmployees1, departmentWithEmployees2]
df1 = spark.createDataFrame(departmentsWithEmployeesSeq1)
df1.show()

departmentsWithEmployeesSeq2 = [departmentWithEmployees3, departmentWithEmployees4]
df2 = spark.createDataFrame(departmentsWithEmployeesSeq2)
df2.show()
# -

# NOTE(review): unionAll is the legacy alias of union (deprecated in Spark 2+);
# behavior is the same positional, duplicate-keeping union.
unionDF = df1.unionAll(df2)
unionDF.show()

# Persist the unioned frame to Parquet and read it back (Windows-style local
# file URI; path is machine-specific).
unionDF.write.parquet("file:/E:/code/git-2018/ETL-Workflow/ETL-Examples/src/main/python/resources/df-example.parquet")
parquetDF = spark.read.parquet("file:/E:/code/git-2018/ETL-Workflow/ETL-Examples/src/main/python/resources/df-example.parquet")
parquetDF.show()

# +
from pyspark.sql import Row
from pyspark.sql.functions import explode

# explode on an array yields one row per element; on a map it yields
# one (key, value) row per entry.
eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
eDF.select(explode(eDF.intlist).alias("anInt")).show()
eDF.select(explode(eDF.mapfield).alias("key", "value")).show()

# +
# Flatten the nested employees array into one row per employee.
df = parquetDF.select(explode("employees").alias("e"))
explodeDF = df.selectExpr("e.firstName", "e.lastName", "e.email", "e.salary")
explodeDF.show()
# -

filterDF = explodeDF.filter(explodeDF.firstName == "xiangrui").sort(explodeDF.lastName)
filterDF.show()

# +
from pyspark.sql.functions import col, asc

# Use `|` instead of `or` (Python's `or` does not build a Column expression).
filterDF = explodeDF.filter((col("firstName") == "xiangrui") | (col("firstName") == "michael")).sort(asc("lastName"))
filterDF.show()
# -

# where() is an alias of filter(); this repeats the query above.
whereDF = explodeDF.where((col("firstName") == "xiangrui") | (col("firstName") == "michael")).sort(asc("lastName"))
whereDF.show()

# Replace nulls in string columns with a sentinel.
nonNullDF = explodeDF.fillna("--")
nonNullDF.show()

# NOTE(review): despite the name, this keeps rows where firstName OR lastName
# IS null (it selects the null rows, not the non-null ones).
filterNonNullDF = explodeDF.filter(col("firstName").isNull() | col("lastName").isNull()).sort("email")
filterNonNullDF.show()

# +
from pyspark.sql.functions import countDistinct

countDistinctDF = explodeDF.select("firstName", "lastName")\
    .groupBy("firstName", "lastName")\
    .agg(countDistinct("firstName"))
countDistinctDF.show()

# +
# register the DataFrame as a temp table so that we can query it using SQL
# NOTE(review): registerTempTable is deprecated in favor of
# createOrReplaceTempView on Spark 2+.
explodeDF.registerTempTable("df_example")

# Perform the same query as the DataFrame above and return ``explain``
countDistinctDF_sql = spark.sql("SELECT firstName, lastName, count(distinct firstName) as distinct_first_names FROM df_example GROUP BY firstName, lastName")
countDistinctDF_sql.show()
# -

# Dict-style aggregation: sum of the salary column over the whole frame.
salarySumDF = explodeDF.agg({"salary" : "sum"})
salarySumDF.show()

# Summary statistics (count, mean, stddev, min, max) for salary.
explodeDF.describe("salary").show()
ETL-Examples/src/main/python/pyspark-sql-examples/dataframe-operations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Logistic Regression # # ## From Regression to Classification # # In the previous chapter we discussed linear regression and its application. It is a very valuable tool because # # * it is easy to fit # * has simple interpretation and # * tests of statistical significance can be performed without difficulty. # # But it has its drawbacks too. For example: it makes quite strong assumptions about the functional form of $f(X)$ and the error terms. Another disadvantage is that linear regression is not suited for classification problems where we deal with qualitative responses. # # Below figure, where we regress the probability of default onto credit card balance, visualizes the issue. # # * Linear regression produces negative probabilities for some balances # * Due to the large number of non-defaults, the linear regression's probability of default barely rises above 25%. This begs the question where we should set the threshold to qualify a default case. It can't be 50%, can it? # <img src="Graphics/0206_LinRegClassification.png" alt="LinearRegClassification" style="width: 800px;"/> # Logistic regression is able to overcome these obstacles. Rather than modeling the default class $y$ (e.g. 0 for non-default, 1 for default) it models the (conditional) probability that $y$ belongs to a certain category, $\Pr(Y=y|X)$. # ## Logistic Model # # Above figure estimated the probability of default given a client's credit card balance. In the same way we could have used a multiple linear regression to estimate the probability. 
# 
# $$\begin{equation}
# \Pr(Y=y|X) = \beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p
# \end{equation}$$
# 
# But as discussed above for the simple case, this too would yield unbounded probabilities, with $y > 1$ and $y<0$ for some $X$ (for convenience let us assume that we are using the generic 0/1 coding for the dependent variable). The question thus is: how could we improve on that model to have bounded results with probabilities of default? A well known function which satisfies the inequality $0 \leq f(x) \leq 1$ (and is continuous and hence differentiable) is the so called "Sigmoid" or "Logistic" function which is defined as
# 
# $$\begin{equation}
# S(x) = \frac{1}{1 + e^{-x}} = \frac{e^x}{1 + e^x}
# \end{equation}$$
# <img src="Graphics/0206_Sigmoid.png" alt="LogisticFunction" style="width: 500px;"/>
# Above figure displays the function's shape. Its range is bounded by $[0, 1]$ and its notable "S" shape depends on the input parameter $x$. For logistic regression $x$ takes on the known functional form of a linear regression:
# 
# $$\begin{equation}
# \Pr(Y=y|X) = \frac{e^{\beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p}}{1 + e^{\beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p}}
# \end{equation}$$
# 
# This function is related to the linear model in that by rearranging the equation we arrive at
# 
# $$\begin{equation*}
# \frac{p(X)}{1 - p(X)} = e^{\beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p}
# \end{equation*}$$
# 
# Notice that for ease of use we define $p(X)$ as equivalent of $\Pr(Y=y|X)$. Taking the natural logarithm of this expression yields
# 
# $$\begin{equation}
# \underbrace{\log\left(\frac{p(X)}{1 - p(X)}\right)}_{\textit{Logit}} = \beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p,
# \end{equation}$$
# 
# a function where the output on the left-hand side, the so called **Logit** or **log-odds**, is linear in $X$.
# ## Estimating the Coefficients
# 
# To run a logistic regression we need to estimate the $\beta$ coefficients in above equation.
In the linear regression setting we used a least squares approach. Here, however, we switch to a method called **maximum likelihood**. This approach is used in many areas of statistics/machine learning to fit models and as such often studied in graduate courses. Here we will restrict ourselves to a very superficial discussion of the intuition. For the interested reader, [Elkan (2014)](http://cseweb.ucsd.edu/~elkan/250B/logreg.pdf) provides an excellent, concise introduction, whereas Bishop (2006) has a more advanced, yet sound discussion of the topic. # # Back to the intuition: maximum likelihood optimizes the Logit-equation to get estimates of $\beta_0, \beta_1$ "such that the predicted probability $\hat{p}(x_i)$ of default for each individual [$\ldots$] corresponds as closely as possible to the individual's observed default status. In other words, we try to find $\hat{\beta}_0$ and $\hat{\beta}_1$ such that plugging these estimates into the model for $p(X)$ [$\ldots$] yields a number close to one for all individuals who defaulted, and a number close to zero for all individuals who did not." (James et al. (2013, p. 133)). # # The maximum likelihood function that is used to estimate the $\beta$ coefficients in the logistic regression model, is derived from the probability distribution of the dependent variable $y$. Since $y$ takes on one of two values (in our example $y \in \{0, 1\}$) and assuming the response values are independent of each other, the probability mass function of $Y$ follows a Bernoulli distribution, $Y \sim Bern(p)$, which is a special case of the Binomial distribution $Bin(n, p)$ with $n=1$. 
Its probability mass function is # # \begin{equation*} # f(y;p) = p(Y=y) = p^y (1-p)^{1-y} = # \begin{cases} # p & \text{ if } y=1 \\ # (1-p) & \text{ if } y=0 # \end{cases} # \end{equation*} # # The joint probability mass function of $Y$ is # # \begin{equation*} # f(y | \beta) = \prod_{i=1}^N p(x_i)^{y_i} \, (1-p(x_i))^{1-y_i} # \end{equation*} # # and describes the values of $y$ as a function of known, fixed values for $\beta$, where $\beta$ is related to $y$ through the logistic function. Since we don't know the coefficients $\beta$ but have measured outcomes for $y$ the likelihood function reverses above joint probability mass function such that it expresses the values of $\beta$ in terms of known, fixed values for $y$. This is the likelihood function ([Czepiel (2002)](https://czep.net/stat/mlelr.pdf)). # # \begin{equation} # L(\beta | y) = \prod_{i=1}^N p(x_i)^{y_i} \, (1-p(x_i))^{1-y_i} # \end{equation} # # The maximum likelihood estimates are now those values for $\beta$ which maximize the likelihood function $L(\beta | y)$. Typically, to find the maximum likelihood estimates we would differentiate the (log-) likelihood with respect to the coefficients, set the derivatives equal to zero, and solve. However, there is no closed-form solution for this and thus numerical methods (such as Newton-Raphson, Newton-conjugent gradient etc.) are required to derive a maxima ([Shalizi (2017)](http://www.stat.cmu.edu/~cshalizi/ADAfaEPoV/ADAfaEPoV.pdf)). Thankfully, statistical packages such as Python's `statsmodels` have these necessary tools integrated into the relevant functions such that we do not need to concern ourselves with the details. # # ## Logistic Regression in Python # # ### Package Selection # # To show how logistic regression is run in Python we will again rely on functions from the `statsmodels` package. The Scikit-learn package `sklearn` contains similar functions. 
# However, when it comes to calling results, the `statsmodels` functions for logistic regression follow those for linear regression which we got to know in the previous chapter. Therefore we will work with this package.
# 
# ### Data Load
# 
# We will use the generic 'Default' data set from James et al. (2013), which we discussed in this chapter's introduction. It is taken from the book's corresponding `R` package and made available in this course's data folder on GitHub. We start our journey as usual with the initial load of the necessary packages and setting a few options.

# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
# NOTE(review): 'seaborn-whitegrid' style name was renamed in newer
# matplotlib ('seaborn-v0_8-whitegrid'); confirm the pinned version.
plt.style.use('seaborn-whitegrid')

# +
# Default data set is not available online. Data was extracted from R package "ISLR"
df = pd.read_csv('Data/Default.csv', sep=',')

# Factorize 'No' and 'Yes' in columns 'default' and 'student'
# (factorize()[0] gives the integer codes; 'No'/'Yes' map by order of appearance).
df['defaultFac'] = df.default.factorize()[0]
df['studentFac'] = df.student.factorize()[0]
df.head(5)
# -

# ### Simple Logistic Regression
# 
# Similar to linear regression with `statsmodels` there exist different ways to compute a logistic regression. For reference, we will show three approaches here:
# 
# 1. R-Style: The `statsmodels.formula.api` library has a `glm` function mimicking `R`'s way of running glm regressions
# 2. Classic GLM: Using the `GLM` function from the `statsmodels.api` library
# 3. Logit regression: `statsmodels.api`'s `Logit` function follows the approach we used for OLS case
# 
# You might ask what GLM/glm means. It stands for *Generalized Linear Models* and as such is a generalization of linear regression approaches (linear, logistic, Poisson regression). There are many good resources available on the web for the interested. A good textbook introducing GLM is <NAME>'s *Foundations of Linear and Generalized Linear Models* (Agresti (2015)).
# #### R-Style # # * This method allows for a verbose definition of the regression. # * It follows the pattern `y ~ X1 + X2 + ... + Xn` - in words we regress $y$ on $X_1, X_2, \ldots, X_n$ where $X_i$ is a vector of a particular feature. # * By default a constant is added to the regression # * `family` is a necessary argument. Remember that above where we discussed the maximum likelihood approach of estimating the coefficients we mentioned that the binary response $y$ follows a Bernoulli distribution, which is a special case of a Binomial distribution. Thus we chose `family=sm.families.Binomial()`. # * The `.fit()` method runs the maximum likelihood estimation of the coefficients. Default method is 'Newton-Raphson' (method=`newton`) and this fits our needs. Nonetheless, others are available. Use the help function `sm.Logit.fit?` to see what options you have (it's the same for all three approaches). # # More details can be found [here](http://www.statsmodels.org/dev/example_formulas.html) # + # 1. R-Style # R-Style formula formula = 'defaultFac ~ balance' # Regress model logReg = smf.glm(formula=formula, data=df, family=sm.families.Binomial()).fit() print(logReg.summary()) # - # #### Classic GLM # # * `endog` = endogenous variable = response = $y$ # * `exog` = exogenous variable(s) = features = $X$ # * Notice that a constant needs to be added manually with `sm.add_constant(X)` # # Function details are described [here](http://www.statsmodels.org/0.6.1/generated/statsmodels.genmod.generalized_linear_model.GLM.html). # + # 2. 
GLM # Regress model logReg = sm.GLM(endog=df.defaultFac, exog=sm.add_constant(df.balance), family=sm.families.Binomial()).fit() print(logReg.summary()) # - # #### Logit Regression # # * `endog` = endogenous variable = response = $y$ # * `exog` = exogenous variable(s) = features = $X$ # * Notice that a constant needs to be added manually with `sm.add_constant(X)` # * No need to specify `family` (Logit is per default binomial) # # # The full function details are to be found [here](http://www.statsmodels.org/dev/generated/statsmodels.discrete.discrete_model.Logit.html) # + # 3. Logit # Regress model logReg = sm.Logit(endog=df.defaultFac, exog=sm.add_constant(df.balance)).fit() print(logReg.summary()) # - # ### Accessing Output # # Though the scope of information in the summaries differ slightly between the different approaches the function yield the same results. As in the case of linear regression, the model's output can be accessed through its different methods and attributes. For example the models parameters and p-values are easily accessed as shown below. logReg.params logReg.pvalues # ### Hypothesis Testing # # The $z$-statistic in above summary and the corresponding p-values play the same role as the $t$-statistic in the linear regression output. For example, the $z$-statistic associated with $\hat{\beta}_1$ is equal to $\hat{\beta}_1 / SE(\hat{\beta}_1)$. A large (absolute) $z$-value, and correspondingly a small p-value, provides evidence against the null hypothesis $H_0 : \beta_1 = 0$. Notice that this null hypothesis implies that $p(X) = \dfrac{e^{\beta_0}}{(1 + e^{\beta_0})}$. # # The main purpose of the intercept is to adjust the average fitted probabilities to the proportion of ones in the data. Beyond that it is not of interest. # # If we are interested in the coefficient's 99% CI (instead of 95% like in the summary) we can use the same code as in the linear regression case. 
logReg.conf_int(alpha=0.01) # ### Making Predictions # # If we wish to make predictions of the default probability we can use the `.predict()` method. Given we leave the brackets empty, this method will calculate $p(X)$ on the basis of the previously used feature training sample. If we wish to get the probability of default for a balance of, let us say, USD 2'000 we can run the following line of code: # X must include 1 in first column for intercept logReg.predict([1, 2000]) # The vector $[1, 2000]$ in the above `.predict()` function corresponds to a row vector containing the $x$-values for which we wish to predict the probability of default. If we had $p$ features, this vector would obviously have lenght $p+1$ ($p$ features + 1 for intercept). # # For future reference, below code generates a confusion matrix. We will discuss Confusion Matrices in more detail in the chapter on $k$-Nearest Neighbor. The `threshold` parameter defines above what probability an new observation is labeled as 1; default is 0.5. # Confusion Matrix for LogRegression logReg.pred_table(threshold=0.5) # ### Plotting # # Above we regressed default on balance. Thus this can still be displayed in a figure. In order to do so we need to sort the data first. Once this is done, plotting follows the usual routine. # + # Create sorted resutls-df res = pd.DataFrame() res['balance'] = df.balance res['prob'] = logReg.predict() res = res.sort_values('balance') # Plot scatter & logReg plt.figure(figsize=(12, 8)) plt.scatter(df.balance, df.defaultFac, marker='.') plt.plot(res.balance, res.prob, c='k') plt.axhline(y=0, color='gray', linestyle='dashed') plt.axhline(y=1, color='gray', linestyle='dashed') plt.ylabel('Probability of default', fontsize=12) plt.xlabel('Balance', fontsize=12); # - # ### Multiple Logistic Regression # # Regressing 'default' on multiple features follows the same procedure as for simple logistic regression. 
# As a case of use we regress 'default' on 'balance', a credit card holder's 'income' and the dummy variable 'studentFac' (1 if yes, 0 otherwise).

# +
# Assign features to matrix X and response to y
X = sm.add_constant(df[['balance', 'income', 'studentFac']])
X.income = X.income / 1000  # Income in 1'000 dollars

y = df.defaultFac

# Run Logistic Regression
logReg = sm.Logit(endog=y, exog=X).fit()
print(logReg.summary().tables[1])
# -

# The p-value for `income` is surprisingly large, indicating that there is no clear evidence of a real association between the probability of default and income. In a case study this certainly would need to be further examined.

# ### Multiple Logistic Regression with Scikit-learn
# 
# For reference we show below how logistic regression is run with Scikit-learn. Unfortunately, there is no summary output available in Scikit-learn like there is within the Statsmodels package. The main reason is that sklearn is not used for statistical inference but for predictive modelling/ML and the evaluation criteria are based on performance on previously unseen data.
# 
# Notice that in order to have similar coefficients/results, the hyperparameter `C` had to be set to `C=1e9`. This parameter is a regularization parameter that refers to the L1/L2 penalty scheme implemented. For details refer to Scikit-learn's [user guide](http://scikit-learn.org/stable/modules/linear_model.html#logistic-regression), [this discussion on Stackoverflow](https://stackoverflow.com/questions/43431236/how-to-perform-an-unregularized-logistic-regression-using-scikit-learn) or chapter 6.2 in James et al. (2013).

from sklearn.linear_model import LogisticRegression

# fit_intercept=False because X already carries the constant column added
# by sm.add_constant above.
logReg = LogisticRegression(C=1e9, fit_intercept=False).fit(X, y)
logReg.coef_

# ### Decision Boundary
# 
# We've seen above how one can plot a logistic regression in the simple case with just one feature. If you regress a response on two features, it is still possible to create very meaningful plots.
A good example is [this answer on stackoverflow](https://stackoverflow.com/questions/28256058/plotting-decision-boundary-of-logistic-regression) where the decision boundary is drawn and the color scheme follows the probability of the response. # ## Closing Comments # In closing this chapter it should be mentioned that the preceding code sections are by no means a thorough analysis but a superficial digging into the topics. Yet the tools and measures necessary to understand the output are yet to be introduced. For this we refer to the chapter on linear discriminant analysis (LDA) and quadratic discriminant analysis (QDA) (and subsequent chapters) where many tools are introduced that are of significant help in analyzing an algorithm's output. # # Further Resources # # # In writing this notebook, many resources were consulted. For internet resources links are provided within the textflow above. Beyond these links, the following resources were consulted and are recommended as further reading on the discussed topics: # # * <NAME>, 2015, *Foundations of Linear and Generalized Linear Models* (John Wiley & Sons, Hoboken, NJ). # * <NAME>., 2006, *Pattern Recognition and Machine Learning* (Springer, New York, NY). # * Czepiel, <NAME>., 2002, Maximum Likelihood Estimation of Logistic Regression Models: Theory and Implementation from website, https://czep.net/stat/mlelr.pdf, 08/24/17. # * <NAME>, 2014, Maximum Likelihood, Logistic Regression, and Stochastic Gradient Training from website, http://cseweb.ucsd.edu/~elkan/250B/logreg.pdf, 08/24/17. # * James, Gareth, <NAME>, <NAME>, and <NAME>, 2013, *An Introduction to Statistical Learning: With Applications in R* (Springer Science & Business Media, New York, NY). # * Shalizi, <NAME>, 2017, Advanced Data Analysis from an Elementary Point of View from website, http://www.stat.cmu.edu/~cshalizi/ADAfaEPoV/ADAfaEPoV.pdf, 08/24/17. #
0206_LogisticRegression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- n = input("Digite algo:") print('O tipo digitado é uma {}'.format(type(n))) print("É numerico? {}".format(n.isnumeric())) print("É letra? {}".format(n.isalpha())) print("Tem só espaço? {}".format(n.isspace())) print("É alfanumerico? {}".format(n.isalnum())) print("Letra Maiuscula? {}".format(n.isupper())) print("Letra Minuscula? {}".format(n.islower())) print("Ambos os tipos? {}".format(n.istitle()))
.ipynb_checkpoints/EX004 - Dissecando uma Variável -checkpoint.ipynb
# --- jupytext notebook header ---
# jupyter: jupytext: text_representation: extension .py, format_name light,
#   format_version '1.5', jupytext_version 1.14.4
# kernelspec: display_name "Python 3", language python, name python3
# ---

# Exploratory preprocessing of Czech real-estate rental listings ("silver"
# layer CSVs). Column names are Czech (e.g. 'Celková cena' = total price,
# 'Užitná plocha' = usable area, 'Podlaží' = floor). The notebook first
# explores each column, then ("round 2") applies the extracted mappings via
# project helpers from preprocessing/mappings modules.

# cd ..

import os
import re
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

folder = 'data/silver'
files = os.listdir(folder)

# +
# Load every CSV in the silver folder and stack them into one frame.
dataframes = []

for file in files:
    csv = pd.read_csv(f'{folder}/{file}')
    dataframes.append(csv)

df = pd.concat(dataframes, axis=0, ignore_index=True)
# -

columns = [
    # Target
    'Celková cena',
    # Features
    'Užitná plocha',
    'Podlaží',
    'estate_locality_district',
    'estate_disposition',
    'estate_category_main_cb',
    'Stavba',
    'Stav objektu',
    'Poznámka k ceně',
    'Energetická náročnost budovy',
    'Vlastnictví',
    'Tram',
    'Vybavení',
    'Výtah',
    'Divadlo',
    'Kino',
    'Obchod',
    'Cukrárna',
    'Veterinář',
    'Vlak',
    'Lékárna',
    'Bankomat',
    'Sportoviště',
    'Bus MHD',
    'Lékař',
    'Škola',
    'Školka',
    'Hospoda',
    'Pošta',
    'Restaurace',
    'Večerka',
    'Hřiště',
]

# Keep only rental listings (estate_rental_or_sell == 2 — presumably the
# "rental" code; confirm against the scraper) of flats/houses with a price.
is_rental = df.loc[:, 'estate_rental_or_sell'] == 2
is_flat_or_house = df.loc[:, 'estate_category_main_cb'].isin([1, 2])
has_price = ~df.loc[:, 'Celková cena'].isna()

rentals = (
    df
    .loc[is_rental & is_flat_or_house & has_price, columns]
    .drop_duplicates()
)
rentals.head()

# ### celkova cena

# Strip whitespace thousands-separators and cast price to int.
# NOTE(review): relies on str.replace treating r'\s' as a regex — the default
# in older pandas; newer pandas requires regex=True. Confirm pinned version.
rentals.loc[:, 'Celková cena'] = rentals.loc[:, 'Celková cena'].str.replace(r'\s', '').astype(int)

x = rentals.loc[:, 'Celková cena']
plt.figure(figsize=(10,5))
# NOTE(review): sns.distplot is deprecated in newer seaborn (use histplot).
sns.distplot(x[x < 100000])

# ### Užitná plocha

x = rentals.loc[:, 'Užitná plocha']
x.describe()
x.isna().sum()
sns.distplot(x[x < 200])

# ### estate_locality_district

rentals.drop_duplicates().shape

rentals.loc[:, 'estate_locality_district'].value_counts().head(20)

# District codes observed in the data -> city/district names; everything
# else is later bucketed as 'ostatni' ("other").
locality_district_mapping = {
    1: "ceske budejovice",
    12: "plzen-mesto",
    28: "hradec kralove",
    32: "pardubice",
    42: "olomouc",
    5001: "praha 1",
    5002: "praha 2",
    5003: "praha 3",
    5004: "praha 4",
    5005: "praha 5",
    5006: "praha 6",
    5007: "praha 7",
    5008: "praha 8",
    5009: "praha 9",
    5010: "praha 10",
    62: "karvina",
    65: "ostrava-mesto",
    72: "brno-mesto"
}

rentals.loc[:, 'estate_locality_district'].map(locality_district_mapping).fillna('ostatni')

# #### 'estate_disposition',

rentals.loc[:, 'estate_disposition'].value_counts()

# Disposition codes -> Czech flat layouts ("2+kk", "3+1", ...), houses
# ('rodinny' = family house, 'chata'/'chalupa' = cottage, etc.).
disposition_mapping = {
    2: "1+kk",
    3: "1+1",
    4: "2+kk",
    5: "2+1",
    6: "3+kk",
    7: "3+1",
    8: "4+kk",
    9: "4+1",
    10: "5+kk",
    11: "5+1",
    12: "6 a vice",
    16: "atypicky",
    37: "rodinny",
    39: "vila",
    43: "chalupa",
    33: "chata",
}

rentals.loc[:, 'estate_disposition'].map(disposition_mapping).fillna('ostatni').value_counts()

# #### estate_category_main_cb

rentals.estate_category_main_cb.value_counts()

category_mapping = { 1: 'flat', 2: 'house' }

rentals.estate_category_main_cb.map(category_mapping)

rentals.Stavba.value_counts()

rentals.loc[:, 'Energetická náročnost budovy'].value_counts()

# Energy-efficiency classes ('Třída' = class) -> ordinal 1 (best) .. 7 (worst).
efficiency_mapping = {
    'Třída A': 1,
    'Třída B': 2,
    'Třída C': 3,
    'Třída D': 4,
    'Třída E': 5,
    'Třída F': 6,
    'Třída G': 7
}

# Extract the leading "Třída X" token (the column may carry extra text),
# passing non-strings (NaN) through untouched, then map to the ordinal.
(
    rentals
    .loc[:, 'Energetická náročnost budovy']
    .apply(lambda x: re.match(r'Třída \w', x)[0] if isinstance(x, str) else x)
    .map(efficiency_mapping)
)

rentals.loc[:, 'Poznámka k ceně'].value_counts()

def parse_floor(x):
    # Normalize the free-text 'Podlaží' (floor) field to an int in [-1, 20],
    # or None when it cannot be parsed.
    if not x:
        # Empty/NaN-ish (also catches a literal 0 — presumably no true
        # "floor 0" values arrive here as numbers; see 'přízemí' below).
        return None
    elif isinstance(x, float) or isinstance(x, int):
        # Already numeric: pass through unchanged.
        return x
    elif 'přízemí' in x:
        # 'přízemí' = ground floor.
        return 0
    # Text like "3. podlaží ..." — take the part before the first dot.
    parsed_x = str(x).split('.')[0]
    if 'včetně' in parsed_x:
        # 'včetně' = "including" (e.g. "2 včetně ..."): take the first digit.
        return int(parsed_x[0])
    try:
        # Clamp outliers into [-1, 20].
        x = int(parsed_x)
        x = max(x, -1)
        x = min(x, 20)
        return x
    except ValueError:
        return None

rentals.loc[:,'Podlaží'].apply(lambda x: parse_floor(x)).value_counts()

df.loc[:,'Stavba',].value_counts()

df.loc[:,'Stav objektu'].value_counts()

# NOTE(review): keys are the *strings* 'True'/'False' — the 'Vybavení'
# (furnishing) column apparently stores them as text; 'Částečně' = partially.
furnishing_mapping = {
    'True': 1,
    'Částečně': 0.5,
    'False': 0,
}

rentals.loc[:, 'Vybavení'].map(furnishing_mapping).value_counts()

# ## round 2

# cd src/data/gold

# Project-local helpers that package the mappings explored above.
from preprocessing import *
from mappings import *

r = load_rentals(df)

# Assemble the cleaned ("gold") feature frame; POI columns (tram, school,
# pub, ...) are distances/flags taken over verbatim from the source data.
b = pd.DataFrame()
b = b.assign(
    price = preprocess_price(r),
    area_m2 = r.loc[:, 'Užitná plocha'],
    district = apply_column_mapping(r, 'estate_locality_district', locality_district_mapping),
    disposition = apply_column_mapping(r, 'estate_disposition', disposition_mapping),
    category = apply_column_mapping(r, 'estate_category_main_cb', category_mapping),
    furnishing = apply_column_mapping(r, 'Vybavení', furnishing_mapping),
    efficiency = preprocess_efficiency(r),
    floor = preprocess_floor(r),
    building_type = r.loc[:, 'Stavba'],
    building_state = r.loc[:, 'Stav objektu'],
    ownership = r.loc[:, 'Vlastnictví'],
    tram = r.loc[:, 'Tram'],
    elevator = r.loc[:, 'Výtah'],
    theatre = r.loc[:, 'Divadlo'],
    cinema = r.loc[:, 'Kino'],
    groceries = r.loc[:, 'Obchod'],
    candy_shop = r.loc[:, 'Cukrárna'],
    veterinary = r.loc[:, 'Veterinář'],
    train = r.loc[:, 'Vlak'],
    pharmacist = r.loc[:, 'Lékárna'],
    atm = r.loc[:, 'Bankomat'],
    sports = r.loc[:, 'Sportoviště'],
    bus = r.loc[:, 'Bus MHD'],
    doctors = r.loc[:, 'Lékař'],
    school = r.loc[:, 'Škola'],
    kindergarten = r.loc[:, 'Školka'],
    pub = r.loc[:, 'Hospoda'],
    post_office = r.loc[:, 'Pošta'],
    restaurant = r.loc[:, 'Restaurace'],
    seven_eleven = r.loc[:, 'Večerka'],
    playground = r.loc[:, 'Hřiště'],
)
b.head()
notebooks/03_Rentals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Imports: import pandas as pd import geopandas as gpd from osgeo import osr, gdal import matplotlib.pyplot as plt import numpy as np import georasters as gr import seaborn as sns sns.set_theme(style="darkgrid") from scipy.interpolate import griddata import rasterio import pyproj from rasterio.plot import show from scipy.interpolate import griddata import copy import collections import math import pgmpy.models import pgmpy.inference from pgmpy.estimators import BayesianEstimator from pgmpy.estimators import ParameterEstimator from pgmpy.inference import VariableElimination import networkx as nx # + # Read Files/ Load Data: vlm_df = pd.read_excel('Data/data_analisis.xls') elevation_file = gr.from_file('Data/Elevation.TIF') elevation_df = elevation_file.to_geopandas() dataset = rasterio.open('Data/Elevation.tif') elevation = dataset.read(1) slr_df = pd.read_excel('Data/SLR-A.xls', skiprows=15) #note: the Elevation-Habitat map does not need to be read because it will be created and used in this notebook # - # Load Habitat Map: # # Interpolation of VLM Data: vlm = vlm_df.drop(columns=['Station', 'VLM_std']) # Boundary points # Top point: max latitude top = vlm.iloc[vlm.idxmax().Latitude] # Bottom point: min latitude bottom = vlm.iloc[vlm.idxmin().Latitude] # Left point: min longitude left = vlm.iloc[vlm.idxmin().Longitude] # Right point: max longitude right = vlm.iloc[vlm.idxmax().Longitude] # Artificial points for calculating distances # point = (lon, lat) # Top counter: lon = top, lat = bottom top_counter = (top.Longitude, bottom.Latitude) # Bottom counter: lon = bottom, lat = top bottom_counter = (bottom.Longitude, top.Latitude) # Left counter: lon = right, lat = left left_counter = (right.Longitude, left.Latitude) # Right counter: lon = left, lat = 
# NOTE(review): the bare `right` token below is a truncation artifact of this
# extract — the statement it belongs to starts before this chunk; kept verbatim.
right
# Counterpart of the `right` extreme point: same latitude, at `left`'s longitude.
right_counter = (left.Longitude, right.Latitude)

# Arrays for plotting
# Each *_pair bundles (longitudes, latitudes) of an extreme station and its
# counterpart so both ends of a span can be scattered in one call.
top_pair = (np.array([top.Longitude, top_counter[0]]), np.array([top.Latitude, top_counter[1]]))
bottom_pair = (np.array([bottom.Longitude, bottom_counter[0]]), np.array([bottom.Latitude, bottom_counter[1]]))
left_pair = (np.array([left.Longitude, left_counter[0]]), np.array([left.Latitude, left_counter[1]]))
right_pair = (np.array([right.Longitude, right_counter[0]]), np.array([right.Latitude, right_counter[1]]))

# Scatter the VLM stations and overlay the four extreme-point pairs as crosses.
sns.relplot(x="Longitude", y="Latitude", hue="VLM", data=vlm, palette="mako", height=6, aspect=1.25)
plt.scatter(top_pair[0], top_pair[1], c='r', marker='x', s=200, alpha=0.8)
plt.scatter(bottom_pair[0], bottom_pair[1], c='g', marker='x', s=200, alpha=0.8)
plt.scatter(left_pair[0], left_pair[1], c='b', marker='x', s=200, alpha=0.8)
plt.scatter(right_pair[0], right_pair[1], c='yellow', marker='x', s=200, alpha=0.8)

from math import radians, cos, sin, asin, sqrt

def distance(lon1: float, lat1: float, lon2: float, lat2: float) -> float:
    """Great-circle (haversine) distance in meters between two points given as
    longitude/latitude in degrees."""
    # The math module contains a function named
    # radians which converts from degrees to radians.
    lon1 = radians(lon1)
    lon2 = radians(lon2)
    lat1 = radians(lat1)
    lat2 = radians(lat2)

    # Haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
    c = 2 * asin(sqrt(a))

    # Radius of earth in meters. Use 3956 for miles
    r = 6371*1000

    # calculate the result
    return(c * r)

# Distances of vertical pairs (top & bottom)
ver_top = distance(top.Longitude, top.Latitude, top_counter[0], top_counter[1])
ver_bottom = distance(bottom.Longitude, bottom.Latitude, bottom_counter[0], bottom_counter[1])

# Distances of horizontal pairs (left & right)
hor_left = distance(left.Longitude, left.Latitude, left_counter[0], left_counter[1])
hor_right = distance(right.Longitude, right.Latitude, right_counter[0], right_counter[1])

# There is some slight difference so I'm taking the rounded mean values
dis_ver = np.ceil(np.mean((ver_top, ver_bottom)))
dis_hor = np.ceil(np.mean((hor_left, hor_right)))

# Boundary values
x_min, x_max = vlm.min().Longitude, vlm.max().Longitude
y_min, y_max = vlm.min().Latitude, vlm.max().Latitude

# Divide by distance of 10m seems a bit too detailed. Trying with adding points every 100m instead
# NOTE(review): np.int is deprecated and removed in NumPy >= 1.24; plain int()
# is the drop-in replacement — confirm the pinned NumPy version.
nx, ny = (np.int(np.ceil(dis_ver / 100)), np.int(np.ceil(dis_hor / 100)))
x = np.linspace(x_min, x_max, nx)
y = np.linspace(y_min, y_max, ny)
xv, yv = np.meshgrid(x, y)

# Interpolate the scattered station VLM values onto the regular grid.
vlm_points = vlm[['Longitude', 'Latitude']].values
vlm_values = vlm.VLM.values
vlm_grid = griddata(vlm_points, vlm_values, (xv, yv), method='cubic')

sns.relplot(x="Longitude", y="Latitude", hue="VLM", data=vlm, s=50, palette="rocket", height=10)
plt.imshow(vlm_grid, extent=(x_min, x_max, y_min, y_max), origin='lower', alpha=0.6)
plt.show()

# +
# Mask the elevation no-data sentinel (32767) as NaN; cast to float first so
# NaN is representable.
elevation_new = copy.deepcopy(elevation)
elevation_new = elevation_new.astype('float')
elevation_new[elevation_new == 32767] = np.nan
plt.imshow(elevation_new)
# -

# Idea: flatten the coordinate grid into pairs of coordinates to use as inputs for another interpolation
vlm_inter_points = np.hstack((xv.reshape(-1, 1), yv.reshape(-1, 1)))
vlm_inter_values = vlm_grid.flatten()
elev_coor = elevation_df[['x', 'y']].values
elev_grid_0 = griddata(vlm_points, vlm_values, elev_coor, method='cubic') # without pre-interpolation
elev_grid_1 = griddata(vlm_inter_points, vlm_inter_values, elev_coor, method='cubic') # with pre-interpolation
plt.scatter(x=elevation_df.x, y=elevation_df.y, c=elev_grid_0)

# +
# Find elevation map boundaries
x_min_elev = dataset.bounds.left
x_max_elev = dataset.bounds.right
y_min_elev = dataset.bounds.bottom
y_max_elev = dataset.bounds.top

# Create elevation meshgrid
nyy, nxx = elevation_new.shape
xx = np.linspace(x_min_elev, x_max_elev, nxx)
yy = np.linspace(y_min_elev, y_max_elev, nyy)
xxv, yyv = np.meshgrid(xx, yy)
xxv.shape, yyv.shape
# NOTE(review): the next line appears to be captured cell output pasted back
# into the code; as code it is a harmless tuple expression, kept verbatim.
((1758, 2521), (1758, 2521))
elev_grid = griddata(vlm_inter_points, vlm_inter_values, (xxv, yyv), method='linear')
# -

sns.relplot(x="Longitude", y="Latitude", hue="VLM", data=vlm, s=50, palette="rocket", height=10)
plt.imshow(elev_grid, extent=(x_min_elev, x_max_elev, y_min_elev, y_max_elev), origin="lower", alpha=0.3)
plt.show()

# Mask the interpolated VLM grid wherever the elevation raster has no data.
elev_grid_copy = copy.deepcopy(elev_grid)
elev_grid_copy[np.isnan(np.flip(elevation_new, 0))] = np.nan # Needs to flip elevation array vertically. I don't really understand why.
sns.relplot(x="Longitude", y="Latitude", hue="VLM", data=vlm, s=100, edgecolor="white", palette="rocket", height=10) plt.imshow(elev_grid_copy, extent=(x_min_elev, x_max_elev, y_min_elev, y_max_elev), origin='lower', alpha=0.8) plt.show() # # **the interpolated VLM values are stored in: elev_grid_copy # # Calculating AE: slr_new = slr_df.loc[(slr_df.Scenario == '0.3 - LOW') | (slr_df.Scenario == '2.5 - HIGH')] slr_new['SL'] = slr_new.sum(axis=1) ae_low = copy.deepcopy(elev_grid_copy) ae_high = copy.deepcopy(elev_grid_copy) # Division by 100 to fix unit difference ae_low = np.flip(elevation_new, 0) - slr_new.iloc[0].SL/100 + ae_low ae_high = np.flip(elevation_new, 0) - slr_new.iloc[1].SL/100 + ae_high ae_min = min(ae_low[~np.isnan(ae_low)].min(), ae_high[~np.isnan(ae_high)].min()) ae_max = max(ae_low[~np.isnan(ae_low)].max(), ae_high[~np.isnan(ae_high)].max()) fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(24, 8)) im1 = ax1.imshow(ae_low, extent=(x_min_elev, x_max_elev, y_min_elev, y_max_elev), origin='lower', alpha=0.8, vmin=ae_min, vmax=ae_max) im2 = ax2.imshow(ae_high, extent=(x_min_elev, x_max_elev, y_min_elev, y_max_elev), origin='lower', alpha=0.8, vmin=ae_min, vmax=ae_max) cbar_ax = fig.add_axes([0.9, 0.15, 0.02, 0.7]) fig.colorbar(im2, cax=cbar_ax) # # Elevation-Habitat Map: # elev_habit_map from time import time from shapely.geometry import Point, Polygon from shapely.ops import cascaded_union # + t00 = time() # file = gr.from_file('../Week 6/Elevation.tif') # elevation_df = file.to_geopandas() habitat_path = r"Data/UAE_habitats_new1.shp" habitat = gpd.read_file(habitat_path) elevation_df.to_crs(habitat.crs, inplace=True) elev_bounds = elevation_df.total_bounds print("Loading files: %.2fs" % (time() - t00)) # + # Create boundary points # Top left - top right - bottom right - bottom left tl = Point(elev_bounds[0], elev_bounds[3]) tr = Point(elev_bounds[2], elev_bounds[3]) br = Point(elev_bounds[2], elev_bounds[1]) bl = Point(elev_bounds[0], elev_bounds[1]) 
boundary = Polygon([tl, tr, br, bl]) boundary_df = gpd.GeoSeries(boundary) # - # Intersecting original habitat with bounding box habitat['Intersection'] = habitat.geometry.intersects(boundary) habitat_cut = habitat[habitat.Intersection == True] t0 = time() elev_union_shape = cascaded_union(list(elevation_df.geometry)) print("Merging elevation geometries into one polygon: %.2fs" % (time() - t0)) elev_union = gpd.GeoSeries(elev_union_shape) elev_union_df = gpd.GeoDataFrame({'geometry': elev_union}) elev_union_df.crs = habitat.crs elev_union.crs = habitat.crs elev_union_shape.crs = habitat.crs t1 = time() habitat_cut['Intersection_2'] = habitat_cut.geometry.intersects(elev_union_shape) print("Intersecting reduced habitat map with elevation polygon: %.2fs" % (time() - t1)) habitat_cut_cut = habitat_cut[habitat_cut['Intersection_2'] == True] t2 = time() final = gpd.sjoin(elevation_df, habitat_cut_cut, how="left", op="within") print("Joining elevation df with habitat_cut_cut: %.2fs" % (time() - t2)) def fillna_nearest(series): fact = series.astype('category').factorize() series_cat = gpd.GeoSeries(fact[0]).replace(-1, np.nan) # get string as categorical (-1 is NaN) series_cat_interp = series_cat.interpolate("nearest") # interpolate categorical cat_to_string = {i:x for i,x in enumerate(fact[1])} # dict connecting category to string series_str_interp = series_cat_interp.map(cat_to_string) # turn category back to string return series_str_interp t3 = time() final['Fill'] = fillna_nearest(final.Habitats) print("Interpolating missing values in final df: %.2fs" % (time() - t3)) t4 = time() f, ax = plt.subplots(1, 1, figsize=(14, 10)) ax = final.plot(column='Fill', ax=ax, legend=True, cmap='magma', edgecolor="face", linewidth=0.) 
# Move the legend outside the axes.
leg = ax.get_legend()
leg.set_bbox_to_anchor((1.25, 1))
plt.show()
print("Plotting final df: %.2fs" % (time() - t4))

# # Habitats Grouping:

# Drop join bookkeeping and raw habitat attribute columns; keep the filled labels.
elev_habit_map = final.drop(columns=["col", "index_right", "OBJECTID", "Id", "HabitatTyp", "HabitatT_1", "HabitatSub", "HabitatS_1", "RuleID", "Shape_Leng", "Shape_Area", "Habitats", "Intersection", "Intersection_2"], axis=1)
elev_habit_map.rename(columns={"Fill": "Habitats"}, inplace=True)

# Create New Column for New Habitat Groups:
elev_habit_map['Habitat_Groups'] = ''
elev_habit_map.head(1)
np.unique(elev_habit_map.Habitats)

# Collapse the detailed habitat labels into six coarse groups.
elev_habit_map.loc[ (elev_habit_map.Habitats == 'Marine Structure') | (elev_habit_map.Habitats == 'Developed') | (elev_habit_map.Habitats == 'Dredged Area Wall') | (elev_habit_map.Habitats == 'Dredged Seabed') | (elev_habit_map.Habitats == 'Farmland') , 'Habitat_Groups'] = 'Developed'
elev_habit_map.loc[ (elev_habit_map.Habitats == 'Mountains') | (elev_habit_map.Habitats == 'Coastal Cliff') | (elev_habit_map.Habitats == 'Coastal Rocky Plains') | (elev_habit_map.Habitats == 'Gravel Plains') | (elev_habit_map.Habitats == 'Rock Armouring / Artificial Reef') | (elev_habit_map.Habitats == 'Rocky Beaches') | (elev_habit_map.Habitats == 'Storm Beach Ridges') , 'Habitat_Groups'] = 'Rocky'
elev_habit_map.loc[ (elev_habit_map.Habitats == 'Mega Dunes') | (elev_habit_map.Habitats == 'Sand Sheets and Dunes') | (elev_habit_map.Habitats == 'Sandy Beaches') | (elev_habit_map.Habitats == 'Coastal Sand Plains') , 'Habitat_Groups'] = 'Sandy'
elev_habit_map.loc[ (elev_habit_map.Habitats == 'Coastal Salt Flats') | (elev_habit_map.Habitats == 'Inland Salt Flats') | (elev_habit_map.Habitats == 'Saltmarsh') | (elev_habit_map.Habitats == 'Intertidal Habitats') | (elev_habit_map.Habitats == 'Wetlands') , 'Habitat_Groups'] = 'Marsh/Salt Flats'
elev_habit_map.loc[ (elev_habit_map.Habitats == 'Coral Reefs') | (elev_habit_map.Habitats == 'Deep Sub-Tidal Seabed') | (elev_habit_map.Habitats == 'Hard-Bottom') | (elev_habit_map.Habitats == 'Seagrass Bed') | (elev_habit_map.Habitats == 'Lakes or Artificial Lakes') | (elev_habit_map.Habitats == 'Unconsolidated Bottom') , 'Habitat_Groups'] = 'Subaqueous'
elev_habit_map.loc[ (elev_habit_map.Habitats == 'Forest Plantations') | (elev_habit_map.Habitats == 'Mangroves') , 'Habitat_Groups'] = 'Forest'

# Be careful: it is spelled: 'Coastal Sand Plains' NOT: 'Coastal Sand Planes'
unique_groups = np.unique(elev_habit_map.Habitat_Groups)
print(unique_groups)
print(len(unique_groups))
# elev_habit_map.loc[elev_habit_map.Habitat_Groups == ''] #--> to see which rows still didn't have a group assigned to them

sns.catplot(x="Habitat_Groups", kind="count", palette="mako", data=elev_habit_map, height=5, aspect=1.5)
labels = plt.xticks(rotation=45)

# # **The Elev-Habit DF now has habitat groups & it is called: 'elev_habit_map'

# # VLM Bins & Habitat Classes:

# # 1. VLM Bins:

print(len(elev_grid_copy))
print(type(elev_grid_copy))
print(type(elev_grid_copy.flatten()))

# Dropping the NaN values in the array:
nan_array = np.isnan(elev_grid_copy.flatten())
not_nan_array = ~ nan_array
vlm_interpolated_arr = elev_grid_copy.flatten()[not_nan_array]
vlm_interpolated_arr

# # **The clean, flattened VLM array for interpolated VLM values is called:
# # 'vlm_interpolated_arr'

# +
# Step 1: Making 3 equal-size bins for VLM data: note: interval differences are irrelevant
vlm_bins = pd.qcut(vlm_interpolated_arr, q=3, precision=1, labels=['Bin #1', 'Bin #2', 'Bin #3'])

# bin definition
bins = vlm_bins.categories
print(bins)

# bin corresponding to each point in data
codes = vlm_bins.codes
print(np.unique(codes))

# +
# Step 2: Making Sure that the Bins are of Almost Equal Size:
size = collections.Counter(codes)
print(size)

d_table = pd.value_counts(codes).to_frame(name='Frequency')
d_table = d_table.reset_index()
d_table = d_table.rename(columns={'index': 'Bin Index'})

fig, ax = plt.subplots()
sns.barplot(x="Bin Index", y="Frequency", data=d_table, label="Size of Each of the 3 Bins", ax=ax)
print(d_table)
# -

# Step 3: Calculating Probability of each Bin:
prob0 = (d_table.loc[0].Frequency)/len(vlm_interpolated_arr)
prob1 = (d_table.loc[1].Frequency)/len(vlm_interpolated_arr)
prob2 = (d_table.loc[2].Frequency)/len(vlm_interpolated_arr)
print(prob0, prob1, prob2)

# +
# Step 4: Joining Everything in a Single Data Frame for aesthetic:
vlm_bins_df = pd.DataFrame()
vlm_bins_df['VLM Values'] = vlm_interpolated_arr
vlm_bins_df['Bins'] = vlm_bins
vlm_bins_df['Intervals'] = pd.qcut(vlm_interpolated_arr, q=3, precision=1)
vlm_bins_df['Probability'] = ''
vlm_bins_df.loc[ (vlm_bins_df.Bins == 'Bin #1'), 'Probability'] = prob0
vlm_bins_df.loc[ (vlm_bins_df.Bins == 'Bin #2'), 'Probability'] = prob1
vlm_bins_df.loc[ (vlm_bins_df.Bins == 'Bin #3'), 'Probability'] = prob2
vlm_bins_df.head()
# -

# # 2. Elevation Classes:

# Step 1: Create Data Frame:
elevation_classes = pd.DataFrame()
elevation_classes['Elevation_Values'] = elevation_df.value

# Step 2: Get Max and Min Values for Elevation
min_elev = elevation_df.value.min()
max_elev = elevation_df.value.max()

# Step 3: Create Intervals:
# Three non-overlapping cut schemes covering (1, max], [min, 0) and [0, 1].
interval_0 = pd.cut(x=elevation_df['value'], bins=[1, 5, 10, max_elev])
interval_1 = pd.cut(x=elevation_df['value'], bins=[min_elev, -10, -1, 0], right=False)
interval_2 = pd.cut(x=elevation_df['value'], bins=[0, 1], include_lowest=True)

# +
# Step 4: Add intervals to dataframe:
elevation_classes['Intervals_0'] = interval_0
elevation_classes['Intervals_1'] = interval_1
elevation_classes['Intervals_2'] = interval_2
elevation_classes['Intervals'] = ''
# Each row falls into exactly one of the three schemes; pick the non-null one.
elevation_classes.loc[ ((elevation_classes.Intervals_0.isnull()) & (elevation_classes.Intervals_1.isnull())), 'Intervals'] = interval_2
elevation_classes.loc[ ((elevation_classes.Intervals_0.isnull()) & (elevation_classes.Intervals_2.isnull())), 'Intervals'] = interval_1
elevation_classes.loc[ ((elevation_classes.Intervals_1.isnull()) & (elevation_classes.Intervals_2.isnull())), 'Intervals'] = interval_0
# Helper interval columns are no longer needed once 'Intervals' is resolved.
elevation_classes.drop(['Intervals_2', 'Intervals_1', 'Intervals_0'], axis='columns', inplace=True)

# +
# Step 5: Plotting the Size of Each Interval:
size = collections.Counter(elevation_classes.Intervals)
print(size)

d_table_elev = pd.value_counts(elevation_classes.Intervals).to_frame(name='Frequency')
d_table_elev = d_table_elev.reset_index()
d_table_elev = d_table_elev.rename(columns={'index': 'Class Index'})

fig, ax = plt.subplots(figsize=(10, 5))
sns.barplot(x="Class Index", y="Frequency", data=d_table_elev, label="Size of Each Class", ax=ax)
print(d_table_elev)

# +
# Step 6: Calculate Probabilities:
# NOTE(review): the .loc row positions below assume a specific frequency
# ordering of value_counts — fragile if the data changes; verify against the
# printed d_table_elev.
prob0_elev = (d_table_elev.loc[6].Frequency)/len(elevation_classes) # [min_elev, -10)
prob1_elev = (d_table_elev.loc[5].Frequency)/len(elevation_classes) # [-10, -1)
prob2_elev = (d_table_elev.loc[4].Frequency)/len(elevation_classes) # [-1, 0)
prob3_elev = (d_table_elev.loc[2].Frequency)/len(elevation_classes) # [0, 1]
prob4_elev = (d_table_elev.loc[0].Frequency)/len(elevation_classes) # (1, 5]
prob5_elev = (d_table_elev.loc[3].Frequency)/len(elevation_classes) # (5, 10]
prob6_elev = (d_table_elev.loc[1].Frequency)/len(elevation_classes) # (10, max_elev]
print(prob0_elev, prob1_elev, prob2_elev, prob3_elev, prob4_elev, prob5_elev, prob6_elev)
# -

# Step 7: Adding probabilities to d_table_elev for visualization:
# NOTE(review): df['col'].loc[i] = v is chained indexing and triggers pandas'
# SettingWithCopyWarning; df.loc[i, 'col'] = v would be the safe form.
d_table_elev['Probability'] = ''
d_table_elev['Probability'].loc[0] = prob4_elev
d_table_elev['Probability'].loc[1] = prob6_elev
d_table_elev['Probability'].loc[2] = prob3_elev
d_table_elev['Probability'].loc[3] = prob5_elev
d_table_elev['Probability'].loc[4] = prob2_elev
d_table_elev['Probability'].loc[5] = prob1_elev
d_table_elev['Probability'].loc[6] = prob0_elev
d_table_elev

# # SLR Scenarios:

# AE per scenario = elevation - scenario SLR + VLM.
# NOTE(review): 'Migitation' is a typo kept verbatim — it is a column name
# other cells reference. Also, elev_habit_map.VLM is only created in the later
# "# # Add VLM:" cell, so this cell presumably relies on out-of-order
# notebook execution — confirm intended run order.
elev_habit_map['Migitation 46-65'] = elev_habit_map.value - 0.27 + elev_habit_map.VLM
elev_habit_map['Intermediate 46-65'] = elev_habit_map.value - 0.3 + elev_habit_map.VLM
elev_habit_map['Intermediate-High 46-65'] = elev_habit_map.value - 0.28 + elev_habit_map.VLM
elev_habit_map['High 46-65'] = elev_habit_map.value - 0.33 + elev_habit_map.VLM
elev_habit_map.head()

# # AE Bins:

# +
# Step 1: Create Data Frame for each scenario:
mitigation_df = pd.DataFrame()
mitigation_df['AE_Values'] = elev_habit_map['Migitation 46-65']
inter_df = pd.DataFrame()
inter_df['AE_Values'] = elev_habit_map['Intermediate 46-65']
inter_high_df = pd.DataFrame()
inter_high_df['AE_Values'] = elev_habit_map['Intermediate-High 46-65']
high_df = pd.DataFrame()
high_df['AE_Values'] = elev_habit_map['High 46-65']
# -

# Step 2: Find min and max values for each df:
# Mitigation df:
min_mit = mitigation_df.AE_Values.min()
max_mit = mitigation_df.AE_Values.max()
# Intermediate df:
min_inter = inter_df.AE_Values.min()
max_inter = inter_df.AE_Values.max()
# Intermediate-High df:
min_inter_high = inter_high_df.AE_Values.min()
max_inter_high = inter_high_df.AE_Values.max()
# High df:
min_high = high_df.AE_Values.min()
max_high = high_df.AE_Values.max()

# Step 3: Create Intervals for each df:
# intervals are for all slr data frame:
# NOTE(review): only the mitigation intervals are ever computed; they are
# reused for all four scenario dataframes below even though each has its own
# AE values — confirm this is intentional.
interval_0_mit = pd.cut(x=mitigation_df['AE_Values'], bins=[1, 5, 10, max_mit])
interval_1_mit = pd.cut(x=mitigation_df['AE_Values'], bins=[min_mit, -12, -1, 0], right=False)
interval_2_mit = pd.cut(x=mitigation_df['AE_Values'], bins=[0, 1], include_lowest=True)

# +
# Step 4: Add intervals to dataframe:
# Intermediate df:
inter_df['Intervals_0'] = interval_0_mit
inter_df['Intervals_1'] = interval_1_mit
inter_df['Intervals_2'] = interval_2_mit
inter_df['Intervals'] = ''
inter_df.loc[ ((inter_df.Intervals_0.isnull()) & (inter_df.Intervals_1.isnull())), 'Intervals'] = interval_2_mit
inter_df.loc[ ((inter_df.Intervals_0.isnull()) & (inter_df.Intervals_2.isnull())), 'Intervals'] = interval_1_mit
inter_df.loc[ ((inter_df.Intervals_1.isnull()) & (inter_df.Intervals_2.isnull())), 'Intervals'] = interval_0_mit
inter_df.drop(['Intervals_2', 'Intervals_1', 'Intervals_0'], axis='columns', inplace=True)

# Mitigation df:
# Same interval-resolution pattern as the other scenario dataframes: attach
# the three (mitigation-derived) cut schemes, pick the non-null one per row,
# then drop the helpers.
mitigation_df['Intervals_0'] = interval_0_mit
mitigation_df['Intervals_1'] = interval_1_mit
mitigation_df['Intervals_2'] = interval_2_mit
mitigation_df['Intervals'] = ''
mitigation_df.loc[ ((mitigation_df.Intervals_0.isnull()) & (mitigation_df.Intervals_1.isnull())), 'Intervals'] = interval_2_mit
mitigation_df.loc[ ((mitigation_df.Intervals_0.isnull()) & (mitigation_df.Intervals_2.isnull())), 'Intervals'] = interval_1_mit
mitigation_df.loc[ ((mitigation_df.Intervals_1.isnull()) & (mitigation_df.Intervals_2.isnull())), 'Intervals'] = interval_0_mit
mitigation_df.drop(['Intervals_2', 'Intervals_1', 'Intervals_0'], axis='columns', inplace=True)

# Intermediate-High df:
inter_high_df['Intervals_0'] = interval_0_mit
inter_high_df['Intervals_1'] = interval_1_mit
inter_high_df['Intervals_2'] = interval_2_mit
inter_high_df['Intervals'] = ''
inter_high_df.loc[ ((inter_high_df.Intervals_0.isnull()) & (inter_high_df.Intervals_1.isnull())), 'Intervals'] = interval_2_mit
inter_high_df.loc[ ((inter_high_df.Intervals_0.isnull()) & (inter_high_df.Intervals_2.isnull())), 'Intervals'] = interval_1_mit
inter_high_df.loc[ ((inter_high_df.Intervals_1.isnull()) & (inter_high_df.Intervals_2.isnull())), 'Intervals'] = interval_0_mit
inter_high_df.drop(['Intervals_2', 'Intervals_1', 'Intervals_0'], axis='columns', inplace=True)

# High df:
high_df['Intervals_0'] = interval_0_mit
high_df['Intervals_1'] = interval_1_mit
high_df['Intervals_2'] = interval_2_mit
high_df['Intervals'] = ''
high_df.loc[ ((high_df.Intervals_0.isnull()) & (high_df.Intervals_1.isnull())), 'Intervals'] = interval_2_mit
high_df.loc[ ((high_df.Intervals_0.isnull()) & (high_df.Intervals_2.isnull())), 'Intervals'] = interval_1_mit
high_df.loc[ ((high_df.Intervals_1.isnull()) & (high_df.Intervals_2.isnull())), 'Intervals'] = interval_0_mit
high_df.drop(['Intervals_2', 'Intervals_1', 'Intervals_0'], axis='columns', inplace=True)

# +
# Step 5: Plotting the Size of Each Interval:
# Mitigation df:
size = collections.Counter(mitigation_df.Intervals)
print(size)
d_table_mit = pd.value_counts(mitigation_df.Intervals).to_frame(name='Frequency')
d_table_mit = d_table_mit.reset_index()
d_table_mit = d_table_mit.rename(columns={'index': 'Class Index'})
fig, ax = plt.subplots(figsize=(10, 5))
sns.barplot(x="Class Index", y="Frequency", data=d_table_mit, label="Size of Each Class", ax=ax)
print(d_table_mit)

# Intermediate df:
d_table_inter = pd.value_counts(inter_df.Intervals).to_frame(name='Frequency')
d_table_inter = d_table_inter.reset_index()
d_table_inter = d_table_inter.rename(columns={'index': 'Class Index'})

# Intermediate-High df:
d_table_inter_high = pd.value_counts(inter_high_df.Intervals).to_frame(name='Frequency')
d_table_inter_high = d_table_inter_high.reset_index()
d_table_inter_high = d_table_inter_high.rename(columns={'index': 'Class Index'})

# High df:
size = collections.Counter(high_df.Intervals)
print(size)
d_table_high = pd.value_counts(high_df.Intervals).to_frame(name='Frequency')
d_table_high = d_table_high.reset_index()
d_table_high = d_table_high.rename(columns={'index': 'Class Index'})
fig, ax = plt.subplots(figsize=(10, 5))
sns.barplot(x="Class Index", y="Frequency", data=d_table_high, label="Size of Each Class", ax=ax)
print(d_table_high)
# -

mitigation_count = pd.DataFrame(mitigation_df.Intervals.value_counts())
mitigation_count.sort_index(inplace=True)
mitigation_count

sns.barplot(x=mitigation_count.index, y="Intervals", palette="mako", data=mitigation_count)

# # Calculating Probabilities of Each Scenario:

# Mitigation:
# Normalize each frequency table into per-class probabilities.
d_table_mit['Probability'] = (d_table_mit.Frequency)/(d_table_mit.Frequency.sum())
d_table_inter['Probability'] = (d_table_inter.Frequency)/(d_table_inter.Frequency.sum())
d_table_inter_high['Probability'] = (d_table_inter_high.Frequency)/(d_table_inter_high.Frequency.sum())
d_table_high['Probability'] = (d_table_high.Frequency)/(d_table_high.Frequency.sum())

# # BN Model:

# Build the networks:
# Same DAG for all four SLR scenarios: SLR/VLM/Elevation -> AE; Elevation ->
# Habitat; Habitat & AE -> CR (coastal response).
model_mit = pgmpy.models.BayesianModel([('SLR', 'AE'), ('VLM', 'AE'), ('Elevation', 'AE'), ('Elevation', 'Habitat'), ('Habitat', 'CR'), ('AE', 'CR')])
model_inter = pgmpy.models.BayesianModel([('SLR', 'AE'), ('VLM', 'AE'), ('Elevation', 'AE'), ('Elevation', 'Habitat'), ('Habitat', 'CR'), ('AE', 'CR')])
model_inter_high = pgmpy.models.BayesianModel([('SLR', 'AE'), ('VLM', 'AE'), ('Elevation', 'AE'), ('Elevation', 'Habitat'), ('Habitat', 'CR'), ('AE', 'CR')])
model_high = pgmpy.models.BayesianModel([('SLR', 'AE'), ('VLM', 'AE'), ('Elevation', 'AE'), ('Elevation', 'Habitat'), ('Habitat', 'CR'), ('AE', 'CR')])

# CPDs for SLR for models:
# Each scenario model pins its own SLR state with probability 1 (one-hot).
cpd_slr_mit = pgmpy.factors.discrete.TabularCPD('SLR', 4, [[1], [0], [0], [0]], state_names={'SLR': ['0.18-0.34', '0.2-0.37', '0.2-0.36', '0.25-0.43']})
cpd_slr_inter = pgmpy.factors.discrete.TabularCPD('SLR', 4, [[0], [1], [0], [0]], state_names={'SLR': ['0.18-0.34', '0.2-0.37', '0.2-0.36', '0.25-0.43']})
cpd_slr_inter_high = pgmpy.factors.discrete.TabularCPD('SLR', 4, [[0], [0], [1], [0]], state_names={'SLR': ['0.18-0.34', '0.2-0.37', '0.2-0.36', '0.25-0.43']})
cpd_slr_high = pgmpy.factors.discrete.TabularCPD('SLR', 4, [[0], [0], [0], [1]], state_names={'SLR': ['0.18-0.34', '0.2-0.37', '0.2-0.36', '0.25-0.43']})

# CPD for VLM:
cpd_vlm = pgmpy.factors.discrete.TabularCPD('VLM', 3, [[prob0], [prob1], [prob2]], state_names={'VLM': ['Bin 1', 'Bin 2', 'Bin 3']})

# CPD for Elevation:
cpd_elevation = pgmpy.factors.discrete.TabularCPD('Elevation', 7, [[prob0_elev], [prob1_elev], [prob2_elev], [prob3_elev], [prob4_elev], [prob5_elev], [prob6_elev]], state_names={'Elevation': ['[min_elev, -10)', '[-10, -1)', '[-1, 0)', '[0, 1]', '(1, 5]', '(5, 10]', '(10, max_elev]']})

# Add CPDs:
model_mit.add_cpds(cpd_slr_mit, cpd_vlm, cpd_elevation)
model_inter.add_cpds(cpd_slr_inter, cpd_vlm, cpd_elevation)
model_inter_high.add_cpds(cpd_slr_inter_high, cpd_vlm, cpd_elevation)
model_high.add_cpds(cpd_slr_high, cpd_vlm, cpd_elevation)

probs_mit = np.array(d_table_mit.Probability).reshape(-1, 1)
probs_inter = np.array(d_table_inter.Probability).reshape(-1, 1)
probs_inter_high = np.array(d_table_inter_high.Probability).reshape(-1, 1)
probs_high = np.array(d_table_high.Probability).reshape(-1, 1)

state_names = ['(1.0, 5.0]', '(10.0, 82.733]', '(5.0, 10.0]', '(-0.001, 1.0]', '[-1.0, 0.0)', '[-12.0, -1.0)', '[-89.269, -12.0)']
# NOTE(review): cpd_ae_mit declares evidence_card [4, 3, 7], which requires a
# 7 x 84 probability table, but probs_mit is 7 x 1 — pgmpy's TabularCPD should
# reject this shape; confirm against the pgmpy version in use.
cpd_ae_mit = pgmpy.factors.discrete.TabularCPD('AE', 7, probs_mit, state_names={'AE': state_names}, evidence=['SLR', 'VLM', 'Elevation'], evidence_card=[4, 3,7])
cpd_ae_inter = pgmpy.factors.discrete.TabularCPD('AE', 7, probs_inter, state_names={'AE': state_names})
cpd_ae_inter_high = pgmpy.factors.discrete.TabularCPD('AE', 7, probs_inter_high, state_names={'AE': state_names})
cpd_ae_high = pgmpy.factors.discrete.TabularCPD('AE', 7, probs_high, state_names={'AE': state_names})

model_mit.add_cpds(cpd_ae_mit)
model_inter.add_cpds(cpd_ae_inter)
model_inter_high.add_cpds(cpd_ae_inter_high)
model_high.add_cpds(cpd_ae_high)
model_mit.check_model()

# # Add VLM:

vlm_interpolated_arr
# NOTE(review): this assignment aligns on the default integer index; if
# elev_habit_map has more rows than vlm_interpolated_arr, the extras become
# NaN. The /1000 looks like a mm -> m unit conversion — confirm.
inter_vlm_df = pd.DataFrame(vlm_interpolated_arr, columns=['VLM'])
elev_habit_map['VLM'] = inter_vlm_df.VLM/1000
elev_habit_map.VLM.value_counts(dropna=False)

# # Fix AE:
Final-Code/Merged-Code-Farah.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import sys import time import matplotlib.pyplot as plt import numpy as np import GCode import GRBL # - # Poplar 1x4". Cut BlockHeight = 89.0 # mm BlockLength = 2 * BlockHeight # mm # ?GCode.HorzLine horz_lines = np.array([ 0. , 8.9, 17.8, 26.7, 35.6, 44.5, 53.4, 62.3, 71.2, 80.1]) Line1_Y=horz_lines[5] Line1_Y pattern = [5, 2, 1, 2] sum(pattern) # + prog = GCode.Program() prog.lines=list() for X in range(0, int(BlockLength), sum(pattern)): for p in pattern: line_pts = GCode.HorzLine(X0=X, Xf=X+dit, Y=Line1_Y) line=GCode.Line(points=line_pts, power=0, feed=300) prog.lines.append(line) line_pts = GCode.HorzLine(X0=X+dit, Xf=X+dit+dash, Y=Line1_Y) line=GCode.Line(points=line_pts, power=128, feed=300) prog.lines.append(line) prog.generate_gcode() prog # - cnc = GRBL.GRBL(port="/dev/cnc_3018") cnc.status cnc.home() cnc.status prog.machine=cnc prog.dist prog.time t1=time.time() prog.run() t2=time.time() t2-t1
Drawing/OldDrawing/LineTypes-Copy3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="iEL0LqOvNctE"
# # In this tutorial we will build basic CNN for image classification.
# Author :- <NAME>
# * We will define our model and learn how to use keras module to build custom layers
# * We will also design our own training loop, that is identical to model.fit in Keras.
# * The aim of this exercise is to teach, how to use existing Tensorflow API to construct our own module and integrate it with tf.keras API.

# + id="SK3DMbzThNBc"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1234)

# + [markdown] id="j3wnZ5tvOTCV"
# #Things to do
# * Remember to Normalize your data and create validation split from train set.
# * Learn about tf.data, tf.slices and also tf.records

# + colab={"base_uri": "https://localhost:8080/"} id="WWo3ho3whTWU" outputId="a6486bcc-0e16-4c8e-f814-8884f8c80e70"
# MNIST: last 10k training images become the validation split; pixel values
# scaled to [0, 1]; labels one-hot encoded to depth 10.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_val = x_train[50000:60000]
x_train = x_train[0:50000]
y_val = y_train[50000:60000]
y_train = y_train[0:50000]
x_train = x_train.astype(np.float32).reshape(-1,28,28,1) / 255.0
x_val = x_val.astype(np.float32).reshape(-1,28,28,1) / 255.0
x_test = x_test.astype(np.float32).reshape(-1,28,28,1) / 255.0
y_train = tf.one_hot(y_train, depth=10)
y_val = tf.one_hot(y_val, depth=10)
y_test = tf.one_hot(y_test, depth=10)
print(x_train.shape)
print(x_test.shape)
print(x_val.shape)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(128)
# NOTE(review): len(train_dataset) here is the number of *batches* (391), not
# samples, so this does not produce one full-dataset batch — confirm intent.
train_dataset_full = train_dataset.shuffle(buffer_size=1024).batch(len(train_dataset))
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(128)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(128)
print(len(train_dataset))
print(len(test_dataset))

# + id="oFKpqs1Shglk"
#https://d2l.ai/chapter_convolutional-modern/batch-norm.html
def batch_norm(X, gamma, beta, moving_mean, moving_var, eps):
    """Apply the batch-norm affine transform: gamma * (X - mean) / sqrt(var + eps) + beta."""
    # Compute reciprocal of square root of the moving variance elementwise
    inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype)
    # Scale and shift
    inv *= gamma
    Y = X * inv + (beta - moving_mean * inv)
    return Y

class BatchNorm(tf.keras.layers.Layer):
    """Hand-rolled batch normalization layer (per-channel, momentum 0.9).

    In training mode it normalizes with batch statistics and updates the
    moving averages; in inference mode it uses the stored moving statistics.
    """

    def __init__(self, **kwargs):
        super(BatchNorm, self).__init__(**kwargs)

    def build(self, input_shape):
        # One scalar per channel (last axis).
        weight_shape = [input_shape[-1], ]
        # The scale parameter and the shift parameter (model parameters) are
        # initialized to 1 and 0, respectively
        self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True)
        self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True)
        # The variables that are not model parameters are initialized to 0
        self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False)
        self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False)
        super(BatchNorm, self).build(input_shape)

    def assign_moving_average(self, variable, value):
        # Exponential moving average: new = momentum * old + (1 - momentum) * value.
        momentum = 0.9
        delta = variable * momentum + value * (1 - momentum)
        return variable.assign(delta)

    @tf.function
    def call(self, inputs, training):
        if training:
            # Reduce over all axes except the channel axis.
            axes = list(range(len(inputs.shape) - 1))
            batch_mean = tf.reduce_mean(inputs, axes, keepdims=True)
            batch_variance = tf.reduce_mean(tf.math.squared_difference( inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True)
            batch_mean = tf.squeeze(batch_mean, axes)
            batch_variance = tf.squeeze(batch_variance, axes)
            mean_update = self.assign_moving_average( self.moving_mean, batch_mean)
            variance_update = self.assign_moving_average( self.moving_variance, batch_variance)
            self.add_update(mean_update)
            self.add_update(variance_update)
            mean, variance = batch_mean, batch_variance
        else:
            mean, variance = self.moving_mean, self.moving_variance
        output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5)
        return output

# + [markdown] id="Rs9r9QDvO48Y"
# # Create your custom CNN class
# * Convolution layers have 4D weights of size (h,w,input_feature, output_feature), where h=height of your kernel and w = width of our kernel. If you add batches then it is 5D.
# * Now your model will convolve across your input feature map with kernel and create output feature map, that is then passed to next layer.
# * As we have learned in our prior class, to initialize your weights, we use tf.Variable(weight_init(size)), tf.keras.layers.Conv2D will do this for you. Play with the function and see how it works for your problem.
# * Few important concepts, learn to save your model after every k epochs and start re-training from last checkpoint. This is very useful, and you don't need to retrain your model from scratch.

# + id="KGjSk_lMhb7V"
# NOTE(review): this class definition continues beyond the end of this chunk;
# the head below is reproduced verbatim (its docstring closes in the next chunk).
class ImageRecognitionCNN(tf.keras.Model):
    def __init__(self, num_classes, device='cpu:0', checkpoint_directory=None):
        ''' Define the parameterized layers used during forward-pass, the device
            where you would like to run the computation (GPU, TPU, CPU) on and the
            checkpoint directory.

            Args:
                num_classes: the number of labels in the network.
                device: string, 'cpu:n' or 'gpu:n' (n can vary). Default, 'cpu:0'.
                checkpoint_directory: the directory where you would like to save or
                                      restore a model.
'''
        super(ImageRecognitionCNN, self).__init__()

        # Initialize layers
        # All feature blocks use 64 filters with 3x3 kernels; conv8 is a 1x1
        # "head" convolution that maps features down to `num_classes` channels.
        self.conv1 = tf.keras.layers.Conv2D(64, 3, padding='same', activation=None)
        self.conv2 = tf.keras.layers.Conv2D(64, 3, padding='same', activation=None)
        self.pool1 = tf.keras.layers.MaxPool2D()
        self.conv3 = tf.keras.layers.Conv2D(64, 3, padding='same', activation=None)
        self.conv4 = tf.keras.layers.Conv2D(64, 3, padding='same', activation=None)
        # self.pool2 = tf.keras.layers.MaxPool2D()
        # self.conv5 = tf.keras.layers.Conv2D(64, 3, padding='same', activation=None)
        # self.pool2 = tf.keras.layers.MaxPool2D()
        # self.conv6 = tf.keras.layers.Conv2D(64, 3, 2, padding='same', activation=None)
        # self.conv7 = tf.keras.layers.Conv2D(64, 1, padding='same', activation=None)
        self.conv8 = tf.keras.layers.Conv2D(num_classes, 1, padding='same', activation=None)
        # One batch-norm per conv block (BatchNorm is presumably defined
        # elsewhere in this notebook -- confirm it takes (x, training)).
        self.bn1 = BatchNorm()
        self.bn2 = BatchNorm()
        self.bn3 = BatchNorm()
        self.bn4 = BatchNorm()

        # Define the device
        self.device = device

        # Define the checkpoint directory
        self.checkpoint_directory = checkpoint_directory

        # Running accuracy metric.
        # NOTE(review): this metric is never reset between epochs or between
        # train/eval/test passes, so compute_accuracy_2 reports a cumulative
        # accuracy over the whole run -- confirm this is intended.
        self.acc = tf.keras.metrics.Accuracy()

    def predict(self, images, training):
        """ Predicts the probability of each class, based on the input sample.

            Args:
                images: 4D tensor. Either an image or a batch of images.
                training: Boolean. Either the network is predicting in
                    training mode or not.

            Returns:
                Unnormalized logits reshaped to (-1, 1, 10).
        """
        # Four conv -> relu -> batch-norm -> max-pool stages (activation is
        # applied before batch norm here). Note pool1 is reused for every stage.
        x = self.conv1(images)
        x = tf.nn.relu(x)
        x = self.bn1(x, training)
        x = self.pool1(x)
        x = self.conv2(x)
        x = tf.nn.relu(x)
        x = self.bn2(x, training)
        x = self.pool1(x)
        x = self.conv3(x)
        x = tf.nn.relu(x)
        x = self.bn3(x, training)
        x = self.pool1(x)
        x = self.conv4(x)
        x = tf.nn.relu(x)
        x = self.bn4(x, training)
        x = self.pool1(x)
        # 1x1 head conv producing num_classes channels.
        x = self.conv8(x)
        #x = tf.nn.relu(x)
        #print(x.shape)
        # NOTE(review): the 10 duplicates num_classes and assumes the pooled
        # spatial map flattens into groups of 10 -- confirm for the input size
        # actually used (restore_model feeds 48x48 images).
        x = tf.reshape(x, (-1, 1, 10))
        #x = tf.keras.layers.Flatten(x)
        return x

    def loss_fn(self, images, target, training):
        """ Defines the loss function used during training.

            Returns the per-example softmax cross-entropy between `target`
            (one-hot labels) and the logits produced by `predict`.
        """
        preds = self.predict(images, training)
        #print(preds.shape)
        #print(target.shape)
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=preds)
        return loss

    def grads_fn(self, images, target, training):
        """ Dynamically computes the gradients of the loss value with respect to
            the parameters of the model, in each forward pass.
        """
        with tf.GradientTape() as tape:
            loss = self.loss_fn(images, target, training)
        return tape.gradient(loss, self.variables)

    def restore_model(self):
        """ Function to restore trained model.
        """
        with tf.device(self.device):
            # Run the model once to initialize variables
            dummy_input = tf.constant(tf.zeros((1,48,48,1)))
            dummy_pred = self.predict(dummy_input, training=False)
            # Restore the variables of the model
            # NOTE(review): tf.Saver does not exist at the top level in TF2;
            # this was presumably meant to be tf.compat.v1.train.Saver, and
            # Saver.restore normally takes (sess, save_path) -- confirm.
            saver = tf.Saver(self.variables)
            saver.restore(tf.train.latest_checkpoint
                          (self.checkpoint_directory))

    def save_model(self, global_step=0):
        """ Function to save trained model.
        """
        # NOTE(review): same tf.Saver concern as in restore_model.
        tf.Saver(self.variables).save(self.checkpoint_directory,
                                      global_step=global_step)

#     def compute_accuracy(self, input_data):
#         """ Compute the accuracy on the input data.
#         """
#         with tf.device(self.device):
#             #acc = tf.metrics.Accuracy()
#             for step ,(images, targets) in enumerate(input_data):
#                 # Predict the probability of each class
#                 #print(targets.shape)
#                 logits = self.predict(images, training=False)
#                 # Select the class with the highest probability
#                 #print(logits.shape)
#                 logits = tf.nn.softmax(logits)
#                 logits = tf.reshape(logits, [-1, 10])
#                 targets = tf.reshape(targets, [-1,10])
#                 preds = tf.argmax(logits, axis=1)
#                 #m1.update_state
#                 # Compute the accuracy
#                 #print(preds.shape)
#                 acc(tf.reshape(targets, preds))
#         return acc

    def compute_accuracy_2(self, images, targets):
        """ Compute the accuracy on the input data.

            Updates (but never resets) the running `self.acc` metric with the
            argmax of the predicted logits against the argmax of the one-hot
            targets, and returns the cumulative accuracy as a numpy scalar.
        """
        with tf.device(self.device):
            # Predict the probability of each class
            logits = self.predict(images, training=False)
            # Select the class with the highest probability
            logits = tf.nn.softmax(logits)
            # NOTE(review): 10 is hard-coded here; duplicates num_classes.
            logits = tf.reshape(logits, [-1, 10])
            targets = tf.reshape(targets, [-1,10])
            preds = tf.argmax(logits, axis=1)
            goal = tf.argmax(targets, axis=1)
            self.acc.update_state(goal, preds)
            # Compute the accuracy
            result = self.acc.result().numpy()
        return result

    def fit_fc(self, training_data, eval_data, test_data, optimizer, num_epochs=500,
               early_stopping_rounds=10, verbose=10, train_from_scratch=False):
        """ Function to train the model, using the selected optimizer and
            for the desired number of epochs. You can either train from scratch
            or load the latest model trained. Early stopping is used in order to
            mitigate the risk of overfitting the network.

            Args:
                training_data: the data you would like to train the model on.
                    Must be in the tf.data.Dataset format.
                eval_data: the data you would like to evaluate the model on.
                    Must be in the tf.data.Dataset format.
                optimizer: the optimizer used during training.
                num_epochs: the maximum number of iterations you would like to
                    train the model.
                early_stopping_rounds: stop training if the loss on the eval
                    dataset does not decrease after n epochs.
                verbose: int. Specify how often to print the loss value of the
                    network.
                train_from_scratch: boolean. Whether to initialize variables of
                    the the last trained model or initialize them randomly.
        """
        if train_from_scratch==False:
            self.restore_model()

        # Initialize best loss. This variable will store the lowest loss on the
        # eval dataset.
        best_loss = 999

        # Initialize classes to update the mean loss of train and eval
        train_loss = tf.keras.metrics.Mean('train_loss')
        eval_loss = tf.keras.metrics.Mean('eval_loss')
        acc_train = tf.keras.metrics.Mean('train_acc')
        acc_val = tf.keras.metrics.Mean('val_acc')
        test_val = tf.keras.metrics.Mean('test_acc')

        # Initialize dictionary to store the loss history
        self.history = {}
        self.history['train_loss'] = []
        self.history['eval_loss'] = []
        self.history['train_acc'] = []
        self.history['val_acc'] = []
        self.history['test_acc'] = []

        # Begin training
        with tf.device(self.device):
            for i in range(num_epochs):
                # Training with gradient descent
                #training_data_x = training_data.shuffle(buffer_size=1024).batch(128)
                for step, (images, target) in enumerate(training_data):
                    grads = self.grads_fn(images, target, True)
                    optimizer.apply_gradients(zip(grads, self.variables))

                # Compute the loss on the training data after one epoch
                # (a second full pass over the training set).
                for step, (images, target) in enumerate(training_data):
                    loss = self.loss_fn(images, target, False)
                    accuracy = self.compute_accuracy_2(images,target)
                    acc_train(accuracy)
                    train_loss(loss)
                self.history['train_loss'].append(train_loss.result().numpy())
                self.history['train_acc'].append(acc_train.result().numpy())
                # Reset metrics
                train_loss.reset_states()
                acc_train.reset_states()

                # Compute the loss on the eval data after one epoch
                for step, (images, target) in enumerate(eval_data):
                    loss = self.loss_fn(images, target, False)
                    accuracy = self.compute_accuracy_2(images,target)
                    acc_val(accuracy)
                    eval_loss(loss)
                self.history['eval_loss'].append(eval_loss.result().numpy())
                self.history['val_acc'].append(acc_val.result().numpy())
                # Reset metrics
                eval_loss.reset_states()
                acc_val.reset_states()

                # Print train and eval losses
                if (i==0) | ((i+1)%verbose==0):
                    print('Train loss at epoch %d: ' %(i+1), self.history['train_loss'][-1])
                    print('Train Acc at epoch %d: ' %(i+1), self.history['train_acc'][-1])
                    print('Eval loss at epoch %d: ' %(i+1), self.history['eval_loss'][-1])
                    print('Eval Acc at epoch %d: ' %(i+1), self.history['val_acc'][-1])

                # Check for early stopping: reset the patience counter whenever
                # eval loss improves, otherwise count down and stop at zero.
                if self.history['eval_loss'][-1]<best_loss:
                    best_loss = self.history['eval_loss'][-1]
                    count = early_stopping_rounds
                else:
                    count -= 1
                if count==0:
                    break

            # Final pass: cumulative accuracy on the test set.
            for step, (images, target) in enumerate(test_data):
                loss = self.loss_fn(images, target, False)
                accuracy = self.compute_accuracy_2(images,target)
                test_val(accuracy)
                #eval_loss(loss)
            #self.history['eval_loss'].append(eval_loss.result().numpy())
            self.history['test_acc'].append(test_val.result().numpy())
        return self.history

# + id="4a-iuiHIypry" colab={"base_uri": "https://localhost:8080/"} outputId="7fb94679-eac4-4cca-e49b-90f26ad98f11"
# Specify the path where you want to save/restore the trained variables.
checkpoint_directory = 'models_checkpoints/mnist/'
# Use the GPU if available.
device = 'gpu:0'

# Define optimizer.
# Run three trials with different seeds and collect each training history.
trail=[]
for j in range(3):
    tf.random.set_seed(j+100)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-4)
    # Instantiate model. This doesn't initialize the variables yet.
    model = ImageRecognitionCNN(num_classes=10, device=device,
                                checkpoint_directory=checkpoint_directory)
    #model = ImageRecognitionCNN(num_classes=7, device=device)
    # Train model
    # NOTE(review): train_dataset/val_dataset/test_dataset are presumably
    # defined in an earlier cell of this notebook.
    history=model.fit_fc(train_dataset, val_dataset,test_dataset, optimizer,
                         num_epochs=10, early_stopping_rounds=2, verbose=2,
                         train_from_scratch=True)
    trail.append(history)

# + colab={"base_uri": "https://localhost:8080/"} id="GKXBXb-vYZrf" outputId="7c49a3bb-e5ea-486f-998e-78023921fb8b"
# Average test accuracy (and variance) over the three trials.
# NOTE(review): the message says "PRE BN" but the notebook is CNN_POST_BN and
# predict() applies BN after the activation -- confirm which label is correct.
totalTestAcc=[history['test_acc'] for history in trail]
totalTestAcc_avg=np.mean(totalTestAcc)
var=np.var(totalTestAcc)
print(f"the test Acc_avg of PRE BN CNN is {totalTestAcc_avg}, and the variance is {var}")
HW4/CNN_POST_BN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Traverse a tree (depth first search)
#
# Traversing a tree means "visiting" all the nodes in the tree once. Unlike an array or linked list, there's more than one way to walk through a tree, starting from the root node.
#
# Traversing a tree is helpful for printing out all the values stored in the tree, as well as searching for a value in a tree, inserting into or deleting values from the tree. There's depth first search and breadth first search.
#
# Depth first search has 3 types: pre-order, in-order, and post-order.
#
# Let's walk through pre-order traversal by hand first, and then try it out in code.

# ## Creating a sample tree
#
# We'll create a tree that looks like the following:
# ![tree image](images/tree_01.png "Tree")

# +
# this code makes the tree that we'll traverse

class Node(object):
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, value=None):
        self.value = value
        self.left = None
        self.right = None

    def set_value(self, value):
        self.value = value

    def get_value(self):
        return self.value

    def set_left_child(self, left):
        self.left = left

    def set_right_child(self, right):
        self.right = right

    def get_left_child(self):
        return self.left

    def get_right_child(self):
        return self.right

    def has_left_child(self):
        # Identity check (`is not None`) is the idiomatic test for "no child";
        # `!= None` would invoke custom equality if a value type defined one.
        return self.left is not None

    def has_right_child(self):
        return self.right is not None

    # define __repr__ to decide what a print statement displays for a Node object
    def __repr__(self):
        return f"Node({self.get_value()})"

    def __str__(self):
        return f"Node({self.get_value()})"


class Tree():
    """A binary tree exposing only its root node."""

    def __init__(self, value=None):
        self.root = Node(value)

    def get_root(self):
        return self.root
# -

# create a tree and add some nodes
tree = Tree("apple")
tree.get_root().set_left_child(Node("banana"))
tree.get_root().set_right_child(Node("cherry"))
tree.get_root().get_left_child().set_left_child(Node("dates"))

print(tree.get_root().get_right_child())

# ## Depth first, pre-order traversal

# pre-order traversal of the tree would visit the nodes in this order:
#
# ![tree image](images/tree_01.png "Tree")
#
# apple, banana, dates, cherry

# #### Stack
#
# Notice how we're retracing our steps. It's like we are hiking on a trail, and trying to retrace our steps on the way back. This is an indication that we should use a stack.

# Let's define a stack to help keep track of the tree nodes
class Stack():
    """A LIFO stack backed by a Python list (push/pop at the tail, O(1))."""

    def __init__(self):
        self.list = list()

    def push(self, value):
        self.list.append(value)

    def pop(self):
        return self.list.pop()

    def top(self):
        """Return the top item without removing it, or None if empty."""
        if len(self.list) > 0:
            return self.list[-1]
        else:
            return None

    def is_empty(self):
        return len(self.list) == 0

    def __repr__(self):
        if len(self.list) > 0:
            s = "<top of stack>\n_________________\n"
            s += "\n_________________\n".join([str(item) for item in self.list[::-1]])
            s += "\n_________________\n<bottom of stack>"
            return s
        else:
            return "<stack is empty>"

# check Stack
stack = Stack()
stack.push("apple")
stack.push("banana")
stack.push("cherry")
stack.push("dates")
print(stack.pop())
print("\n")
print(stack)

# ## Walk through the steps with code
#
# We're going to translate what we're doing by hand into code, one step at a time. This will help us check if our code is doing what we expect it to do.
#
# ![tree image](images/tree_01.png "Tree")

# +
visit_order = list()
stack = Stack()

# start at the root node, visit it and then add it to the stack
node = tree.get_root()
visit_order.append(node.get_value())
stack.push(node)

print(f"""
visit_order {visit_order}

stack:
{stack}
""")

# +
# check if apple has a left child
print(f"{node} has left child? {node.has_left_child()}")

# since apple has a left child (banana)
# we'll visit banana and add it to the stack
if( node.has_left_child()):
    node = node.get_left_child()
    print(f"visit {node}")
    visit_order.append(node.get_value())
    stack.push(node)

print(f"""
visit_order {visit_order}

stack:
{stack}
""")

# +
# check if banana has a left child
print(f"{node} has left child? {node.has_left_child()}")

# since banana has a left child "dates"
# we'll visit "dates" and add it to the stack
if( node.has_left_child()):
    node = node.get_left_child()
    print(f"visit {node}")
    visit_order.append(node.get_value())
    stack.push(node)

print(f"""
visit_order {visit_order}

stack:
{stack}
""")
# -

# check if "dates" has a left child
print(f"{node} has left child? {node.has_left_child()}")

# since dates doesn't have a left child, we'll check if it has a right child
print(f"{node} has right child? {node.has_right_child()}")

# since "dates" is a leaf node (has no children), we can start to retrace our steps
# in other words, we can pop it off the stack.
print(stack.pop())

stack

# now we'll set the node to the new top of the stack, which is banana
node = stack.top()
print(node)

# we already checked for banana's left child, so we'll check for its right child
print(f"{node} has right child? {node.has_right_child()}")

# banana doesn't have a right child, so we're also done tracking it.
# so we can pop banana off the stack
print(f"pop {stack.pop()} off stack")
print(f"""
stack
{stack}
""")

# now we'll track the new top of the stack, which is apple
node = stack.top()
print(node)

# we've already checked if apple has a left child; we'll check if it has a right child
print(f"{node} has right child? {node.has_right_child()}")

# +
# since it has a right child (cherry),
# we'll visit cherry and add it to the stack.
if node.has_right_child():
    node = node.get_right_child()
    print(f"visit {node}")
    visit_order.append(node.get_value())
    stack.push(node)

print(f"""
visit_order {visit_order}

stack
{stack}
""")

# +
# Now we'll check if cherry has a left child
print(f"{node} has left child? {node.has_left_child()}")

# it doesn't, so we'll check if it has a right child
print(f"{node} has right child? {node.has_right_child()}")

# +
# since cherry has neither left nor right child nodes,
# we are done tracking it, and can pop it off the stack
print(f"pop {stack.pop()} off the stack")

print(f"""
visit_order {visit_order}

stack
{stack}
""")

# +
# now we're back to apple at the top of the stack.
# since we've already checked apple's left and right child nodes,
# we can pop apple off the stack

print(f"pop {stack.pop()} off stack")

print(f"pre-order traversal visited nodes in this order: {visit_order}")
# -

print(f"""stack
{stack}""")

# ## pre-order traversal using a stack (something's missing)
#
# Here is some code that has an error, so it will have an infinite loop. There is a counter to make the loop stop so that it doesn't run forever.

def pre_order_with_stack_buggy(tree):
    """Deliberately broken pre-order traversal (kept as a teaching example).

    The bug: nothing records which children of a node were already visited,
    so after popping back up to a node with a left child we walk down that
    left subtree again, forever. `loop_limit` caps the infinite loop.
    """
    visit_order = list()
    stack = Stack()
    node = tree.get_root()
    visit_order.append(node.get_value())
    stack.push(node)
    count = 0
    loop_limit = 7

    while(node and count < loop_limit):
        print(f"""
loop count: {count}
current node: {node}
stack:
{stack}
""")
        count +=1
        if node.has_left_child():
            node = node.get_left_child()
            visit_order.append(node.get_value())
            stack.push(node)
        elif node.has_right_child():
            node = node.get_right_child()
            visit_order.append(node.get_value())
            stack.push(node)
        else:
            stack.pop()
            if not stack.is_empty():
                node = stack.top()
            else:
                node = None
    return visit_order

pre_order_with_stack_buggy(tree)

# ## pre-order traversal using a stack, tracking state
#
# Here's how we implement DFS with a stack, where we also track whether we've already visited the left or right child of the node.

class State(object):
    """Pairs a node with flags recording which of its children were visited."""

    def __init__(self,node):
        self.node = node
        self.visited_left = False
        self.visited_right = False

    def get_node(self):
        return self.node

    def get_visited_left(self):
        return self.visited_left

    def get_visited_right(self):
        return self.visited_right

    def set_visited_left(self):
        self.visited_left = True

    def set_visited_right(self):
        self.visited_right = True

    def __repr__(self):
        s = f"""{self.node}
visited_left: {self.visited_left}
visited_right: {self.visited_right}
"""
        return s

def pre_order_with_stack(tree, debug_mode=False):
    """Iterative pre-order traversal; returns the list of visited values.

    A stack of State objects remembers, for each node on the current path,
    which children have already been explored.
    """
    visit_order = list()
    stack = Stack()
    node = tree.get_root()
    visit_order.append(node.get_value())
    state = State(node)
    stack.push(state)
    count = 0
    while(node):
        if debug_mode:
            print(f"""
loop count: {count}
current node: {node}
stack:
{stack}
""")
        count +=1
        if node.has_left_child() and not state.get_visited_left():
            state.set_visited_left()
            node = node.get_left_child()
            visit_order.append(node.get_value())
            state = State(node)
            stack.push(state)
        elif node.has_right_child() and not state.get_visited_right():
            state.set_visited_right()
            node = node.get_right_child()
            visit_order.append(node.get_value())
            state = State(node)
            # BUG FIX: the right child's state must be pushed, just like the
            # left child's. Without this push, popping back up skips the right
            # child's own subtree (the sample tree only worked by luck because
            # cherry is a leaf).
            stack.push(state)
        else:
            stack.pop()
            if not stack.is_empty():
                state = stack.top()
                node = state.get_node()
            else:
                node = None
    if debug_mode:
        print(f"""
loop count: {count}
current node: {node}
stack:
{stack}
""")
    return visit_order

# +
# check pre-order traversal
pre_order_with_stack(tree, debug_mode=True)
# -

# ## task 01: pre-order traversal with recursion
#
# Use recursion and perform pre_order traversal.

def pre_order(tree):
    """Recursive pre-order traversal: node, left subtree, right subtree."""
    visit_order = list()

    def traverse(node):
        if node:
            # visit the node
            visit_order.append(node.get_value())
            # traverse left subtree
            traverse(node.get_left_child())
            # traverse right subtree
            traverse(node.get_right_child())

    traverse(tree.get_root())
    return visit_order

pre_order(tree)

# ## Task: do in-order traversal
#
# We want to traverse the left subtree, then visit the node, and then traverse the right subtree.
#
# **hint**: it's very similar in structure to the pre-order traversal.

# +
# define in-order traversal
# -

# solution
def in_order(tree):
    """Recursive in-order traversal: left subtree, node, right subtree."""
    visit_order = list()

    def traverse(node):
        if node:
            # traverse left subtree
            traverse(node.get_left_child())
            # visit node
            visit_order.append(node.get_value())
            # traverse right sub-tree
            traverse(node.get_right_child())

    traverse(tree.get_root())
    return visit_order

# check solution: should get: ['dates', 'banana', 'apple', 'cherry']
in_order(tree)

# ## Task: post-order traversal
#
# Traverse left subtree, then right subtree, and then visit the node.

# +
# define post_order traversal
# -

# solution
def post_order(tree):
    """Recursive post-order traversal: left subtree, right subtree, node."""
    visit_order = list()

    def traverse(node):
        if node:
            # traverse left subtree
            traverse(node.get_left_child())
            # traverse right subtree
            traverse(node.get_right_child())
            # visit node
            visit_order.append(node.get_value())

    traverse(tree.get_root())
    return visit_order

# check solution: should get: ['dates', 'banana', 'cherry', 'apple']
post_order(tree)
trees/.ipynb_checkpoints/traverse_a_tree_dfs-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pickle
from sklearn.model_selection import train_test_split
from sklearn import ensemble
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression, ElasticNet, Lasso, Ridge
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import KFold
# -

# Load the feature matrix X and target vector y.
# Use a context manager so the file handle is closed (the original
# `pickle.load(open(...))` leaked it).
# NOTE(review): unpacking `.values()` relies on the pickled dict's insertion
# order being exactly (X, y) -- confirm how X_y.pkl was written.
with open('X_y.pkl', 'rb') as f:
    X, y = pickle.load(f).values()

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)

X.shape

# 20-fold cross-validation of a gradient-boosting regressor, collecting the
# mean absolute error of each fold.
kf = KFold(n_splits=20)
maes = []
gb = GradientBoostingRegressor(n_estimators=550)
# The fold index from the original enumerate() was never used, so it is dropped.
for train, test in kf.split(X, y):
    gb.fit(X[train], y[train])
    maes.append(mean_absolute_error(y[test], gb.predict(X[test])))

import numpy as np
m = np.array(maes)
# Notebook-style final expression: 100 minus the mean MAE across folds.
100-m.mean()
.ipynb_checkpoints/feature_regress-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Check the local TF installation / GPU availability.
import tensorflow as tf
tf.config.experimental.list_physical_devices()

tf.test.is_built_with_cuda()

# # Importing Libraries

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os.path as op
import pickle
import tensorflow as tf
from tensorflow import keras
from keras.models import Model,Sequential,load_model
from keras.layers import Input, Embedding
from keras.layers import Dense, Bidirectional
from keras.layers.recurrent import LSTM
import keras.metrics as metrics
import itertools
from tensorflow.python.keras.utils.data_utils import Sequence
from decimal import Decimal
from keras import backend as K
from keras.layers import Conv1D,MaxPooling1D,Flatten,Dense

# # Data Fetching

# +
# Concatenate per-node, per-month sensor CSVs into one input matrix A1
# (5 feature columns) and one output matrix U1 (7 target columns).
A1=np.empty((0,5),dtype='float32')
U1=np.empty((0,7),dtype='float32')

node=['150','149','147','144','142','140','136','61']
mon=['Apr','Mar','Aug','Jun','Jul','Sep','May','Oct']
for j in node:
    for i in mon:
        # Each file is read twice: once for the input columns, once for the
        # output columns.
        inp= pd.read_csv('data_gkv/AT510_Node_'+str(j)+'_'+str(i)+'19_OutputFile.csv',usecols=[1,2,3,15,16])
        out= pd.read_csv('data_gkv/AT510_Node_'+str(j)+'_'+str(i)+'19_OutputFile.csv',usecols=[5,6,7,8,17,18,19])
        inp=np.array(inp,dtype='float32')
        out=np.array(out,dtype='float32')
        # np.append re-allocates on every iteration; fine for a one-off load.
        A1=np.append(A1, inp, axis=0)
        U1=np.append(U1, out, axis=0)
print(A1)
print(U1)
# -

# # Min Max Scaler

# +
from sklearn.preprocessing import MinMaxScaler
import warnings

# NOTE(review): the same scaler object is fit twice; the second fit_transform
# on U1 overwrites the parameters learned for A1, so the X scaling can no
# longer be inverted from scaler_obj -- confirm this is acceptable.
scaler_obj=MinMaxScaler()
X1=scaler_obj.fit_transform(A1)
Y1=scaler_obj.fit_transform(U1)
warnings.filterwarnings(action='ignore', category=UserWarning)

# Insert a length-1 "timestep" axis so the Conv1D model sees (batch, 1, features).
X1=X1[:,np.newaxis,:]
Y1=Y1[:,np.newaxis,:]

# +
def rmse(y_true, y_pred):
    """Root-mean-squared-error metric computed with the Keras backend."""
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

def coeff_determination(y_true, y_pred):
    """R^2 (coefficient of determination) metric computed with the Keras backend."""
    SS_res = K.sum(K.square( y_true-y_pred ))
    SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
    # epsilon guards against division by zero for constant targets.
    return ( 1 - SS_res/(SS_tot + K.epsilon()) )
# -

# # Model

# +
# Small 1D-conv model: 5 inputs -> 16 tanh filters -> 7 sigmoid outputs.
inp=keras.Input(shape=(1,5))
l=keras.layers.Conv1D(16,1,padding="same",activation="tanh",kernel_initializer="glorot_uniform")(inp)
output = keras.layers.Conv1D(7,4,padding="same",activation='sigmoid')(l)

model1=keras.Model(inputs=inp,outputs=output)
model1.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5), loss='binary_crossentropy',metrics=['accuracy','mse','mae',rmse])
model1.summary()

# +
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)

history1 = model1.fit(x_train,y_train,batch_size=256,epochs=50, validation_data=(x_test, y_test),verbose = 2, shuffle= False)
# -

# # Saving Model as File

# NOTE(review): file names say "cnn_relu" but this notebook uses tanh/sigmoid
# activations -- confirm the intended artifact names.
model_json = model1.to_json()
with open("Model_File/cnn_relu.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model1.save_weights("Model_File/cnn_relu.h5")
print("Saved model to disk")

# +
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)

from keras.models import model_from_json
json_file = open('Model_File/cnn_relu.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("Model_File/cnn_relu.h5")
print("Loaded model from disk")

# NOTE(review): the reloaded model is compiled with loss='mse' although the
# original was trained with binary_crossentropy, so evaluate() below reports a
# different loss than training did -- confirm this is intended.
loaded_model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5), loss='mse',metrics=['accuracy','mse','mae',rmse])
# -

loaded_model.evaluate(x_train, y_train, verbose=0)

loaded_model.evaluate(x_test, y_test, verbose=0)

# # Error Analysis

# +
# _, train_acc = model1.evaluate(x_train, y_train, verbose=0)
# _, test_acc = model1.evaluate(x_test, y_test, verbose=0)
# print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))

# summarize history for loss
plt.plot(history1.history['loss'])
plt.plot(history1.history['val_loss'])
plt.title('Model Loss',fontweight ='bold',fontsize = 15)
plt.ylabel('Loss',fontweight ='bold',fontsize = 15)
plt.xlabel('Epoch',fontweight ='bold',fontsize = 15)
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# summarize history for accuracy
plt.plot(history1.history['accuracy'])
plt.plot(history1.history['val_accuracy'])
plt.title('Model accuracy',fontweight ='bold',fontsize = 15)
plt.ylabel('Accuracy',fontweight ='bold',fontsize = 15)
plt.xlabel('Epoch',fontweight ='bold',fontsize = 15)
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# +
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)

y_test_pred=loaded_model.predict(x_test)
y_test_pred
# -

y_test

# Drop the length-1 timestep axis before exporting.
y_test=y_test[:,0]
y_test_pred=y_test_pred[:,0]

# Export the first 1001 rows of predictions and ground truth for later plots.
from numpy import savetxt
savetxt('ARRAY_DATA/cnn_y_test_pred.csv', y_test_pred[:1001], delimiter=',')

from numpy import savetxt
savetxt('ARRAY_DATA/cnn_y_test.csv', y_test[:1001], delimiter=',')

# +
#completed
OPC_Sensor/.ipynb_checkpoints/CNN_tanh_binary-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# Python program to find the area of triangle

def heron_area(a, b, c):
    """Return the area of a triangle with side lengths a, b, c.

    Uses Heron's formula: area = sqrt(s*(s-a)*(s-b)*(s-c)) where s is the
    semi-perimeter. For side lengths that violate the triangle inequality the
    radicand is negative and the result is a complex number, matching the
    original script's behaviour.
    """
    # calculate the semi-perimeter
    s = (a + b + c) / 2
    # calculate the area
    return (s*(s-a)*(s-b)*(s-c)) ** 0.5


# Guarding the interactive part lets the file be imported (e.g. by tests)
# without prompting; running it as a script behaves exactly as before.
if __name__ == '__main__':
    # Take inputs from the user
    a = float(input('Enter first side: '))
    b = float(input('Enter second side: '))
    c = float(input('Enter third side: '))

    area = heron_area(a, b, c)
    print('The area of the triangle is %0.2f' %area)
# -
lesson 16.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import csv
from operator import add

# Base path of the dataset (the module-level `global Path` statement in the
# original was a no-op and has been removed).
Path = "file:/home/ethan/pythonwork/ipynotebook/quiz/"


def column_value_counts(row_data, column):
    """Return (value, count) pairs for one column of the dataset RDD,
    sorted by descending count.

    Args:
        row_data: RDD of already-split rows (lists of strings).
        column: index of the column to count (negative indices allowed).
    """
    return (row_data.map(lambda cols: cols[column])
                    .map(lambda value: (value, 1))
                    .reduceByKey(add)
                    .sortBy(lambda pair: -pair[1])
                    .collect())


# input data (NOTE: `sc` is the SparkContext provided by the notebook runtime)
dataset = sc.textFile( Path + "kddcup.data.corrected" )

# split data with ","
rowData = dataset.map(lambda row : row.split(","))

# The five columns to summarise; the original duplicated the whole pipeline
# five times and called .count() on each RDD, which recomputed it -- iterating
# the collected list instead avoids both.
columns = [
    ("protocol_type", 1),
    ("service", 2),
    ("flag", 3),
    ("logged_in", 11),
    ("intrusion_type", -1),
]

with open('task2.txt' , 'a') as file:
    for name, index in columns:
        # Section header, then one "(value, count)" tuple per line, exactly
        # as the original output format.
        file.write(name + ":")
        file.write("\n")
        for pair in column_value_counts(rowData, index):
            file.write(str(pair))
            file.write("\n")

print('done')
# -
Quiz_106598005/Quiz_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <small> # Copyright (c) 2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # </small> # # # # # Deep Learning From Basics to Practice # ## by <NAME>, https://dlbasics.com, http://glassner.com # ------ # ## Chapter 15: Scikit-Learn # ### Notebook 5: Ensembles # + import numpy as np import math import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import RidgeClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.datasets import make_moons from sklearn.model_selection import train_test_split from matplotlib.colors import ListedColormap import seaborn as sns ; sns.set() # + # Make a File_Helper for saving and loading files. 
save_files = True

import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.dirname(current_dir)) # path to parent dir
from DLBasics_Utilities import File_Helper
file_helper = File_Helper(save_files)
# -

def spiral_maker(number_of_arms, points_per_arm):
    """Build a 2-D "pinwheel" dataset of `number_of_arms` curved arms and
    split it into train/test sets.

    Each arm is an arc of `points_per_arm` points labelled with its arm index.
    Returns (training_samples, test_samples, training_labels, test_labels),
    with the sample arrays as numpy arrays and the labels as plain lists.
    """
    inner_radius = 0.4
    outer_radius = 1.0
    arm_radius = (inner_radius + outer_radius)/2
    center_r = ((arm_radius*2)-inner_radius)/2
    samples = []
    labels = []
    for arm_number in range(number_of_arms):
        # All points of this arm share one label.
        labels += [arm_number] * points_per_arm
        # Rotate each arm's center evenly around the origin.
        theta = (math.pi*2*arm_number)/number_of_arms
        center_x = center_r * math.cos(theta + math.pi)
        center_y = center_r * math.sin(theta + math.pi)
        for step in range(points_per_arm):
            # Sweep 1.25*pi radians around the arm's center to draw the arc.
            alpha = 1.25*math.pi*step/points_per_arm
            point_x = center_x + (arm_radius * math.cos(alpha + theta))
            point_y = center_y + (arm_radius * math.sin(alpha + theta))
            samples.append([point_x, point_y])
    training_samples, test_samples, training_labels, test_labels = \
        train_test_split(samples, labels, test_size=0.33, random_state=42)
    return (np.array(training_samples), np.array(test_samples),
            training_labels, test_labels)

training_samples, test_samples, training_labels, test_labels = \
    spiral_maker(5, 200)

# Scatter-plot of the training data, one color per arm.
clrs = ['#C4435E', '#F8C58C', '#E57661', '#86DDB2', '#F8E7A2']
plt.figure(figsize=(5,5))
clr_list = [clrs[i] for i in training_labels]
plt.scatter(training_samples[:,0], training_samples[:,1], s=80, c=clr_list)
plt.xticks([],[])
plt.yticks([],[])
file_helper.save_figure('ensemble-starting-data')
plt.show()

def show_predictions(ensemble, filename):
    """Fit `ensemble` on the module-level training data and draw a 3-panel
    figure (test predictions, decision regions, regions + test overlay),
    saving it via file_helper under `filename`.

    Relies on module-level training_samples/training_labels/test_samples/clrs.
    """
    plt.figure(figsize=(8,3))
    ensemble.fit(training_samples, training_labels)
    predicted_classes = ensemble.predict(test_samples)
    clr_list = [clrs[i] for i in predicted_classes]
    custom_cmap = ListedColormap(clrs, name='custom_colormap')

    # Panel 1: test points colored by predicted class.
    plt.subplot(1, 3, 1)
    plt.scatter(test_samples[:,0], test_samples[:,1], c=clr_list, s=80)
    plt.xticks([],[])
    plt.yticks([],[])

    # Panel 2: dense grid of predictions showing the decision regions.
    plt.subplot(1, 3, 2)
    side_size = 400
    xy_limit = 1.5
    x = np.linspace(-xy_limit, xy_limit, side_size)
    y = np.linspace(-xy_limit, xy_limit, side_size)
    X, Y = np.meshgrid(x, y)
    # Grid points are generated x-major, hence the transpose below.
    xy_samples = [[px, py] for px in x for py in y]
    predictions = ensemble.predict(xy_samples)
    predictions = predictions.reshape((side_size, side_size))
    predictions = predictions.T # because pcolormesh wants transpose
    plt.pcolormesh(X, Y, predictions, cmap=custom_cmap)
    plt.xlim(-1.5, 1.5)
    plt.ylim(-1.5, 1.5)
    plt.xticks([],[])
    plt.yticks([],[])

    # Panel 3: same decision regions with the test points outlined on top.
    # NOTE(review): this recomputes the identical 400x400 prediction grid from
    # panel 2; the arrays could simply be reused.
    plt.subplot(1, 3, 3)
    side_size = 400
    xy_limit = 1.5
    x = np.linspace(-xy_limit, xy_limit, side_size)
    y = np.linspace(-xy_limit, xy_limit, side_size)
    X, Y = np.meshgrid(x, y)
    xy_samples = [[px, py] for px in x for py in y]
    predictions = ensemble.predict(xy_samples)
    predictions = predictions.reshape((side_size, side_size))
    predictions = predictions.T # because pcolormesh wants transpose
    plt.pcolormesh(X, Y, predictions, cmap=custom_cmap)
    plt.scatter(test_samples[:,0], test_samples[:,1], facecolors='none',
                s=80, edgecolors='black', lw=0.5)
    plt.xlim(-1.5, 1.5)
    plt.ylim(-1.5, 1.5)
    plt.xticks([],[])
    plt.yticks([],[])

    plt.tight_layout()
    file_helper.save_figure(filename)
    plt.show()

# algorithm='SAMME' -- presumably because RidgeClassifier has no
# predict_proba, which AdaBoost's default SAMME.R variant requires; confirm
# against the scikit-learn docs.
ridge_ensemble = AdaBoostClassifier(RidgeClassifier(), algorithm='SAMME')
show_predictions(ridge_ensemble, 'ensemble-ridge')

tree_ensemble = AdaBoostClassifier(DecisionTreeClassifier())
show_predictions(tree_ensemble, 'ensemble-tree')
Chapter15-Scikit-Learn/Scikit-Learn-Notebook-5-Ensembles.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Available questions

# ## A note on types
#
# - `javaRegex` and `nodeSpec` are aliases for `string`.
#   - Note that Java regular expressions differ slightly from Python's.
# - `headerConstraint` is a special type for expressing conditions on IPv4 packet headers.

# ## Preparing

# Import pybatfish.

from pybatfish.client.commands import *
from pybatfish.question.question import load_questions, list_questions
from pybatfish.question import bfq

# Load the question templates from Batfish into pybatfish.

load_questions()

# Upload the network snapshot (some logs are printed after the snapshot name).
# The argument to bf_init_snapshot() can reportedly also be a Zip file.

bf_init_snapshot('networks/example')

# ## List of questions

# ### pybatfish.question.bfq.aaaAuthenticationLogin()
#
# Returns lines that do not require AAA authentication.

aaa_necessity_ans = bfq.aaaAuthenticationLogin().answer()
aaa_necessity_ans.frame().head()

# ### pybatfish.question.bfq.bgpPeerConfiguration()
#
# Returns the BGP peer configurations.

bgp_peerconf_ans = bfq.bgpPeerConfiguration().answer()
bgp_peerconf_ans.frame().head()

# ### pybatfish.question.bfq.bgpProcessConfiguration()
#
# Returns the BGP process configurations.

bgp_procconf_ans = bfq.bgpProcessConfiguration().answer()
bgp_procconf_ans.frame().head()

# ### pybatfish.question.bfq.bgpSessionCompatibility()
#
# Returns information about each BGP session.

bgp_sesscomp_ans = bfq.bgpSessionCompatibility().answer()
bgp_sesscomp_ans.frame().head()

# ### pybatfish.question.bfq.bgpSessionStatus()
#
# Returns the same information as bgpSessionCompatibility() plus
# Established_neighbors, which indicates whether each neighbor is established.

bgp_sessstat_ans = bfq.bgpSessionStatus().answer()
bgp_sessstat_ans.frame().head()

# ### pybatfish.question.bfq.definedStructures()
#
# Returns the list of structures defined in the network.

def_struct_ans = bfq.definedStructures().answer()
def_struct_ans.frame().head()

# ### pybatfish.question.bfq.edges()
#
# Returns edges of various kinds.
#
# - Edge kinds (selectable via edgeType; the default appears to be layer3):
#   - bgp
#   - eigrp
#   - isis
#   - layer1
#   - layer2
#   - layer3
#   - ospf
#   - rip

bgp_edges_ans = bfq.edges(edgeType="bgp").answer()
bgp_edges_ans.frame().head()

# ### pybatfish.question.bfq.fileParseStatus()
#
# Returns whether parsing succeeded for each configuration file:
# - pass
# - fail
# - partially parsed

filepstat_ans = bfq.fileParseStatus().answer()
filepstat_ans.frame().head()

# ### pybatfish.question.bfq.filterLineReachability()
#
# Returns ACL entries that are never evaluated.

fltreach_ans = bfq.filterLineReachability().answer()
fltreach_ans.frame().head()

# ### pybatfish.question.bfq.filterTable()
#
# Returns a subset of another question's answer.
# Apparently used with parameters such as `columns` and `filter`;
# left for later since the details are unclear.

filtertable_ans = bfq.filterTable(innerQuestion=bfq.filterLineReachability()).answer()
filtertable_ans.frame().head()

# ### pybatfish.question.bfq.interfaceMtu()

# Returns interfaces whose MTU matches the condition
# <inequality given by comparator> <value given by mtuBytes>.
# (Original note: the answer object suddenly loses its frame attribute here.)

intmtu_ans = bfq.interfaceMtu(mtuBytes=100, comparator='>').answer()
intmtu_ans.frame().head()

# ### pybatfish.question.bfq.interfaceProperties()
#
# Returns each interface's configuration.

intprop_ans = bfq.interfaceProperties().answer()
intprop_ans.frame().head()

# ### pybatfish.question.bfq.ipOwners()
#
# Returns the node, interface, etc. that owns each IP address.

ipowners_ans = bfq.ipOwners().answer()
ipowners_ans.frame().head()

# ### pybatfish.question.bfq.ipsecSessionStatus()
#
# Returns session information for each IPsec VPN.

ipsecsesstat_ans = bfq.ipsecSessionStatus().answer()
ipsecsesstat_ans.frame().head()

# ### pybatfish.question.bfq.multipathConsistency()
#
# Returns paths that behave differently in a multipath environment.
# (Original note: not well understood.)

multipathcons_ans = bfq.multipathConsistency().answer()
multipathcons_ans.frame().head()

# ### pybatfish.question.bfq.namedStructures()
#
# Returns the list of named structures on each node.
# (Original note: are there structures without names?)

namedstruct_ans = bfq.namedStructures().answer()
namedstruct_ans.frame().head()

# ### pybatfish.question.bfq.neighbors()
#
# Expected to show each neighbor's information, but the answer has no
# frame attribute.

neighbors_ans = bfq.neighbors().answer()
neighbors_ans.frame().head()

# ### pybatfish.question.bfq.nodeProperties()
#
# Shows each node's configuration properties.

nodeprop_ans = bfq.nodeProperties().answer()
nodeprop_ans.frame().head()

# ### pybatfish.question.bfq.nodes()
#
# Appears to return node configuration as JSON, but seems to lack a
# frame attribute.

nodes_ans = bfq.nodes().answer()
nodes_ans.frame().head()

# ### pybatfish.question.bfq.ospfProperties()
#
# Returns each node's OSPF configuration.

ospfprop_ans = bfq.ospfProperties().answer()
ospfprop_ans.frame().head()

# ### pybatfish.question.bfq.parseWarning()
#
# Returns the warnings raised while parsing the snapshot.

parsewarn_ans = bfq.parseWarning().answer()
parsewarn_ans.frame().head()

# ### pybatfish.question.bfq.prefixTracer()
#
# Traces prefix propagation through the network. No frame attribute.

preftrace_ans = bfq.prefixTracer().answer()
preftrace_ans.frame().head()

# ### pybatfish.question.bfq.reachability()
#
# Returns flows matching conditions given via headers, pathConstraints,
# actions, etc.

reachability_ans = bfq.reachability().answer()
reachability_ans.frame().head()

# ### pybatfish.question.bfq.reducedReachability()
#
# Returns flows that succeed in one snapshot but fail in another.
# Presumably used across snapshots?

redreach_ans = bfq.reducedReachability().answer()
redreach_ans.frame().head()

# ### pybatfish.question.bfq.referencedStructures()

refstruct_ans = bfq.referencedStructures().answer()
refstruct_ans.frame().head()

# ### pybatfish.question.bfq.routes()
#
# Shows each node's routing information.

routes_ans = bfq.routes().answer()
routes_ans.frame().head()
notebooks/available-questions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Using letters as shapes # + import pandas as pd from plotnine import * from plotnine.data import mtcars # %matplotlib inline # - # You can create shape symbols using $\TeX$. For example # + mixed_shapes = ( r'$\mathrm{A}$', r'$\mathrm{B}$', r'$\mathrm{C}$', r'$\mathrm{D}$', ) (ggplot(mtcars, aes('wt', 'mpg', shape='factor(gear)', colour='factor(gear)')) + geom_point(size=6) + scale_shape_manual(values=mixed_shapes) ) # - # See matplotlib [documentation](https://matplotlib.org/users/mathtext.html) for more.
plotnine_examples/tutorials/miscellaneous-using-letter-as-shapes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#source:https://github.com/dwavesystems/dwave-hybrid which itself is licensed under Apache License 2.0
import dimod
import hybrid
import networkx as nx
import scipy
import dwave_networkx as dnx
import numpy as np

# This function performs the max clique operation but uses the hybrid instead
# of the max_clique function in ocean SDK.
def hybrid_max_clique(input_qubo, probsize):
    """Solve a max-clique QUBO with a dwave-hybrid racing workflow.

    input_qubo -- QUBO dict (as produced by dwave_networkx's
                  maximum_weighted_independent_set_qubo on the complement graph).
    probsize   -- subproblem size handed to the decomposers; callers pass the
                  number of graph nodes.
    Returns the final hybrid.State; the best sample is in .samples.first.
    NOTE(review): the QPU branches require configured D-Wave API access.
    """
    # Construct a problem
    bqm = dimod.BinaryQuadraticModel.from_qubo(input_qubo)
    #bqm = dimod.BinaryQuadraticModel({}, input_qubo, 0, dimod.SPIN)

    # Definition of workflow — hybrid workflows are used instead of the
    # max_clique solver. 5 racing branches are defined: the interruptible
    # tabu sampler, plus the combinations of 2 samplers (QPU auto-embedding,
    # simulated annealing) and 2 decomposers (energy impact, random
    # subproblem).
    iteration = hybrid.RacingBranches(
        hybrid.InterruptableTabuSampler(),
        hybrid.EnergyImpactDecomposer(size=probsize)
        | hybrid.QPUSubproblemAutoEmbeddingSampler()
        | hybrid.SplatComposer(),
        hybrid.EnergyImpactDecomposer(size=probsize)
        | hybrid.SimulatedAnnealingSubproblemSampler()
        | hybrid.SplatComposer(),
        hybrid.RandomSubproblemDecomposer(size=probsize)
        | hybrid.QPUSubproblemAutoEmbeddingSampler()
        | hybrid.SplatComposer(),
        hybrid.RandomSubproblemDecomposer(size=probsize)
        | hybrid.SimulatedAnnealingSubproblemSampler()
        | hybrid.SplatComposer(),
    ) | hybrid.ArgMin()  # keep the lowest-energy branch result
    # Stop after 3 consecutive iterations without improvement.
    workflow = hybrid.LoopUntilNoImprovement(iteration, convergence=3)

    # Solve the problem
    init_state = hybrid.State.from_problem(bqm)
    final_state = workflow.run(init_state).result()

    # Print results
    print("Solution: sample={.samples.first}".format(final_state))
    return final_state

##Sample array - hydrogen atom - source:https://github.com/Qiskit/qiskit-aqua
# Adjacency matrix of the 5-node example graph.
G=np.array([
    [0, 1, 1, 1, 1],
    [1, 0, 1, 1, 0],
    [1, 1, 0, 1, 0],
    [1, 1, 1, 0, 0],
    [1, 0, 0, 0, 0]
])

## NOTE - problem_size must be size of the input matrix
problem_size=len(G)

##runs the code
# Max clique of G == maximum independent set of the complement graph,
# hence the QUBO is built on nx.complement(graph).
graph = nx.Graph(G)
graph_comp = nx.complement(graph)
input_hammer=dnx.algorithms.independent_set.maximum_weighted_independent_set_qubo(graph_comp)
hybrid_max_clique(input_hammer,problem_size)
Dwave/dwave_hybrid_max_clique.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="0zAbYvv-WCve" # # Principal Component Analysis with Cancer Data # + colab={} colab_type="code" id="1waXlt6iWCvh" #Import all the necessary modules import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from scipy.stats import zscore from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler # + [markdown] colab_type="text" id="6FZ90t1vWCvq" # # Q1. Load the Data file ( Breast Cancer CSV) into Python DataFrame and view top 10 rows # + colab={} colab_type="code" id="HdpptBPZSM4P" cancer_df = pd.read_csv('breast-cancer-wisconsin-data.csv') # + colab={} colab_type="code" id="xusO2ruNWCv4" # Id columns is to identify rows hence can be skipped in analysis # All columns have numerical values # Class would be the target variable. Should be removed when PCA is done # - features_df = cancer_df.drop(['ID'], axis = 1) features_df.head() # + [markdown] colab_type="text" id="KYTFwE0iWCv_" # # Q2 Print the datatypes of each column and the shape of the dataset. Perform descriptive analysis # + colab={} colab_type="code" id="abAqUIqjSM4X" features_df.dtypes # - features_df.describe().T # + [markdown] colab_type="text" id="ecghJZJIWCwW" # # Q3 Check for missing value check, incorrect data, duplicate data and perform imputation with mean, median, mode as necessary. # # + colab={} colab_type="code" id="49KjoukMSM4b" # We could see "?" values in column, this should be removed from data set # Check for missing value in any other column # + colab={} colab_type="code" id="02fqzvZuSM4f" # No missing values found. So let us try to remove ? from bare nuclei column # Get count of rows having ? 
# + colab={} colab_type="code" id="jAmWPaXpWCw3"
# 16 values are corrupted. We can either delete them as it forms roughly 2% of data.
# Here we would like to impute it with suitable values
# -

features_df.isnull().sum()

# Mark the '?' sentinel in 'Bare Nuclei' as missing.
filter1 = features_df['Bare Nuclei'] == '?'
features_df[filter1].shape

features_df.loc[filter1, 'Bare Nuclei'] = np.nan
features_df.isnull().sum()

# Impute per column: convert 'Bare Nuclei' to numeric first, then fill each
# column's NaNs with that COLUMN's median.
# BUGFIX: the original used apply(..., axis=1), which filled a missing value
# with the median of the ROW (i.e. across unrelated features, on a still-
# object-dtype column) instead of the column median.
features_df['Bare Nuclei'] = pd.to_numeric(features_df['Bare Nuclei'])
features_df = features_df.apply(lambda col: col.fillna(col.median()), axis=0)

features_df.isnull().sum()

features_df.shape

# Count, then drop, exact duplicate rows (keeping the first occurrence).
features_df.duplicated(keep='first').sum()
features_df.drop_duplicates(keep = 'first', inplace = True)
features_df.shape

# Kept for compatibility with the original notebook; 'Bare Nuclei' is already
# numeric after the imputation above, so this is a harmless no-op cast.
features_df['Bare Nuclei'] = features_df['Bare Nuclei'].astype('float64')

# + [markdown] colab_type="text" id="YAu8NvGaWCxr"
# # Q4. Perform bi variate analysis including correlation, pairplots and state the inferences.
#

# + colab={} colab_type="code" id="nu6J2sWuSM4m"
# Check for correlation of variable

# + colab={} colab_type="code" id="ysZh50iDWCxz"
# Cell size shows high significance with cell shape,marginal adhesion, single epithelial cell size,bare nuclei, normal nucleoli
# and bland chromatin
# Target variable shows high correlation with most of these variables

# + colab={} colab_type="code" id="5Wt84dgVSM4s"
#Let us check for pair plots

# + colab={} colab_type="code" id="OTh11akvWCx8"
# Relationship between variables shows come correlation.
# Distribution of variables shows most of the values are concentrated on lower side, though range remains same for all that is
# Between 1 to 10
# -

corr_matrix = features_df.corr()
corr_matrix

# Observations
# -------------------
# 1. Clump Thickness is moderatley positively correlated with Cell Size (0.578156)
#
# 2. Clump Thickness is moderately positively correlated with Cell Shape (0.588956)
#
# 3. Cell Size is highly positvely correlated with Cell Shape (0.877404)
#
# 5. Cell Size is moderately positively correlated with Marginal Adhesion(0.640096), Single Epithelial Cell Size(0.689982), Bare Nuclei(0.598223), Normal Nucleoli(0.712986), Bland Chromatin(0.657170)
#
#
# 7. Cell Shape is moderately positively correlated with Marginal Adhesion(0.683079), Single Epithelial Cell Size(0.719668), Bare Nuclei(0.715495), Normal Nucleoli(0.735948), Bland Chromatin(0.719446)
#
# 8. Cell Shape is adequately highly correlated with Class(0.818934)
#
# 9. Bare Nuclei is adequately correlated with Class (0.820678)
#
# 10. Normal Nucleoli is moderately highly correlated with Class (0.756618)
#
# 11. Bland Chromatin is moderately highly correlated with Class (0.715540)

sns.pairplot(data = features_df, diag_kind = 'kde')
plt.show()

# + [markdown] colab_type="text" id="9hxe7D9BWCx-"
# # Q5 Remove any unwanted columns or outliers, standardize variables in pre-processing step

# + colab={} colab_type="code" id="ViVkMsQTWCza"
# We could see most of the outliers are now removed.
# + colab={} colab_type="code" id="TqhCGZe4SM43"
# Visual outlier check (box-and-whisker per feature).
features_df.boxplot(figsize=(15, 10))
plt.show()
# -

features_df.shape

# + colab={} colab_type="code" id="aEHksGGtSM48"
# IQR-based outlier removal on the two most skewed columns.
# NOTE(review): only values ABOVE the upper fence are dropped; lower_limit is
# computed but never used — confirm that was intentional.
cols = ['Mitoses', 'Single Epithelial Cell Size']
for col in cols:
    Q1 = features_df[col].quantile(0.25)
    Q3 = features_df[col].quantile(0.75)
    IQR = Q3 - Q1
    lower_limit = Q1 - (1.5 * IQR)
    upper_limit = Q3 + (1.5 * IQR)
    filter2 = features_df[col] > upper_limit
    features_df.drop(features_df[filter2].index, inplace = True)

# + colab={} colab_type="code" id="9i0Qqn28SM4_"
features_df.shape

# + [markdown] colab_type="text" id="lmMHMAtkWCzm"
# # Q6 Create a covariance matrix for identifying Principal components

# + colab={} colab_type="code" id="X3pUdlSdSM5D"
# PCA
# Step 1 - Create covariance matrix
# NOTE(review): features_df still contains the 'Class' target at this point,
# so the covariance/eigen analysis below includes it — confirm intended.
cov_matrix = features_df.cov()

# + colab={} colab_type="code" id="ppL2hWreSM5G"
cov_matrix

# + [markdown] colab_type="text" id="vbZ-YcsdWCzz"
# # Q7 Identify eigen values and eigen vector

# + colab={} colab_type="code" id="sH-AksvLSM5L"
# Step 2- Get eigen values and eigen vector

# + colab={} colab_type="code" id="YaNotXfFSM5O"
eig_vals, eig_vectors = np.linalg.eig(cov_matrix)
# -

eig_vals

eig_vectors

# + [markdown] colab_type="text" id="-2KqQg2KWCz7"
# # Q8 Find variance and cumulative variance by each eigen vector

# + colab={} colab_type="code" id="HVrVYdZpSM5e"
# NOTE(review): this is the scalar variance of the eigenvector matrix's
# entries, not "variance explained" — the per-component figures come next.
eig_vectors.var()

# + colab={} colab_type="code" id="wGszn1TqSM5i"
# Percent of total variance carried by each eigenvalue (descending), then the
# cumulative sum.
total_eigen_vals = sum(eig_vals)
var_explained = [(i/total_eigen_vals * 100) for i in sorted(eig_vals, reverse = True)]
print(var_explained)
print(np.cumsum(var_explained))

# + [markdown] colab_type="text" id="x8YxQPNGWC0e"
# # Q9 Use PCA command from sklearn and find Principal Components. Transform data to components formed

# +
# Separate features from the target before fitting sklearn's PCA.
X = features_df.drop('Class', axis = 1)
y = features_df['Class']

# + colab={} colab_type="code" id="-06rQu3PSM5o"
pca = PCA()
pca.fit(X)
# -

X_pca = pca.transform(X)
X_pca.shape

# + [markdown] colab_type="text" id="8L-IGI0QWC0t"
# # Q10 Find correlation between components and features

# + colab={} colab_type="code" id="eLi4UsMcSM5r"
pca.components_

# + colab={} colab_type="code" id="bXUp9TCDWC1G"
pca.explained_variance_
# -

pca.explained_variance_ratio_

# Component loadings: rows = principal components, columns = original features.
corr_df = pd.DataFrame(data = pca.components_, columns = X.columns)
corr_df.head()

sns.heatmap(corr_df)
plt.show()

# + [markdown] colab_type="text" id="HnHXocYbSM6n"
# # Popularity Based Recommendation System

# + [markdown] colab_type="text" id="r8LHW5isSM6o"
# ### About Dataset

# + [markdown] colab_type="text" id="m8i-LSc4SM6p"
# Anonymous Ratings on jokes.

# + [markdown] colab_type="text" id="y-wmIsyYSM6p"
# 1. Ratings are real values ranging from -10.00 to +10.00 (the value "99" corresponds to "null" = "not rated").
#
# 2. One row per user
#
# 3. The first column gives the number of jokes rated by that user. The next 100 columns give the ratings for jokes 01 - 100.
# + [markdown] colab_type="text" id="LC-Uca2PSM6q"
# # Q11 Read the dataset(jokes.csv)
#

# + colab={} colab_type="code" id="mndzWciDSM6r"
# NOTE(review): the question says jokes.csv, but the file shipped with this
# exercise is an Excel workbook — confirm the data source if this read fails.
jokes_df = pd.read_excel('jokes.xlsx')
# -

jokes_df.head()

# + [markdown] colab_type="text" id="6QEWF8o-SM6t"
# # Q12 Create a new dataframe named `ratings`, with only first 200 rows and all columns from 1(first column is 0) of dataset

# + colab={} colab_type="code" id="xc0b-lBRSM6u"
# First 200 users, columns from index 1 onward. Column 0 holds the per-user
# count of rated jokes, not a rating, so it must be excluded.
# BUGFIX: the original kept all columns (jokes_df.head(200)), contradicting Q12.
ratings = jokes_df.iloc[:200, 1:]

# + [markdown] colab_type="text" id="d-0_FGl0SM6-"
# # Q13 In the dataset, the null ratings are given as 99.00, so replace all 99.00s with 0
# Hint: You can use `ratings.replace(<the given value>, <new value you wanted to change with>)`

# + colab={} colab_type="code" id="veL5hwRLSM6_"
# Replace the 99.00 "not rated" sentinel with 0.
# BUGFIX: the original called ratings_df.replace(...) on an undefined name
# (NameError); the dataframe created in Q12 is `ratings`.
ratings = ratings.replace(99.00, 0)
# -

# # Q14 Normalize the ratings using StandardScaler and save them in ratings_diff variable

# +
# Standardize each joke's ratings to zero mean / unit variance.
scaler = StandardScaler()
ratings_diff = scaler.fit_transform(ratings)
# -

ratings_diff

# + [markdown] colab_type="text" id="T8K_S9ukSM7b"
# # Q15 Find the mean for each column in `ratings_diff` i.e, for each joke
#

# + colab={} colab_type="code" id="usXWscbfSM7c"
# Column-wise mean of the standardized matrix: one value per joke.
all_mean = ratings_diff.mean(axis = 0)
all_mean
# -

# # Q16 Consider all the mean ratings and find the jokes with highest mean value and display the top 10 joke IDs.

# Sort the per-joke means descending and take the 10 best joke IDs.
# BUGFIX: the original argsorted only column 0 of the raw standardized matrix
# (ignoring the per-joke means) and the slice [:-10:-1] yielded 9 ids, not 10.
mean_series = pd.Series(all_mean)
top10_joke_ids = mean_series.sort_values(ascending = False).head(10).index
top10_joke_ids
Breast_Cancer_Dataset_PCA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ref: https://www.kaggle.com/hamishdickson/bidirectional-lstm-in-keras-with-glove-embeddings # # ref: https://lovit.github.io/nlp/representation/2018/09/05/glove/ # + import pandas as pd import time import os, gc import numpy as np from tqdm import tqdm import random from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score from sklearn.model_selection import train_test_split from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import * from keras.utils.np_utils import to_categorical from keras.initializers import Constant import re import matplotlib.pyplot as plt # %matplotlib inline import torch # + def seed_everything(seed=1234): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything(42) # - # %%time #train_df=pd.read_csv("../KB_NLP/morphs/komo_morphs_train.csv") #test_df=pd.read_csv("../KB_NLP/morphs/komo_morphs_test.csv") train_df=pd.read_csv("../KB_NLP/raw_data/train.csv") test_df=pd.read_csv("../KB_NLP/raw_data/public_test.csv") pd.set_option('display.max_colwidth',-1) train_df.head() # + from soynlp.hangle import decompose doublespace_pattern = re.compile('\s+') def jamo_sentence(sent): def transform(char): if char == ' ': return char cjj = decompose(char) try: len(cjj) except: return ' ' if len(cjj) == 1: return cjj cjj_ = ''.join(c if c != ' ' else '' for c in cjj) return cjj_ sent_ = ''.join(transform(char) for char in sent) sent_ = doublespace_pattern.sub(' ', sent_) return sent_ # 'ㅇㅓ-ㅇㅣ-ㄱㅗ- ㅋㅔㄱㅋㅔㄱ ㅇㅏ-ㅇㅣ-ㄱㅗ-ㅇㅗ-' 
# Smoke-test the transform on the first training sentence.
jamo_sentence(train_df.loc[0, 'text'])
# -

# %%time
# Jamo-decompose every sentence in both splits.
test_df['jamo'] = test_df['text'].apply(lambda x: jamo_sentence(x))
train_df['jamo'] = train_df['text'].apply(lambda x: jamo_sentence(x))

# %%time
train_df.to_csv("../KB_NLP/jamo_data/jamo_train.csv", index=False)
test_df.to_csv("../KB_NLP/jamo_data/jamo_test.csv", index=False)

# Dump the combined jamo corpus (one sentence per line) to a text file.
# NOTE(review): despite its name, raw_corpus_fname holds the corpus TEXT, not
# a filename.
raw_corpus_fname = '\n'.join(pd.concat([train_df['jamo'], test_df['jamo']]))

file=open('../KB_NLP/jamo_text.txt','w')
file.write(raw_corpus_fname)
file.close()

jamo_jumo = pd.concat([train_df['jamo'], test_df['jamo']])

input_sentences = list(jamo_jumo)

# +
from soynlp.utils import DoublespaceLineCorpus
from soynlp.vectorizer import sent_to_word_contexts_matrix

#corpus_path = '2016-10-20_article_all_normed_ltokenize.txt'
#corpus = DoublespaceLineCorpus(corpus_path, iter_sent=True)

# Build the word-context co-occurrence matrix GloVe trains on.
x, idx2vocab = sent_to_word_contexts_matrix(
    input_sentences,
    windows=1,
    min_tf=10,
    tokenizer=lambda x:x.split(), # (default) lambda x:x.split(),
    dynamic_weight=True,
    verbose=True)

print(x.shape) # (36002, 36002)
# -

from glove import Corpus, Glove

# Quick 10-epoch fit to sanity-check the pipeline.
glove = Glove(no_components=200, learning_rate=0.01, random_state=42)
glove.fit(x.tocoo(), epochs=10, no_threads=4, verbose=True)
dictionary = {vocab:idx for idx, vocab in enumerate(idx2vocab)}
glove.add_dictionary(dictionary)

glove.word_vectors.shape

glove.word_vectors[:1]

glove.word_vectors[0]

len(glove.word_vectors)

# Write "word v1 v2 ... v200" lines, GloVe text format.
# NOTE(review): this writes plain text, not pickle, despite the .pkl name.
with open("glove_test.pkl",'w') as f:
    for word in glove.dictionary:
        f.write(word)
        f.write(" ")
        for i in range(0, 200):
            f.write(str(glove.word_vectors[glove.dictionary[word]][i]))
            f.write(" ")
        f.write("\n")

gd = glove.dictionary

gd

# %%time
# Full 100-epoch fit used for the exported embedding below.
glove = Glove(no_components=200, learning_rate=0.01, random_state=42)
glove.fit(x.tocoo(), epochs=100, no_threads=4, verbose=True)
dictionary = {vocab:idx for idx, vocab in enumerate(idx2vocab)}
glove.add_dictionary(dictionary)

glove.word_vectors.shape

glove.word_vectors[:1]

# Export as "<word><vector repr>" strings, then strip brackets/newlines so
# each line is loadable as a standard GloVe text embedding.
glove_embedding = pd.DataFrame()
glove_embedding['key'] = list(glove.dictionary.keys())
glove_embedding['vectors'] = list(glove.word_vectors)
glove_embedding['vectors'] = glove_embedding['vectors'].map(str)
glove_embedding['vectors'] = glove_embedding['key'] + glove_embedding['vectors']

for i in tqdm(range(len(glove_embedding))):
    glove_embedding['vectors'][i] = glove_embedding['vectors'][i].replace('[','').replace(']','').replace('\n','')

glove_embedding.head()

np.savetxt('../KB_NLP/glove.200D.100E.txt',glove_embedding['vectors'],fmt='%s')

# ---
# ## 1000 epochs, max loss

# %%time
glove = Glove(no_components=200, learning_rate=0.01, random_state=42,max_loss=0.0002)
glove.fit(x.tocoo(), epochs=1000, no_threads=4, verbose=True)

# ---
# # make every 10 th embeddings with 200dim

# %%time
# Train and export embeddings at epoch counts 10, 20, ..., 200.
for k in range(10,210,10):
    glove = Glove(no_components=200, learning_rate=0.01, random_state=42,)
    glove.fit(x.tocoo(), epochs=k, no_threads=4, verbose=False)
    dictionary = {vocab:idx for idx, vocab in enumerate(idx2vocab)}
    glove.add_dictionary(dictionary)

    glove_embedding = pd.DataFrame()
    glove_embedding['key'] = list(glove.dictionary.keys())
    glove_embedding['vectors'] = list(glove.word_vectors)
    glove_embedding['vectors'] = glove_embedding['vectors'].map(str)
    glove_embedding['vectors'] = glove_embedding['key'] + glove_embedding['vectors']

    for i in tqdm(range(len(glove_embedding))):
        glove_embedding['vectors'][i] = glove_embedding['vectors'][i].replace('[','').replace(']','').replace('\n','')

    np.savetxt('../KB_NLP/glove_embedding/glove.200D.{}E.txt'.format(k),glove_embedding['vectors'],fmt='%s')
    print("{}th embedding DONE".format(k))

# ---
# ## 1000 epochs

# %%time
glove = Glove(no_components=200, learning_rate=0.01, random_state=42,)
glove.fit(x.tocoo(), epochs=1000, no_threads=4, verbose=True)

# +
dictionary = {vocab:idx for idx, vocab in enumerate(idx2vocab)}
glove.add_dictionary(dictionary)
print("shape: {}".format(glove.word_vectors.shape))

glove_embedding = pd.DataFrame()
glove_embedding['key'] = list(glove.dictionary.keys())
glove_embedding['vectors'] = list(glove.word_vectors)
glove_embedding['vectors'] = glove_embedding['vectors'].map(str)
glove_embedding['vectors'] = glove_embedding['key'] + glove_embedding['vectors']
# -

for i in tqdm(range(len(glove_embedding))):
    glove_embedding['vectors'][i] = glove_embedding['vectors'][i].replace('[','').replace(']','').replace('\n','')

glove_embedding.head()

np.savetxt('../KB_NLP/glove.200D.1000E.txt',glove_embedding['vectors'],fmt='%s')
jamo_glove.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import warnings warnings.filterwarnings('ignore') # %matplotlib inline from pycocotools.coco import COCO import numpy as np import skimage.io as io import matplotlib.pyplot as plt import pylab pylab.rcParams['figure.figsize'] = (8.0, 10.0) # - dataDir='../data' dataType='train_v2' annFile='{}/annotations/instances_{}.json'.format(dataDir,dataType) coco=COCO(annFile) print('categories :',len(coco.dataset['categories'])) print('images :',len(coco.dataset['images'])) print('annotations:',len(coco.dataset['annotations'])) cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(' '.join(nms))) catIds = coco.getCatIds(catNms=['ship']); imgIds = coco.getImgIds(catIds=catIds ); # + img = coco.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0] print('file_name:',img['file_name']) # load and display image I = io.imread('%s/%s/%s'%(dataDir,dataType,img['file_name'])) plt.axis('off') plt.imshow(I) plt.show() # - plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None) anns = coco.loadAnns(annIds) print("%d ships."%len(anns)) coco.showAnns(anns)
coco/coco_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # ![](http://i67.tinypic.com/2jcbwcw.png) # # ## NLTK code examples # Python code examples to mirror lecture material # # **Author List**: <NAME> # # **Original Sources**: http://nltk.org # # **License**: Feel free to do whatever you want to with this code # # Let's begin by importing NLTK and a couple sets of data. We'll import corpora previously downloaded through nltk, and start exploring the Project Gutenberg corpus - an archive of over 50,000 ebooks. # # *for more info on accessing corpora: http://www.nltk.org/book/ch02.html* import nltk from nltk.corpus import gutenberg, shakespeare # # ## Project Gutenberg # # Different corpora have different information available. Let's explore some of the functions that we highlighted using Jane Austen's *Emma*. # # sents() tokenizes a text file into multiple lists of words - each list contains the words in a single sentence. # + sentences = gutenberg.sents('austen-emma.txt') print("Sentence: " + str(sentences[1500]) + "\n") print("Number of sentences: " + str(len(sentences))) # - # Likewise, words() tokenizes a text file into mutiple lists containing single words. words = gutenberg.words('austen-emma.txt') print("Word: " + str(words[50000]) + "\n") print("Number of words: " + str(len(words))) # ## Breakout # # 1. Download and import the gutenberg corpus # 2. Calculate the number of sentences and words in each corpus # 3. Find the corpus with the most words # 4. Find the corpus with the most sentences # # + # import nltk # from nltk.corpus import gutenberg all_files = gutenberg.fileids() for file in all_files: print(file) # print(all_files) # - # # # # ## Shakespeare # # Here we'll being playing around with the Shakespeare corpus - a corpus containing a set of Shakespeare's plays. 
shakespeare.fileids() # We've printed a list of file names for each play in the Shakespeare corpus. Let's compare the lengths of each of these plays. for play in shakespeare.fileids(): words = shakespeare.words(play) print(play + ": " + str(len(words))) play = shakespeare.xml('r_and_j.xml') print (play[0].tag + ": " + play[0].text) # NLTK has already categorized words in the Shakespeare corpus with certain tags. One of these tags is 'PERSONAE/PERSONA', which marks words that are related to characters in Shakespeare's plays. Let's use this to list the characters in the play, and count how many roles are available. # + people = [] for person in play.findall("PERSONAE/PERSONA"): people.append(person.text) for person in people: print(person) print("\n") print("Cast size: " + str(len(people))) # - # # ## Twitter # # Now let's explore another interesting corpus called twitter_samples: a sample of a couple thousand tweets from twitter's global feed. # twitter_samples is partitioned into positive_tweets and negative_tweets. We'll use this fact to make some basic comparisons between positive and negative tweets. 
# # (include link to twitter_samples documentation) # + from nltk.corpus import twitter_samples happy = twitter_samples.tokenized('positive_tweets.json') sad = twitter_samples.tokenized('negative_tweets.json') print("Happy tweets: " + str(len(happy))) print("Sad tweets: " + str(len(sad))) # - # Let's compare the frequency of some words that might be used in a tweet # + sad_omg = [] for tweet in sad: if "omg" in tweet: sad_omg.append(tweet) print("sad omg count: " + str(len(sad_omg))) happy_omg = [] for tweet in happy: if "omg" in tweet: happy_omg.append(tweet) print("Happy Tweets: " + str(happy_omg)) count = 1 for tweet in happy_omg: tw = " ".join(tweet) print("Tweet " + str(count) + ": " + str(tw) + "\n") count += 1 print("happy omg count: " + str(len(happy_omg))) # - # Based on the dataset provided by the twitter_samples corpus, sad tweets seem to contain the phrase "omg" more frequently than happy tweets. # # This of course is a very basic example of analyzing twitter data - NLTK provides a very powerful set of tools that can be used for many other applications. # # ## Classifying Tweets # # Now that we've seen what's possible with NLTK and the twitter_samples corpus, let's create our own metric for classifying positive and negative tweets (we'll keep it relatively simple) # # We've covered two of the files in the twitter_samples corpus, but lets see all of the filenames to see the other files. print(twitter_samples.fileids()) # It looks like the third file is called 'tweets.20150430-223406.json'. This happens to be a collection of tweets from 4/30/2015 that have yet to be classified. This means that for all intents and purposes, these tweets are pretty much a random sampling of the twitterverse. # # Let's go ahead and unpack the tweets in this file like we did for the positive/negative tweets. 
#

# Tokenize the unclassified 4/30/2015 sample.
random_tweets = twitter_samples.tokenized('tweets.20150430-223406.json')
print("Random tweets: " + str(len(random_tweets)))

# It looks like our sample contains 20,000 unclassified tweets. In order to classify them, let's create two lists of keywords/phrases that we'll be searching for.

# +
happy_indicators = [] #fill with happy strings
sad_indicators = [] #fill with sad strings

# ex:
# happy = ["amazing", "awesome", "yay", ":)", ":-)", ":o)", ":D", "=)", "=D"]
# sad = ["no", "bad", "terrible", "$#@%", ":(", ":-(", ":o(", "=("]
# -

# For now, we'll stick to these simple indicators. Can you think of any problems that might arise if we used this model in the real world?
#
# *hint: things that are easy for humans to understand, but difficult for computers

# +
happy_tweets = []
sad_tweets = []

# Classify each tokenized tweet by whether it contains ANY indicator token.
# BUGFIX: the original appended the tweet once PER matching keyword, so a
# tweet containing several indicators was counted multiple times; any()
# appends each tweet at most once per category.
for tweet in random_tweets:
    if any(word in tweet for word in happy_indicators):
        happy_tweets.append(tweet)
    if any(word in tweet for word in sad_indicators):
        sad_tweets.append(tweet)

print("Happy tweets: " + str(len(happy_tweets)))
print("Sad tweets: " + str(len(sad_tweets)))
# -

#
07b-tools-word2vec/55a-NLTK.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/tranlethaison/60_Days_RL_Challenge/blob/master/super_convergence/cifar_10_super_convergence.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="QKMuMH9Rn-gg"
# # Summary
# Train a model within 18 epochs, to at least 0.94 validation accuracy, on Cifar10 dataset.

# + [markdown] id="bsj4CN7pPeQZ"
# # References
# - https://www.fast.ai/2018/07/02/adam-weight-decay/
# - https://keras.io/guides/transfer_learning/
# - https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/
# - https://blog.tensorflow.org/2020/05/bigtransfer-bit-state-of-art-transfer-learning-computer-vision.html

# + [markdown] id="pLNLLwv3nwAN"
# # To-do
# - Try https://github.com/szagoruyko/wide-residual-networks

# + [markdown] id="k3Xk_NdNQi2L"
# # Utils

# + id="6hpKwQjXdsQp" _kg_hide-output=false
# %%writefile requirements.txt
plotly
snoop
# tensorflow
tensorboard_plugin_profile
tensorflow_addons
tensorflow_hub

# + id="P0K1NcPwQD4W"
# !pip install -qUr requirements.txt

# + id="BlDHaWgqQWwV"
import os
import subprocess
import json
import pickle
import datetime
import enum

import numpy as np
# import numba
# from numba import njit, prange
import pandas as pd
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow.keras as tk
import tensorflow.keras.backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.mixed_precision import experimental as mixed_precision
import tensorflow_hub as hub
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
from PIL import Image
from tqdm import tqdm
from IPython.display import display, HTML
import snoop
# snoop.install() exposes pp() (pretty-print/debug) globally; used throughout.
snoop.install()

# %load_ext tensorboard
# %load_ext snoop

pp(tf.__version__)
pp(tfa.__version__)
pp(hub.__version__)

# + id="NlV476Qo03xF"
# Use these if run into Conv2D error
# gpus = tf.config.list_physical_devices("GPU")
# for gpu in gpus:
#     tf.config.experimental.set_memory_growth(gpu, True)
# gpus

# + [markdown] id="CSWOAjLhQ6er"
# # Config

# + id="o3iYu_P8Sqb1"
ds_name = "cifar-10"

# Behaviors
do_make_dataset = True #@param {type:"boolean"}
do_use_mixed_precision = False #@param {type:"boolean"}
do_augmentation = True #@param {type:"boolean"}
do_find_lr = False #@param {type:"boolean"}
do_train = True #@param {type:"boolean"}
do_load_model = False #@param {type:"boolean"}

on_colab = True #@param {type:"boolean"}
on_kaggle = False #@param {type:"boolean"}
# The two hosted-environment flags are mutually exclusive.
assert (on_colab and on_kaggle) == False
# << Behaviors

# Directory
home_dir = os.path.expanduser("~")
now_str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

if on_colab:
    work_dp = r"/content/drive/My Drive/AITrainingRecipe/super_convergence"
elif on_kaggle:
    work_dp = r"/kaggle/working/AITrainingRecipe/super_convergence"
else:
    work_dp = None

if not work_dp is None:
    os.makedirs(work_dp, exist_ok=True)
    os.chdir(work_dp)
    # # !pwd && ls -lh && du -h

dataset_dp = os.path.join(home_dir, "datasets", ds_name)
os.makedirs(dataset_dp, exist_ok=True)

# Cache dir for downloaded TF-Hub modules.
tfhub_cache_dir = os.path.join(home_dir, "tfhub_modules")
os.environ["TFHUB_CACHE_DIR"] = tfhub_cache_dir
os.makedirs(tfhub_cache_dir, exist_ok=True)

lr_find_result_dir = os.path.join("lr_find_result")
os.makedirs(lr_find_result_dir, exist_ok=True)
# << Directory

# Dataset info
# input_shape = [32, 32, 3]
# input_shape = [71, 71, 3]
# input_shape = [75, 75, 3]
# input_shape = [96, 96, 3]
input_shape = [128, 128, 3]
# input_shape = [224, 224, 3]
n_classes = 10
# << Dataset info

# Training
# GPU Tensor Cores (XLA) requires batch_size to be a multiple of 8
batch_size = 128 # 128 256 512 1024
n_epochs = 18
# << Training

# + [markdown] id="2JilVdjg28BO"
# # Training Optimization

# + id="2ulr4i6-25Iq"
# Input prefetch.
# !lscpu -e
# Number of CPU threads (nproc --all)
workers = int(subprocess.check_output("nproc --all", shell=True))

# Passed to Keras fit()-style APIs for input-pipeline parallelism.
prefetch_cfg = dict(
    max_queue_size=10,
    workers=workers,
)
pp(prefetch_cfg)
# << Input prefetch.

# Reduce 'Kernel Launch' time.
os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"

# Mixed precision (use of both 16-bit and 32-bit).
# If using "Mixed precision", remember to cast output of last layer to "float32" for numeric stability.
# For GPU: "mixed_float16"; for TPU: "mixed_bfloat16"
if do_use_mixed_precision:
    policy = mixed_precision.Policy("mixed_float16")
    mixed_precision.set_policy(policy)

    pp(policy.compute_dtype)
    pp(policy.variable_dtype)
    pp(policy.loss_scale)
    # !nvidia-smi -L
# << Mixed precision.

def reset_env():
    """Call this before creating new model."""
    tf.keras.backend.clear_session()
    tf.config.optimizer.set_jit(True)  # Enable XLA

# + [markdown] id="b9OStAR5RBEm"
# # Dataset

# + id="919L2_M0Q_Kk"
# %%time
# https://www.cs.toronto.edu/~kriz/cifar.html
def unpickle(file):
    """Load one CIFAR-10 pickle batch; keys are bytes (b"data", b"labels", ...)."""
    with open(file, 'rb') as fo:
        d = pickle.load(fo, encoding='bytes')
    return d

def data_to_img(data):
    """Reshape one flat 3072-value CIFAR row into a 32x32x3 HWC image array."""
    data = np.array_split(data, 3, axis=0)
    data = [np.array_split(channel, 32, axis=0) for channel in data]
    data = np.stack(data, axis=-1)
    return data

def save_images(images, dir, labels, filenames, target_size=None, do_override=False):
    """Write images to dir/<label>/<filename>, optionally resize-padding first.

    Existing files are kept unless do_override is True.
    """
    # Resize image
    image_size = images.shape[1:3]
    if target_size is not None and list(target_size) != image_size:
        target_height, target_width = target_size
        images = tf.image.resize_with_pad(images, target_height, target_width).numpy()
        print("Resized images from {} to {}.".format(image_size, target_size))

    for image, label, filename in tqdm(zip(images, labels, filenames)):
        image_fp = os.path.join(dir, label, filename)
        if not os.path.isfile(image_fp) or do_override:
            pil_im = Image.fromarray(image.astype(np.uint8))
            pil_im.save(image_fp)

def make_dataset(pickle_path, dir):
    """Convert one CIFAR pickle batch into per-class image files under dir.

    Relies on the module-level `meta` (batches.meta) for label names.
    """
    raw = unpickle(pickle_path)

    x = np.array([data_to_img(img_data) for img_data in raw[b"data"]])
    y = np.array(raw[b"labels"])
    pp(x.shape, y.shape)

    labels = np.array(meta[b"label_names"], dtype=str)[y]
    filenames = np.array(raw[b"filenames"], dtype=str)

    save_images(x, dir, labels, filenames, target_size=None, do_override=False)

train_dp = os.path.join(dataset_dp, "cifar-10_32x32/train/")
test_dp = os.path.join(dataset_dp, "cifar-10_32x32/test/")

if do_make_dataset:
    # Download and extract raw data
    raw_data_fp = "cifar-10-python.tar.gz"
    # !cd $dataset_dp && wget -nc https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz -O $raw_data_fp
    # !cd $dataset_dp && tar --skip-old-files -xf $raw_data_fp

    # Find top-level directory(-ies) of an archive.
    raw_data_dp = subprocess.check_output(
        "cd {} && tar -tf {} | sed -e 's@/.*@@' | uniq".format(dataset_dp, raw_data_fp),
        shell=True,
    )
    raw_data_dp = raw_data_dp.decode().replace("\n", "")

    # Raw data dir path
    raw_data_dp = os.path.join(dataset_dp, raw_data_dp)

    # Make dataset
    meta = unpickle(os.path.join(raw_data_dp, "batches.meta"))

    train_pickle_paths = [
        os.path.join(raw_data_dp, "data_batch_1"),
        os.path.join(raw_data_dp, "data_batch_2"),
        os.path.join(raw_data_dp, "data_batch_3"),
        os.path.join(raw_data_dp, "data_batch_4"),
        os.path.join(raw_data_dp, "data_batch_5"),
    ]
    test_pickle_paths = [os.path.join(raw_data_dp, "test_batch")]

    os.makedirs(train_dp, exist_ok=True)
    os.makedirs(test_dp, exist_ok=True)

    # One sub-directory per class name under both splits.
    for cls in meta[b"label_names"]:
        cls = cls.decode("utf-8")
        os.makedirs(os.path.join(train_dp, cls), exist_ok=True)
        os.makedirs(os.path.join(test_dp, cls), exist_ok=True)

    for train_pickle_path in train_pickle_paths:
        pp(train_pickle_path, train_dp)
        make_dataset(train_pickle_path, train_dp)

    for test_pickle_path in test_pickle_paths:
        pp(test_pickle_path, test_dp)
        make_dataset(test_pickle_path, test_dp)

# !cd $dataset_dp && pwd && ls -lh && du -h

# CPU times: user 6min 41s, sys: 26.9 s, total: 7min 8s
# Wall time: 6min 37s

# + [markdown] id="7UfqmP7ORNxl"
# # Model

# + id="aVphHHL1Wzys"
# %%time
# Noted: GPU Tensor Cores (XLA) requires units, filters to be a multiple of 8.

# tensorflow_hub
def cache_tfhub_model(tfhub_cache_dir, hub_model_link, net_name):
    """
    Manually cache tfhub model,
    use this in case automatically caching fail, especially when on Colab.
    """
    tfhub_module_dir = os.path.join(tfhub_cache_dir, net_name)

    # Only download when the SavedModel is not already cached.
    if not os.path.isfile(os.path.join(tfhub_module_dir, "saved_model.pb")):
        os.makedirs(tfhub_module_dir, exist_ok=True)
        subprocess.run(
            f"curl -L {hub_model_link}?tf-hub-format=compressed | tar -zxvC {tfhub_module_dir}",
            shell=True,
            check=True,
        )
    return tfhub_module_dir

# hub_model_link = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_96/feature_vector/4"
# hub_model_link = "https://tfhub.dev/google/imagenet/mobilenet_v2_075_96/feature_vector/4"
hub_model_link = "https://tfhub.dev/google/bit/m-r50x1/1"
# preprocessing_function = lambda x : tf.image.convert_image_dtype(x, tf.float32)
net_name = "bit_m-r50x1"

handle = hub_model_link
# handle = cache_tfhub_model(tfhub_cache_dir, hub_model_link, net_name)
# << tensorflow_hub

# # tf.keras.applications
# # base_model = tk.applications.MobileNetV2
# # preprocessing_function = tk.applications.mobilenet_v2.preprocess_input
# # base_model = tk.applications.EfficientNetB0
# # base_model = tk.applications.EfficientNetB4
# # preprocessing_function = tk.applications.efficientnet.preprocess_input
# # base_model = tk.applications.InceptionV3
# # preprocessing_function = tk.applications.inception_v3.preprocess_input
# base_model = tf.keras.applications.Xception
# preprocessing_function = tk.applications.xception.preprocess_input
# # base_model = tf.keras.applications.InceptionResNetV2
# # preprocessing_function = tk.applications.inception_resnet_v2.preprocess_input
# # base_model = tf.keras.applications.ResNet50V2
# # preprocessing_function = tk.applications.resnet_v2.preprocess_input
# # base_model =
tf.keras.applications.NASNetLarge # # base_model = tf.keras.applications.NASNetMobile # # preprocessing_function = tk.applications.nasnet.preprocess_input # net_name = base_model.__name__ # # << tf.keras.applications def get_model(input_shape, n_classes): # tf.keras.applications # base = base_model( # include_top=False, # weights="imagenet", # pooling="avg", # name=net_name, # ) # tensorflow_hub base = hub.KerasLayer(handle, name=net_name, trainable=True) # base.trainable = True # for layer in base.layers: # if isinstance(layer, tk.layers.BatchNormalization): # layer.trainable = False inputs = tk.layers.Input(input_shape) x = base(inputs) x = tk.layers.Dense(n_classes, kernel_initializer="zeros", name="logits")(x) outputs = tk.layers.Activation("softmax", name="pred", dtype=tf.float32)(x) # Use "float32" for numeric stability. model = tk.models.Model(inputs, outputs, name=f"{ds_name}_{net_name}") print("Layers' computations dtype ", x.dtype) print("Outputs' dtype ", outputs.dtype) return model reset_env() model = get_model(input_shape, n_classes) model_plot_fp = f"{ds_name}_{net_name}.png" tk.utils.plot_model(model, show_shapes=True, to_file=model_plot_fp) model.summary() # + [markdown] id="WlCHBwGPREdk" # # Data Generator # + id="PxdBGeyzWbbi" common_generator_cfg = dict( rescale=1/255., # preprocessing_function=preprocessing_function, dtype=tf.float32, # On CPU, float32 operations are faster. 
) data_aug_cfg = dict( rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, # shear_range=0.2, zoom_range=0.2, fill_mode="nearest", horizontal_flip=True, ) if do_augmentation else {} flow_cfg = dict( target_size=input_shape[:2], color_mode="rgb", class_mode="categorical", batch_size=batch_size, ) train_gen = ImageDataGenerator(**{**common_generator_cfg, **data_aug_cfg}) train_data = train_gen.flow_from_directory(train_dp, shuffle=True, **flow_cfg) test_gen = ImageDataGenerator(**common_generator_cfg) test_data = test_gen.flow_from_directory(test_dp, shuffle=False, **flow_cfg) # + [markdown] id="8D4f1L7lRC4Q" # # HP Scheduler # + id="iR1ABne6RIXf" class DecayType(enum.IntEnum): """Data class, each decay type is assigned a number.""" LINEAR = 0 COSINE = 1 EXPONENTIAL = 2 POLYNOMIAL = 3 class DecayScheduler(): """Given initial and endvalue, this class generates the value depending on decay type and decay steps (by calling). """ def __init__(self, start_val, end_val, decay_steps, decay_type, extra=1.0): self.start_val = start_val self.end_val = end_val self.decay_steps = decay_steps self.decay_type = decay_type self.extra = extra def __call__(self, step): if self.decay_type == DecayType.LINEAR: pct = step / self.decay_steps return self.start_val + pct * (self.end_val - self.start_val) elif self.decay_type == DecayType.COSINE: cos_out = np.cos(np.pi * step / self.decay_steps) + 1 return self.end_val + (self.start_val - self.end_val) / 2 * cos_out elif self.decay_type == DecayType.EXPONENTIAL: ratio = self.end_val / self.start_val return self.start_val * ratio ** (step / self.decay_steps) elif self.decay_type == DecayType.POLYNOMIAL: return self.end_val + (self.start_val - self.end_val) * (1 - step / self.decay_steps) ** self.extra # + id="A58rqOU27XsT" def get_decay_steps(cycle_len, anneal_pct): phase_len = int(cycle_len * (1 - anneal_pct) / 2) anneal_len = cycle_len - phase_len * 2 return phase_len, phase_len, anneal_len def onecyle_learning_rate( init_lr, 
end_lr, train_steps, decay_type=DecayType.LINEAR, anneal_pct=0.075, ): """OneCyle learning rates Args: anneal_pct (float): Percentage to leave for the annealing at the end. The annealing phase goes from the minimum lr to 1/100th of it linearly. """ phase_decay_steps = get_decay_steps(train_steps, anneal_pct) lr_schedulers = [ DecayScheduler(init_lr, end_lr, phase_decay_steps[0], decay_type), DecayScheduler(end_lr, init_lr, phase_decay_steps[1], decay_type), DecayScheduler(init_lr, init_lr / 100., phase_decay_steps[2], decay_type), ] learning_rates = [] for lr_scheduler, decay_steps in zip(lr_schedulers, phase_decay_steps): learning_rates.append(lr_scheduler(np.arange(decay_steps))) learning_rates = np.concatenate(learning_rates, 0) return learning_rates def onecyle_momentum( init_mom, end_mom, train_steps, decay_type=DecayType.LINEAR, anneal_pct=0.075, ): """OneCyle learning rates Args: anneal_pct (float): Percentage to leave for the annealing at the end. The annealing phase use constant maximum momentum. 
""" phase_decay_steps = get_decay_steps(train_steps, anneal_pct) mom_schedulers = [ DecayScheduler(init_mom, end_mom, phase_decay_steps[0], decay_type), DecayScheduler(end_mom, init_mom, phase_decay_steps[1], decay_type), ] moms = [] for mom_scheduler, decay_steps in zip(mom_schedulers, phase_decay_steps): moms.append(mom_scheduler(np.arange(decay_steps))) moms.append(np.array([init_mom] * phase_decay_steps[2])) moms = np.concatenate(moms, 0) return moms # + id="2Ov6UfaQvqSM" class OneCycleScheduler(tk.callbacks.Callback): """Callback that update lr, momentum at begining of mini batch based on OneCycle policy.""" def __init__( self, init_lr, end_lr, train_steps, decay_type=DecayType.LINEAR, anneal_pct=0.075, init_mom=None, end_mom=None, ): super().__init__() self.train_steps = train_steps common_kwargs = dict( train_steps=train_steps, decay_type=decay_type, anneal_pct=anneal_pct, ) self.learning_rates = onecyle_learning_rate( init_lr=init_lr, end_lr=end_lr, **common_kwargs ) if not (init_mom is None or end_mom is None): self.moms = onecyle_momentum( init_mom=init_mom, end_mom=end_mom, **common_kwargs ) else: self.moms = None def on_train_begin(self, logs=None): self.train_step = 0 def on_train_batch_begin(self, batch, logs=None): if self.train_step < self.train_steps: K.set_value(self.model.optimizer.learning_rate, self.learning_rates[self.train_step]) if not self.moms is None: # If using "tf.keras.mixed_precision.experimental", then set HP this way (except for learning_rate) # K.set_value(self.model.optimizer._optimizer.beta_1, self.moms[self.train_step]) K.set_value(self.model.optimizer.beta_1, self.moms[self.train_step]) self.train_step += 1 def plot(self): a_train_steps = np.arange(self.train_steps) traces = [ go.Scatter(x=a_train_steps, y=self.learning_rates, name="learning_rate"), ] if not self.moms is None: traces.append(go.Scatter(x=a_train_steps, y=self.moms, name="momentum")) fig = make_subplots(len(traces), 1) for i, trace in enumerate(traces): 
fig.add_trace(trace, row=i+1, col=1) fig.show() # + [markdown] id="AE3VQBLS8Ysl" # # LR Finder # + id="-WFER7dzoQ91" class LRFinder(tk.callbacks.Callback): """Learning Rate Finder Callback""" def __init__( self, init_lr, end_lr, decay_type=DecayType.EXPONENTIAL, beta=0.98 ): super().__init__() self.init_lr = init_lr self.end_lr = end_lr self.decay_type = decay_type self.beta = beta def on_train_begin(self, logs=None): # pp(self.params) self.train_steps = self.params["epochs"] * self.params["steps"] self.scheduler = DecayScheduler( start_val=self.init_lr, end_val=self.end_lr, decay_steps=self.train_steps, decay_type=self.decay_type, ) self.learning_rates = self.scheduler(np.arange(self.train_steps)) self.history = {} self.train_step = 0 self.avg_loss = 0. self.best_loss = 0. self.best_learning_rate = 0. def on_train_batch_begin(self, batch, logs=None): K.set_value(self.model.optimizer.learning_rate, self.learning_rates[self.train_step]) def on_train_batch_end(self, batch, logs=None): # Compute the smoothed loss self.avg_loss = self.beta * self.avg_loss + (1 - self.beta) * logs["loss"] smoothed_loss = self.avg_loss / (1 - self.beta ** (self.train_step + 1)) # Stop if the loss is exploding if self.train_step > 1 and smoothed_loss > 4 * self.best_loss: self.model.stop_training = True print("Stop training because loss is exploding.") # Record the best loss, learning_rate if self.train_step == 1 or smoothed_loss < self.best_loss: self.best_loss = smoothed_loss self.best_learning_rate = self.learning_rates[self.train_step] if not self.model.stop_training: # History for key, val in logs.items(): self.history.setdefault(key, []).append(val) self.history.setdefault("smoothed_loss", []).append(smoothed_loss) self.train_step += 1 def plot_learning_rate(self): fig = px.line( x=np.arange(self.train_step), y=self.learning_rates[:self.train_step], labels=dict(x="train_step", y="learning_rate"), ) fig.show() def _plot_metric(self, metric, metric_name): fig = px.line( 
x=self.learning_rates[:self.train_step], y=metric, log_x=True, labels=dict(x="learning_rate(log)", y=metric_name), ) fig.show() def plot_accuracy(self): self._plot_metric(self.history["accuracy"], "accuracy") def plot_loss(self): self._plot_metric(self.history["loss"], "loss") def plot_smoothed_loss(self): self._plot_metric(self.history["smoothed_loss"], "smoothed_loss") def plot_metric(learning_rates, metrics, metric_name, optimizer_cfgs): """ Plot the same metric from multiple training run (using different optimizer configuration), against learning rates. """ fig = go.Figure() for metric, optimizer_cfg in zip(metrics, optimizer_cfgs): if len(metric) < len(learning_rates): metric = np.append( metric, [np.nan] * (len(learning_rates) - len(metric)) ) fig.add_trace( go.Scatter(x=learning_rates, y=metric, name=f"{optimizer_cfg}") ) fig.update_xaxes(type="log") fig.update_layout(xaxis_title="learning_rate(log)", yaxis_title=metric_name) fig.show() return fig # + id="pqIGsU9lZV6S" loss = "categorical_crossentropy" optimizer_class = tfa.optimizers.AdamW weight_decays = [1e-2, 1e-3, 1e-4] optimizer_cfgs = [ dict(weight_decay=wd) for wd in weight_decays ] # optimizer_class = tk.optimizers.RMSprop # optimizer_class = tk.optimizers.Adam # optimizer_cfg = dict() # + id="dY_jKl59oQ91" # %%time if do_find_lr: init_lr = 1e-4 end_lr = 1 learning_rates = None accuracies = [] losses = [] smoothed_losses = [] best_losses = [] best_learning_rates = [] for optimizer_cfg in optimizer_cfgs: pp(optimizer_cfg) reset_env() model = get_model(input_shape, n_classes) cb_lrfinder = LRFinder(init_lr, end_lr, decay_type=DecayType.EXPONENTIAL, beta=0.98) optimizer = optimizer_class(**optimizer_cfg) model.compile(loss=loss, optimizer=optimizer, metrics=["accuracy"]) model.fit( train_data, epochs=1, batch_size=batch_size, callbacks=[cb_lrfinder], verbose=1, **prefetch_cfg ) # cb_lrfinder.plot_learning_rate() # cb_lrfinder.plot_accuracy() # cb_lrfinder.plot_loss() # cb_lrfinder.plot_smoothed_loss() 
# pp(cb_lrfinder.best_loss, cb_lrfinder.best_learning_rate) if learning_rates is None: learning_rates = cb_lrfinder.learning_rates accuracies.append(cb_lrfinder.history["accuracy"]) losses.append(cb_lrfinder.history["loss"]) smoothed_losses.append(cb_lrfinder.history["smoothed_loss"]) best_losses.append(cb_lrfinder.best_loss) best_learning_rates.append(cb_lrfinder.best_learning_rate) # + id="b1tLGpqwuETG" lr_find_result_f = os.path.join(lr_find_result_dir, f"{net_name}.pickle") if do_find_lr: lr_find_result = { "accuracies" : accuracies, "losses" : losses, "smoothed_losses" : smoothed_losses, "learning_rates" : learning_rates, "best_losses" : best_losses, "best_learning_rates": best_learning_rates, } with open(lr_find_result_f, "wb") as pickle_fo: pickle.dump(lr_find_result, pickle_fo) else: if os.path.isfile(lr_find_result_f): with open(lr_find_result_f, "rb") as pickle_fo: lr_find_result = pickle.load(pickle_fo) else: lr_find_result = {} for key, val in lr_find_result.items(): if key in ["accuracies", "losses", "smoothed_losses"]: fig = plot_metric( lr_find_result["learning_rates"], metrics=val, metric_name=key, optimizer_cfgs=optimizer_cfgs ) pp(optimizer_cfgs, lr_find_result.get("best_losses"), lr_find_result.get("best_learning_rates")) # + [markdown] id="t3-147gd3FtV" # # HP # + id="9Te0_bkJQFrs" n_batches_per_epoch = len(train_data) n_train_steps = n_epochs * n_batches_per_epoch pp(n_train_steps) optimizer_cfg = dict( # weight_decay=0.0001, # bit_m-r50x fine tune 512 batch_size, 30 epochs weight_decay=0.0001, # bit_m-r50x fine tune 128 batch_size, 18 epochs ) # LR # end_lr = 0.001 # bit_m-r50x fine tune 512 batch_size, 30 epochs end_lr = 0.001 # bit_m-r50x fine tune 128 batch_size, 18 epochs init_lr = end_lr / 10. 
# Momemtum init_mom = 0.95 end_mom = 0.85 cb_onecycle = OneCycleScheduler( init_lr, end_lr, n_train_steps, decay_type=DecayType.LINEAR, anneal_pct=0.075, init_mom=init_mom, end_mom=end_mom, ) cb_onecycle.plot() # + [markdown] id="mORDm9zERLBp" # # Callbacks # + id="VkT6kliMnm0M" monitor = "val_accuracy" mode = "max" # goal = 0.97 min_delta = 1e-3 validation_freq = 3 cb_early_stopping = tf.keras.callbacks.EarlyStopping( monitor=monitor, mode=mode, min_delta=min_delta, patience=9, restore_best_weights=True, ) # Model checkpoint dir model_root_dir = os.path.join("models/", net_name) os.makedirs(model_root_dir, exist_ok=True) checkpoint_fp = os.path.join(model_root_dir, now_str) model_fps = os.listdir(model_root_dir) lastest_model_fp = ( os.path.join(model_root_dir, sorted(model_fps)[-1]) if len(model_fps) > 0 else None ) cb_checkpoint = tf.keras.callbacks.ModelCheckpoint( checkpoint_fp, monitor=monitor, mode=mode, save_weights_only=False, save_best_only=True, save_format="tf", include_optimizer=True, ) # Tensorboard logs dir tb_logs_root_dir = os.path.join("tb_logs/", net_name) if do_train: tb_logs_dp = os.path.join(tb_logs_root_dir, now_str) else: tb_logs_dps = os.listdir(tb_logs_root_dir) tb_logs_dp = ( os.path.join(tb_logs_root_dir, sorted(tb_logs_dps)[-1]) if len(tb_logs_dps) > 0 else None ) cb_tensorboard = tf.keras.callbacks.TensorBoard( log_dir=tb_logs_dp, histogram_freq=1, update_freq='epoch', profile_batch="2,22", ) # class Goal(tf.keras.callbacks.Callback): # def __init__(self, monitor, mode, goal): # super().__init__() # self.monitor = monitor # self.mode = mode # self.goal = goal # def on_epoch_end(self, epoch, logs={}): # if self.mode == "min": # goal_achieved = logs[self.monitor] <= self.goal # elif self.mode == "max": # goal_achieved = logs[self.monitor] >= self.goal # if goal_achieved: # print("Goal {}: {} achieved. 
Stop training.".format(self.monitor, self.goal)) # self.model.stop_training = True # cb_goal = Goal(monitor, mode, goal) # cb_reduce_lr = tf.keras.callbacks.ReduceLROnPlateau( # monitor=monitor, # mode=mode, # min_delta=min_delta, # patience=5, # factor=0.2, # min_lr=1e-6, # ) callbacks = [ cb_checkpoint, cb_early_stopping, cb_tensorboard, # cb_goal, # cb_reduce_lr, cb_onecycle, ] # + [markdown] id="YxMYqm3xR0Vr" # # Train # + id="WTMrnqIinoTK" # %%time reset_env() tf.get_logger().setLevel('ERROR') # For test run # n_epochs = 3 if do_train: model_loaded = False if do_load_model: try: model = tk.models.load_model(lastest_model_fp, compile=True) model_loaded = True print("Model loaded: {}".format(lastest_model_fp)) except Exception as ex: print("ERROR load_model: {}".format(ex)) if not do_load_model or not model_loaded: model = get_model(input_shape, n_classes) model.compile( loss=loss, optimizer=optimizer_class(**optimizer_cfg), metrics=["accuracy"] ) print("Created new model.") history = model.fit( train_data, epochs=n_epochs, validation_data=test_data, validation_freq=validation_freq, callbacks=callbacks, verbose=1, **prefetch_cfg, ) else: model = tk.models.load_model(lastest_model_fp, compile=True) # + [markdown] id="SZHWJ6pE3IWw" # # Evaluation # + id="3aS58GcroUPt" model.evaluate(test_data, verbose=1, **prefetch_cfg) # + id="IPSzNbYFdeQs" # %tensorboard --logdir $tb_logs_dp
super_convergence/cifar_10_super_convergence.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Evaluate different ising influence strategies on the Pokec dataset for a range of different linear field gradients. # # Created on: 28/09/20 # + import networkx as nx import numpy as np import pandas as pd import ast import seaborn as sns import matplotlib.pyplot as plt import tqdm import random import itertools import matplotlib from scipy import stats import math from ising_block_level_influence import N_Block_sbm_class as NBlock from ising_block_level_influence import projection_simplex as proj from ising_block_level_influence import mean_field_IIM from spatial_spin_monte_carlo import spatial_spin_monte_carlo as Spins import Pokec_processing as PokProc from pokec_utils import * # - # ### Read in processed data # # This data has been generated using: # # 1. **make_Pokec_reduced_profiles.ipynb** - which then feeds data into: # # 2. The pre-processing script **'make_bratislava_graph_and_blocks.py'**. 
graph = nx.read_graphml('Data/Bratislava_graph.graphml') beta_c = Spins.crit_beta_sparse(graph) bratislava_profiles = pd.read_csv("Data/bratislava_profiles.csv") coupling_graph = nx.read_graphml('Data/Bratislava_coupling.graphml') block_data = pd.read_csv('Data/block_info.csv',converters={'Block' : ast.literal_eval}) mean_block_ages = list(block_data['average_age']) block_sizes = list(block_data['block_size']) block_names = list(block_data['block_name']) block_data['age_group'] = [ 'ages_' + k.split('_')[-1] for k in list(block_data['block_name'])] block_data.head() def linear_field(x : np.ndarray,gradient :float) : return gradient*x #Scale ages to [-1,1]: rescaled_ages = [ (k-np.mean(mean_block_ages))/(max(mean_block_ages)-min(mean_block_ages)) for k in mean_block_ages ] bratislava_profiles_indices = bratislava_profiles.reset_index() groups = [ bratislava_profiles_indices.loc[bratislava_profiles_indices['block']==block] for block in block_names] groups_node_ids = [list(k['index']) for k in groups] # ## Sweep over $\beta$ values # + def check_group(input_val,group_label) : if input_val == group_label : return 1.0 else : return 0.0 def mean_and_se(values) : return f"{np.mean(values)} +/- {stats.sem(values)} " #Seed the random number generators: seed = 1 random.seed(seed) np.random.seed(seed) Field_Budget = 2500.0 T=10000 T_Burn = 2*300000 Samples = 15 sample_frac = 1.0 # for snapshot control figure='4d' if figure == '3c' : # figure 3c params init_sign=1.0 beta_factor_vals = [(10**k) for k in np.linspace(-1,1.8,12)] grad_vals = [0.0] con_names=['block','unif','full'] save_path='Data/Pokec_control_eval_as_beta.csv' elif figure == '4c' : # Figure 4c params init_sign=-1.0 beta_factor_vals=[8.0] grad_vals = np.arange(-10.0,12.0,2.0) con_names = ['no con','unif','full','block','sv','nc','snapshot'] save_path='Data/Pokec_control_eval_negative.csv' elif figure == '4d' : # figure 4d params init_sign=1.0 beta_factor_vals=[8.0] grad_vals = np.arange(-10.0,12.0,2.0) con_names = 
['no con','unif','full','block','sv','nc','snapshot'] save_path='Data/Pokec_control_eval_positive.csv' eval_data = pd.DataFrame() control_data = pd.DataFrame() control_dict={} for gradient in tqdm.tqdm_notebook(grad_vals) : age_field = [linear_field(a,gradient) for a in rescaled_ages ] age_field_map = {k:j for k,j in zip(list(block_data['age_group']),age_field)} # Block MF setup: coupling_graph = nx.convert_node_labels_to_integers(coupling_graph) block_graph_ising_system = mean_field_IIM.mean_field_ising_system(coupling_graph,age_field) block_graph_ising_system.gamma = 1.0 block_graph_ising_system.tol = 1E-5 block_graph_ising_system.max_mf_fp_iterations = 10000 block_graph_ising_system.mf_fp_init_state = init_sign*np.ones(len(block_sizes)) block_graph_ising_system.mf_fp_noisy = False # Full graph Mf setup background_field = np.asarray([age_field_map[k] for k in list(bratislava_profiles['age_group'])]) relab_graph = nx.relabel.convert_node_labels_to_integers(graph) full_graph_ising_system = mean_field_IIM.mean_field_ising_system(relab_graph,background_field) full_graph_ising_system.mf_fp_init_state = init_sign*np.ones(len(graph)) full_graph_ising_system.mf_fp_noisy = False for beta_factor in tqdm.tqdm_notebook(beta_factor_vals) : beta = beta_c*beta_factor # Block gradient computation: m_block = block_graph_ising_system.mf_magnetization(age_field,beta) mag_gradient = block_graph_ising_system.mf_magnetization_gradient(m_block, beta) mag_grad_map = { i:j for i,j in zip(block_names,mag_gradient)} block_names_list = list(bratislava_profiles['block']) block_mf_gradient = np.asarray([mag_grad_map[k] for k in block_names_list]) # Full gradient computation: if 'full' in con_names : m_full = full_graph_ising_system.mf_magnetization(background_field,beta) mag_grad_full = full_graph_ising_system.mf_sparse_magnetisation_gradient(m_full, beta) H_full = np.sum(mag_grad_full) mag_grad_full_at_field_budget = (Field_Budget/H_full)*mag_grad_full 
control_dict['full']=mag_grad_full_at_field_budget # Compute the controls: H_block = np.sum(block_mf_gradient) uniform_control = (Field_Budget / len(graph)) * np.ones(len(graph)) mag_grad_block_at_field_budget = (Field_Budget/H_block)*block_mf_gradient negative_cancelling_field = [] for field in background_field : if field < 0.0: negative_cancelling_field.append(-1.0*field) else: negative_cancelling_field.append(0.0) negative_cancelling_field = np.asarray(negative_cancelling_field) negative_cancelling_field = (Field_Budget/np.sum(negative_cancelling_field))*np.asarray(negative_cancelling_field) swing_voter_nodes = np.asarray([check_group(k,'ages_22-28') for k in list(bratislava_profiles['age_group']) ]) H_SV = np.sum(swing_voter_nodes) swing_voter_control = (Field_Budget/H_SV)*swing_voter_nodes #Snapshot control initial_state = init_sign*np.ones(len(graph)) block_snapshot = Run_MonteCarlo_Snapshot(relab_graph,groups_node_ids ,0, beta_factor,beta_c ,T_Burn=T_Burn,addition_control=None,sampling_method="Metropolis",full_graph_field=background_field,initial_state=initial_state,frac_to_sample=sample_frac) snapshot_gradient = [ (1.0-k**2) for k in block_snapshot ] snapshot_grad_map = { i:j for i,j in zip(block_names,snapshot_gradient)} snapshot_influence = np.asarray([snapshot_grad_map[k] for k in block_names_list]) #Normalize: H_snapshot = np.sum(snapshot_influence) snapshot_influence = (Field_Budget/H_snapshot)*snapshot_influence control_dict['no con']=np.zeros(len(graph)) control_dict['unif']=uniform_control control_dict['block']=mag_grad_block_at_field_budget control_dict['sv']=swing_voter_control control_dict['nc']=negative_cancelling_field control_dict['snapshot']=snapshot_influence for s in tqdm.tqdm_notebook( range(Samples) ) : magnetisations_dict={} for con_name in con_names : initial_state = init_sign*np.ones(len(graph)) control_w_background=np.asarray([i+j for i,j in zip(control_dict[con_name],background_field)]) 
magnetisations=Run_MonteCarlo_Average(relab_graph, T, beta_factor,beta_c, T_Burn=T_Burn,addition_control=None,sampling_method="Metropolis",full_graph_field=control_w_background,initial_state=initial_state) magnetisations_dict[con_name]=magnetisations means=[] ses=[] #Loop in same order as cont_names # control_list = [no_control,unif_control,full_sus_control_mags,block_sus_control_mags,sv_control_mags,nc_control_mags,snapshot_control_mags] #for k in control_list : for k in list(magnetisations_dict.values()): print(mean_and_se(k)) means.append(np.mean(k)) ses.append(stats.sem(k)) eval_data = eval_data.append(pd.DataFrame({'control':list(magnetisations_dict.keys()),'magnetisation':means,'mag_se':ses,'beta_factor':beta_factor*np.ones(len(means)),'gradient':gradient*np.ones(len(means))})) eval_data.to_csv(save_path) # - eval_data
Figures_3-4_Pokec/Pokec_markup_simulations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + from colour import Color, RGB_TO_COLOR_NAMES from PIL import Image, ImageDraw def to_rgb(c: Color) -> tuple: return tuple([int(_ * 255) for _ in c.rgb]) def square(color: Color) -> None: im = Image.new('RGB', (40, 40), to_rgb(color)) im.show() def scale(colors: list) -> None: im = Image.new('RGB', (400, 40), to_rgb(Color("white"))) draw = ImageDraw.Draw(im) for i, c in enumerate(colors): draw.rectangle((i * 10, 0, 10 + i * 10, 40), fill=to_rgb(c)) im.show() ALL_COLORS = [Color(_) for t in RGB_TO_COLOR_NAMES.items() for _ in t[1]] for i in range(0, len(ALL_COLORS), 40): scale(ALL_COLORS[i:i+40]) # - for s in RGB_TO_COLOR_NAMES.items(): print(s)
notebooks/Colors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import warnings
import matplotlib.pyplot as plt
# %matplotlib inline
warnings.filterwarnings('ignore')

# Raw string so backslashes in the Windows path can never be read as escapes
# (matches the r"..." style already used for the second dataset below).
path = r"Data\Garage Unprocessed.csv"
df = pd.read_csv(path)
df.head()

# First 7 columns are the features; the last column is the SalePrice target.
X = df.iloc[:, :7]
y = df.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2)
print(X_train.head())
print(X_test.head())

# # Outlier Detection
fig, [[ax_1, ax_2], [ax_3, ax_4]] = plt.subplots(nrows=2, ncols=2, figsize=[15, 10])

ax_1.scatter(X_train.LotFrontage, y_train, color='red')
ax_1.set_title('SalePrice vs LotFrontage')
ax_1.set_xlabel('LotFrontage')
ax_1.set_ylabel('SalePrice')

ax_2.scatter(X_train.TotalBsmtSF, y_train, color='blue')
ax_2.set_title('SalePrice vs TotalBsmtSF')
ax_2.set_xlabel('TotalBsmtSF')
ax_2.set_ylabel('SalePrice')

ax_3.scatter(X_train.GrLivArea, y_train, color='green')
ax_3.set_title('SalePrice vs GrLivArea')
ax_3.set_xlabel('GrLivArea')
ax_3.set_ylabel('SalePrice')

ax_4.scatter(X_train.LotArea, y_train, color='black')
ax_4.set_title('SalePrice vs LotArea')
ax_4.set_xlabel('LotArea')
ax_4.set_ylabel('SalePrice')

fig.show()

# Drop the extreme outliers visible in the scatter plots above
# (thresholds chosen by eye from the plots).
train = pd.concat([X_train, y_train], axis=1)
mask1 = train.LotFrontage < 300
mask2 = train.TotalBsmtSF < 5000
mask3 = train.GrLivArea < 4500
mask4 = train.LotArea < 100000
train = train[mask1 & mask2 & mask3 & mask4]

# # Detect Missing Data
X_train, y_train = train.iloc[:, :7], train[['SalePrice']]

# Percentage of missing values per column; columns >50% missing are
# candidates for dropping outright.
missing_columns = X_train.isnull().sum() * 100 / len(X_train)
mask = missing_columns > 50
columns = missing_columns[mask].index.tolist()
print(columns)

# Percentage of rows having fewer than 5 non-null values.
rows_percentage = (1 - (len(X_train.dropna(thresh=5)) / len(X_train))) * 100
print(rows_percentage)

# # Handling Missing Data
# FIX: sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# SimpleImputer (sklearn.impute) is the drop-in replacement with the same
# 'mean' / 'most_frequent' strategy names.
from sklearn.impute import SimpleImputer

dict_new = {'Attchd': 0, 'Detchd': 1, 'BuiltIn': 2, '2Types': 3, 'CarPort': 4, 'Basment': 5}
X_train['GarageType'] = X_train['GarageType'].map(dict_new)
X_test['GarageType'] = X_test['GarageType'].map(dict_new)

mean_imputer = SimpleImputer(strategy='mean')
mode_imputer = SimpleImputer(strategy='most_frequent')

# PoolQC is mostly missing (see the >50% check above), so drop the column.
# (Redundant axis=1 removed: columns= already implies the column axis.)
if 'PoolQC' in X_train:
    X_train.drop(columns='PoolQC', inplace=True)
if 'PoolQC' in X_test:
    X_test.drop(columns='PoolQC', inplace=True)

X_train.head()
X_train[['GarageType']].shape

# Fit imputers on the training split only, then transform both splits,
# so no test-set statistics leak into preprocessing.
mode_imputer.fit(X_train[['GarageType']])
X_train.GarageType = mode_imputer.transform(X_train[['GarageType']])
X_test.GarageType = mode_imputer.transform(X_test[['GarageType']])

mean_imputer.fit(X_train[['LotFrontage']])
X_train.LotFrontage = mean_imputer.transform(X_train[['LotFrontage']])
X_test.LotFrontage = mean_imputer.transform(X_test[['LotFrontage']])

# # Data Transformation
# ## Reduce Skewness
# ### log,sqrt,reciprocal for postive/right skeweed and sqr,cubi,.. for negative/left skeweed
import seaborn as sns

sns.distplot(y_train)
y_train = np.log(y_train)  # log-transform to reduce the right skew of prices
sns.distplot(y_train)

# ## Standardizing and normalizing continuous variables
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler

num_columns = ['LotFrontage', 'TotalBsmtSF', 'GrLivArea', 'LotArea']
normalizer = MinMaxScaler()
normalizer.fit(X_train[num_columns])  # fit on train split only
X_train[num_columns] = normalizer.transform(X_train[num_columns])
X_test[num_columns] = normalizer.transform(X_test[num_columns])

# ## Encoding categorical data
from sklearn.preprocessing import LabelEncoder

label_encoder = LabelEncoder()
X_train.SaleCondition = label_encoder.fit_transform(X_train.SaleCondition)
X_test.SaleCondition = label_encoder.fit_transform(X_test.SaleCondition)
X_train.head()

x_train = pd.get_dummies(X_train.GarageType)
x_test = pd.get_dummies(X_test.GarageType)
print(x_train.head())
print(x_test.head())

# # Assignment
path = r"Data\googleplaystore.csv"
data = pd.read_csv(path)

# ## Data Exploration
data.Rating.plot(kind='hist')
data = data[data.Rating <= 5]  # ratings above 5 are data-entry errors
data.Rating.plot(kind='hist')
plt.hist(data.Rating**3)

# ## Null Value Treatment
total_null = data.isnull().sum()
percent_null = total_null / len(data) * 100
missing_data = pd.concat([total_null, percent_null], keys=['Total', 'Percent'], axis=1)
print(missing_data)

data.dropna(inplace=True)

total_null_1 = data.isnull().sum()
percent_null_1 = total_null_1 / len(data) * 100
missing_data_1 = pd.concat([total_null_1, percent_null_1], keys=['Total', 'Percent'], axis=1)
print(missing_data_1)

fig = sns.catplot(x='Category', y='Rating', data=data, kind='box', height=10).set_xticklabels(rotation=90)
fig.set(title="Rating vs Category [BoxPlot]")

print(data.Installs.value_counts())
# Strip the trailing '+' and thousands separators, e.g. "10,000+" -> 10000.
data.Installs = data['Installs'].apply(lambda x: int(x[:-1].replace(",", "")))
le = LabelEncoder()
le.fit(data.Installs)
data.Installs = le.transform(data.Installs)
sns.regplot(x='Installs', y='Rating', data=data).set(title="Rating vs Installs [RegPlot]")

print(data.Price.value_counts())
# Strip the leading '$' from paid apps; free apps are a plain "0".
data.Price = data.Price.apply(lambda x: float(x[1:]) if len(x) > 1 else float(x))
sns.regplot(x="Price", y="Rating", data=data).set(title = "Rating vs Price [RegPlot]")

data.Genres.unique()
# Keep only the primary genre (the part before ';').
data.Genres = data.Genres.apply(lambda x: x.split(";")[0])
data.Genres.unique()

gr_mean = data[["Genres", "Rating"]].groupby(by="Genres", as_index=False).mean()
gr_mean.describe()
gr_mean = gr_mean.sort_values(by='Rating')
print(gr_mean.iloc[0])   # lowest-rated genre
print(gr_mean.iloc[-1])  # highest-rated genre

data['Last Updated'].head()
data['Last Updated'] = data['Last Updated'].apply(lambda x: pd.to_datetime(x))
data['Last Updated'].head()
max_date = data['Last Updated'].max()
max_date
# Days since the most recent update in the dataset.
data['Last Updated Days'] = (max_date - data['Last Updated']).apply(lambda x: x.days)
data.head()
sns.regplot(x="Last Updated Days", y="Rating", data=data).set(title="Rating vs Last Updated [RegPlot]")
plt.plot([1, 2, 3, 4], lw=10)
ipynb/Data PreProcessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### 处理 txt 文件 file = 'file.txt' f = open(file, 'r') # s1 = f.read(10) # 读取接下来的 N 个字符到字符串 s = f.read() # 把整个文件读进一个字符串 print(s) f = open(file, 'r') s = f.readline() # 读取下一行 s f = open(file, 'r') s = f.readlines() # 读取整个文件到一个字符串列表 s f.close() # 打开文件后要关闭文件 # 从文本文件逐行读取的最佳方式就是根本不要读取该文件,文本也是一个迭代器, # 可以用for循环、列表推导,或者其它迭代上下文中自动逐行读取文件。 # 文本迭代器往往是最佳选择 for line in open(file): print(line) # ### 处理 Python 原生对象 pickle[B1P293] # pickle 模块可以让我们直接在文件中存储几乎任何Python对象的高级工具,同时不需要我们对字符串进行来回转换。它就像是超级通用的数据格式化和解析工具。 D = {'a':1, 'b':2} F = open('datafile.pkl', 'wb') import pickle pickle.dump(D, F) F.close() # 取回刚才存储的数据,只需要再次使用pickle重建。 F = open('datafile.pkl', 'rb') E = pickle.load(F) E # ### 文件上下文管理器 # 它把文件处理代码包装到一个逻辑层里,以确保在退出后一定会自动关闭文件(同时可以满足将其输出缓冲区内容写入磁盘),而不是依靠垃圾回收时的自动关闭。 with open('file.txt', 'r') as f: for line in f: print(line + '***')
Learning python/2. 数据类型/6. 文件.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + nbsphinx="hidden" import open3d as o3d import numpy as np import matplotlib.pyplot as plt import copy import os import sys # only needed for tutorial, monkey patches visualization sys.path.append('..') import open3d_tutorial as o3dtut # change to True if you want to interact with the visualization windows o3dtut.interactive = not "CI" in os.environ # - # # Voxelization # Point clouds and triangle meshes are very flexible, but irregular, geometry types. The voxel grid is another geometry type in 3D that is defined on a regular 3D grid, whereas a voxel can be thought of as the 3D counterpart to the pixel in 2D. Open3D has the geometry type `VoxelGrid` that can be used to work with voxel grids. # ## From triangle mesh # Open3D provides the method `create_from_triangle_mesh` that creates a voxel grid from a triangle mesh. It returns a voxel grid where all voxels that are intersected by a triangle are set to `1`, all others are set to `0`. The argument `voxel_size` defines the resolution of the voxel grid. # + print('input') mesh = o3dtut.get_bunny_mesh() # fit to unit cube mesh.scale(1 / np.max(mesh.get_max_bound() - mesh.get_min_bound()), center=mesh.get_center()) o3d.visualization.draw_geometries([mesh]) print('voxelization') voxel_grid = o3d.geometry.VoxelGrid.create_from_triangle_mesh(mesh, voxel_size=0.05) o3d.visualization.draw_geometries([voxel_grid]) # - # ## From point cloud # The voxel grid can also be created from a point cloud using the method `create_from_point_cloud`. A voxel is occupied if at least one point of the point cloud is within the voxel. The color of the voxel is the average of all the points within the voxel. The argument `voxel_size` defines the resolution of the voxel grid. 
# + print('input') N = 2000 pcd = o3dtut.get_armadillo_mesh().sample_points_poisson_disk(N) # fit to unit cube pcd.scale(1 / np.max(pcd.get_max_bound() - pcd.get_min_bound()), center=pcd.get_center()) pcd.colors = o3d.utility.Vector3dVector(np.random.uniform(0, 1, size=(N, 3))) o3d.visualization.draw_geometries([pcd]) print('voxelization') voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.05) o3d.visualization.draw_geometries([voxel_grid]) # - # ## Inclusion test # The voxel grid can also be used to test if points are within an occupied voxel. The method `check_if_included` takes a `(n,3)` array as input and outputs a `bool` array. queries = np.asarray(pcd.points) output = voxel_grid.check_if_included(o3d.utility.Vector3dVector(queries)) print(output[:10]) # ## Voxel carving # The methods `create_from_point_cloud` and `create_from_triangle_mesh` create occupied voxels only on the surface of the geometry. It is however possible to carve a voxel grid from a number of depth maps or silhouettes. Open3D provides the methods `carve_depth_map` and `carve_silhouette` for voxel carving. # # The code below demonstrates the usage by first rendering depthmaps from a geometry and using those depthmaps to carve a dense voxel grid. The result is a filled voxel grid of the given shape. 
# + def xyz_spherical(xyz): x = xyz[0] y = xyz[1] z = xyz[2] r = np.sqrt(x * x + y * y + z * z) r_x = np.arccos(y / r) r_y = np.arctan2(z, x) return [r, r_x, r_y] def get_rotation_matrix(r_x, r_y): rot_x = np.asarray([[1, 0, 0], [0, np.cos(r_x), -np.sin(r_x)], [0, np.sin(r_x), np.cos(r_x)]]) rot_y = np.asarray([[np.cos(r_y), 0, np.sin(r_y)], [0, 1, 0], [-np.sin(r_y), 0, np.cos(r_y)]]) return rot_y.dot(rot_x) def get_extrinsic(xyz): rvec = xyz_spherical(xyz) r = get_rotation_matrix(rvec[1], rvec[2]) t = np.asarray([0, 0, 2]).transpose() trans = np.eye(4) trans[:3, :3] = r trans[:3, 3] = t return trans def preprocess(model): min_bound = model.get_min_bound() max_bound = model.get_max_bound() center = min_bound + (max_bound - min_bound) / 2.0 scale = np.linalg.norm(max_bound - min_bound) / 2.0 vertices = np.asarray(model.vertices) vertices -= center model.vertices = o3d.utility.Vector3dVector(vertices / scale) return model def voxel_carving(mesh, output_filename, camera_path, cubic_size, voxel_resolution, w=300, h=300, use_depth=True, surface_method='pointcloud'): mesh.compute_vertex_normals() camera_sphere = o3d.io.read_triangle_mesh(camera_path) # setup dense voxel grid voxel_carving = o3d.geometry.VoxelGrid.create_dense( width=cubic_size, height=cubic_size, depth=cubic_size, voxel_size=cubic_size / voxel_resolution, origin=[-cubic_size / 2.0, -cubic_size / 2.0, -cubic_size / 2.0]) # rescale geometry camera_sphere = preprocess(camera_sphere) mesh = preprocess(mesh) # setup visualizer to render depthmaps vis = o3d.visualization.Visualizer() vis.create_window(width=w, height=h, visible=False) vis.add_geometry(mesh) vis.get_render_option().mesh_show_back_face = True ctr = vis.get_view_control() param = ctr.convert_to_pinhole_camera_parameters() # carve voxel grid pcd_agg = o3d.geometry.PointCloud() centers_pts = np.zeros((len(camera_sphere.vertices), 3)) for cid, xyz in enumerate(camera_sphere.vertices): # get new camera pose trans = get_extrinsic(xyz) param.extrinsic 
= trans c = np.linalg.inv(trans).dot(np.asarray([0, 0, 0, 1]).transpose()) centers_pts[cid, :] = c[:3] ctr.convert_from_pinhole_camera_parameters(param) # capture depth image and make a point cloud vis.poll_events() vis.update_renderer() depth = vis.capture_depth_float_buffer(False) pcd_agg += o3d.geometry.PointCloud.create_from_depth_image( o3d.geometry.Image(depth), param.intrinsic, param.extrinsic, depth_scale=1) # depth map carving method if use_depth: voxel_carving.carve_depth_map(o3d.geometry.Image(depth), param) else: voxel_carving.carve_silhouette(o3d.geometry.Image(depth), param) print("Carve view %03d/%03d" % (cid + 1, len(camera_sphere.vertices))) vis.destroy_window() # add voxel grid survace print('Surface voxel grid from %s' % surface_method) if surface_method == 'pointcloud': voxel_surface = o3d.geometry.VoxelGrid.create_from_point_cloud_within_bounds( pcd_agg, voxel_size=cubic_size / voxel_resolution, min_bound=(-cubic_size / 2, -cubic_size / 2, -cubic_size / 2), max_bound=(cubic_size / 2, cubic_size / 2, cubic_size / 2)) elif surface_method == 'mesh': voxel_surface = o3d.geometry.VoxelGrid.create_from_triangle_mesh_within_bounds( mesh, voxel_size=cubic_size / voxel_resolution, min_bound=(-cubic_size / 2, -cubic_size / 2, -cubic_size / 2), max_bound=(cubic_size / 2, cubic_size / 2, cubic_size / 2)) else: raise Exception('invalid surface method') voxel_carving_surface = voxel_surface + voxel_carving return voxel_carving_surface, voxel_carving, voxel_surface # + mesh = o3dtut.get_armadillo_mesh() output_filename = os.path.abspath("../../test_data/voxelized.ply") camera_path = os.path.abspath("../../test_data/sphere.ply") visualization = True cubic_size = 2.0 voxel_resolution = 128.0 voxel_grid, voxel_carving, voxel_surface = voxel_carving( mesh, output_filename, camera_path, cubic_size, voxel_resolution) # + print("surface voxels") print(voxel_surface) o3d.visualization.draw_geometries([voxel_surface]) print("carved voxels") print(voxel_carving) 
o3d.visualization.draw_geometries([voxel_carving]) print("combined voxels (carved + surface)") print(voxel_grid) o3d.visualization.draw_geometries([voxel_grid])
examples/python/Advanced/voxelization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:deep_nlp]
#     language: python
#     name: conda-env-deep_nlp-py
# ---

# # Machine Learning Models Evaluation

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from scipy.stats import spearmanr

from models.utils.evaluation import print_metrics
# -

# Load train and test datasets.

# +
X_train = pd.read_csv("../features/weebit_train_with_features.csv", index_col=0)
X_test = pd.read_csv("../features/weebit_test_with_features.csv", index_col=0)

# get Y
y_train = X_train["Level"]
y_test = X_test["Level"]

# remove Y and Text columns so only the numeric features remain
X_train.drop(columns=['Text', 'Level'], inplace=True)
X_test.drop(columns=['Text', 'Level'], inplace=True)

# whole set; used in cross-validation
X = pd.concat([X_train, X_test]).reset_index(drop=True)
y = pd.concat([y_train, y_test]).reset_index(drop=True)
# -

# For scoring, we will use __Spearman correlation__.

# spearmanr returns (correlation, p-value); keep only the correlation
scoring_function = lambda y_true, y_pred: spearmanr(y_true, y_pred)[0]

# ## 1. Random Forest Regression

from models.random_forest import RandomForest
from models.utils.hyperparemeter_optimization import grid_search_cv_for_ensembles

# Firstly, we need to __find the best hyperparameters.__ We will do this using grid search.

# +
# set the hyperparameter grid
max_depth_values = [5, 10, 15, 20]
n_estimators_values = [10, 50, 100]

# perform hyperparameter search
max_depth, n_estimators = grid_search_cv_for_ensembles(RandomForest(), max_depth_values,
                                                       n_estimators_values, X_train, y_train,
                                                       scoring_function, k=3, verbose=1)

print()
print("Best hyperparameters are: max_depth=" + str(max_depth)
      + " n_estimators=" + str(n_estimators))
# -

# +
# refit on the whole training set using the best hyperparameters
rf = RandomForest(max_depth=max_depth, n_estimators=n_estimators, save_model=True)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
# -

print_metrics(y_test, y_pred)

# ## 2. XGBoost Regression

from models.xgboost import XGBoost

# xgboost is showing a particular meaningless warning, we will ignore it
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

# Firstly, we need to __find the best hyperparameters.__ We will do this using grid search.

# +
# set the hyperparameter grid
max_depth_values = [5, 10, 15, 20, 30]
n_estimators_values = [10, 50, 100, 200]

# perform hyperparameter search
max_depth, n_estimators = grid_search_cv_for_ensembles(XGBoost(), max_depth_values,
                                                       n_estimators_values, X_train, y_train,
                                                       scoring_function, k=3, verbose=1)

print()
print("Best hyperparameters are: max_depth=" + str(max_depth)
      + " n_estimators=" + str(n_estimators))
# -

# +
# BUG FIX: the grid-searched hyperparameters were previously discarded here
# (XGBoost was instantiated with its defaults); pass them on, exactly as in
# the Random Forest section above.
xgboost = XGBoost(max_depth=max_depth, n_estimators=n_estimators, save_model=True)
xgboost.fit(X_train, y_train)
y_pred = xgboost.predict(X_test)
# -

print_metrics(y_test, y_pred)

# ## 3. Support Vector Machine

from models.support_vector_machine import SupportVectorMachine
from models.utils.hyperparemeter_optimization import find_best_C

# Firstly, we need to __find the best hyperparameter C.__

# +
# set the hyperparameter grid
c_values = [1.0, 2.0, 5.0, 10.0, 20.0]

# perform hyperparameter search
best_c = find_best_C(SupportVectorMachine(), c_values, X_train, y_train,
                     scoring_function, k=3, verbose=1)

print()
print("Best C is " + str(best_c))
# -

# +
svm = SupportVectorMachine(C=best_c, save_model=True)
svm.fit(X_train, y_train)
y_pred = svm.predict(X_test)
# -

print_metrics(y_test, y_pred)

# ## 4. Multilayer Perceptron

from models.multilayer_perceptron import MultilayerPerceptron

# +
mlp = MultilayerPerceptron(input_dim=X_train.shape[1], save_model=True, verbose=0)
mlp.fit(X_train, y_train)
y_pred = mlp.predict(X_test)
# -

print_metrics(y_test, y_pred)
ml_models/model_evaluation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Using Microsoft Excel, Pandas, Matplotlib, and Microsoft Word # # This exercise is one of reading Excel data into Pandas, analysing, ploting, and creating output file in Microsoft Word. Add cells as you need them. import datetime import pandas as pd import numpy as np # If you did the week 14 and 15 typealongs you already have matplotlib installed. import matplotlib.pyplot as plt import matplotlib.image as mpimg # Install the docxtpl package in your Python environment. This is the same way you installed Pandas, so for example, you'll need to `pip install docxtpl`. # + from docxtpl import ( DocxTemplate, InlineImage, ) # For this you'll need to `pip install docxtpl` # This comes in with the templating library from docx.shared import Inches # - # # Part 1 - Say Hello With Word # Create a docx template instance from the template Word file template = DocxTemplate("student_classes_schedule_template.docx") # Get an image from the file system image = InlineImage(template, "cu-logo.png", Inches(1.5)) # Update the following dictionary with your information and add your classes. 
document_context = { "image": image, "day": datetime.datetime.now().strftime("%d"), "month": datetime.datetime.now().strftime("%B"), "year": datetime.datetime.now().strftime("%Y"), "first_name": "Elvis", "last_name": "Presley", "courses": [ { "code": "ENGL1220", "description": "Shakespeare: Lessons in Business", "start_date": "1/15/2021", "end_date": "4/29/2021", }, { "code": "BAIM3220", "description": "Introduction to Python Programming", "start_date": "1/15/2021", "end_date": "4/29/2021", }, { "code": "XBUS6500", "description": "Corporate Strategy", "start_date": "1/15/2021", "end_date": "4/29/2021", }, ], } # Run the following and take a look at the new file, 'student_classes_schedule.docx'. # + # render the object in memory template.render(document_context) # save the object to the file system template.save("student_classes_schedule.docx") # - # # Part 2 - Sales Discount Letters # # Who are our top three customers? I'd like to send a letter to our existing customers and offer them a discount as a thanks for their business. # ## Part 2.1 - Read our Inventory data # # Look up the pd.read_excel() function to read the `Inventory.xlsx` file and the sheets -- you determine which ones -- you need into dataframes. # Cacluate a discount (I used a function and `apply()`), such that, # # - Give a 15% discount if the total purchases are greater than $300. # # - Give a 10% discount if the total purchases are greater than $100. # # - Otherwise give a 5% discount # Apply your function creating a column called 'percentage_offer'. Remember to calculate discount based on the invoice total per customer. # ## Part 2.2 - Create the reports # # Using the file 'sales_promotion_template.docx' to create files for the top three customers. The results files should be in the form `Promotional Sales Letter -- <customer_first_name> <customer_last_name>.docx`. 
# # If I were a top 3 customer I would expect a letter called "Promotional Sales Letter -- <NAME>.docx" # *********** DON'T MISS THIS ************** # # Replace this markdown cell to describe your strategy of how you created the invoice totals, calculate discount, select the top three, and loop through that to create the top three files. # # If you create all of the customer files, watch out for giving 5% discounts to folks who have never purchased anything. # This is an example of the structure I used. It will be slightly different for each customer, yes? document_context = { "banner": "Promotional Sales Discount!", "day": datetime.datetime.now().strftime("%d"), "month": datetime.datetime.now().strftime("%B"), "year": datetime.datetime.now().strftime("%Y"), "first_name": "Steve", "last_name": "Taylor", "percentage_award": "15", "products": [ {"code": "xyz123", "description": "this is a description"}, {"code": "xyz789", "description": "this is a description"}, ], } # + # We only need to read the template once, but the render and save have to be called for each customer. # template = DocxTemplate("sales_promotion_template.docx") # template.render(document_context) # template.save( # f"Promotional Sales Letter -- {document_context['first_name']} {document_context['last_name']}.docx" #) # - # # Part 3 - Create a report for financial folks # ## Part 3.1 - Create Plots # # Create two additonal plots and insert them into the financials document called 'Current Financials.docx' (use the `current_financials_template.docx`. # # One plot is given below to show how to save the plot to your file system so it can be read back for Word. 
# + # Pie chart, where the slices will be ordered and plotted counter-clockwise: qoh_total = products.P_QOH.sum() min_total = products.P_MIN.sum() labels = ["Quantity on Hand", "Reorder Minimum"] sizes = [qoh_total, min_total] explode = (0, 0.1) # explode out the minimums fig1, ax = plt.subplots() ax.pie( sizes, explode=explode, labels=labels, autopct="%1.1f%%", shadow=True, startangle=90 ) plt.show() # a simple plot is just plt.savefig, but our subplots returns fig1 for this purpose fig1.savefig("quantities-v-reorder.png", transparent=True) # - # Create a line (or scatter if you prefer) plot that shows the dates on the X axis, and the invoice totals on the Y. Remember to savefig() to a file on your file system. # Create a plot the two series of Customer's total spend (as a percentage of total revenue), and their discount percentage values (from the second part of this notebook above). If you didn't do that part find another series that is interesting to compare. I asked for percentage of total revenue to ensure the Y axis was of the same scale. # ## Part 3.2 - Assemble the report # # This is the same as the first section at the top of this notebook, but instead of one image it has three. # Create a docx template instance from the template Word file template = DocxTemplate("current_financials_template.docx") # read in the first image image1 = InlineImage(template, "quantities-v-reorder.png", Inches(4)) # read in the second image # read in the third image # This is an example of the structure I used. document_context = { "day": datetime.datetime.now().strftime("%d"), "month": datetime.datetime.now().strftime("%B"), "year": datetime.datetime.now().strftime("%Y"), "image1": image1, "image2": image2, "image3": image3, } # + # render the object in memory template.render(document_context) # save the object to the file system template.save("Current Financials.docx")
Week 15/Week15-Extra_Credit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <h2 align='center'>Phase Change Material</h2>

# Import the required libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy import integrate
import seaborn as sns

# Load the dataset
df = pd.read_excel('../data/pcmdata.xlsx')

# Look at the data
print(df.shape)
df.head()


class PcmProperties():
    """
    Thermophysical properties of a single organic phase change material.

    `data` is one row of the PCM spreadsheet in column order:
    [name, molar mass, fusion temperature (deg C), heat of fusion (per gram),
     cp_solid a/b/c, cp_liquid a/b/c/d]
    Ex: new_pcm = PcmProperties(pcm_data)
    """

    def __init__(self, data):
        self.name = data[0]
        self.molar_mass = data[1]
        self.fusion_T = data[2] + 273.15          # deg C -> K
        self.heat_of_fusion = data[3] * data[1]   # per-gram value * molar mass -> molar basis
        # heat-capacity polynomial coefficients, solid: cp(T) = a + b*T + c*T^2
        self.cp_solid_a = data[4]
        self.cp_solid_b = data[5]
        self.cp_solid_c = data[6]
        # liquid: cp(T) = a + b*T + c*T^2 + d*T^3
        self.cp_liquid_a = data[7]
        self.cp_liquid_b = data[8]
        self.cp_liquid_c = data[9]
        self.cp_liquid_d = data[10]


A = PcmProperties(df[df['pcm'] == df['pcm'][0]].values.tolist()[0])
B = PcmProperties(df[df['pcm'] == df['pcm'][1]].values.tolist()[0])


# +
class EutecticMixture():
    """Ideal-solution eutectic mixture of two PCMs A and B."""

    def __init__(self, A, B):
        self.A = A
        self.B = B
        self.R = 8.314  # ideal gas constant in J/mol.K

    def eutectic_properties(self):
        """Locate the eutectic point (TE, xE) as the intersection of the two
        liquidus lines, scanned over the mole fraction of A."""
        self.mole_fraction_A = [i for i in np.arange(0.0005, 0.9995, 0.0001)]
        # liquidus line on the A-rich side (freezing-point depression of A)
        self.temperature_BA = list(map(
            lambda xA: (self.A.fusion_T * self.A.heat_of_fusion) /
                       (self.A.heat_of_fusion - self.R * self.A.fusion_T * math.log(xA)),
            self.mole_fraction_A))
        # liquidus line on the B-rich side
        self.temperature_AB = list(map(
            lambda xA: (self.B.fusion_T * self.B.heat_of_fusion) /
                       (self.B.heat_of_fusion - self.R * self.B.fusion_T * math.log(1 - xA)),
            self.mole_fraction_A))
        for j in range(len(self.temperature_BA) - 1):
            err0 = self.temperature_AB[j] - self.temperature_BA[j]
            err1 = self.temperature_AB[j + 1] - self.temperature_BA[j + 1]
            if err0 * err1 < 0:  # sign change: the curves cross between j and j+1
                break
        self.TE = (self.temperature_AB[j] + self.temperature_BA[j]) / 2
        # BUG FIX: xE was previously j*0.0001, which drops the 0.0005 offset of
        # the scan grid; read the mole fraction from the grid itself instead.
        self.xE = self.mole_fraction_A[j]
        return self.TE, self.xE

    def plot_temp_AB(self):
        """Plot both liquidus lines and mark the eutectic point (xE, TE)."""
        sns.set_theme()
        plt.scatter(self.mole_fraction_A, self.temperature_AB, marker=".")
        plt.scatter(self.mole_fraction_A, self.temperature_BA, marker=".")
        # To plot lines
        font1 = {'family': 'serif', 'color': 'blue', 'size': 18}
        font2 = {'family': 'serif', 'color': 'darkred', 'size': 12}
        min_value = min(self.temperature_AB + self.temperature_BA)
        plt.plot([0, self.xE, self.xE], [self.TE, self.TE, min_value])
        plt.title("Plots of liquidus lines", fontdict=font1)
        plt.annotate(f'(xE={self.xE}, TE={round(self.TE, 2)})',
                     xy=(self.xE, self.TE), xytext=(self.xE + 0.2, self.TE - 20),
                     arrowprops=dict(facecolor='red', shrink=0.05))
        plt.xlabel("Mole fraction of A", fontdict=font2)
        plt.ylabel("Temperature ($T^oC$)", fontdict=font2)
        plt.show()

    def entropy(self):
        """Total melting entropy at (xE, TE), accumulated over a seven-step
        thermodynamic path (heat solids to their fusion points, melt, cool the
        liquids, and mix)."""
        ds1_integrand = lambda T: (self.B.cp_solid_a + self.B.cp_solid_b * T + self.B.cp_solid_c * T**2) / T
        self.ds1 = (1 - self.xE) * integrate.quad(ds1_integrand, self.TE, self.B.fusion_T)[0]
        self.ds2 = (1 - self.xE) * self.B.heat_of_fusion / self.B.fusion_T
        ds3_integrand = lambda T: (self.A.cp_solid_a + self.A.cp_solid_b * T + self.A.cp_solid_c * T**2) / T
        self.ds3 = self.xE * integrate.quad(ds3_integrand, self.TE, self.A.fusion_T)[0]
        self.ds4 = self.xE * self.A.heat_of_fusion / self.A.fusion_T
        ds5_integrand = lambda T: (self.A.cp_liquid_a + self.A.cp_liquid_b * T + self.A.cp_liquid_c * T**2 + self.A.cp_liquid_d * T**3) / T
        self.ds5 = self.xE * integrate.quad(ds5_integrand, self.A.fusion_T, self.B.fusion_T)[0]
        # ideal entropy of mixing
        self.ds6 = -self.R * (self.xE * math.log(self.xE) + (1 - self.xE) * math.log(1 - self.xE))
        ds7_integrand = lambda T: (self.xE * (self.A.cp_liquid_a + self.A.cp_liquid_b * T + self.A.cp_liquid_c * T**2 + self.A.cp_liquid_d * T**3) +
                                   (1 - self.xE) * (self.B.cp_liquid_a + self.B.cp_liquid_b * T + self.B.cp_liquid_c * T**2 + self.B.cp_liquid_d * T**3)) / T
        self.ds7 = integrate.quad(ds7_integrand, self.B.fusion_T, self.TE)[0]
        self.ds_total = self.ds1 + self.ds2 + self.ds3 + self.ds4 + self.ds5 + self.ds6 + self.ds7
        return self.ds_total

    def enthalpy(self):
        """Total melting enthalpy at (xE, TE) over the same seven-step path
        as entropy(); the ideal mixing enthalpy (step 6) is zero."""
        dh1_integrand = lambda T: (self.B.cp_solid_a + self.B.cp_solid_b * T + self.B.cp_solid_c * T**2)
        self.dh1 = (1 - self.xE) * integrate.quad(dh1_integrand, self.TE, self.B.fusion_T)[0]
        self.dh2 = (1 - self.xE) * self.B.heat_of_fusion
        dh3_integrand = lambda T: (self.A.cp_solid_a + self.A.cp_solid_b * T + self.A.cp_solid_c * T**2)
        self.dh3 = self.xE * integrate.quad(dh3_integrand, self.TE, self.A.fusion_T)[0]
        self.dh4 = self.xE * self.A.heat_of_fusion
        dh5_integrand = lambda T: (self.A.cp_liquid_a + self.A.cp_liquid_b * T + self.A.cp_liquid_c * T**2 + self.A.cp_liquid_d * T**3)
        self.dh5 = self.xE * integrate.quad(dh5_integrand, self.A.fusion_T, self.B.fusion_T)[0]
        self.dh6 = 0  # ideal mixing: no enthalpy of mixing
        dh7_integrand = lambda T: (self.xE * (self.A.cp_liquid_a + self.A.cp_liquid_b * T + self.A.cp_liquid_c * T**2 + self.A.cp_liquid_d * T**3) +
                                   (1 - self.xE) * (self.B.cp_liquid_a + self.B.cp_liquid_b * T + self.B.cp_liquid_c * T**2 + self.B.cp_liquid_d * T**3))
        self.dh7 = integrate.quad(dh7_integrand, self.B.fusion_T, self.TE)[0]
        self.dh_total = self.dh1 + self.dh2 + self.dh3 + self.dh4 + self.dh5 + self.dh6 + self.dh7
        return self.dh_total

    def plot_entropy(self):
        """Bar chart of the seven entropy contributions."""
        self.entropy()
        x_entropy = ['$\Delta S1$', '$\Delta S2$', '$\Delta S3$', '$\Delta S4$', '$\Delta S5$', '$\Delta S6$', '$\Delta S7$']
        self.ds = [self.ds1, self.ds2, self.ds3, self.ds4, self.ds5, self.ds6, self.ds7]
        plt.bar(x_entropy, self.ds)
        plt.show()

    def plot_enthalpy(self):
        """Bar chart of the seven enthalpy contributions."""
        self.enthalpy()
        x_enthalpy = ['$\Delta H1$', '$\Delta H2$', '$\Delta H3$', '$\Delta H4$', '$\Delta H5$', '$\Delta H6$', '$\Delta H7$']
        self.dh = [self.dh1, self.dh2, self.dh3, self.dh4, self.dh5, self.dh6, self.dh7]
        plt.bar(x_enthalpy, self.dh)
        plt.show()
# -

mix = EutecticMixture(A, B)
TE, xE = mix.eutectic_properties()
mix.plot_temp_AB()
print(mix.entropy())
print(mix.ds1)
print(mix.enthalpy())
mix.plot_entropy()
mix.plot_enthalpy()

# NOTE(review): the original notebook contained several scratch cells here that
# crash when run top-to-bottom (a plot of undefined xA/temp_AB/temp_BA, appends
# to an undefined df_out, and a dummy DataFrame that overwrote `df` and broke
# the pairwise loop below). They have been removed so the script runs linearly.

# +
# Compute xE, TE and dH for every pair of PCMs in the dataset
data = []
for i in range(len(df) - 1):
    for j in range(i + 1, len(df)):
        A = PcmProperties(df[df['pcm'] == df['pcm'][i]].values.tolist()[0])
        B = PcmProperties(df[df['pcm'] == df['pcm'][j]].values.tolist()[0])
        mix = EutecticMixture(A, B)
        TE, xE = mix.eutectic_properties()
        HE = mix.enthalpy()
        data.append([A.name, B.name, xE, round(TE - 273.15, 1), HE])
df_enthalpy = pd.DataFrame(data, columns=['pcm_A', 'pcm_B', 'xE', 'TE', 'dH'])
len(df_enthalpy)
# -

df_enthalpy.to_csv('../data/eutectic_enthalpy.csv', index=False)
df_enthalpy

# +
# Rank the candidate pairs by how close their eutectic temperature (deg C)
# is to the target melting temperature.
selected_melting_T = -12

def sort_nearby(x):
    # |x - t| and |t - x| are identical, so the original min() was redundant
    return abs(x - selected_melting_T)

# BUG FIX: Series.apply must receive the function object itself;
# apply(sort_nearby()) called the function with no argument and failed.
df_enthalpy['d'] = df_enthalpy['TE'].apply(sort_nearby)
d = df_enthalpy.sort_values(by='d')
d.head(12)
# -

# BUG FIX: apply(round(1)) is a TypeError; Series.round(1) rounds every value.
df_enthalpy['TE'] = df_enthalpy['TE'].round(1)
df_enthalpy
notebooks/pcm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <b>Calcule a integral dada</b> # $12. \int 3t\sqrt{t^2 + 8}dt$ # $u = t^2 + 8$ # $du = 2tdt$ # $\frac{du}{2} = tdt$ # <b>Aplicando as substituições</b> # $3 \cdot \frac{1}{2} \cdot \int \sqrt{u}du$ # <b>Integrando $3 \cdot \frac{1}{2} \cdot \int \sqrt{u}du$</b> # $3 \cdot \frac{1}{2} \cdot \int \sqrt{u}du = \frac{u^{\frac{1}{2}+1}}{\frac{1}{2} + 2}$ # $3 \cdot \frac{1}{2} \cdot \int \sqrt{u}du = \frac{3}{2} \cdot \frac{u^{\frac{3}{2}}}{\frac{3}{2}} + C$ # $3 \cdot \frac{1}{2} \cdot \int \sqrt{u}du = \sqrt{u^3} + C$ # <b>Desfazendo a substituição</b> # $3 \cdot \frac{1}{2} \cdot \int \sqrt{u}du = \sqrt{(t^2 + 8)^3} + C$
Problemas 5.2/12.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Vraidd/2020python/blob/master/2020_Python_Programming_Practical_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="jp3BkDml8xiR" colab_type="text" # # **2020 Python Programming Practical 2** # # If you do not have one already, create a [GitHub](https://github.com) account using your DHS Mail. # # Create a public repository 2020python # # File --> Save a copy in GitHub under your 2020python repository # # Also share this colab file with edit access with <EMAIL> # # # + [markdown] id="-U8w0oQW9P_b" colab_type="text" # **Q1. (Check if number is odd or even)** # # Write a program that reads an integer and checks whether it is odd or even. # # Sample interaction: # ``` # Enter number: 25 # 25 is odd # # Enter number: 8 # 8 is even # # ``` # + id="D0uTR7B-8qbF" colab_type="code" colab={} # + [markdown] id="7Ui5CuS_9Za1" colab_type="text" # **Q2. (Validate triangles and compute perimeter)** # # Write a program that reads three edges for a triangle and determines whether the input is valid. The input is valid if the sum of any two edges is greater than the third edge. The program will compute the perimeter of the triangle if the input is valid. Otherwise, display that the input is invalid. # # Sample interaction: # ``` # Enter side 1: 2 # Enter side 2: 2 # Enter side 3: 1 # Perimeter = 5 # # Enter side 1: 1 # Enter side 2: 2 # Enter side 3: 1 # Invalid triangle! # # ``` # # # # + id="CfkRAAWV9cXS" colab_type="code" colab={} # + [markdown] id="aVhHiT8w9c7d" colab_type="text" # **Q3. 
(Determine grade)** # # Write a program that prompts the user to enter a score between 0 and 100 inclusive and output the corresponding grade as well as a Pass/Fail status. Output an error message if the entered score is not within the valid range. The grading system is as follows (S and U are not passes): # # A: 70 - 100 # B: 60 - 69 # C: 55 - 59 # D: 50 - 54 # E: 45 - 49 # S: 35 - 44 # U: 0 - 34 # # Sample interaction: # ``` # Enter score: 73 # A # Pass # # Enter score: 37 # S # Fail # # Enter score: -1 # Invalid! Score must be within 0 - 100. # # ``` # + id="PwRmweEa9fny" colab_type="code" colab={} # + [markdown] id="GwAIqQNY9gFd" colab_type="text" # **Q4. (Determine leap year)** # # Write a program that prompts the user to enter a year and determines whether it is a leap year. A year is a leap year if it is divisible by 4 but not 100, or is divisible by 400. # # Sample interaction: # ``` # Enter year: 2020 # Leap # # Enter year: 2019 # Non-Leap # ``` # + id="aOozTwoR9j9B" colab_type="code" colab={} # + [markdown] id="lE0ScX9e9kcg" colab_type="text" # **Q5. (Find number of days in a month)** # # Write a program that prompts the user to enter the month and year, and displays the number of days in the month. Sample interaction: # ``` # Enter month: 2 # Enter year: 2020 # February 2020 has 29 days. # # Enter month: 3 # Enter year: 2021 # March 2021 has 31 days. # # ``` # + id="sE_bxwDs9mf6" colab_type="code" colab={} # + [markdown] id="KSviQY0T9m7Q" colab_type="text" # **Q6. (Convert from kilograms to pounds)** # # Write a program that displays the following table (1 kilogram = 2.2 pounds): # ``` # Kilograms Pounds # 1 2.2 # 2 4.4 # 3 6.6 # ... # 9 19.8 # 10 22.0 # ``` # + id="YbH9Pbul9rK-" colab_type="code" colab={} # + [markdown] id="V9Z95nlg97e4" colab_type="text" # **Q7. 
(Convert from miles to kilometres)** # # Write a program that displays the following two tables side-by-side (1 mile = 1.609 kilometres): # ``` # Miles Kilometers Kilometres Miles # 1 1.609 20 12.430 # 2 3.218 25 15.538 # # ... # 9 14.481 60 37.290 # 10 16.090 65 40.398 # ``` # # # + id="1AZvvAW499PK" colab_type="code" colab={} # + [markdown] colab_type="text" id="79toYNfyD9XP" # **Q8. (Find two highest scores)** # # Write a program that prompts the user to enter the number of students and each student's name and score, and finally displays the student with the highest score and the student with the second-highest score. Enter up to 5 student records. # # # + id="Se5J95IjC9bT" colab_type="code" colab={} # + [markdown] id="seLRswd3ED9x" colab_type="text" # **Q9. (Finding the smallest n such that n<sup>2</sup> > 12000)** # # Use a while loop to find the smallest integer n such that n<sup>2</sup> is greater than 12,000. # + id="n_1AS9EREHgQ" colab_type="code" colab={} # + [markdown] id="YHyv5TE6EH24" colab_type="text" # **Q10. (Finding the largest n such that n<sup>3</sup> < 12000)** # # Use a while loop to find the largest integer n such that n<sup>3</sup> is less than 12,000. # + id="B--GNWkcEKkR" colab_type="code" colab={} # + [markdown] id="lr8Er0cXEKzY" colab_type="text" # **Q11. (Computing the greatest common divisor)** # # A solution to find the greatest common divisor of two integers n1 and n2 is as follows: # * First find d to be the minimum of n1 and n2 # * Then check whether d, d-1, … d-2, 2, or 1 is a divisor for both n1 and n2 in this order. # * The first such common divisor is the greatest common divisor for n1 and n2. # # Write a program to implement this solution. # Sample interaction: # ``` # Enter n1: 12 # Enter n2: 18 # GCD: 6 # ``` # + id="T8iKWDpgEOSw" colab_type="code" colab={} # + [markdown] id="-kojjY6zEOkn" colab_type="text" # **Q12. 
(Find factors of a positive integer)** # # Write a program that reads a positive integer > 1 and displays all its factors. # Sample interaction: # ``` # Enter n: 120 # Factors: 2 2 2 3 5 # ``` # + id="s3hoL9l5EPjU" colab_type="code" colab={}
2020_Python_Programming_Practical_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CVPR2021 NAS竞赛Track 1第2名方案 # (队伍名称:ANS - 榜单名次:2 - 队长姓名:陆顺) # ## 1. 代码及结果复现: # ### 1.1 环境安装 # cd ./cvpr21_nas_track1/Track1_Submit bash shell/setup.sh # ### 1.2 提交时所使用的checkpoint (对应榜单最好结果) # 最好模型对应路径:cd ./cvpr21_nas_track1/Track1_Submit/files/supernet.th # ### 1.3 基于supernet生成的最终提交结果(对应榜单最好结果) # cd ./cvpr21_nas_track1/Track1_Submit bash ./shell/test.sh # 关于这一任务的训练log输出,我们记录在./train_eval/eval/test.log # ### 1.4 复现supernet的训练过程(对应榜单最好结果) # cd ./cvpr21_nas_track1/Track1_Submit bash shell/train.sh # 关于这一任务的训练log输出,我们记录在./train_eval/train/train.log # ## 2. 方法说明与技术讨论 # 我们的技术方案以单路径采样为主,并使用多种不同的技巧在此基础上进一步提升。以下将进行详细讨论,所有的训练技巧我们通过train_supernet.py一个文件全部实现。 # ### 2.1 数据预处理 # 在实验中我们发现,使用和官方提供的代码中相同的数据预处理并不好,根据官方代码得到的数据预处理如下: train_transform = transforms.Compose([ transforms.RandomCrop(32, 4), transforms.RandomApply([transforms.ColorJitter(brightness=0.1, contrast=0.1)]), transforms.RandomHorizontalFlip(), transforms.RandomRotation(15), transforms.ToTensor(), transforms.Normalize(mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958]) ]) # 我们发现,去掉ColorJitter并添加Cutout的效果更好,代码如下 train_transform = transforms.Compose([ transforms.RandomCrop(32, 4), transforms.RandomHorizontalFlip(), transforms.RandomRotation(15), transforms.ToTensor(), Cutout(16), transforms.Normalize(mean=[0.5071, 0.4865, 0.4409], std=[0.1942, 0.1918, 0.1958]) ]) # ### 2.2 模型调优 # #### 2.2.1 超网模型一致性构建 # 官方提供的单模型构建repo中,在相邻两层且stride=1时,若通道不同则包含1x1卷积,若通道相同,则不包含1x1卷积; # # # 实验中,我们发现,无论相邻两层通道是否相同,均采用1x1卷积,可以提升最终的一致性 # #### 2.2.2 BN参数设置 # 官方提供的单模型构建repo中,BN使用默认配置,其中track_running_stats=True。 # 实验中,我们发现,当设置BN参数为False时,最终的排序一致性更好。 # 因为每次采样一条路径,统计方差会很大,所以设置track_running_stats=False更合理。 # #### 2.2.3 少量模型进一步调优 # 
超网训练结束后,部分模型存在训练不充分的情况,我们从50000个模型中均匀采样了200个模型,对这一局部进行继续训练(具体采样的结构详见./Track1_Submit/files/Track1_200_archs.json),我们发现继续训练30个epoch可使得超网的预测一致性进一步提升。 # 实验中,我们测试了5/10/15/20/25/30/40/50/60/70/80epoch,分别使用不同的checkpoint对50000个模型预测后,提交发现使用30epoch最好。 # 发现这一方法有效后,我们继续使用随机采样的方式,但由于时间紧急我们只测试了采样200个/100个模型,继续微调,发现采样100个模型继续微调性能还会提升,这100个模型对应的模型文件为./Track1_Submit/files/Track1_100_archs.json。 # #### 2.2.4 蒸馏方法的应用 # 根据BigNAS的启发,我们使用超网中的最大模型的输出作为软标签,来蒸馏采样模型。同时训练采样模型和最大模型。这里的采样模型和2.2.3中的采样模型相同,还是只对200个模型进行采样。代码实现如下,实现过程中,由于时间紧急,只微调了两次。第一次微调,使用初始学习率1e-3微调30epoch;第二次微调,使用初始学习率4e-3微调30epoch。发现这样可以获得一致性的进一步提升。 output = model(input_var) # compute output loss = criterion(output, target_var) # compute loss if args.distill: teacher_output = model(input_var, [16, 16, 16, 16, 16, 16, 16, 32, 32, 32, 32, 32, 32, 64, 64, 64, 64, 64, 64, 64]) teacher_loss = criterion(teacher_output, target_var) soft_target_var = torch.nn.functional.softmax(teacher_output, dim=1).detach() distill_loss = soft_criterion(output, soft_target_var) loss = 0.5 * (loss + teacher_loss) + 2.0 * distill_loss # ### 2.3 参数设置及遍历 # #### 2.3.1 每一步采样模型数量 # 受到FairNAS启发,每一步采样时,累积4/6/8/10/12/16个模型的梯度一并更新,发现使用6时,超网排序性最好 # #### 2.3.2 超网训练的epoch数量 # 超网的训练epoch,我们尝试了100/125/150/200/230/240/250/260/300,发现使用250epoch训练超网最好 # #### 2.3.3 超网训练的学习率 # 超网训练的学习率我们尝试了0.1/0.05/0.025/0.01/0.005,发现使用0.025最好 # #### 2.3.4 超网训练的weight decay # 超网训练的学习率我们尝试了1e-4/2e-4/3e-4/4e-4/5e-4/6e-4,发现使用5e-4最好
NAS/cvpr21_nas_track1/Track1_Submit/README.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="Q5-Qj7-GJyXj"
# #**Kalman Filter For Moving Object**

# + colab={"base_uri": "https://localhost:8080/"} id="YjDXG-IJJ9qY" outputId="8fdab3e7-16e6-48f4-a121-232f5f238e51"
# ! pip install pykalman

# + id="ovMyujOOJqte"
import numpy as np
import pylab as pl
from pykalman import KalmanFilter

# + [markdown] id="ltZQQ9t4Ptg6"
# **Let’s assume an object moves up and down over time, and the censor detects it with a small error. I postulate a perfect sin wave with randomness for the movement over time. When the location is plotted over time, it looks like the following**

# + colab={"base_uri": "https://localhost:8080/", "height": 480} id="K2cdB3qNKE-i" outputId="c1173c1e-8b3f-4327-c3a3-cdb695fd0ae2"
# Synthetic observations: a sine wave plus Gaussian noise, seeded for
# reproducibility.
rnd = np.random.RandomState(0)
n_timesteps = 100
x = np.linspace(0, 3 * np.pi, n_timesteps)
y = 20 * (np.sin(x) + 0.5 * rnd.randn(n_timesteps))
pl.figure(figsize=(40, 10))
pl.scatter(x, y, marker='x', color='b', label='observations')
pl.xlabel('time')
pl.ylabel('Position')
pl.show()

# + [markdown] id="RxKQGv_7P-tt"
# **Construction of Kalman Filter with transition_covariance**

# + id="L55mFmNAKPvP"
# Constant-velocity model: state = (position, velocity); EM fits the
# remaining parameters before smoothing.
kf = KalmanFilter(transition_matrices=np.array([[1, 1], [0, 1]]),
                  transition_covariance=0.01 * np.eye(2))
states_pred = kf.em(y).smooth(y)[0]

# + [markdown] id="wDOaXUwdO4xL"
# **Here,the state vector Xt can have two elements: position and velocity, and both are unobservable and hence the transition matrix would also consist of the same ,when passing the same through the kalman filter the output would be a perfect sine curve**

# + colab={"base_uri": "https://localhost:8080/", "height": 483} id="-EvDmlIWKTwm" outputId="407ffa94-fd6c-4842-9cf7-bee97a7730e2"
pl.figure(figsize=(40, 10))
obs_scatter = pl.scatter(x, y, marker='x', color='b', label='observations')
position_line = pl.plot(x, states_pred[:, 0], markersize=2, linestyle='-',
                        marker='o', color='r', label='position est.')
velocity_line = pl.plot(x, states_pred[:, 1], markersize=2, linestyle='-',
                        marker='o', color='g', label='velocity est.')
pl.legend(loc='lower left', fontsize=14)
# FIX: xlim(xmin=..., xmax=...) kwargs were deprecated and removed in
# Matplotlib 3.5 — pass the limits positionally.
pl.xlim(0, x.max())
pl.xlabel('time')
pl.show()

# + [markdown] id="aLwoMM3AOhOb"
# **If in case there is only a single element in the transition matrix , then this would be the result for the same , it would not be a perfect sinecurve but would resemble the same**

# + colab={"base_uri": "https://localhost:8080/", "height": 483} id="VQT4o7pMObSN" outputId="bc41eae7-1f2a-4fa7-b20e-a3dc80ef383e"
# One-dimensional state (position only) for comparison.
kf = KalmanFilter(transition_matrices=[1], transition_covariance=0.01)
states_pred = kf.em(y).smooth(y)[0]
pl.figure(figsize=(40, 10))
obs_scatter = pl.scatter(x, y, marker='x', color='b', label='observations')
position_line = pl.plot(x, states_pred, markersize=2, linestyle='-',
                        marker='o', color='r', label='position est.')
pl.legend(loc='lower left', fontsize=14)
pl.xlim(0, x.max())  # FIX: same xmin/xmax removal as above
pl.xlabel('time')
pl.show()
KFMO.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="tXz5afYnv0Ax"
# ## IAI5101 Project - Group 3

# + id="OmD9Sk6I2xH2"
import pandas as pd
import numpy as np
import sklearn
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt

# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="jSfEQV6kMYjY" outputId="8f023133-86c6-40fe-f696-937f2dfcde8c"
# Load the raw mode-locking dataset straight from the project repository.
df = pd.read_csv("https://raw.githubusercontent.com/strem078/IAI5101_Project/main/Data/Data_Raw.csv")
df.tail()

# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="RKtcm_wB4WIX" outputId="f3b55a8a-f1a6-4ace-fc6b-071e147e4c7d"
# Preprocessing steps: fill missing values, rescale voltages by the 5 V
# supply, photodiode channels by 2/5*1024 counts, and shift S1 from
# [-1, 1] into [0, 1].
df.fillna(0, inplace=True)
df['Voltage_1'] = df['Voltage_1']/5.0
df['Voltage_2'] = df['Voltage_2']/5.0
df['P_x'] = df['P_x']/(2.0/5.0*1024)
df['P_y'] = df['P_y']/(2.0/5.0*1024)
df['dPx_dV'] = df['dPx_dV']/(2.0/5.0*1024)
df['dPy_dV'] = df['dPy_dV']/(2.0/5.0*1024)
df['S1'] = (df['S1']+1)/2
df['dS1_dV'] = (df['dS1_dV']+1)/2
df

# + [markdown] id="pVN9qOai65X1"
# ### Naive Bayes

# + colab={"base_uri": "https://localhost:8080/"} id="ODOSGVGJ10vw" outputId="53ab3dd9-b075-45a9-a734-a6d147f50235"
from sklearn.naive_bayes import BernoulliNB
from sklearn.model_selection import train_test_split
from sklearn import metrics

x = df[df.columns[2:-4]]
y = df["Mode_Locked"]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=20)
naive_bayes_bern = BernoulliNB()
naive_bayes_bern.fit(x_train, y_train)
y_predicted_bern = naive_bayes_bern.predict(x_test)
# FIX: sklearn metrics take (y_true, y_pred); the original passed the
# predictions first, which silently swaps precision/recall in the
# asymmetric scores (recall_score, fbeta_score). Accuracy is symmetric
# and unaffected.
print("The accuracy of the Bernoulli Naive Bayes model is {:.2f}".format(metrics.accuracy_score(y_test, y_predicted_bern)*100),"%")
print("The recall score of the Bernoulli Naive Bayes model is {:.2f}".format(metrics.recall_score(y_test, y_predicted_bern)*100),"%")
print("The F_1 score of the Bernoulli Naive Bayes model is {:.2f}".format(metrics.fbeta_score(y_test, y_predicted_bern, beta=1)*100),"%")

# + colab={"base_uri": "https://localhost:8080/"} id="3x4VuoSKCGOI" outputId="ea818d5f-56a7-4174-c2d3-76eaf7c8132d"
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics

naive_bayes = GaussianNB()
naive_bayes.fit(x_train, y_train)
y_predicted = naive_bayes.predict(x_test)
# FIX: same (y_true, y_pred) argument-order correction as above.
print("The accuracy of the Gaussian Naive Bayes Model is {:.2f}".format(metrics.accuracy_score(y_test, y_predicted)*100),"%")
print("The recall score of the Gaussian Naive Bayes Model is {:.2f}".format(metrics.recall_score(y_test, y_predicted)*100),"%")
print("The F_1 of the Gaussian Naive Bayes Model is {:.2f}".format(metrics.f1_score(y_test, y_predicted)*100),"%")

# + [markdown] id="GcZ32qVq68un"
# ### SVC

# + id="4h4VUElsHhr5"
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.metrics import f1_score, precision_score, recall_score
import seaborn as sn

# + id="gwCi-T0CeIE3"
# A second split (random_state=0) shared by all of the models below.
xx_train, xx_test, yy_train, yy_test = train_test_split(x, y, test_size=0.3, random_state=0)

# + id="aGrqcqc4Hsg6"
# Scaling was tried and switched off; the *_std names are kept so the
# cells below keep working either way.
# sc = StandardScaler()
# sc.fit(xx_train)
# x_train_std = sc.transform(xx_train)
# x_test_std = sc.transform(xx_test)
x_train_std = xx_train
x_test_std = xx_test

# + colab={"base_uri": "https://localhost:8080/", "height": 781} id="YJ8NSXT9ZQsz" outputId="6a8ba4eb-294a-4145-bdfb-b2aa93ea9b10"
svm = SVC(kernel='rbf', random_state=0, gamma=.10, C=1.0)
svm_pred = svm.fit(x_train_std, yy_train)
svm_pred = svm_pred.predict(x_test_std)
print("SVM classification Report:\n", classification_report(yy_test, svm_pred))
print("\n F1:\n", f1_score(yy_test, svm_pred))
print("\n Precision score is:\n", precision_score(yy_test, svm_pred))
print("\n Recall score is:\n", recall_score(yy_test, svm_pred))
sn.heatmap(confusion_matrix(yy_test, svm_pred))

# + [markdown] id="Q61Qq1RjZcGk"
# ### k-NN

# + colab={"base_uri": "https://localhost:8080/", "height": 654} id="DvMyxC-4ZbK1" outputId="7ec21b2d-4ee1-4a2e-8db3-94413ce64e3d"
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors = 7, p = 2, metric='minkowski')
knn_pred = knn.fit(x_train_std, yy_train)
knn_pred = knn_pred.predict(x_test_std)
print('The accuracy of the Knn classifier on training data is {:.2f}'.format(knn.score(x_train_std, yy_train)*100),"%")
print('The accuracy of the Knn classifier on test data is {:.2f}'.format(knn.score(x_test_std, yy_test)*100),"%")
print("k-NN classification Report:\n", classification_report(yy_test, knn_pred))
print("\n F1:\n", f1_score(yy_test, knn_pred))
print("\n Precision score is:\n", precision_score(yy_test, knn_pred))
print("\n Recall score is:\n", recall_score(yy_test, knn_pred))
sn.heatmap(confusion_matrix(yy_test, knn_pred))

# + [markdown] id="5D0YfrpXfTX_"
# ### Decision Tree

# + colab={"base_uri": "https://localhost:8080/", "height": 654} id="RUBUyA9de_J6" outputId="7451227c-1103-45f8-b2c7-fae9b99b61d6"
from sklearn import tree

decision_tree = tree.DecisionTreeClassifier(criterion='gini')
dt_pred = decision_tree.fit(x_train_std, yy_train)
dt_pred = dt_pred.predict(x_test_std)
print('The accuracy of the Decision Tree classifier on training data is {:.2f}'.format(decision_tree.score(x_train_std, yy_train)*100),"%")
print('The accuracy of the Decision Tree classifier on test data is {:.2f}'.format(decision_tree.score(x_test_std, yy_test)*100),"%")
print("Decision Tree classification Report:\n", classification_report(yy_test, dt_pred))
print("\n F1:\n", f1_score(yy_test, dt_pred))
print("\n Precision score is:\n", precision_score(yy_test, dt_pred))
print("\n Recall score is:\n", recall_score(yy_test, dt_pred))
sn.heatmap(confusion_matrix(yy_test, dt_pred))

# + [markdown] id="5fTlInNffV4s"
# ### XGBoost

# + colab={"base_uri": "https://localhost:8080/", "height": 654} id="GYQSsYAAfKfd" outputId="a05b65a2-c475-421e-d4c2-b85adbd80f73"
import xgboost as xgb

xgb_clf = xgb.XGBClassifier()
xgb_clf = xgb_clf.fit(x_train_std, yy_train)
xgb_clf_pred = xgb_clf.predict(x_test_std)
print('The accuracy of the XGBoost classifier on training data is {:.2f}'.format(xgb_clf.score(x_train_std, yy_train)*100),"%")
print('The accuracy of the XGBoost classifier on test data is {:.2f}'.format(xgb_clf.score(x_test_std, yy_test)*100),"%")
print("XG Boost classification Report:\n", classification_report(yy_test, xgb_clf_pred))
print("\n F1:\n", f1_score(yy_test, xgb_clf_pred))
print("\n Precision score is:\n", precision_score(yy_test, xgb_clf_pred))
print("\n Recall score is:\n", recall_score(yy_test, xgb_clf_pred))
sn.heatmap(confusion_matrix(yy_test, xgb_clf_pred))

# + [markdown] id="gHSC-bZifryV"
# ### Random Forest

# + colab={"base_uri": "https://localhost:8080/", "height": 654} id="Iw-My0tgfncs" outputId="779da9ae-cb93-48bb-b93f-d8e1b6e43f8d"
from sklearn.ensemble import RandomForestClassifier

random_forest = RandomForestClassifier()
random_forest.fit(x_train_std, yy_train)
r_forest_pred = random_forest.predict(x_test_std)
print('The accuracy of the Random Forest classifier on training data is {:.2f}'.format(random_forest.score(x_train_std, yy_train)*100),"%")
print('The accuracy of the Random Forest classifier on test data is {:.2f}'.format(random_forest.score(x_test_std, yy_test)*100),"%")
print("Random Forest classification Report:\n", classification_report(yy_test, r_forest_pred))
print("\n F1:\n", f1_score(yy_test, r_forest_pred))
print("\n Precision score is:\n", precision_score(yy_test, r_forest_pred))
print("\n Recall score is:\n", recall_score(yy_test, r_forest_pred))
sn.heatmap(confusion_matrix(yy_test, r_forest_pred))

# + [markdown] id="UJB-qjj1vbF-"
# ### Simulating In-Field Testing

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="lfeKby9DvklE" outputId="11fa6200-00cc-44f0-8c75-876961a37d6f"
df = pd.read_csv("https://raw.githubusercontent.com/strem078/IAI5101_Project/main/Data/Data_Raw.csv")
import xgboost as xgb

# Preprocessing steps (same scaling as above, reapplied to the fresh load)
df.fillna(0, inplace=True)
df['Voltage_1'] = df['Voltage_1']/5.0
df['Voltage_2'] = df['Voltage_2']/5.0
df['P_x'] = df['P_x']/(2.0/5.0*1024)
df['P_y'] = df['P_y']/(2.0/5.0*1024)
df['dPx_dV'] = df['dPx_dV']/(2.0/5.0*1024)
df['dPy_dV'] = df['dPy_dV']/(2.0/5.0*1024)
df['S1'] = (df['S1']+1)/2
df['dS1_dV'] = (df['dS1_dV']+1)/2

# Cross Validation among 10 sets: hold out one contiguous 512-row slice
# per fold (contiguous slices mimic a time-ordered in-field run).
f_1 = []
precision = []
recall = []
data_sz = 512
slices = range(0, 5120, data_sz)
for start in slices:
    end = start + data_sz
    x = df[df.columns[2:-4]]
    x_train = pd.concat([x[x.columns][:start], x[x.columns][end:]])
    x_test = x[x.columns][start:end]
    y_train = pd.concat([df["Mode_Locked"][:start], df["Mode_Locked"][end:]])
    y_test = df["Mode_Locked"][start:end]
    xgb_clf = xgb.XGBClassifier()
    xgb_clf = xgb_clf.fit(x_train, y_train)
    xgb_clf_pred = xgb_clf.predict(x_test)
    # NOTE(review): per-fold reporting assumed here; the flattened source
    # does not show whether these prints were inside or after the loop.
    print('The accuracy of the XGBoost classifier on test data is {:.2f}'.format(xgb_clf.score(x_test, y_test)*100),"%")
    print("XG Boost classification Report:\n", classification_report(y_test, xgb_clf_pred))
    print("\n F1:\n", f1_score(y_test, xgb_clf_pred))
    print("\n Precision score is:\n", precision_score(y_test, xgb_clf_pred))
    print("\n Recall score is:\n", recall_score(y_test, xgb_clf_pred))
    sn.heatmap(confusion_matrix(y_test, xgb_clf_pred))

# + [markdown] id="mQJjJ-iECzv_"
# # Deep Learning Using Keras

# + id="KWT2keDBCDBI"
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout

#Initializing our recurrent neural network
rnn = Sequential()
#Adding our first LSTM layer
# FIX: the original referenced the undefined name `x_training_data`; the
# identical cell below (id="GRUaXy-YATLX") uses x_train_std, so use it here.
rnn.add(LSTM(units = 45, return_sequences = True, input_shape = (x_train_std.shape[1], 1)))
#Perform some dropout regularization
rnn.add(Dropout(0.2))
#Adding three more LSTM layers with dropout regularization
for i in [True, True, False]:
    rnn.add(LSTM(units = 45, return_sequences = i))
    rnn.add(Dropout(0.2))
#(Original code for the three additional LSTM layers)
# rnn.add(LSTM(units = 45, return_sequences = True))
# rnn.add(Dropout(0.2))
# rnn.add(LSTM(units = 45, return_sequences = True))
# rnn.add(Dropout(0.2))
# rnn.add(LSTM(units = 45))
# rnn.add(Dropout(0.2))
#Adding our output layer
rnn.add(Dense(units = 1))
#Compiling the recurrent neural network
rnn.compile(optimizer = 'adam', loss = 'mean_squared_error')
g = sns.countplot(df['Mode_Locked'])
g.set_xticklabels(['Negative','Positive'])
plt.show()

# + id="9J3pDnKUHERu"

# + [markdown] id="JlLSM6JHAB70"
#

# + id="1UH35A4VACVi"
import tensorflow as tf

model = tf.keras.models.Sequential()
Dense = tf.keras.layers.Dense
Dropout = tf.keras.layers.Dropout
LSTM = tf.keras.layers.LSTM
model.add(LSTM(256, activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
# FIX: `lr` is a deprecated alias removed from tf.keras optimizers — use
# `learning_rate`.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4, decay=1e-6)
# Compile model
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer=optimizer,
    metrics=['accuracy'],
)
# The specification of loss=’sparse_categorical_crossentropy’ is very important here as our targets are
# integers and not one-hot encoded categories.
# FIX: the original validated on (x_test, y_test), which by this point are
# leftover fold variables from the cross-validation cell above — a different,
# overlapping split. Validate on the held-out pair matching the training data.
# NOTE(review): LSTM layers expect 3-D input (samples, timesteps, features);
# feeding a 2-D frame will fail — confirm the intended reshape upstream.
model.fit(x_train_std, yy_train, epochs=4, validation_data=(x_test_std, yy_test))

# + id="GRUaXy-YATLX"
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout

#Initializing our recurrent neural network
rnn = Sequential()
#Adding our first LSTM layer
rnn.add(LSTM(units = 45, return_sequences = True, input_shape = (x_train_std.shape[1], 1)))
#Perform some dropout regularization
rnn.add(Dropout(0.2))
#Adding three more LSTM layers with dropout regularization
for i in [True, True, False]:
    rnn.add(LSTM(units = 45, return_sequences = i))
    rnn.add(Dropout(0.2))
#Adding our output layer
rnn.add(Dense(units = 1))
#Compiling the recurrent neural network
rnn.compile(optimizer = 'adam', loss = 'mean_squared_error')

# + colab={"base_uri": "https://localhost:8080/"} id="_ZmgGrNnLXNq" outputId="edb27ce1-6518-4d63-a66a-1053cd62a53e"
rnn.fit(x_train_std, yy_train, epochs = 100, batch_size = 32)

# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="GbiA7JinL1db" outputId="91959348-8a32-4481-e91b-af1c9a048d99"
predictions = rnn.predict(x_test_std)
plt.clf() #This clears the old plot from our canvas
plt.plot(predictions)
IAI5101_Group3_Project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + nbsphinx="hidden"
# Delete this cell to re-enable tracebacks
# Replaces IPython's traceback printer so the rendered docs show only the
# final exception line, not the full stack, for the intentionally-failing
# example cells below.
import sys
ipython = get_ipython()
def hide_traceback(exc_tuple=None, filename=None, tb_offset=None, exception_only=False, running_compiled_code=False):
    etype, value, tb = sys.exc_info()
    return ipython._showtraceback(etype, value, ipython.InteractiveTB.get_exception_only(etype, value))
ipython.showtraceback = hide_traceback

# + nbsphinx="hidden"
# JSON output syntax highlighting
# Shadows the builtin print: JSON-looking strings (starting with '{') are
# rendered as highlighted HTML, everything else falls through to the real
# print kept in original_print.
from __future__ import print_function
from pygments import highlight
from pygments.lexers import JsonLexer
from pygments.formatters import HtmlFormatter
from IPython.display import HTML
original_print = print
def json_print(inpt):
    string = str(inpt)
    if string[0] == '{':
        formatter = HtmlFormatter()
        return HTML('<style type="text/css">{}</style>{}'.format(
            formatter.get_style_defs('.highlight'),
            highlight(string, JsonLexer(), formatter)))
    else:
        original_print(inpt)
print = json_print
# -

# ## Creating STIX Content

# ### Creating STIX Domain Objects
#
# To create a STIX object, provide keyword arguments to the type's constructor:

# +
from stix2 import Indicator

indicator = Indicator(name="File hash for malware variant",
                      labels=["malicious-activity"],
                      pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']")
print(indicator)
# -

# Certain required attributes of all objects will be set automatically if not provided as keyword arguments:
#
# - If not provided, ``type`` will be set automatically to the correct type. You can also provide the type explicitly, but this is not necessary:

indicator2 = Indicator(type='indicator',
                       labels=["malicious-activity"],
                       pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']")

# Passing a value for ``type`` that does not match the class being constructed will cause an error:

# This cell intentionally raises.
indicator3 = Indicator(type='xxx',
                       labels=["malicious-activity"],
                       pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']")

# - If not provided, ``id`` will be generated randomly. If you provide an
# ``id`` argument, it must begin with the correct prefix:

# This cell intentionally raises (wrong 'campaign--' prefix for an Indicator).
indicator4 = Indicator(id="campaign--63ce9068-b5ab-47fa-a2cf-a602ea01f21a",
                       labels=["malicious-activity"],
                       pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']")

# For indicators, ``labels`` and ``pattern`` are required and cannot be set automatically. Trying to create an indicator that is missing one of these properties will result in an error:

# This cell intentionally raises (missing required properties).
indicator = Indicator()

# However, the required ``valid_from`` attribute on Indicators will be set to the current time if not provided as a keyword argument.
#
# Once created, the object acts like a frozen dictionary. Properties can be accessed using the standard Python dictionary syntax:

indicator['name']

# Or access properties using the standard Python attribute syntax:

indicator.name

# Attempting to modify any attributes will raise an error:

# Both cells intentionally raise — STIX objects are immutable once created.
indicator['name'] = "This is a revised name"

indicator.name = "This is a revised name"

# To update the properties of an object, see the [Versioning](versioning.ipynb) section.

# Creating a Malware object follows the same pattern:

# +
from stix2 import Malware

malware = Malware(name="<NAME>",
                  labels=['remote-access-trojan'])
print(malware)
# -

# As with indicators, the ``type``, ``id``, ``created``, and ``modified`` properties will be set automatically if not provided. For Malware objects, the ``labels`` and ``name`` properties must be provided.
#
# You can see the full list of SDO classes [here](../api/stix2.v20.sdo.rst).

# ### Creating Relationships
#
# STIX 2 Relationships are separate objects, not properties of the object on either side of the relationship. They are constructed similarly to other STIX objects. The ``type``, ``id``, ``created``, and ``modified`` properties are added automatically if not provided. Callers must provide the ``relationship_type``, ``source_ref``, and ``target_ref`` properties.

# +
from stix2 import Relationship

relationship = Relationship(relationship_type='indicates',
                            source_ref=indicator.id,
                            target_ref=malware.id)
print(relationship)
# -

# The ``source_ref`` and ``target_ref`` properties can be either the ID's of other STIX objects, or the STIX objects themselves. For readability, Relationship objects can also be constructed with the ``source_ref``, ``relationship_type``, and ``target_ref`` as positional (non-keyword) arguments:

relationship2 = Relationship(indicator, 'indicates', malware)
print(relationship2)

# ### Creating Bundles
#
# STIX Bundles can be created by passing objects as arguments to the Bundle constructor. All required properties (``type``, ``id``, and ``spec_version``) will be set automatically if not provided, or can be provided as keyword arguments:

# +
from stix2 import Bundle

bundle = Bundle(indicator, malware, relationship)
print(bundle)
docs/guide/creating.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline from sklearn.datasets import load_iris, load_breast_cancer,load_wine import matplotlib.pyplot as plt import numpy as np import seaborn as sn; import pandas as pd; #dataset = load_iris() #dataset = load_breast_cancer(); dataset = load_wine() def analysisPlot(x,y, title='',xlabel='x'): y = np.array(y, dtype=np.float32) index = np.argsort(x); cny=(1-y)[index].cumsum()/(1-y).sum(); cy = y[index].cumsum()/y.sum(); ks = np.abs(cny-cy).max(); plt.title(title +" KS %0.2f" % ks) plt.hist(x[y==False],alpha=0.7,color='red', bins=10) plt.hist(x[y==True],alpha=0.7,color='blue') plt.xlabel(xlabel) ax2 = plt.twinx() ax2.plot(x[index],cny,color='red') ax2.plot(x[index],cy,color='blue') ax2.set_ylabel('Frequency', color='g') ax2.set_ylabel('cummulated curve', color='b') ax2.set_ylim((0,1.1)) dataset['target_names'] # + from scipy import stats; corr = []; for i in range(dataset['data'].shape[1]): corr.append(stats.pearsonr(dataset['data'][:,i],dataset['target'])[0]) df = pd.DataFrame({ 'feature_names':dataset['feature_names'], 'arrayKey':range(len(dataset['feature_names'])), 'personr':corr, 'personr_abs':np.abs(corr) }).sort_values('personr_abs',ascending=False).head(10) df # - from sklearn.feature_selection import RFE from sklearn.svm import SVC rfe =RFE(estimator=SVC(kernel="linear", C=1),n_features_to_select=5) rfe.fit(dataset['data'],dataset['target']==1); dataset['data'] = dataset['data'][:,rfe.support_] dataset['feature_names'] = np.array(dataset['feature_names'])[rfe.support_] df = pd.DataFrame(dataset['data'], columns=dataset['feature_names']); df['Y'] = dataset['target']==0; df = df.corr().abs(); sn.clustermap(df,annot=True, cmap="YlGnBu", xticklabels=df.columns.values, yticklabels=df.columns.values, fmt="0.0%", figsize=(10,10)); # + 
row = len(dataset['target_names']) col = len(dataset['feature_names']) plt.figure(figsize=(4*col,4*row)) counter = 1; for i, target_name in enumerate(dataset['target_names']): for j, feature_names in enumerate(dataset['feature_names']): plt.subplot(row,col, counter); counter +=1; analysisPlot(dataset['data'][:,j],dataset['target'] == i, title=target_name, xlabel=feature_names) plt.tight_layout() # - # # outlier analysis from sklearn.metrics import pairwise_distances, pairwise_distances_argmin_min from sklearn.preprocessing import robust_scale, minmax_scale dist = pairwise_distances(robust_scale(dataset['data'],quantile_range=(2.5,97.5)),metric='l2') #dist = pairwise_distances(minmax_scale(dataset['data'], axis=0),metric='minkowski',p=100) sn.clustermap(dist,metric='euclidean') np.set_printoptions(precision=2) loc = np.mean(np.sort(dist,axis=1)[:,1:50],axis=1) loc = np.clip(loc,a_min=np.percentile(loc,1), a_max=np.percentile(loc,99)) loc = (loc - loc.min())/(loc.max()-loc.min()) plt.hist(loc); def contourValues(x,y,pred,h=15): x_min, x_max = x.min(), x.max(); y_min, y_max = y.min(), y.max(); x_r = x_max-x_min; y_r = y_max-y_min; x_min -= x_r*0.05; x_max += x_r*0.05; y_min -= y_r*0.05; y_max += y_r*0.05; X = np.linspace(x_min, x_max, h); Y = np.linspace(y_min, y_max, h); X, Y = np.meshgrid(X, Y); z = pairwise_distances_argmin_min(np.c_[X.ravel(), Y.ravel()], np.c_[x,y],axis=1)[0] return X,Y, pred[z.reshape((h,h))] np.unique(dataset['target']) # + r = len(dataset['feature_names']) print(r) fig, axes = plt.subplots(figsize=(15,15), sharex=False, sharey=False, ncols=r, nrows=r) for i in range(r): for j in range(r): #print(i,j) ax = axes[j,i]; #ax.set_title("%0.f %0.f" % (i,j)) x=dataset['data'][:,i]; y=dataset['data'][:,j]; if j == i: for t in np.unique(dataset['target']): ax.hist(x[dataset['target'] == t], label=dataset['target_names'][t], alpha=0.7) ax.set_xlabel(dataset['feature_names'][i]); continue threshold = stats.scoreatpercentile(loc*1.1, np.linspace(0,100,5)) 
xx, yy, z = contourValues(x,y, loc); ax.contourf(xx, yy, z,threshold, cmap=plt.cm.Blues, alpha=0.9) ax.scatter(x, y, s=50*loc, c=dataset['target'], cmap=plt.cm.viridis, alpha=0.9) ax.set_xlabel(dataset['feature_names'][i]); ax.set_ylabel(dataset['feature_names'][j]); plt.tight_layout() # - def plotKSROC(y_pred, y_true): plt.figure(figsize=(10,3)) plt.subplot(1,2,1); ks =histKS(y_pred, y_true); plt.title("Kolmogorov-Simirnov %0.2f | %0.2f | %0.2f" % (ks[0], ks[1], ks[2])) plt.subplot(1,2,2); fpr, tpr, thresholds = roc_curve(y_score=y_pred,y_true=y_true) plt.plot(fpr, tpr, color='darkorange',lw=2, label='ROC curve (area = %0.2f)' % auc(fpr,tpr)); plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.01]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show() import numpy as np notes = [ ('C0',16.351598), ('C_S_0',17.323914), ('D0',18.354048), ('D_S_0',19.445436), ('E0',20.601722), ('F0',21.826764), ('F_S_0',23.124651), ('G0',24.499715), ('G_S_0',25.956544), ('A0',27.500000), ('A_S_0',29.135235), ('B0',30.867706), ('C1',32.703196), ('C_S_1',34.647829), ('D1',36.708096), ('D_S_1',38.890873), ('E1',41.203445), ('F1',43.653529), ('F_S_1',46.249303), ('G1',48.999429), ('G_S_1',51.913087), ('A1',55.000000), ('A_S_1',58.270470), ('B1',61.735413), ('C2',65.406391), ('C_S_2',69.295658), ('D2',73.416192), ('D_S_2',77.781746), ('E2',82.406889), ('F2',87.307058), ('F_S_2',92.498606), ('G2',97.998859), ('G_S_2',103.826174), ('A2',110.000000), ('A_S_2',116.540940), ('B2',123.470825), ('C3',130.812783), ('C_S_3',138.591315), ('D3',146.832384), ('D_S_3',155.563492), ('E3',164.813778), ('F3',174.614116), ('F_S_3',184.997211), ('G3',195.997718), ('G_S_3',207.652349), ('A3',220.000000), ('A_S_3',233.081881), ('B3',246.941651), ('C4',261.625565), ('C_S_4',277.182631), ('D4',293.664768), ('D_S_4',311.126984), ('E4',329.627557), 
('F4',349.228231), ('F_S_4',369.994423), ('G4',391.995436), ('G_S_4',415.304698), ('A4',440.000000), ('A_S_4',466.163762), ('B4',493.883301), ('C5',523.251131), ('C_S_5',554.365262), ('D5',587.329536), ('D_S_5',622.253967), ('E5',659.255114), ('F5',698.456463), ('F_S_5',739.988845), ('G5',783.990872), ('G_S_5',830.609395), ('A5',880.000000), ('A_S_5',932.327523), ('B5',987.766603), ('C6',1046.502261), ('C_S_6',1108.730524), ('D6',1174.659072), ('D_S_6',1244.507935), ('E6',1318.510228), ('F6',1396.912926), ('F_S_6',1479.977691), ('G6',1567.981744), ('G_S_6',1661.218790), ('A6',1760.000000), ('A_S_6',1864.655046), ('B6',1975.533205), ('C7',2093.004522), ('C_S_7',2217.461048), ('D7',2349.318143), ('D_S_7',2489.015870), ('E7',2637.020455), ('F7',2793.825851), ('F_S_7',2959.955382), ('G7',3135.963488), ('G_S_7',3322.437581), ('A7',3520.000000), ('A_S_7',3729.310092), ('B7',3951.066410), ('C8',4186.009045), ('C_S_8',4434.922096), ('D8',4698.636287), ('D_S_8',4978.031740), ('E8',5274.040911), ('F8',5587.651703), ('F_S_8',5919.910763), ('G8',6271.926976), ('G_S_8',6644.875161), ('A8',7040.000000), ('A_S_8',7458.620184), ('B8',7902.132820), ] # + import sounddevice as sd import random import numpy as np import time from __future__ import print_function duration=0.5 amp=1E4 rate=44100 def note(freq, duration, amp, rate): t = np.linspace(0, duration, duration * rate) data = np.sin(2*np.pi*freq*t)*amp return data.astype(np.float) # two byte integers tone0 = note(0, duration, amp, rate) #silence tone1 = note(261.63, duration, amp, rate) # C4 tone2 = note(329.63, duration, amp, rate) # E4 tone3 = note(392.00, duration, amp, rate) # G4 tone4 = note(440.00, duration, amp, rate) # G4 # - sd.play(tone4, 44100) sd.play(note(notes['A4'], duration, amp, rate), 44100) for name, f in notes: print(name, end=" ") sd.play(note(f, duration, amp, rate), 44100) time.sleep(0.2) def recaman(n): seq = [0]; seqCurrent =1; for i in range(1,n): seqCurrent = seq[-1] - i; if seqCurrent <0 or 
seqCurrent in seq: seqCurrent = seq[-1] + i; seq.append(seqCurrent) return seq; notes[0] len(notes) # + duration=0.2 amp=1E4 rate=44100 dicNotes = dict(notes); for n in 'C C D C F E C C D C G F C C C A F F E D B B A F G F'.split(' '): sd.play(note(dicNotes[n+"2"], duration, amp, rate), 44100) time.sleep(duration) # + duration=0.2 amp=1E6 rate=44100 for i in recaman(30): j = (i) % len(notes); i = (i+30) % len(notes); print(i, notes[i][0]) n = note(notes[i][1], duration, amp, rate); n += note(notes[j][1], duration, amp, rate); sd.play(n, 44100) time.sleep(duration) # -
HDRIO/Iris Exploration.ipynb
# # Decision tree in depth
#
# In this notebook, we will go into details on the internal algorithm used to
# build the decision tree. First, we will focus on the decision tree used for
# classification. Then, we will highlight the fundamental difference between
# decision trees used in classification and in regression. Finally, we will
# quickly discuss the importance of the hyperparameters to be aware of when
# using decision trees.
#
# ## Presentation of the dataset
#
# We use the
# [Palmer penguins dataset](https://allisonhorst.github.io/palmerpenguins/).
# This dataset is composed of penguin records and ultimately, we want to
# identify to which species a penguin belongs.
#
# A penguin is from one of the three following species: Adelie, Gentoo, and
# Chinstrap. See the illustration below depicting the three different bird
# species:
#
# ![Image of penguins](https://github.com/allisonhorst/palmerpenguins/raw/master/man/figures/lter_penguins.png)
#
# This problem is a classification problem since the target is made of
# categories. We will limit our input data to a subset of the original features
# to simplify our explanations when presenting the decision tree algorithm.
# Indeed, we will use features based on the penguins' culmen measurement. You
# can learn more about the penguins' culmen with the illustration below:
#
# ![Image of culmen](https://github.com/allisonhorst/palmerpenguins/raw/master/man/figures/culmen_depth.png)

# +
import pandas as pd

data = pd.read_csv("../datasets/penguins.csv")

# select the features of interest
culmen_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"]
target_column = "Species"

# NOTE(fix): take an explicit copy so the column assignment below does not
# trigger pandas' SettingWithCopyWarning on a slice of the original frame.
data = data[culmen_columns + [target_column]].copy()
# keep only the first word of the species name
data[target_column] = data[target_column].str.split().str[0]
# -

# Let's check the dataset in more detail.

data.info()

# We can observe that there are 2 missing records in this dataset and, for the
# sake of simplicity, we will drop the records corresponding to these 2
# samples.

data = data.dropna()
data.info()

# We will separate the target from the data and we will create a training and a
# testing set.

# +
from sklearn.model_selection import train_test_split

X, y = data[culmen_columns], data[target_column]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, stratify=y, random_state=0,
)
# -

# Before going into the details of the decision tree algorithm, we will
# quickly inspect our dataset.

# +
import seaborn as sns

_ = sns.pairplot(data=data, hue="Species")
# -

# We can first check the feature distributions by looking at the diagonal plots
# of the pairplot. We can build the following intuitions:
#
# * The Adelie species is separable from the Gentoo and Chinstrap species using
#   the culmen length;
# * The Gentoo species is separable from the Adelie and Chinstrap species using
#   the culmen depth.
#
# ## How are decision trees built?
#
# In a previous notebook, we learnt that a linear classifier will define a
# linear separation to split classes using a linear combination of the input
# features. In our 2-dimensional space, it means that a linear classifier will
# define some oblique lines that best separate our classes. We define a
# function below that, given a set of data points and a classifier, will plot
# the decision boundaries learnt by the classifier.

# +
import numpy as np
import matplotlib.pyplot as plt


def plot_decision_function(X, y, clf, ax=None):
    """Plot the boundary of the decision function of a classifier."""
    from sklearn.preprocessing import LabelEncoder
    clf.fit(X, y)

    # create a grid to evaluate all possible samples
    plot_step = 0.02
    feature_0_min, feature_0_max = (X.iloc[:, 0].min() - 1,
                                    X.iloc[:, 0].max() + 1)
    feature_1_min, feature_1_max = (X.iloc[:, 1].min() - 1,
                                    X.iloc[:, 1].max() + 1)
    xx, yy = np.meshgrid(
        np.arange(feature_0_min, feature_0_max, plot_step),
        np.arange(feature_1_min, feature_1_max, plot_step)
    )

    # compute the associated prediction
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = LabelEncoder().fit_transform(Z)
    Z = Z.reshape(xx.shape)

    # make the plot of the boundary and the data samples
    if ax is None:
        _, ax = plt.subplots()
    ax.contourf(xx, yy, Z, alpha=0.4)
    sns.scatterplot(
        data=pd.concat([X, y], axis=1),
        x=X.columns[0], y=X.columns[1], hue=y.name,
        ax=ax,
    )
# -

# Thus, for a linear classifier, we will obtain the following decision
# boundaries.

# +
from sklearn.linear_model import LogisticRegression

linear_model = LogisticRegression()
plot_decision_function(X_train, y_train, linear_model)
# -

# We see that the lines are a combination of the input features since they are
# not perpendicular to a specific axis. In addition, it seems that the linear
# model would be a good candidate for such a problem, giving a good accuracy.

print(
    f"Accuracy of the {linear_model.__class__.__name__}: "
    f"{linear_model.fit(X_train, y_train).score(X_test, y_test):.2f}"
)

# Unlike linear models, a decision tree will partition the space considering a
# single feature at a time. Let's illustrate this behaviour by having a
# decision tree make a single split to partition our feature space.

# +
from sklearn.tree import DecisionTreeClassifier

tree = DecisionTreeClassifier(max_depth=1)
plot_decision_function(X_train, y_train, tree)
# -

# The partition found separates the data along the axis "Culmen Length",
# discarding the feature "Culmen Depth". Thus, it highlights that a decision
# tree does not use a combination of features when making a split.
#
# However, such a split is not powerful enough to isolate the three species and
# the model accuracy is low compared to the linear model.

print(
    f"Accuracy of the {tree.__class__.__name__}: "
    f"{tree.fit(X_train, y_train).score(X_test, y_test):.2f}"
)

# Indeed, it is not a surprise. We saw earlier that a single feature will not
# help separating the three species. However, from the previous analysis we
# saw that using both features should be useful to get fairly good results.
# Considering the mechanism of the decision tree illustrated above, we should
# repeat the partitioning on each rectangle that was previously created. In
# this regard, we expect that the partition will be using the feature "Culmen
# Depth" this time.

tree.set_params(max_depth=2)
plot_decision_function(X_train, y_train, tree)

# As expected, the decision tree made 2 new partitions using the "Culmen
# Depth". Now, our tree is more powerful, with similar performance to our
# linear model.

print(
    f"Accuracy of the {tree.__class__.__name__}: "
    f"{tree.fit(X_train, y_train).score(X_test, y_test):.2f}"
)

# At this stage, we have the intuition that a decision tree is built by
# successively partitioning the feature space, considering one feature at a
# time.
# Subsequently, we will present the details regarding the partitioning
# mechanism.
#
# ## Partitioning mechanism
#
# Let's isolate a single feature. We will present the mechanism allowing us to
# find the optimal partition for these one-dimensional data.

single_feature = X_train["Culmen Length (mm)"]

# Let's check once more the distribution of this feature.
# Per-class density histogram of the isolated feature.
for klass in y_train.unique():
    mask_penguin_species = y_train == klass
    plt.hist(
        single_feature[mask_penguin_species], alpha=0.7,
        label=f'{klass}', density=True
    )
plt.legend()
plt.xlabel(single_feature.name)
_ = plt.ylabel('Class probability')

# Seeing this graph, we can easily separate the Adelie species from
# the other species. Alternatively, we can have a scatter plot of all
# samples.

# the empty-named column gives the swarmplot a single dummy y category
df = pd.concat(
    [single_feature, y_train,
     pd.Series([""] * y_train.size, index=single_feature.index, name="")],
    axis=1,
)
_ = sns.swarmplot(x=single_feature.name, y="", hue=y_train.name, data=df)

# Finding a split comes down to defining a threshold value which will be used
# to separate the different classes. To give an example, we will pick a random
# threshold value and we will qualify the quality of the split.

# +
rng = np.random.RandomState(0)
random_indice = rng.choice(single_feature.index)
threshold_value = single_feature.loc[random_indice]

_, ax = plt.subplots()
_ = sns.swarmplot(
    x=single_feature.name, y="", hue=y_train.name, data=df, ax=ax
)
ax.axvline(threshold_value, linestyle="--", color="black")
_ = ax.set_title(f"Random threshold value: {threshold_value} mm")
# -

# A random split does not ensure that we pick a threshold value which best
# separates the species. Thus, an intuition would be to find a threshold value
# that best divides the Adelie class from the other classes. A threshold
# around 42 mm would be ideal. Once this split is defined, we could specify
# that the samples < 42 mm would belong to the class Adelie and the samples
# > 42 mm would belong to the most probable class (the most represented in the
# partition) between the Gentoo and the Chinstrap. In this case, it seems to
# be the Gentoo species, which is in line with what we observed earlier when
# fitting a `DecisionTreeClassifier` with a `max_depth=1`.

# +
threshold_value = 42

_, ax = plt.subplots()
_ = sns.swarmplot(
    x=single_feature.name, y="", hue=y_train.name, data=df, ax=ax
)
ax.axvline(threshold_value, linestyle="--", color="black")
_ = ax.set_title(f"Manual threshold value: {threshold_value} mm")
# -

# Intuitively, we expect the best possible threshold to be around this value
# (42 mm) because it is the split leading to the least amount of error. Thus,
# if we want to automatically find such a threshold, we would need a way to
# evaluate the goodness (or pureness) of a given threshold.
#
# ### The split purity criterion
#
# To evaluate the effectiveness of a split, we will use a criterion to qualify
# the class purity on the different partitions.
#
# First, let's define a threshold at 42 mm. Then, we will divide the data into
# 2 sub-groups: a group for samples < 42 mm and a group for samples >= 42 mm.
# Then, we will store the class labels for these samples.

threshold_value = 42
mask_below_threshold = single_feature < threshold_value
labels_below_threshold = y_train[mask_below_threshold]
labels_above_threshold = y_train[~mask_below_threshold]

# We can check the proportion of samples of each class in both partitions.
# This proportion is the probability of each class when considering the
# partition.

labels_below_threshold.value_counts(normalize=True).sort_index()

labels_above_threshold.value_counts(normalize=True).sort_index()

# As we visually assessed, the partition defined by < 42 mm has mainly Adelie
# penguins and only 2 samples which we could consider misclassified. However,
# in the partition >= 42 mm, we cannot differentiate Gentoo and Chinstrap
# (although there are almost twice as many Gentoo).
#
# We should come up with a statistical measure which combines the class
# probabilities together and can be used as a criterion to qualify the purity
# of a partition. We will choose as an example the entropy criterion (also
# used in scikit-learn), which is one of the possible classification criteria.
#
# The entropy is defined as: $H(X) = - \sum_{k=1}^{K} p(X_k) \log p(X_k)$
#
# For a binary problem, the entropy function for one of the classes can be
# depicted as follows:
#
# ![title](https://upload.wikimedia.org/wikipedia/commons/2/22/Binary_entropy_plot.svg)
#
# Therefore, the entropy will be maximum when the proportion of samples from
# each class is equal and minimum when only samples from a single class are
# present.
#
# Therefore, one searches to minimize the entropy in each partition.

# +
def classification_criterion(labels):
    """Entropy of the class distribution of `labels`."""
    from scipy.stats import entropy
    return entropy(
        labels.value_counts(normalize=True).sort_index()
    )


entropy_below_threshold = classification_criterion(labels_below_threshold)
entropy_above_threshold = classification_criterion(labels_above_threshold)

print(f"Entropy for partition below the threshold: \n"
      f"{entropy_below_threshold:.3f}")
print(f"Entropy for partition above the threshold: \n"
      f"{entropy_above_threshold:.3f}")
# -

# In our case, we can see that the entropy in the partition < 42 mm is close
# to 0, meaning that this partition is "pure" and contains a single class,
# while the entropy in the partition >= 42 mm is much higher because 2 of the
# classes are still mixed.
#
# Now, we are able to assess the quality of each partition. However, the
# ultimate goal is to evaluate the quality of the split and thus combine both
# measures of entropy to obtain a single statistic.
#
# ### Information gain
#
# This statistic is known as the information gain. It combines the entropy of
# the different partitions to give us a single statistic qualifying the
# quality of a split. The information gain is defined as the difference of the
# entropy before making a split and the sum of the entropies of each
# partition, normalized by the frequencies of class samples on each partition.
# The goal is to maximize the information gain.
#
# We will define a function to compute the information gain given the
# different partitions.

# +
def information_gain(labels_below_threshold, labels_above_threshold):
    """Information gain of a split, given the labels of both partitions."""
    # compute the entropies in the different partitions
    entropy_below_threshold = classification_criterion(labels_below_threshold)
    entropy_above_threshold = classification_criterion(labels_above_threshold)
    entropy_parent = classification_criterion(
        pd.concat([labels_below_threshold, labels_above_threshold])
    )

    # compute the normalized entropies
    n_samples_below_threshold = labels_below_threshold.size
    n_samples_above_threshold = labels_above_threshold.size
    n_samples_parent = n_samples_below_threshold + n_samples_above_threshold

    normalized_entropy_below_threshold = (
        (n_samples_below_threshold / n_samples_parent) *
        entropy_below_threshold
    )
    normalized_entropy_above_threshold = (
        (n_samples_above_threshold / n_samples_parent) *
        entropy_above_threshold
    )

    # compute the information gain
    return (entropy_parent - normalized_entropy_below_threshold -
            normalized_entropy_above_threshold)


print(
    f"The information gain for the split with a threshold at 42 mm is "
    f"{information_gain(labels_below_threshold, labels_above_threshold):.3f}"
)
# -

# Now, we are able to quantify any split. Thus, we can evaluate every possible
# split and compute the information gain for each split.

# exclude the extreme values: splitting there leaves one side empty
splits_information_gain = []
possible_thresholds = np.sort(single_feature.unique())[1:-1]
for threshold_value in possible_thresholds:
    mask_below_threshold = single_feature < threshold_value
    labels_below_threshold = y_train.loc[mask_below_threshold]
    labels_above_threshold = y_train.loc[~mask_below_threshold]
    splits_information_gain.append(
        information_gain(labels_below_threshold, labels_above_threshold)
    )

plt.plot(possible_thresholds, splits_information_gain)
plt.xlabel(single_feature.name)
_ = plt.ylabel("Information gain")

# As previously mentioned, we would like to find the threshold value
# maximizing the information gain.
# +
best_threshold_indice = np.argmax(splits_information_gain)
best_threshold_value = possible_thresholds[best_threshold_indice]

_, ax = plt.subplots()
ax.plot(possible_thresholds, splits_information_gain)
ax.set_xlabel(single_feature.name)
ax.set_ylabel("Information gain")
ax.axvline(best_threshold_value, color="tab:orange", linestyle="--")
ax.set_title(f"Best threshold: {best_threshold_value} mm")
# -

# By making this brute-force search, we find that the threshold maximizing the
# information gain is 43.3 mm.
#
# Let's check if this result is similar to the one found with the
# `DecisionTreeClassifier` from scikit-learn.

# +
from sklearn.tree import plot_tree

tree = DecisionTreeClassifier(criterion="entropy", max_depth=1)
tree.fit(single_feature.to_frame(), y_train)
_ = plot_tree(tree)
# -

# The implementation in scikit-learn gives similar results: 43.25 mm. The
# slight differences are only due to some low-level implementation details.
#
# As we previously explained, the split mechanism will be repeated several
# times (until we don't have any classification error on the training set). In
# the above example, it corresponds to increasing the `max_depth` parameter.
#
# ## How does prediction work?
#
# We showed the way a decision tree is constructed. However, we did not
# explain how and what will be predicted from the decision tree.
#
# First, let's recall the tree structure that we fitted earlier.

_ = plot_tree(tree)

# We recall that the threshold found is 43.25 mm. Thus, let's see the class
# prediction for a sample with a feature value below the threshold and
# another above the threshold.

print(f"The class predicted for a value below the threshold is: "
      f"{tree.predict([[35]])}")
print(f"The class predicted for a value above the threshold is: "
      f"{tree.predict([[45]])}")

# We predict an Adelie penguin for a value below the threshold, which is not
# surprising since this partition was almost pure. In the other case we
# predicted the Gentoo penguin. Indeed, we predict the most probable class.
#
# ## What about decision trees for regression?
#
# We explained the construction of the decision tree in a classification
# problem. The entropy criterion used to split the nodes relies on the class
# probabilities. Thus, this criterion is not adapted when the target `y` is
# continuous. In this case, we will need a specific criterion adapted to
# regression problems.
#
# Before going into details with the regression criterion, let's observe and
# build some intuitions about the characteristics of decision trees used in
# regression.
#
# ### Decision tree: a non-parametric model
#
# We use the same penguins dataset. However, this time we will formulate a
# regression problem instead of a classification problem. Thus, we will try
# to infer the body mass of a penguin given its flipper length.

# +
data = pd.read_csv("../datasets/penguins.csv")

data_columns = ["Flipper Length (mm)"]
target_column = "Body Mass (g)"

data = data[data_columns + [target_column]]
data = data.dropna()

X, y = data[data_columns], data[target_column]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=0,
)
# -

sns.scatterplot(data=data, x="Flipper Length (mm)", y="Body Mass (g)")

# Here, we deal with a regression problem because our target is a continuous
# variable ranging from 2.7 kg to 6.3 kg. From the scatter plot above, we can
# observe that we have a linear relationship between the flipper length and
# the body mass: the longer the flipper of a penguin, the heavier the penguin.
#
# For this problem, we would expect the simpler linear model to be able to
# model this relationship.

# +
from sklearn.linear_model import LinearRegression

linear_model = LinearRegression()
# -

# We will first create a function in charge of plotting the dataset and all
# possible predictions. This function is equivalent to the earlier function
# used for classification.
def plot_regression_model(X, y, model, extrapolate=False, ax=None):
    """Plot the dataset and the prediction of a learnt regression model.

    When `extrapolate` is True the evaluation range is extended 20 mm past
    the training data on both sides.  Returns the matplotlib axes.
    """
    # train our model
    model.fit(X, y)

    # make a scatter plot of the input data and target
    training_data = pd.concat([X, y], axis=1)
    if ax is None:
        _, ax = plt.subplots()
    sns.scatterplot(
        data=training_data, x="Flipper Length (mm)", y="Body Mass (g)",
        ax=ax, color="black", alpha=0.5,
    )

    # only necessary if we want to see the extrapolation of our model
    offset = 20 if extrapolate else 0

    # generate a testing set spanning between min and max of the training set
    X_test = np.linspace(
        X.min() - offset, X.max() + offset, num=100
    ).reshape(-1, 1)

    # predict for this testing set and plot the response
    y_pred = model.predict(X_test)
    ax.plot(
        X_test, y_pred,
        label=f"{model.__class__.__name__} trained", linewidth=3,
    )
    plt.legend()
    # return the axes in case we want to add something to it
    return ax


_ = plot_regression_model(X_train, y_train, linear_model)

# On the plot above, we see that a non-regularized `LinearRegression` is able
# to fit the data. The specificity of the model is that any new predictions
# will occur on the line.

# +
X_test_subset = X_test[:10]

ax = plot_regression_model(X_train, y_train, linear_model)
y_pred = linear_model.predict(X_test_subset)
ax.plot(
    X_test_subset, y_pred, label="Test predictions",
    color="tab:green", marker="^", markersize=10, linestyle="",
)

plt.legend()
# -

# Contrary to linear models, decision trees are non-parametric models, so
# they do not rely on the way data should be distributed. In this regard, it
# will affect the prediction scheme. Repeating the above experiment will
# highlight the differences.

# +
from sklearn.tree import DecisionTreeRegressor

tree = DecisionTreeRegressor()
# -

_ = plot_regression_model(X_train, y_train, tree)

# We see that the decision tree model does not have an a priori and does not
# end up with a straight line to regress flipper length and body mass. The
# prediction for a new sample, which was already present in the training set,
# will give the same target as this training sample. However, having
# different body masses for the same flipper length, the tree will predict
# the mean of the targets.
#
# So in the classification setting, we saw that the predicted value was the
# most probable value in the node of the tree. In the case of regression, the
# predicted value corresponds to the mean of the target in the node.
#
# This leads us to question whether or not our decision trees are able to
# extrapolate to unseen data. We can highlight that this is possible with the
# linear model because it is a parametric model.

plot_regression_model(X_train, y_train, linear_model, extrapolate=True)

# The linear model will extrapolate using the fitted model for flipper
# lengths < 175 mm and > 235 mm. Let's see the difference with the trees.

ax = plot_regression_model(X_train, y_train, linear_model, extrapolate=True)
_ = plot_regression_model(X_train, y_train, tree, extrapolate=True, ax=ax)

# For the tree, we see that it cannot extrapolate below and above the minimum
# and maximum, respectively, of the flipper length encountered during the
# training. Indeed, we are predicting the minimum and maximum values of the
# training set.
#
# ### The regression criterion
#
# In the previous section, we explained the differences between using
# decision trees in classification and in regression: the predicted value
# will be the most probable class in the classification case while it will be
# the mean in the case of regression. The second difference that we already
# mentioned is the criterion. The classification criterion cannot be applied
# in the regression setting and we need to use a specific set of criteria.
#
# One of the criteria that can be used in regression is the mean squared
# error. In this case, we will compute this criterion in each partition as in
# the case of the entropy and select the split leading to the best
# improvement (i.e. information gain).
#
# ## Importance of decision tree hyper-parameters on generalization
#
# This last section will illustrate the importance of some key
# hyper-parameters of the decision tree. We will illustrate it on both the
# classification and regression datasets that we previously used.
#
# ### Creation of the classification and regression dataset
#
# We will first regenerate the classification and regression datasets.

data = pd.read_csv("../datasets/penguins.csv")

# +
data_clf_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"]
target_clf_column = "Species"

# NOTE(fix): copy the slice so the column assignment below does not trigger
# pandas' SettingWithCopyWarning.
data_clf = data[
    data_clf_columns + [target_clf_column]
].copy()
data_clf[target_clf_column] = data_clf[
    target_clf_column].str.split().str[0]
data_clf = data_clf.dropna()

X_clf, y_clf = data_clf[data_clf_columns], data_clf[target_clf_column]
X_train_clf, X_test_clf, y_train_clf, y_test_clf = train_test_split(
    X_clf, y_clf, stratify=y_clf, random_state=0,
)
# +
data_reg_columns = ["Flipper Length (mm)"]
target_reg_column = "Body Mass (g)"

data_reg = data[data_reg_columns + [target_reg_column]]
data_reg = data_reg.dropna()

X_reg, y_reg = data_reg[data_reg_columns], data_reg[target_reg_column]
X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split(
    X_reg, y_reg, random_state=0,
)
# -

_, axs = plt.subplots(ncols=2, figsize=(10, 5))
sns.scatterplot(
    data=data_clf, x="Culmen Length (mm)", y="Culmen Depth (mm)",
    hue="Species", ax=axs[0],
)
axs[0].set_title("Classification dataset")
sns.scatterplot(
    data=data_reg, x="Flipper Length (mm)", y="Body Mass (g)",
    ax=axs[1],
)
_ = axs[1].set_title("Regression dataset")

# ### Effect of the `max_depth` parameter
#
# In decision trees, the most important parameter to get a trade-off between
# under-fitting and over-fitting is the `max_depth` parameter. Let's build a
# shallow tree (for both classification and regression) and a deeper tree.

# +
max_depth = 2
tree_clf = DecisionTreeClassifier(max_depth=max_depth)
tree_reg = DecisionTreeRegressor(max_depth=max_depth)

fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
plot_decision_function(X_train_clf, y_train_clf, tree_clf, ax=axs[0])
plot_regression_model(X_train_reg, y_train_reg, tree_reg, ax=axs[1])
_ = fig.suptitle(f"Shallow tree with a max-depth of {max_depth}")
# +
max_depth = 30
tree_clf.set_params(max_depth=max_depth)
tree_reg.set_params(max_depth=max_depth)

fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
plot_decision_function(X_train_clf, y_train_clf, tree_clf, ax=axs[0])
plot_regression_model(X_train_reg, y_train_reg, tree_reg, ax=axs[1])
_ = fig.suptitle(f"Deep tree with a max-depth of {max_depth}")
# -

# In both the classification and regression settings, we can observe that
# increasing the depth will make the tree model more expressive. However, a
# tree which is too deep will overfit the training data, creating partitions
# which will only be correct for "outliers". The `max_depth` is one of the
# parameters that one would like to optimize via cross-validation and
# grid-search.

# +
from sklearn.model_selection import GridSearchCV

param_grid = {"max_depth": np.arange(2, 10, 1)}
tree_clf = GridSearchCV(DecisionTreeClassifier(), param_grid=param_grid)
tree_reg = GridSearchCV(DecisionTreeRegressor(), param_grid=param_grid)
# -

fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
plot_decision_function(X_train_clf, y_train_clf, tree_clf, ax=axs[0])
axs[0].set_title(
    f"Optimal depth found via CV: {tree_clf.best_params_['max_depth']}"
)
plot_regression_model(X_train_reg, y_train_reg, tree_reg, ax=axs[1])
_ = axs[1].set_title(
    f"Optimal depth found via CV: {tree_reg.best_params_['max_depth']}"
)

# The other parameters are used to fine-tune the decision tree and have less
# impact than `max_depth`.
rendered_notebooks/trees.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:holovizgen1202] # language: python # name: conda-env-holovizgen1202-py # --- # # CFSv2 Comparison and assessment # # 12/17,11/2020 # ## Dataset notes # - There are met files for two time periods: 2011-2016 and 2018-2019 # - cfsv2_2018-09-01_2019-10-01_tair.tif: Starts 2018-09-01 # - 6-hour time step # - The first time step is 00:00 # - Per dataset metadata, there are 1580 time steps # - `xarray.DataArray (band: 1580, y: 14, x: 11)` # - Note how *regular* (rounded off) the spatial boundaries are, compared to what I get with my code and Ryan's original GEE lat-long bounding box # # ## TO-DOs # - Spatial alignment: Ensure the grid extent and cell resolution and boundary for the dataset I generate/download align with the sample dataset. This will take some iteration, but can do it on a single-time-step basis (processing a specified time interval will require a change to my code) # - Actual comparisons: whole individual time steps; time series at points (lat, lon coords) # + # %matplotlib inline import matplotlib.pyplot as plt from datetime import datetime, timedelta import xarray as xr from shapely.geometry import box import geopandas as gpd # - # ## Target bounding box bbox_latlon = gpd.GeoSeries(box(-111.155208, 42.363116, -109.477849, 44.582480), crs='epsg:4326') # crs='epsg:32612' bbox = bbox_latlon.to_crs(epsg=32612) # ## GEE tif # With `gdalinfo`: # # ```bash # <EMAIL>:/home/mayorga/Downloads$ gdalinfo cfsv2_2018-09-01_2019-10-01_tair.tif # Driver: GTiff/GeoTIFF # Files: cfsv2_2018-09-01_2019-10-01_tair.tif # Size is 11, 14 # Coordinate System is: # PROJCRS["WGS 84 / UTM zone 12N", # BASEGEOGCRS["WGS 84", # DATUM["World Geodetic System 1984", # ELLIPSOID["WGS 84",6378137,298.257223563, # LENGTHUNIT["metre",1]]], # PRIMEM["Greenwich",0, # ANGLEUNIT["degree",0.0174532925199433]], 
# ID["EPSG",4326]], # CONVERSION["UTM zone 12N", # METHOD["Transverse Mercator", # ID["EPSG",9807]], # PARAMETER["Latitude of natural origin",0, # ANGLEUNIT["degree",0.0174532925199433], # ID["EPSG",8801]], # PARAMETER["Longitude of natural origin",-111, # ANGLEUNIT["degree",0.0174532925199433], # ID["EPSG",8802]], # PARAMETER["Scale factor at natural origin",0.9996, # SCALEUNIT["unity",1], # ID["EPSG",8805]], # PARAMETER["False easting",500000, # LENGTHUNIT["metre",1], # ID["EPSG",8806]], # PARAMETER["False northing",0, # LENGTHUNIT["metre",1], # ID["EPSG",8807]]], # CS[Cartesian,2], # AXIS["(E)",east, # ORDER[1], # LENGTHUNIT["metre",1]], # AXIS["(N)",north, # ORDER[2], # LENGTHUNIT["metre",1]], # USAGE[ # SCOPE["unknown"], # AREA["World - N hemisphere - 114°W to 108°W - by country"], # BBOX[0,-114,84,-108]], # ID["EPSG",32612]] # Data axis to CRS axis mapping: 1,2 # Origin = (444000.000000000000000,4972800.000000000000000) # Pixel Size = (22200.000000000000000,-22200.000000000000000) # Metadata: # AREA_OR_POINT=Area # Image Structure Metadata: # COMPRESSION=LZW # INTERLEAVE=PIXEL # Corner Coordinates: # Upper Left ( 444000.000, 4972800.000) (111d42'33.73"W, 44d54'23.12"N) # Lower Left ( 444000.000, 4662000.000) (111d40'38.39"W, 42d 6'29.14"N) # Upper Right ( 688200.000, 4972800.000) (108d37' 1.04"W, 44d53' 1.52"N) # Lower Right ( 688200.000, 4662000.000) (108d43'28.12"W, 42d 5'15.10"N) # Center ( 566100.000, 4817400.000) (110d10'56.25"W, 43d30'23.79"N) # Band 1 Block=256x256 Type=Float32, ColorInterp=Gray # Band 2 Block=256x256 Type=Float32, ColorInterp=Undefined # ... 
# Band 1579 Block=256x256 Type=Float32, ColorInterp=Undefined # Band 1580 Block=256x256 Type=Float32, ColorInterp=Undefined # ``` gee_tif_fname = "cfsv2_2018-09-01_2019-10-01_tair.tif" airt_gee_da = xr.open_rasterio(gee_tif_fname) # TO-DO: # - Set crs information correctly (per CF) in order for a direct comparison with the TDS-derived dataset to be possible # - Note that what's returned is a data array, not a dataset print(airt_gee_da) (airt_gee_da.x[1] - airt_gee_da.x[0]).values, (airt_gee_da.y[1] - airt_gee_da.y[0]).values (airt_gee_da.x[0].values, airt_gee_da.x[-1].values), (airt_gee_da.y[0].values, airt_gee_da.y[-1].values) # Note that northing (y) coordinate values are sorted from high to low timesteps_cnt = len(airt_gee_da.coords['band']) timesteps_cnt datetime(2018, 9, 1, 0, 0) + timesteps_cnt * timedelta(hours=6) # + # Note that using subplots forced an aspect ratio of "equal", with cells now being square # When plotting w/o subplots (just the data array plot), cells were not square f, ax = plt.subplots(ncols=2, figsize=(10, 5)) # NOTE: band=0 is supposed to be 2018-09-01 00:00, but based on these plots, I'm not so sure ... airt_gee_da.isel(band=0).plot(ax=ax[0]) bbox.plot(ax=ax[0], edgecolor='black', facecolor='none') airt_gee_da.isel(band=3).plot(ax=ax[1]) bbox.plot(ax=ax[1], edgecolor='black', facecolor='none'); # - # ## NetCDF from TDS # - Generate new file spanning a time period covered by the GEE tif. # - The current, recent data are in `CFSv2_OperationalAnalysis_WY_2018-09.nc` # - **TO-DO:** When the reprojected resolution in TDS notebook is set to 20km (possibly coarser than the original resolution), some/many cells are set to the `_FillValue` but xarray didn't seem to properly apply the `_FillValue`. 
Need to investigate this tds_ds_fname = 'CFSv2_OperationalAnalysis_WY_2018-09.nc' tds_ds = xr.open_dataset(tds_ds_fname) tds_ds airt_tds_da = tds_ds['Temperature_height_above_ground'] (airt_tds_da.easting[1] - airt_tds_da.easting[0]).values, (airt_tds_da.northing[1] - airt_tds_da.northing[0]).values (airt_tds_da.easting[0].values, airt_tds_da.easting[-1].values), (airt_tds_da.northing[0].values, airt_tds_da.northing[-1].values) # - Note that northing coordinate values are sorted from high to low # - Investigate why the no-data areas are occurring on the edges; could it have something to do with the resampling scheme chosen? # - Another option to investigate is using a larger bbox buffer on the TDS data request, so that the reprojection resampling is not impacted by edge effects airt_tds_da.coords['time'].values # Plot two time steps # + f, ax = plt.subplots(ncols=2, figsize=(10, 5)) airt_tds_da.isel(time=0).plot(ax=ax[0]) bbox.plot(ax=ax[0], edgecolor='black', facecolor='none') airt_tds_da.isel(time=3).plot(ax=ax[1]) bbox.plot(ax=ax[1], edgecolor='black', facecolor='none'); # - airt_tds_da.isel(time=3).coords['time'].values # ## Compare GEE vs TDS # ### Plot overlaying the histograms for the same time period, for the two datasets # # But keep in mind that the spatial domains are different; the TDS domain is larger. 
airt_tds_da.plot.hist(density=True, range=(265,305), bins=20, edgecolor='b', facecolor='green', alpha=0.3) # Select a time (band) slice matching the TDS dataset time period airt_gee_da.isel(band=slice(0,12)).plot.hist(density=True, range=(265,305), bins=20, edgecolor='r', facecolor='yellow', alpha=0.3) plt.legend(['TDS', 'GEE']); # ### Time series plot averaging all cells at each time step fig, ax = plt.subplots(ncols=2, sharey=True, figsize=(14, 5)) airt_tds_da.mean(['easting', 'northing']).plot(ax=ax[0], color='b') ax[0].set_title('TDS') airt_gee_da.mean(['x', 'y']).isel(band=slice(0,12)).plot(ax=ax[1], color='r') ax[1].set_ylim(276, 295) ax[1].set_title('GEE'); # For `airt_gee_da`, the `band` dimension is just an incremental integer, not a datetime. So, we can't actually plot the two time series together yet # ### Map plot of the two datasets at the same time period (best guess), using the same x-y ranges and value range # # **NOTE:** The best match to TDS `isel(time=3)` (2018-09-01T00:00) seems to be `isel(band=3)`, NOT `isel(band=0)` as I expected # + bounds = bbox.geometry.bounds f, ax = plt.subplots(ncols=2, sharex=True, sharey=True, figsize=(10, 6)) airt_gee_da.isel(band=3).plot(ax=ax[0], vmin=274, vmax=296) bbox.plot(ax=ax[0], edgecolor='black', facecolor='none') ax[0].set_title('GEE') airt_tds_da.isel(time=3).plot(ax=ax[1], vmin=274, vmax=296) bbox.plot(ax=ax[1], edgecolor='black', facecolor='none') ax[1].set_xlim([bounds.minx.min(), bounds.maxx.max()]) ax[1].set_ylim([bounds.miny.min(), bounds.maxy.max()]) ax[1].set_title('TDS'); # - # ## Examine timestamps, whether data match expected diurnal cycles # # The Wyoming domain is in the Mountain time zone. MST UTC offset is -7 hours fig, ax = plt.subplots(1, figsize=(16, 4)) tds_ds['Downward_Short-Wave_Radiation_Flux_surface_6_Hour_Average'].mean(['easting', 'northing']).plot(ax=ax); # + #tds_ds.close() # -
CFSv2_GEEcomparison.ipynb
def head(stream, n=10):
    """Collect the first `n` elements of `stream` into a plain list.

    Works on any iterable, including one-shot generators/streams.
    """
    first_n = itertools.islice(stream, n)
    return list(first_n)
def tokenize(document):
    """Turn one judgment into a list of lowercase tokens for topic modelling.

    Pipeline: strip punctuation / gensim stopwords / non-alphanumerics /
    digits, lowercase and split, drop the domain-specific legal stopwords,
    then keep only words that occur more than 5 times in this document and
    are longer than 3 characters.
    """
    for cleaner in (strip_punctuation, remove_stopwords,
                    strip_non_alphanum, strip_numeric):
        document = cleaner(document)
    words = [word for word in document.lower().split()
             if word not in legalstopwords]
    # per-document frequencies; rare/short words carry little topic signal
    counts = Counter(words)
    return [word for word in words if counts[word] > 5 and len(word) > 3]
sc-network-judgments/Topic Modelling of Judgments.ipynb
# Recursively collect the paths of all images (.png/.jpg/.jpeg) under `dir`.
def getImagesName(dir):
    """Walk `dir` and return a list of image file paths.

    Paths are returned with '/' separators because callers later split the
    path on '/' to recover the bare file name.
    """
    allPicPath = []  # accumulated image paths
    for root, dirs, files in os.walk(dir):
        for file in files:
            if file.endswith(('.png', '.jpg', '.jpeg')):
                # BUG FIX: the original appended `dir + '/' + file`, which is
                # wrong for files that os.walk finds inside subdirectories
                # (the sub-folder component was dropped, yielding nonexistent
                # paths). Join against `root` — the directory that actually
                # contains the file — and normalize separators to '/' so the
                # existing '/'-splitting callers keep working.
                allPicPath.append(os.path.join(root, file).replace(os.sep, '/'))
    return allPicPath
# shuffle the order of the tile images so that every generated mosaic
# comes out with a different arrangement
random.shuffle(allTransPicPath)
# counts how many tile images have been consumed so far
j = 0
print(len(allTransPicPath))

# open the white background image and resize it to the tile size; it is
# reused to fill every "blank" (padding) cell of the pattern
# NOTE(review): Image.ANTIALIAS is deprecated in recent Pillow releases
# (replaced by Image.LANCZOS) — confirm the installed Pillow version.
im = Image.open(whiteImagePath[0])
out = im.resize((height, width), Image.ANTIALIAS)

# +
m = 1  # per-row white margin width (in tiles) for the tapering bottom half
# paste `size` tiles into each of the `size` rows; the photo tiles appear to
# be laid out as a heart-shaped region surrounded by white padding tiles
for i in range(size):
    fromImage = ""
    if i == 0:
        # top row: blank, photo, (size-4) blanks, photo, blank — the two
        # isolated photo tiles are the tips of the heart's two lobes
        k = 0
        # one blank cell
        loc = ((k % size) * width, (int(i % size) * height))
        print(loc)
        toImage.paste(out, loc)
        k += 1
        # one photo tile
        loc = ((k % size) * width, (int(i % size) * height))
        print(loc)
        fromImage = Image.open(allTransPicPath[j])
        j += 1
        toImage.paste(fromImage, loc)
        k += 1
        # (size - 4) blank cells
        for h in range(size - 4):
            loc = ((k % size) * width, (int(i % size) * height))
            print(loc)
            toImage.paste(out, loc)
            k += 1
        # one photo tile
        loc = ((k % size) * width, (int(i % size) * height))
        print(loc)
        fromImage = Image.open(allTransPicPath[j])
        j += 1
        toImage.paste(fromImage, loc)
        k += 1
        # one blank cell
        loc = ((k % size) * width, (int(i % size) * height))
        print(loc)
        toImage.paste(out, loc)
        k += 1
    elif i <= (size - 3) / 2 - 1 and i > 0:
        # upper widening rows: (i+2) photos, a gap of blanks, (i+2) photos.
        # NOTE: (size - 3) / 2 is float division under Python 3, but size is
        # odd (see getSize), so the value is exact and the comparison is safe.
        k = 0
        # following the pattern, first print i+2 photo tiles
        for s in range(i + 2):
            loc = ((k % size) * width, (int(i % size) * height))
            print(loc)
            fromImage = Image.open(allTransPicPath[j])
            j += 1
            toImage.paste(fromImage, loc)
            k += 1
        # then (size - (i+2)*2) blank cells — the notch between the lobes
        for h in range(size - (i + 2) * 2):
            loc = ((k % size) * width, (int(i % size) * height))
            print(loc)
            toImage.paste(out, loc)
            k += 1
        # finally another i+2 photo tiles
        for s in range(i + 2):
            loc = ((k % size) * width, (int(i % size) * height))
            print(loc)
            fromImage = Image.open(allTransPicPath[j])
            j += 1
            toImage.paste(fromImage, loc)
            k += 1
    elif i > (size - 3) / 2 - 1 and i <= (size - 3) / 2 + 1:
        # widest part: for these rows the entire row is photo tiles
        for k in range(size):
            loc = ((k % size) * width, (int(i % size) * height))
            print(loc)
            fromImage = Image.open(allTransPicPath[j])
            j += 1
            toImage.paste(fromImage, loc)
    else:
        # tapering bottom rows: m blanks, (size - 2m) photos, m blanks;
        # m grows by one per row so the photo band narrows toward a point
        k = 0
        for x in range(m):
            loc = ((k % size) * width, (int(i % size) * height))
            print(loc)
            toImage.paste(out, loc)
            k += 1
        for y in range(size - 2 * m):
            loc = ((k % size) * width, (int(i % size) * height))
            print(loc)
            fromImage = Image.open(allTransPicPath[j])
            j += 1
            toImage.paste(fromImage, loc)
            k += 1
        for x in range(m):
            loc = ((k % size) * width, (int(i % size) * height))
            print(loc)
            toImage.paste(out, loc)
            k += 1
        m += 1

# after all tiles are pasted onto the big canvas, save the composite to the
# result directory under a random (uuid4) file name
toImage.save(dir + '/' + resultDir + '/' + str(uuid.uuid4()) + '.png')
HappyTimes/ValentineDay/GiveHeartPit2Love.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Instructions # # You're going to practice working in Pandas. # # # You'll walk through instantiating a `DataFrame`, reading data into it, looking at and examining that data, and then playing with it. # # # A dataset on the [quality of red wines](https://archive.ics.uci.edu/ml/datasets/wine+quality) is used for this purpose. # It is lokated in the `data` folder within this directory. It's called `winequality-red.csv`. # # # Typically, we use Jupyter notebooks like this for a very specific set of things - presentations and EDA. # # # Today, as we'll be playing around with `Pandas`, much of what we'll be doing is considered EDA. Therefore, by using a notebook, we'll get a tighter feedback loop with our work than we would trying to write a script. But, in general, **we do not use Jupyter notebooks for development**. # # Below, we've put a set of questions and then a cell for you to work on answers. However, feel free to add additional cells if you'd like. Often it will make sense to use more than one cell for your answers. # # # Assignment Questions # # ### Part 1 - The Basics of DataFrames # # Let's start off by following the general workflow that we use when moving data into a DataFrame: # # * Importing Pandas # * Reading data into the DataFrame # * Getting a general sense of the data # # So, in terms of what you should do for this part... # # 1. Import pandas # 2. Read the wine data into a DataFrame. # 3. Use the `attributes` and `methods` available on DataFrames to answer the following questions: # * How many rows and columns are in the DataFrame? # * What data type is in each column? # * Are all of the variables continuous, or are any categorical? # * How many non-null values are in each column? 
# * What are the min, mean, max, median for all numeric columns? # + import pandas as pd df = pd.read_csv("data/winequality-red.csv", delimiter= ";") df.head() # - df.shape df.info() df.describe() df # ### Part 2 - Practice with Grabbing Data # # Let's now get some practice with grabbing certain parts of the data. If you'd like some extra practice, try answering each of the questions in more than one way (because remember, we can often grab our data in a couple of different ways). # # 1. Grab the first 10 rows of the `chlorides` column. # 2. Grab the last 10 rows of the `chlorides` column. # 3. Grab indices 264-282 of the `chlorides` **and** `density` columns. # 4. Grab all rows where the `chlorides` value is less than 0.10. # 5. Now grab all the rows where the `chlorides` value is greater than the column's mean (try **not** to use a hard-coded value for the mean, but instead a method). # 6. Grab all those rows where the `pH` is greater than 3.0 and less than 3.5. # 7. Further filter the results from 6 to grab only those rows that have a `residual sugar` less than 2.0. df.loc[0:9,["chlorides"]] df["chlorides"].tail(10) df.loc[264:282,["chlorides", "density"]] df[df["chlorides"] < 0.1] df[df["chlorides"] > df["chlorides"].mean()] df[(df["pH"] > 3.0) & (df["pH"] < 3.5)] df[(df["pH"] > 3.0) & (df["pH"] < 3.5) & (df["residual sugar"] < 2.0)] # ### Part 3 - More Practice # # Let's move on to some more complicated things. Use your knowledge of `groupby`s, `sorting` to answer the following. # # 1. Get the average amount of `chlorides` for each `quality` value. # 2. For observations with a `pH` greater than 3.0 and less than 4.0, find the average `alcohol` value by `pH`. # 3. For observations with an `alcohol` value between 9.25 and 9.5, find the highest amount of `residual sugar`. # 4. Create a new column, called `total_acidity`, that is the sum of `fixed acidity` and `volatile acidity`. # 5. Find the average `total_acidity` for each of the `quality` values. # 6. 
Find the top 5 `density` values. # 7. Find the 10 lowest `sulphates` values. df.rename(columns={'fixed acidity': 'fixed_acidity'}, inplace=True) df.rename(columns={'volatile acidity': 'volatile_acidity'}, inplace=True) df.groupby(["quality"]).mean()['chlorides'] a = df[(df["pH"] > 3.0) & (df["pH"] < 4.0)] a.mean()["alcohol"] a = df[(df["alcohol"] > 9.25) & (df["alcohol"] < 9.5)] a.groupby(["alcohol"]).max()["residual sugar"] df.eval('total_acidity = fixed_acidity + volatile_acidity', inplace = True) df.columns a = df.groupby("total_acidity") a = a.mean()['quality'] a df.sort_values(["density"]).head(5) df.sort_values(["sulphates"], ascending=False).head(10) df
02_pandas_practice_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Kili Tutorial: Importing assets # # In this tutorial, we will walk through the process of using Kili to import assets. The goal of this tutorial is to illustrate some basic components and concepts of Kili in a simple way. # # Additionally: # # For an overview of Kili, visit kili-technology.com # You can also check out the Kili documentation https://kili-technology.github.io/kili-docs. # # The tutorial is divided into four parts: # # 1. Importing assets # 2. Setting up a labeling priority # 3. Setting up who can label the asset # # This next cell connect the notebook to the Kili API. You need to update the credentials `email`, `password` and `project_id` before. # + # !pip install kili from kili.authentication import KiliAuth from kili.playground import Playground email = 'YOUR EMAIL' password = '<PASSWORD>' project_id = 'YOUR PROJECT ID' kauth = KiliAuth(email=email, password=password) playground = Playground(kauth) # - # ## Importing assets # # An Asset is the smallest piece of information we want to label. It can be: a text, an image, a sound, etc... # # To insert an asset in Kili, you need two essential pieces of information: # # - The content of the asset (i.e. the text for a text document, or an url that points to the document, or the path to the image file on the hard drive). # - The name of the asset in Kili to be able to recognize it once inserted: `externalId`. This name must be unique within the same project. # # It is also possible to associate meta information to certain assets that can be displayed in certain interfaces. We will not go into these details in this tutorial. # # The cell below inserts three image files: two are in the form of image urls and one is in the form of a file on the hard drive. 
# **Warning, before running this snippet, make sure you have an `example.jpg`
# file in your Downloads folder.**

# +
import os  # BUG FIX: os was used below (os.path.join / os.getenv) but never imported

from tqdm import tqdm

# Assets are appended in chunks to keep each API request a reasonable size.
CHUNK_SIZE = 100

def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

# Three example assets: two remote image URLs and one local file path.
# 'metadata' is a free-form dict; 'priority' and 'toBeLabeledBy' are optional
# and are consumed by the follow-up cells below.
assets = [
    {
        'externalId': 'example 1',
        'content': 'https://images.caradisiac.com/logos/3/8/6/7/253867/S0-tesla-enregistre-d-importantes-pertes-au-premier-trimestre-175948.jpg',
        'metadata': {},
        'toBeLabeledBy': ['<EMAIL>']
    },
    {
        'externalId': 'example 2',
        'content': 'https://img.sportauto.fr/news/2018/11/28/1533574/1920%7C1280%7Cc096243e5460db3e5e70c773.jpg',
        'metadata': {},
        'priority': 1
    },
    {
        'externalId': 'example 3',
        'content': os.path.join(os.getenv('HOME'), 'Downloads', 'example.jpg'),
        'metadata': {},
    },
]

for asset_chunk in tqdm(list(chunks(assets, CHUNK_SIZE))):
    external_id_array = [a.get('externalId') for a in asset_chunk]
    content_array = [a.get('content') for a in asset_chunk]
    # BUG FIX: 'metadata' already holds a dict ({}), so the original
    # json.loads(a.get('metadata')) — which expects a JSON *string* —
    # raised a TypeError. Pass the dicts through unchanged.
    json_metadata_array = [a.get('metadata') for a in asset_chunk]
    playground.append_many_to_dataset(project_id=project_id,
                                      content_array=content_array,
                                      external_id_array=external_id_array,
                                      json_metadata_array=json_metadata_array)
# -
# Bump the stack position of every asset that declares a 'priority' key
# (here only 'example 2', which was inserted with priority=1 above).
for asset in assets:
    if 'priority' not in asset:
        continue
    # NOTE(review): the asset's externalId is passed as `asset_id` — confirm
    # the API resolves external IDs here rather than internal asset ids.
    playground.update_properties_in_asset(asset_id=asset.get('externalId'),
                                          priority=asset.get('priority'))

# ## Setting up who can label the asset
# In some cases we also want to be able to choose who among the annotators will label the assets. To do this, simply pass to the asset the list of emails from the annotators in question with the `toBeLabeledBy` property.
#
# The cell below will allow asset `example 1` to be only seen by <EMAIL>.

# Restrict labeling of every asset that declares a 'toBeLabeledBy' key
# (here only 'example 1') to the listed annotator emails.
for asset in assets:
    if 'toBeLabeledBy' not in asset:
        continue
    # NOTE(review): same externalId-as-asset_id assumption as above.
    playground.update_properties_in_asset(asset_id=asset.get('externalId'),
                                          to_be_labeled_by=asset.get('toBeLabeledBy'))

# In this tutorial, we accomplished the following:
#
# We introduced the concept of Kili asset content and asset external ID. We demonstrated how to append an asset to a project with the API.
# We showed that an asset can have a priority and a list of labelers.
# If you enjoyed this tutorial, check out the other Recipes for other tutorials that you may find interesting, including demonstrations of how to use Kili.
#
# You can also visit the [Kili website](https://kili-technology.com) or [Kili documentation](https://kili-technology.github.io/kili-docs) for more info!
recipes/import_assets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import cv2 import pyrealsense2 from realsense_depth import * point = (400, 300) def show_distance(event, x, y, args, params): global point point = (x, y) # Initialize Camera Intel Realsense dc = DepthCamera() # Create mouse event cv2.namedWindow("Color frame") cv2.setMouseCallback("Color frame", show_distance) while True: ret, depth_frame, color_frame = dc.get_frame() # Show distance for a specific point cv2.circle(color_frame, point, 4, (0, 0, 255)) distance = depth_frame[point[1], point[0]] cv2.putText(color_frame, "{}mm".format(distance), (point[0], point[1] - 20), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 0), 2) cv2.imshow("depth frame", depth_frame) cv2.imshow("Color frame", color_frame) key = cv2.waitKey(1) if key == 27: break # -
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import scipy.stats import numpy as np import pandas as pd import datetime from matplotlib import pyplot as plt from matplotlib.pyplot import figure import matplotlib.dates as mdates # %matplotlib inline # + columns = ['category', 'sentiment', 'match', 'text'] dir_data = '../data/' en_q1_matches = pd.read_csv(dir_data + 'en_q1.csv', names=columns) en_q2_matches = pd.read_csv(dir_data + 'en_q2.csv', names=columns) en_q3_matches = pd.read_csv(dir_data + 'en_q3.csv', names=columns) es_matches = pd.read_csv(dir_data + 'es.csv', names=columns) en_q1_matches.head() # + frames = [en_q1_matches, en_q2_matches, en_q3_matches] en_matches = pd.concat(frames) en_matches['category'].value_counts() # - en_matches['sentiment'].value_counts() es_matches['category'].value_counts() en_matches.groupby(['category','sentiment']).size() es_matches.groupby(['category','sentiment']).size()
notebooks/category distribution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] deletable=false nbgrader={"checksum": "9f233eb32b3773049e805057c9cafee7", "grade": false, "grade_id": "random_desc", "locked": true, "solution": false} # # Pure Python evaluation of vector norms # # Generate a list of random floats of a given dimension (dim), and store its result in the variable `vec`. # + deletable=false nbgrader={"checksum": "b1ed817404c5a21f9dcf7f89748a0c8d", "grade": false, "grade_id": "random", "locked": false, "solution": true} # This is used for plots and numpy # %pylab inline import random as rand dim = int(10) vec = [] for i in range(dim): elem = rand.random() vec.append(elem) print vec # YOUR CODE HERE #raise NotImplementedError() # + deletable=false nbgrader={"checksum": "223c15c7276b82fe757f18357a42b7a2", "grade": true, "grade_id": "random_check", "locked": true, "points": 3, "solution": false} from numpy.testing import * assert_equal(type(vec), list) assert_equal(len(vec), dim) for ob in vec: assert_equal(type(ob), float) # + [markdown] deletable=false nbgrader={"checksum": "51a82e789d9ec00a9660d9e01a93ac95", "grade": false, "grade_id": "norm-desc", "locked": true, "solution": false} # Write a function that evaluates the $l_p$ norm of a vector in $R^d$. We remind: # $$ # \|v \|_{p} := \left(\sum_i (v_i)^p\right)^{1/p} # $$ # # the function should take as arguments a `list`, containing your $R^d$ vector, and a number `p` in the range $[1, \infty]$, indicating the exponent of the norm. # # **Note:** an infinite float number is given by `float("inf")`. # # Throw an assertion (look it up on google!) if the exponent is not in the range you expect. 
def p_norm(vector, p):
    """Return the l_p norm of `vector` for an exponent p in [1, inf].

    Raises AssertionError when p < 1, where the l_p norm is not defined.
    """
    assert p >= 1
    magnitudes = [abs(component) for component in vector]
    if p == float("inf"):
        # the sup-norm is the largest absolute component
        return max(magnitudes)
    # (sum_i |v_i|^p)^(1/p)
    total = sum(component ** float(p) for component in magnitudes)
    return pow(total, 1 / float(p))
def FD(f, x, h):
    """One-sided (forward) finite difference approximating f'(x) with step h."""
    rise = f(x + h) - f(x)
    return rise / float(h)
Error plots # # Plot the error, defined as `abs(FD-cos(1.0))` where `FD` is your approximation, in `loglog` format and explain what you see. A good way to emphasize the result is to give the option `'-o'` to the plot command. # + deletable=false nbgrader={"checksum": "58f3d772034530fae4f1394fd9ac9065", "grade": true, "grade_id": "fd3", "locked": false, "points": 1, "solution": true} error = [] for i in range(21): error_der = abs(FD(sin, 1, pow(10,-i)) - cos(1)) error.append(error_der) h = [pow(10,-i) for i in range(21)] #figure(figsize=[10,10]) #_ = loglog(h, error) figure(figsize=[5,5]) plt.loglog(h,error, "-o") plt.show() # + [markdown] deletable=false nbgrader={"checksum": "0f366e5b02bb5272ce93a6f24ef607a0", "grade": true, "grade_id": "fd3-comment", "locked": false, "points": 5, "solution": true} # We have three sources of error. # # FD_err : error of the numerical derivarive. It is proportional to h: when h decreases, it does the same # # difference_err : error due to the difference between two numbers.If h is very small, we have less significant digits. It goes as 1/h. # # binary_err : h goes as like powers of 10, while it would be better that h go as power of 2 # # At a certain point, around 10^16, the error saturates since we have reached the precision of the machine # + [markdown] deletable=false nbgrader={"checksum": "6105b9c6350f6856a194da4d139b3794", "grade": false, "grade_id": "fd-base2-desc", "locked": true, "solution": false} # ## 4. Error plots base 2 # Repeate step 2 and 3 above, but using powers of `2` instead of powers of `10`, i.e., using `h` equal to `2**(-i)` for `i=1,...,60`. Do you see differences? How do you explain these differences? Shortly comment. A good way to emphasize the result is to give the option `'-o'` to the plot command. 
# +
# Same error study, but with h = 2**(-i): powers of two are exactly
# representable in double precision.
error = [abs(FD(sin, 1, pow(2, -i)) - cos(1)) for i in range(61)]
h = [pow(2, -i) for i in range(61)]

figure(figsize=[5, 5])
plt.loglog(h, error, "-o")
plt.show()

# + [markdown] deletable=false nbgrader={"checksum": "673e9b7faebb995a403a6a1e9ea76f47", "grade": true, "grade_id": "fd-base2", "locked": false, "points": 1, "solution": true}
# The considerations are similar to the ones before, but now we do not have the binary error contribution

# + [markdown] deletable=false nbgrader={"checksum": "42758fc5eb31ceb960921a409325d936", "grade": true, "grade_id": "fd-base2-comment", "locked": false, "points": 5, "solution": true}
#

# + [markdown] deletable=false nbgrader={"checksum": "7f0300838939fb3637561d2da88d79dd", "grade": false, "grade_id": "cfd-desc", "locked": true, "solution": false}
# ## 5. Central Finite Differences
# Write a function that computes the central finite difference approximation (`CFD`), defined as
#
# $$
# CFD(f,x,h) := \frac{f(x+h)-f(x-h)}{2h}
# $$
#

# + deletable=false nbgrader={"checksum": "d5de503bdfc25f777473402d07a89211", "grade": false, "grade_id": "cfd", "locked": false, "solution": true}
def CFD(f, x, h):
    """Central finite-difference approximation of f'(x) with half-step h."""
    return (f(x + h) - f(x - h)) / (2 * float(h))


# + deletable=false nbgrader={"checksum": "2851b0f40a3773162f52614524b25963", "grade": true, "grade_id": "cfd-check", "locked": true, "points": 1, "solution": false}
assert_equal(CFD(lambda x: x**2, 0.0, .5), 0.0)
assert_equal(CFD(lambda x: x**2, 1.0, .5), 2.0)

# + [markdown] deletable=false nbgrader={"checksum": "0da2440c559f1c3ee4c746c498914b48", "grade": false, "grade_id": "cfd2-desc", "locked": true, "solution": false}
# ## 6. Error plots for CFD
#
# Repeat steps 2., 3. and 4. and explain what you see. What is the *order* of the approximation 1. and what is the order of the approximation 5.? What's the order of the cancellation errors?
# + deletable=false nbgrader={"checksum": "cb123e54409262ac79e1fb86fdb478f1", "grade": true, "grade_id": "cfd2-plots", "locked": false, "points": 2, "solution": true}
# Error of the central difference for h = 10**(-i).
error = [abs(CFD(sin, 1, pow(10, -i)) - cos(1)) for i in range(21)]
h = [pow(10, -i) for i in range(21)]

figure(figsize=[5, 5])
plt.loglog(h, error, "-o")
plt.show()

# +
# Error of the central difference for h = 2**(-i).
error = [abs(CFD(sin, 1, pow(2, -i)) - cos(1)) for i in range(61)]
h = [pow(2, -i) for i in range(61)]

figure(figsize=[5, 5])
plt.loglog(h, error, "-o")
plt.show()

# + [markdown] deletable=false nbgrader={"checksum": "f90479827a3e6e2a1062164895849b6d", "grade": true, "grade_id": "cfd2-plots-comment", "locked": false, "points": 5, "solution": true}
# In the CFD case we evaluate f(x + h) and f(x - h) and subtract them: it is as if we took the FD derivative on the right and the FD derivative on the left.
#
# err_dx = c*h + O(h^2)
#
# err_sx = -c*h + O(h^2)
#
# The first-order terms cancel, therefore the truncation error goes as h^2.

# + [markdown] deletable=false nbgrader={"checksum": "10bc99f084a9050397f0c1f708565964", "grade": false, "grade_id": "array-desc", "locked": true, "solution": false}
# # Numpy
#
# Numpy provides a very powerful array container. The first line of this ipython notebook has imported all of numpy functionalities in your notebook, just as if you typed::
#
#     from numpy import *
#
# Create a numpy array with entries that range from 0 to 64. Use the correct numpy function to do so. Call it `x`.
# + deletable=false nbgrader={"checksum": "6550f75e6ff7e6ce91a8be32deafc301", "grade": false, "grade_id": "array", "locked": false, "solution": true}
# np.arange is the canonical numpy function to build the integers 0..63.
x = np.arange(64)

# + deletable=false nbgrader={"checksum": "8faab133eca46158c2547fa7b0967542", "grade": true, "grade_id": "array-check", "locked": true, "points": 1, "solution": false}
assert_equal(type(x), ndarray)
assert_equal(len(x), 64)
# `xrange` does not exist in Python 3 (the notebook kernel): use `range`.
for i in range(64):
    assert_equal(x[i], float(i))

# + [markdown] deletable=false nbgrader={"checksum": "4cd4b51907f835b495ddbf7dbaa51712", "grade": false, "grade_id": "array2-desc", "locked": true, "solution": false}
# Reshape the one dimensional array, to become a 4 rows 2 dimensional array, let numpy evaluate the correct number of columns. Call it `y`.

# + deletable=false nbgrader={"checksum": "5f4fd1f167cb5329adf97b2ec8419aeb", "grade": false, "grade_id": "array2", "locked": false, "solution": true}
# -1 lets numpy deduce the number of columns (here 16), as the task asks.
y = np.reshape(x, (4, -1))
# Python 3: print is a function.
print(y)

# + deletable=false nbgrader={"checksum": "ea78fb4567e08e5401b67e4d085856b2", "grade": true, "grade_id": "array2-check", "locked": true, "points": 1, "solution": false}
assert_equal(shape(y), (4, 16))

# + [markdown] deletable=false nbgrader={"checksum": "d0bbc9f6e1e02dbbc95ec51cf7122c3b", "grade": false, "grade_id": "array3-desc", "locked": true, "solution": false}
# Get the following *slices* of `y`:
#
# * All the rows and the first three columns. Name it `sl1`.
# * All the columns and the first three rows. Name it `sl2`.
# * Third to sixth (included) columns and all the rows. Name it `sl3`.
# * The last three columns and all the rows. Name it `sl4`.
# + deletable=false nbgrader={"checksum": "e1a3a50073b0902f287b4b0c38651fd3", "grade": false, "grade_id": "array3", "locked": false, "points": 1, "solution": true}
# All rows, first three columns (single slice instead of two chained ones).
sl1 = y[:, 0:3]
# First three rows, all columns.
sl2 = y[0:3, :]
# Third to sixth (included) columns, all rows: 0-based columns 3..6.
sl3 = y[:, 3:7]
# Last three columns, all rows.
sl4 = y[:, -3:]

# Python 3: print is a function.
print(sl1)
print(sl2)
print(sl3)
print(sl4)

# + deletable=false nbgrader={"checksum": "1badf276fe91c6c14eb9338c19b73e0b", "grade": true, "grade_id": "array3-test", "locked": true, "points": 1, "solution": false}
assert_equal(sl1,[[0,1,2],[16,17,18],[32,33,34],[48,49,50]])
assert_equal(sl2,[[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],[32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47]])
assert_equal(sl3,[[3,4,5,6],[19,20,21,22],[35,36,37,38],[51,52,53,54]])
assert_equal(sl4,[[13,14,15],[29,30,31],[45,46,47],[61,62,63]])

# + [markdown] deletable=false nbgrader={"checksum": "33b9e1780c0dd4ffea9751aebf84025e", "grade": false, "grade_id": "array4-desc", "locked": true, "solution": false}
# Now reshape the array, as if you wanted to feed it to a fortran routine. Call it `z`.

# + deletable=false nbgrader={"checksum": "a1d8e18c4b52c6ffbd79742b57141ec0", "grade": true, "grade_id": "array4", "locked": false, "points": 1, "solution": true}
# order='F' fills the new shape column-major, the way Fortran stores arrays.
z = np.reshape(x, (4, 16), order='F')
print(z)
print(y)

# + [markdown] deletable=false nbgrader={"checksum": "89ccb4f60ec33efae83377292eb3a8db", "grade": false, "grade_id": "array5-desc", "locked": true, "points": 1, "solution": false}
# Comment on the result, what has changed with respect to `y`?

# + [markdown] deletable=false nbgrader={"checksum": "5e50e6c6ca7f382d3ce2e2be28ba537a", "grade": true, "grade_id": "array5", "locked": false, "points": 1, "solution": true}
# The difference is that in python (numpy's default, C order) elements are ordered row-wise, while in fortran they are ordered column-wise.
# + [markdown] deletable=false nbgrader={"checksum": "1829db8f96489200d1dc93edcb14b265", "grade": false, "grade_id": "array6-desc", "locked": true, "solution": false}
# Set the fourth element of `x` to 666666, and print `x`, `y`, `z`. Comment on the result

# + deletable=false nbgrader={"checksum": "87726e086d7a7a73c1b8655540f7e852", "grade": true, "grade_id": "array6-1", "locked": false, "points": 1, "solution": true}
x[3] = 666666
# Python 3: print is a function.
print(x)
print(y)
print(z)

# + [markdown] deletable=false nbgrader={"checksum": "847fd2ffa5fc2351fcfd401fb9c1421b", "grade": true, "grade_id": "array6-2", "locked": false, "points": 3, "solution": true}
# The element x[3] has been overwritten with a new value. Since `y` was obtained
# by a plain reshape it is a *view* on the same data, so it shows the new value
# too; `z` was created with order='F' from a C-ordered array, which forces a
# *copy*, so it is unchanged.

# + [markdown] deletable=false nbgrader={"checksum": "9873725533c9c62aa22dc841f6ff3c04", "grade": false, "grade_id": "array7-1-desc", "locked": true, "solution": false}
# ## Arrays and Matrices
#
# Define 2 arrays, `A` of dimensions (2,3) and `B` of dimension (3,4).
#
# * Perform the operation `C = A.dot(B)`. Comment the result, or the error you get.
# + deletable=false nbgrader={"checksum": "15bc5991a98ef17ada24a81a38ccf098", "grade": false, "grade_id": "array7-0", "locked": false, "solution": true}
# A is (2, 3) and B is (3, 4), with entries i*j for 1-based row/column indices.
A = np.array([[i*j for i in range(1,4)] for j in range(1,3)])
B = np.array([[i*j for i in range(1,5)] for j in range(1,4)])
C = A.dot(B)
# Python 3: print is a function.
print(A)
print(B)
print(C)

# + deletable=false nbgrader={"checksum": "e004ee40f8603bf8cbf63b77072489b6", "grade": true, "grade_id": "array7-0-test", "locked": true, "points": 1, "solution": false}
assert_equal(A.shape,(2,3))
assert_equal(B.shape,(3,4))
assert_equal(C.shape,(2,4))
expected = sum(A[1,:]*B[:,2])
assert_equal(C[1,2],expected)

# + [markdown] deletable=false nbgrader={"checksum": "0fffe3b48b380095d30a35596935b2b2", "grade": true, "grade_id": "array7-1", "locked": false, "points": 1, "solution": true}
# `A.dot(B)` is the ordinary matrix-matrix product: it is well defined because
# the number of columns of A (3) matches the number of rows of B, and the
# result has shape (2, 4).

# + [markdown] deletable=false nbgrader={"checksum": "8daa41c2ab4249f4c3a24697a3834cfe", "grade": false, "grade_id": "array7-2-desc", "locked": true, "solution": false}
# * Perform the operation `C = A*(B)`. Comment the result, or the error you get.
# -
# On ndarrays `*` is the *element-wise* product, which needs broadcastable
# shapes; (2, 3) and (3, 4) are not compatible, so numpy raises a ValueError.
# The try/except shows the error without aborting a run-all of the notebook.
try:
    C = A*B
except ValueError as err:
    print("A*B failed:", err)

# + [markdown] deletable=false nbgrader={"checksum": "47db906d12d079403949954c4f79c316", "grade": true, "grade_id": "array7-2", "locked": false, "points": 1, "solution": true}
# `A*B` on arrays attempts an element-wise multiplication; since the shapes
# (2, 3) and (3, 4) cannot be broadcast together, numpy raises a ValueError.
# -

# * Convert A and B, from arrays to matrices and perform `A*B`. Comment the result.
# + deletable=false nbgrader={"checksum": "b81bf715c9c1d4ccbc8ea94abe5e7e0b", "grade": false, "grade_id": "array7-3", "locked": false, "points": 1, "solution": true}
# np.asmatrix wraps the arrays as numpy.matrix objects, for which `*` is the
# matrix product. NOTE(review): numpy.matrix is deprecated upstream in favour
# of plain arrays with the @ operator.
A = np.asmatrix(A)
B = np.asmatrix(B)
C = A*B
# Python 3: print is a function.
print(C)
print(type(C))

# + deletable=false nbgrader={"checksum": "30fb1a393cb4628e67361886ad51a242", "grade": true, "grade_id": "array7-3-test", "locked": true, "points": 1, "solution": false}
assert_equal(type(A),numpy.matrixlib.defmatrix.matrix)
assert_equal(type(B),numpy.matrixlib.defmatrix.matrix)
assert_equal(type(C),numpy.matrixlib.defmatrix.matrix)
assert_equal(A.shape,(2,3))
assert_equal(B.shape,(3,4))
assert_equal(C.shape,(2,4))
expected = sum(A[1,:]*B[:,2])
assert_equal(C[1,2],expected)

# + [markdown] deletable=false nbgrader={"checksum": "08676ca6235af8c49ec80aa4a66a590e", "grade": true, "grade_id": "array7-3-1", "locked": false, "points": 1, "solution": true}
# For `numpy.matrix` objects `*` denotes the matrix product, so here `A*B`
# succeeds and returns a (2, 4) matrix — the same result `A.dot(B)` gave on
# the plain arrays.

# + [markdown] deletable=false nbgrader={"checksum": "8b8d0c8c57aae75207e34b8fb2000846", "grade": false, "grade_id": "polynomials-desc", "locked": true, "solution": false}
# # Playing with polynomials
#
# The polynomial `(1-x)^6` can be expanded to::
#
#     x^6 - 6*x^5 + 15*x^4 - 20*x^3 + 15*x^2 - 6*x + 1
#
# The two forms above are equivalent from a mathematical point of
# view, but may yield different results in a computer machine.
#
# Compute and plot the values of this polynomial, using each of the
# two forms, for 101 equally spaced points in the interval
# `[0.995,1.005]`, i.e., with a spacing of 0.0001 (use linspace).
#
# Can you explain this behavior?
#

# + deletable=false nbgrader={"checksum": "4cdc4cafeca5d880ebd261209ed618a3", "grade": true, "grade_id": "polynomials-1", "locked": false, "points": 2, "solution": true}
def pol(x):
    """(1-x)**6 in factored form: numerically stable near x = 1."""
    return (1 - x)**6

def pol_expanded(x):
    """(1-x)**6 expanded: suffers catastrophic cancellation near x = 1."""
    return x**6 - 6*x**5 + 15*x**4 - 20*x**3 + 15*x**2 - 6*x + 1

# 101 equally spaced points in [0.995, 1.005] (spacing 0.0001).
# NOTE: linspace's third argument is the *number of samples* and must be an
# integer; the original float expression (1.005-0.995)/0.0001 is rejected by
# modern numpy and would correspond to 100 points, not the required 101.
x = np.linspace(0.995, 1.005, 101)

pol_value = pol(x)
pol_exp_value = pol_expanded(x)

plt.plot(x, pol_value, color="green")
plt.plot(x, pol_exp_value, color="blue")
plt.show()

# + [markdown] deletable=false nbgrader={"checksum": "6756da172811699667da7cf0fe44b1f0", "grade": true, "grade_id": "polynomials-2", "locked": false, "points": 1, "solution": true}
# Differences between nearly equal numbers yield big oscillations (errors) in the expanded form, since few significant digits survive the cancellation.

# + [markdown] deletable=false nbgrader={"checksum": "0a479e894bbf31104f4ec5fd00345c57", "grade": false, "grade_id": "lagrange-desc", "locked": true, "solution": false}
# **Playing with interpolation in python**
#
# 1. Given a set of $n+1$ points $x_i$ as input (either a list of floats, or a numpy array of floats), construct a function `lagrange_basis(xi,i,x)` that returns the $i$-th Lagrange # polynomial associated to $x_i$, evaluated at $x$. The $i$-th Lagrange polynomial is defined as polynomial of degree $n$ such that $l_i(x_j) = \delta_{ij}$, where $\delta$ is one if $i == j$ and zero otherwise.
#
# Recall the mathematical definition of the $l_i(x)$ polynomials:
#
# $$
# l_i(x) := \prod_{j=0, j\neq i}^{n} \frac{x-x_j}{x_i-x_j}
# $$
#

# + deletable=false nbgrader={"checksum": "af917ca6cdf69420a0d0f725c3fb34ab", "grade": false, "grade_id": "lagrange", "locked": false, "solution": true}
def lagrange_basis(xi, i, x):
    """Evaluate the i-th Lagrange basis polynomial on the nodes `xi` at `x`.

    `x` may be a scalar or a numpy array. The `assert` is intentional: the
    locked check below expects an AssertionError for out-of-range `i`.
    """
    assert(i < len(xi) and i >= 0)
    # np.prod replaces np.product, which was removed in numpy 2.0.
    li = np.prod([(x - xi[j])/(xi[i] - xi[j]) for j in range(len(xi)) if j != i], axis=0)
    return li

x = linspace(0, 1, 5)
d = 3
xi = linspace(0, 1, d)
# Python 3: print is a function.
print(lagrange_basis(xi, 1, x))

# + deletable=false nbgrader={"checksum": "73158993cb16085f319edae1418cccbb", "grade": true, "grade_id": "lagrange-check", "locked": true, "points": 4, "solution": false}
x = linspace(0,1,5)
d = 3
xi = linspace(0,1,d)
assert_equal(list(lagrange_basis(xi, 0, x)),[1.0, 0.375, -0.0, -0.125, 0.0])
assert_equal(list(lagrange_basis(xi, 1, x)),[0.0, 0.75, 1.0, 0.75, -0.0])
assert_equal(list(lagrange_basis(xi, 2, x)),[-0.0, -0.125, 0.0, 0.375, 1.0])
assert_raises(AssertionError, lagrange_basis, xi, -1, x)
assert_raises(AssertionError, lagrange_basis, xi, 10, x)

# + [markdown] deletable=false nbgrader={"checksum": "450949a960185bfab0adb1e2780677b5", "grade": false, "grade_id": "lagrange2-desc", "locked": true, "solution": false}
# Construct the function `lagrange_interpolation(xi,g)` that, given the set of interpolation points `xi` and a function `g`, it returns **another function** that when evaluated at **x** returns the Lagrange interpolation polynomial of `g` defined as
#
# $$
# \mathcal{L} g(x) := \sum_{i=0}^n g(x_i) l_i(x)
# $$
#
# You could use this function as follows::
#
#     Lg = lagrange_interpolation(xi, g)
#     xi = linspace(0,1,101)
#     plot(x, g(x))
#     plot(x, Lg(x))
#     plot(xi, g(xi), 'or')

# + deletable=false nbgrader={"checksum": "72546294ade57aa55dd327b7a989660e", "grade": false, "grade_id": "lagrange2", "locked": false, "solution": true}
def lagrange_interpolation(xi, g):
    """Return a callable evaluating the Lagrange interpolant of `g` on nodes `xi`.

    The returned function expects a numpy array `x` and computes
    sum_i g(x_i) * l_i(x). The inner function closes over `xi` and `g`,
    so no extra arguments need to be threaded through.
    """
    def L_g(x):
        summ = zeros(x.shape)
        for i in range(len(xi)):
            summ += g(xi[i]) * lagrange_basis(xi, i, x)
        return summ
    return L_g

# + deletable=false nbgrader={"checksum": "40ee1bebe6d691744dccbef3c40fe196", "grade": true, "grade_id": "lagrange2-check", "locked": true, "points": 4, "solution": false}
# Check for polynomials. This should be **exact**
g = lambda x: x**3+x**2
xi = linspace(0,1,4)
Lg = lagrange_interpolation(xi, g)
x = linspace(0,1,1001)
assert p_norm(g(x) - Lg(x),float('inf')) < 1e-15, 'This should be zero...'
# -
assignments/release/Basics/D1-exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ 
\left\lVert #1 \right\rVert } $ # $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ # $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $ # $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $ # $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $ # $ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $ # $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $ # <font style="font-size:28px;" align="left"><b> <font color="blue"> Solutions for </font>Probabilistic Operators </b></font> # <br> # _prepared by <NAME>_ # <br><br> # <a id="task2"></a> # <h3>Task 2</h3> # # We are given the following probabilistic operator: $ B = \mymatrix{ccc}{ 0.4 & 0.6 & 0 \\ 0.2 & 0.1 & 0.7 \\ 0.4 & 0.3 & 0.3 } $. # # What is the transition probability from the second state to the third state? # # What is the transition probability from the third state to the first state? # # What is the transition probability from the first state to the second state? # <h3>Solution</h3> # Remember that the transitions are from the top to the left: # # $ # \begin{array}{c|ccc} \hookleftarrow & \mathbf{s1} & \mathbf{s2} & \mathbf{s3} \\ \hline # \mathbf{s1} & 0.4 & 0.6 & 0 \\ # \mathbf{s2} & 0.2 & 0.1 & 0.7 \\ # \mathbf{s3} & 0.4 & 0.3 & 0.3 \end{array} # $ # # The transition probability **from the second state to the third state** is represented by the entry at **the 2nd column and 3rd row**, and so it is 0.3. # # $ # \begin{array}{c|ccc} \hookleftarrow & \mathbf{s1} & \color{blue}{\mathbf{from}} & \mathbf{s3} \\ \hline # \mathbf{s1} & 0.4 & 0.6 & 0 \\ # \mathbf{s2} & 0.2 & 0.1 & 0.7 \\ # \color{blue}{\mathbf{to}} & 0.4 & {\color{blue}{\mathbf{0.3}}} & 0.3 \end{array} # $ # # The transition probability **from the third state to the first state** is represented by the entry at **the 3rd column and 1st row**, and so it is 0.3. 
# # $ # \begin{array}{c|ccc} \hookleftarrow & \mathbf{s1} & \mathbf{s2} & \color{blue}{\mathbf{from}} \\ \hline # \color{blue}{\mathbf{to}} & 0.4 & 0.6 & {\color{blue}{\mathbf{0}}} \\ # \mathbf{s2} & 0.2 & 0.1 & 0.7 \\ # \mathbf{s3} & 0.4 & 0.3 & 0.3 \end{array} # $ # # The transition probability **from the first state to the second state** is represented by the entry at **the 1st column and 2nd row**, and so it is 0.2. # # $ # \begin{array}{c|ccc} \hookleftarrow & \color{blue}{\mathbf{from}} & \mathbf{s2} & \mathbf{s3} \\ \hline # \mathbf{s1} & 0.4 & 0.6 & 0 \\ # \color{blue}{\mathbf{to}} & {\color{blue}{\mathbf{0.2}}} & 0.1 & 0.7 \\ # \mathbf{s3} & 0.4 & 0.3 & 0.3 \end{array} # $ # <a id="task3"></a> # <h3>Task 3</h3> # # Randomly construct a $ (3 \times 3 ) $-dimensional probabilistic operator. # # Randomly determine the entries of the matrix that represents a probabilistic operator. # <h3>Solution</h3> # + # let's start with a zero matrix A = [ [0,0,0], [0,0,0], [0,0,0] ] # we will randomly pick the entries and then make normalization for each column # it will be easier to iteratively construct the columns # you may notice that each column is a probabilistic state from random import randrange normalization_factor = [0,0,0] # the normalization factor of each column may be different for j in range(3): # each column is iteratively constructed normalization_factor[j] = 0 while normalization_factor[j]==0: # the normalization factor cannot be zero for i in range(3): A[i][j] = randrange(101) # pick a random value between 0 and 100 normalization_factor[j] += A[i][j] # let's print matrix A before the normalization # the entries are between 0 and 100 print("matrix A before normalization:") for i in range(3): print(A[i]) # let's normalize each column for j in range(3): for i in range(3): A[i][j] /= normalization_factor[j] # shorter form of A[i][j] = A[i][j] / normalization_factor[j] # let's print matrix A after the normalization print() # print an empty line print("matrix 
A after normalization:") for i in range(3): print(A[i]) print() print("the column summations must be 1") sum = [0,0,0] for j in range(3): for i in range(3): sum[j] += A[i][j] print(sum) # - # <a id="task4"></a> # <h3>Task 4</h3> # # What is the new probabilistic state if the operator $ B = \mymatrix{ccc}{ 0.4 & 0.6 & 0 \\ 0.2 & 0.1 & 0.7 \\ 0.4 & 0.3 & 0.3 } $ is applied to the state $ \myvector{ 0.1 \\ 0.3 \\ 0.6 } $. # # Please find the result by using matrix-vector multiplication. # # Please do not use any python library for matrix-vector multiplication. # # <i> The new probabilistic state should be $ \myvector{0.22 \\ 0.47 \\ 0.31} $. </i> # <h3>Solution</h3> # + # operator B B = [ [0.4,0.6,0], [0.2,0.1,0.7], [0.4,0.3,0.3] ] # the current state v = [0.1,0.3,0.6] newstate = [] index = 0 for row in B: newstate.append(0) for i in range(len(row)): newstate[index] = newstate[index] + row[i] * v[i] index = index + 1 print(newstate) # - # <a id="task5"></a> # <h3> Task 5 </h3> # # Write a function that takes a probabilistic operator and a probabilistic state, and then returns the new probabilistic state. # # Your function should work for any dimension. # # Test your function on $ \mymatrix{ccc}{ 0.4 & 0.6 & 0 \\ 0.2 & 0.1 & 0.7 \\ 0.4 & 0.3 & 0.3 } $ and $ \myvector{0.1 \\ 0.3 \\ 0.6} $. # # The new probabilistic state should be $ \myvector{0.22 \\ 0.47 \\ 0.31} $. # # Then, evolve your system for 5, 10, 20, and 40 steps. # # This system should evolve to a fixed probabilistic state. # # Change your initial state to $ \myvector{1 \\ 0 \\ 0} $, and see whether the converged state is the same or not. 
# <h3>Solution</h3>

def linear_evolve(operator, state):
    """Return the new probabilistic state obtained by applying `operator` to `state`.

    Works for any dimension: entry i of the result is the dot product of the
    i-th row of the operator with the current state.
    """
    evolved = []
    for row in operator:
        # row-by-state dot product, accumulated left to right
        evolved.append(sum(row[j] * state[j] for j in range(len(state))))
    return evolved


# +
# test the function

# operator for the test
B = [
    [0.4, 0.6, 0],
    [0.2, 0.1, 0.7],
    [0.4, 0.3, 0.3]
]

# state for the test
v = [0.1, 0.3, 0.6]

newstate = linear_evolve(B, v)
print(newstate)
# -

# evolve the system for 5, 10, 20 and 40 steps from the state [0.1, 0.3, 0.6]
for step in [5, 10, 20, 40]:
    new_state = [0.1, 0.3, 0.6]  # initial state
    for _ in range(step):
        new_state = linear_evolve(B, new_state)
    print(new_state)

# <b> The system seems to converge to a fixed probabilistic state </b>.
#
# Moreover, the converged probabilistic state is an equal distribution.

# +
# change the initial state to [1, 0, 0]
for step in [5, 10, 20, 40]:
    new_state = [1, 0, 0]  # initial state
    for _ in range(step):
        new_state = linear_evolve(B, new_state)
    print(new_state)
# -

# <b> The initial state does not change the convergence. </b>
Bronze/classical-systems/CS20_Probabilistic_Operators_Solutions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Word tokenize

from nltk.tokenize import word_tokenize

text = "<NAME>, Countess of Lovelace (née Byron; 10 December 1815 – 27 November 1852) was an English mathematician and writer, chiefly known for her work on <NAME>'s proposed mechanical general-purpose computer, the Analytical Engine. She was the first to recognise that the machine had applications beyond pure calculation, and to have published the first algorithm intended to be carried out by such a machine. As a result, she is often regarded as one of the first computer programmers."

print(text)

# Tokenize once, then emit one token per line (same output as printing in a loop).
tokens = word_tokenize(text)
print("\n".join(tokens))
wordTokenize.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # Mask and Plot Remote Sensing Data with EarthPy # ============================================== # # Learn how to mask out pixels in a raster dataset. This example shows how to apply a cloud mask to # Landsat 8 data. # # Plotting with EarthPy # --------------------- # # <div class="alert alert-info"><h4>Note</h4><p>Below we walk through a typical workflow using Landsat data with EarthPy.</p></div> # # The example below uses Landsat 8 data. In the example below, the landsat_qa layer is the # quality assurance data layer that comes with Landsat 8 to identify pixels that may represent # cloud, shadow and water. The mask values used below are suggested values associated with the # landsat_qa layer that represent pixels with clouds and cloud shadows. # # # Import Packages # ------------------------------ # # To begin, import the needed packages. You will use a combination of several EarthPy # modules including spatial, plot and mask. # # # + from glob import glob import os import matplotlib.pyplot as plt import rasterio as rio from rasterio.plot import plotting_extent import earthpy as et import earthpy.spatial as es import earthpy.plot as ep import earthpy.mask as em # Get data and set your home working directory data = et.data.get_data("cold-springs-fire") # - # Import Example Data # ------------------------------ # To get started, make sure your directory is set. Create a stack from all of the # Landsat .tif files (one per band) and import the ``landsat_qa`` layer which provides # the locations of cloudy and shadowed pixels in the scene. 
# # # + os.chdir(os.path.join(et.io.HOME, "earth-analytics")) # Stack the landsat bands # This creates a numpy array with each "layer" representing a single band landsat_paths_pre = glob( "data/cold-springs-fire/landsat_collect/LC080340322016070701T1-SC20180214145604/crop/*band*.tif" ) landsat_paths_pre.sort() arr_st, meta = es.stack(landsat_paths_pre) # Import the landsat qa layer with rio.open( "data/cold-springs-fire/landsat_collect/LC080340322016070701T1-SC20180214145604/crop/LC08_L1TP_034032_20160707_20170221_01_T1_pixel_qa_crop.tif" ) as landsat_pre_cl: landsat_qa = landsat_pre_cl.read(1) landsat_ext = plotting_extent(landsat_pre_cl) # - # Plot Histogram of Each Band in Your Data # ---------------------------------------- # You can view a histogram for each band in your dataset by using the # ``hist()`` function from the ``earthpy.plot`` module. # # ep.hist(arr_st) plt.show() # Customize Histogram Plot with Titles and Colors # ----------------------------------------------- # # ep.hist( arr_st, colors=["blue"], title=[ "Band 1", "Band 2", "Band 3", "Band 4", "Band 5", "Band 6", "Band 7", ], ) plt.show() # View Single Band Plots # ----------------------------------------------- # Next, have a look at the data, it looks like there is a large cloud that you # may want to mask out. # # ep.plot_bands(arr_st) plt.show() # Mask the Data # ----------------------------------------------- # You can use the EarthPy ``mask()`` function to handle this cloud. # To begin you need to have a layer that defines the pixels that # you wish to mask. In this case, the ``landsat_qa`` layer will be used. # # ep.plot_bands( landsat_qa, title="The Landsat QA Layer Comes with Landsat Data\n It can be used to remove clouds and shadows", ) plt.show() # Plot The Masked Data # ~~~~~~~~~~~~~~~~~~~~~ # Now apply the mask and plot the masked data. The mask applies to every band in your data. 
# The mask values below are values documented in the Landsat 8 documentation that represent # clouds and cloud shadows. # # # + # Generate array of all possible cloud / shadow values cloud_shadow = [328, 392, 840, 904, 1350] cloud = [352, 368, 416, 432, 480, 864, 880, 928, 944, 992] high_confidence_cloud = [480, 992] # Mask the data all_masked_values = cloud_shadow + cloud + high_confidence_cloud arr_ma = em.mask_pixels(arr_st, landsat_qa, vals=all_masked_values) # sphinx_gallery_thumbnail_number = 5 ep.plot_rgb( arr_ma, rgb=[4, 3, 2], title="Array with Clouds and Shadows Masked" ) plt.show()
examples/plot_stack_masks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Overview # The main point of this script which is dependent on `resynthesize.py` and `signal_processing.py` is to resynthesize a a tone with quantifiable timbral features based on the harmonic distribution of an imported sound wav. # # &copy; <i><NAME> 2021</i> # + #Formatting (to center plots): from IPython.display import display, HTML CSS = """ .output { align-items: center; } """ HTML('<style>{}</style>'.format(CSS)) # - # # Importing Relevant Functions from resynthesize import resynthesize, extract_harmonics, play_alma_mater from signal_processing import pure_tone_complex, sound, magphase, get_spect import matplotlib.pyplot as plt from scipy.signal import spectrogram as sp import numpy as np # # Generating a Simulated Tone # + harmonics = 20; first = 0; dur_sec = 1; toPlay = np.array( [0,1,2,3,4,5,6,7,8,9,10,11]) extract = extract_harmonics('instruments/violin_A4_normal.wav', fs = 44100, f_0 = 440, n_harms = harmonics); fs_Hz = extract[4]; amp = extract[1][toPlay]; phase = extract[2][toPlay]; freq_Hz = extract[0][toPlay]; t_vect = np.arange(0,dur_sec*fs_Hz)/fs_Hz; env_banj = np.exp(-9*t_vect); env_string = (1+0.15*np.sin(6*np.pi*2*t_vect))*np.sin(.5*np.pi*2*t_vect); tone = resynthesize(amp, 'violin_resolved.wav', fs_Hz = 44100,freq_Hz = freq_Hz, dur_sec = 1, phi = phase, scale = 1, tone_shift = 1, env_fxn = env_string, type = 'sin', play_write = True, plot = False) sound(tone, fs_Hz) # - # # Spectrogram of Output get_spect(tone, fs_Hz, DR = 200, BW = 75, xlim = [0,1], ylim = [0,6000], colormap = 'magma',title = 'Simulated Violin | Resolved Range'); plt.figure() plt.plot(t_vect, tone) plt.xlim(0.3,0.32)
signal_processing/timbral_inspection/timbre_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import matplotlib.pyplot as plt from cartopy.feature import NaturalEarthFeature import cartotools.crs as ccrs countries = NaturalEarthFeature( category='cultural', name='admin_0_countries', scale='50m', edgecolor='#524c50', facecolor='none', alpha=.5) # - fig = plt.figure(figsize=(5, 5)) ax = plt.axes(projection=ccrs.EuroPP()) ax.coastlines(resolution='50m') ax.add_feature(countries) ax.gridlines() fig = plt.figure(figsize=(5, 5)) ax = plt.axes(projection=ccrs.Lambert93()) ax.coastlines(resolution='50m') ax.add_feature(countries) ax.gridlines() fig = plt.figure(figsize=(5, 5)) ax = plt.axes(projection=ccrs.Amersfoort()) ax.coastlines(resolution='50m') ax.add_feature(countries) ax.gridlines() fig = plt.figure(figsize=(5, 5)) ax = plt.axes(projection=ccrs.OSGB()) ax.coastlines(resolution='50m') ax.add_feature(countries) ax.gridlines() fig = plt.figure(figsize=(5, 5)) ax = plt.axes(projection=ccrs.OSNI()) ax.coastlines(resolution='50m') ax.add_feature(countries) ax.gridlines() fig = plt.figure(figsize=(5, 5)) ax = plt.axes(projection=ccrs.GaussKruger()) ax.coastlines(resolution='50m') ax.add_feature(countries) ax.gridlines() fig = plt.figure(figsize=(5, 5)) ax = plt.axes(projection=ccrs.CH1903()) ax.coastlines(resolution='50m') ax.add_feature(countries) ax.gridlines()
notebooks/cartotools_crs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # default_exp constraints # hide _FNAME='constraints' import unittest from unittest import mock from nbdev.export import notebook2script import os TESTCASE = unittest.TestCase() _nbpath = os.path.join(_dh[0], _FNAME+'.ipynb') # + #export import logging logger = logging.getLogger() import traceback from neo4j.exceptions import ClientError from mranderson.db import Query def uniqueness(label:str, property_name:str, safely=True): ''' Adds a uniqueness constraint on the property property_name on nodes of label e.g. uniqueness('Device', 'device_id') --> Device nodes must have a unique 'device_id' property safely: If False, throw an exception if the constraint already exists ''' constraint = "CREATE CONSTRAINT ON (node:{label}) ASSERT node.{property_name} IS UNIQUE".format( label=label, property_name=property_name) q = Query() q.add(constraint) try: resp = q.create() except ClientError as e: logger.info("Constraint {} already exists".format(constraint)) if not safely: raise e # - from mranderson.sandbox import start_neo4j, start_fresh, create_totem container_name = 'mranderson_constraints_unittest' container, driver = start_neo4j(container_name=container_name) # + import mranderson.node as node from neo4j.exceptions import ConstraintError def test_uniqueness(): TESTCASE.assertTrue(start_fresh()) create_totem() uniqueness('Test', 'eid') #creating the constraint safely is idempotent uniqueness('Test', 'eid', safely=True) node.create("(:Test {eid:1})") #new nodes with eid=1 cannot be created with TESTCASE.assertRaises(ConstraintError): node.create("(:Test {eid:1})") #nodes with different eid are OK node.create("(:Test {eid:2})") #creating the constraint again without `safely` fails with TESTCASE.assertRaises(ClientError): uniqueness('Test', 
'eid', safely=False) print("SUCCESS") test_uniqueness() # - notebook2script(_nbpath)
nbs/constraints.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv("Movie_regression.csv", header=0) df.head() df.info() # ### Missing Value Imputation df['Time_taken'].mean() df['Time_taken'].fillna(value = df['Time_taken'].mean(), inplace = True) df.info() # ### Dummy Variable Creation df.head() df = pd.get_dummies(df,columns = ["3D_available","Genre"],drop_first = True) df.head() # ### X-y split X = df.loc[:,df.columns!="Collection"] type(X) X.head() X.shape y = df["Collection"] type(y) y.head() y.shape # ### Test-Train Split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2,random_state=0) X_train.head() X_train.shape X_test.shape # ### Traing Regression Tree from sklearn import tree regtree = tree.DecisionTreeRegressor(max_depth = 3) regtree.fit(X_train, y_train) # ### Predict values using trained model y_train_pred = regtree.predict(X_train) y_test_pred = regtree.predict(X_test) y_test_pred # ### Model Performance from sklearn.metrics import mean_squared_error, r2_score mean_squared_error(y_test, y_test_pred) r2_score(y_train, y_train_pred) r2_score(y_test, y_test_pred) # ### Plotting decision tree dot_data = tree.export_graphviz(regtree, out_file=None,feature_names= X_train.columns, filled = True) from IPython.display import Image import pydotplus graph = pydotplus.graph_from_dot_data(dot_data) Image(graph.create_png()) X_train.columns # ## Controlling Tree growth # ### Maximum number of levels in tree regtree1 = tree.DecisionTreeRegressor(max_depth = 3) regtree1.fit(X_train, y_train) dot_data = tree.export_graphviz(regtree1, out_file=None,feature_names= X_train.columns, filled = True) graph1 = 
pydotplus.graph_from_dot_data(dot_data) Image(graph1.create_png()) # ### Minimum observations at internal node regtree2 = tree.DecisionTreeRegressor(min_samples_split = 40) regtree2.fit(X_train, y_train) dot_data = tree.export_graphviz(regtree2, out_file=None,feature_names= X_train.columns, filled = True) graph2 = pydotplus.graph_from_dot_data(dot_data) Image(graph2.create_png()) # ### Minimum observations at leaf node regtree3 = tree.DecisionTreeRegressor(min_samples_leaf = 25, max_depth=4) regtree3.fit(X_train, y_train) dot_data = tree.export_graphviz(regtree3, out_file=None,feature_names= X_train.columns, filled = True) graph3 = pydotplus.graph_from_dot_data(dot_data) Image(graph3.create_png())
estudos_python/Machine_Learning/Others/3. ST Academy - Decision Trees resource files/Python Code/regression_tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Modeling and Simulation in Python # # Chapter 2: Simulation # # Copyright 2017 <NAME> # # License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0) # # We'll start with the same code we saw last time: the magic command that tells Jupyter where to put the figures, and the import statement that gets the functions defined in the `modsim` module. # + # If you want the figures to appear in the notebook, # and you want to interact with them, use # # %matplotlib notebook # If you want the figures to appear in the notebook, # and you don't want to interact with them, use # # %matplotlib inline # If you want the figures to appear in separate windows, use # # %matplotlib qt5 # %matplotlib notebook from modsim import * # - # ## More than one System object # # Here's the code from the previous chapter, with two changes: # # 1. I've added DocStrings that explain what each function does, and what parameters it takes. # # 2. I've added a parameter named `system` to the functions so they work with whatever `System` object we give them, instead of always using `bikeshare`. That will be useful soon when we have more than one `System` object. # + def run_steps(system, num_steps=1, p1=0.5, p2=0.5): """Simulate the given number of time steps. system: bikeshare System object num_steps: number of time steps p1: probability of an Olin->Wellesley customer arrival p2: probability of a Wellesley->Olin customer arrival """ for i in range(num_steps): step(system, p1, p2) plot_system(system) def step(system, p1=0.5, p2=0.5): """Simulate one minute of time. 
system: bikeshare System object p1: probability of an Olin->Wellesley customer arrival p2: probability of a Wellesley->Olin customer arrival """ if flip(p1): bike_to_wellesley(system) if flip(p2): bike_to_olin(system) def bike_to_wellesley(system): """Move one bike from Olin to Wellesley. system: bikeshare System object """ move_bike(system, 1) def bike_to_olin(system): """Move one bike from Wellesley to Olin. system: bikeshare System object """ move_bike(system, -1) def move_bike(system, n): """Move a bike. system: bikeshare System object n: +1 to move from Olin to Wellesley or -1 to move from Wellesley to Olin """ system.olin -= n system.wellesley += n def plot_system(system): """Plot the current system of the bikeshare system. system: bikeshare System object """ plot(system.olin, 'rs-', label='Olin') plot(system.wellesley, 'bo-', label='Wellesley') def decorate_bikeshare(): """Add a title and label the axes.""" decorate(title='Olin-Wellesley Bikeshare', xlabel='Time step (min)', ylabel='Number of bikes') # - # Now we can create more than one `System` object: bikeshare1 = System(olin=10, wellesley=2) bikeshare1 bikeshare2 = System(olin=10, wellesley=2) bikeshare2 # And whenever we call a function, we indicate which `System` object to work with: bike_to_olin(bikeshare1) bike_to_wellesley(bikeshare2) # And you can confirm that the different systems are getting updated independently: bikeshare1 bikeshare2 # ## Negative bikes # In the code we have so far, the number of bikes at one of the locations can go negative, and the number of bikes at the other location can exceed the actual number of bikes in the system. # # If you run this simulation a few times, it happens quite often. bikeshare = System(olin=10, wellesley=2) newfig() plot_system(bikeshare) decorate_bikeshare() run_steps(bikeshare, 60, 0.4, 0.2) # But this is relatively easy to fix, using the `return` statement to exit the function early if the update would cause negative bikes. 
# # If the second `if` statement seems confusing, remember that `n` can be negative. def move_bike(system, n): # make sure the number of bikes won't go negative olin_temp = system.olin - n if olin_temp < 0: # print("Olin has 0 bikes, so one was not moved to Wellesley.") return wellesley_temp = system.wellesley + n if wellesley_temp < 0: # print("Wellesley has 0 bikes, so one was not moved to Olin") return # update the system system.olin = olin_temp system.wellesley = wellesley_temp # Now if you run the simulation again, it should behave. bikeshare = System(olin=10, wellesley=2) newfig() plot_system(bikeshare) decorate_bikeshare() run_steps(bikeshare, 60, 0.4, 0.2) # The variables `olin` and `wellesley` are created inside `move_bike`, so they are local. When the function ends, they go away. # # If you try to access a local variable from outside its function, you get an error: # + # If you remove the # from the last line in this cell and run it, you'll get # NameError: name 'olin' is not defined olin # - # **Exercise:** Add print statements in `move_bike` so it prints a message each time a customer arrives and doesn't find a bike. Run the simulation again to confirm that it works as you expect. Then you might want to remove the print statements before you go on. # ## Comparison operators # The `if` statements in the previous section used the comparison operator `<`. The other comparison operators are listed in the book. # # It is easy to confuse the comparison operator `==` with the assignment operator `=`. # # Remember that `=` creates a variable or gives an existing variable a new value. x = 5 # Whereas `==` compared two values and returns `True` if they are equal. x == 5 # You can use `==` in an `if` statement. if x == 5: print('yes, x is 5') else: print("no, x is not 5") # But if you use `=` in an `if` statement, you get an error. 
# + # If you remove the # from the if statement and run it, you'll get # SyntaxError: invalid syntax if x = 5: print('yes, x is 5') else: print("no, x is not 5") # - # **Exercise:** Add an `else` clause to the `if` statement above, and print an appropriate message. # # Replace the `==` operator with one or two of the other comparison operators, and confirm they do what you expect. # ## Metrics # Now that we have a working simulation, we'll use it to evaluate alternative designs and see how good or bad they are. The metric we'll use is the number of customers who arrive and find no bikes available, which might indicate a design problem. # First we'll make a new `System` object that creates and initializes the system variables that will keep track of the metrics. bikeshare = System(olin=10, wellesley=2, olin_empty=0, wellesley_empty=0, clock = 0) # Next we need a version of `move_bike` that updates the metrics. def move_bike(system, n): olin_temp = system.olin - n if olin_temp < 0: system.olin_empty += 1 return wellesley_temp = system.wellesley + n if wellesley_temp < 0: system.wellesley_empty += 1 return system.olin = olin_temp system.wellesley = wellesley_temp # Now when we run a simulation, it keeps track of unhappy customers. newfig() plot_system(bikeshare) decorate_bikeshare() run_steps(bikeshare, 60, 0.4, 0.2) # After the simulation, we can print the number of unhappy customers at each location. bikeshare.olin_empty bikeshare.wellesley_empty # **Exercise:** Let's add a "clock" to keep track of how many time steps have elapsed: # # 1. Add a new system variable named `clock` to `bikeshare`, initialized to 0, and # # 2. Modify `step` so it increments (adds one to) `clock` each time it is invoked. # # Test your code by adding a print statement that prints the value of `clock` at the beginning of each time step. 
# Here's a copy of step to get you started bikeshare = System(olin=10, wellesley=2, olin_empty=0, wellesley_empty=0, clock=0) # Solution goes here def step(system, p1=0.5, p2=0.5): """Simulate one minute of time. system: bikeshare System object p1: probability of an Olin->Wellesley customer arrival p2: probability of a Wellesley->Olin customer arrival """ # Add one to the clock each time. system.clock +=1 # print system.clock if flip(p1): bike_to_wellesley(system) if flip(p2): bike_to_olin(system) # Solution goes here newfig() plot_system(bikeshare) decorate_bikeshare() run_steps(bikeshare, 60, 0.4, 0.2) # Solution goes here print(bikeshare.clock) # After the simulation, check the final value of `clock`. print(bikeshare.clock) # **Exercise:** Now suppose we'd like to know how long it takes to run out of bikes at either location. Modify `move_bike` so the first time a student arrives at Olin and doesn't find a bike, it records the value of `clock` in a system variable. # # Hint: create a system variable named `t_first_empty` and initialize it to `-1` to indicate that it has not been set yet. # # Test your code by running a simulation for 60 minutes and checking the metrics. # Solution goes here def move_bike(system, n): # make sure the number of bikes won't go negative olin_temp = system.olin - n if olin_temp < 0: # print("Olin has 0 bikes, so one was not moved to Wellesley.") if system.t_first_empty > -1: return else: system.t_first_empty = system.clock return wellesley_temp = system.wellesley + n if wellesley_temp < 0: # print("Wellesley has 0 bikes, so one was not moved to Olin") return # update the system system.olin = olin_temp system.wellesley = wellesley_temp # Solution goes here bikeshare = System(olin=10, wellesley=2, olin_empty=0, wellesley_empty=0, clock = 0, t_first_empty = -1) # Solution goes here newfig() plot_system(bikeshare) decorate_bikeshare() run_steps(bikeshare, 60, 0.4, 0.2) # After the simulation, check the final value of `t_first_empty`. 
print(bikeshare.t_first_empty) # Before we go on, let's put `step` and `move_bike` back the way we found them, so they don't break the examples below. # + def step(system, p1=0.5, p2=0.5): if flip(p1): bike_to_wellesley(system) if flip(p2): bike_to_olin(system) def move_bike(system, n): olin_temp = system.olin - n if olin_temp < 0: system.olin_empty += 1 return wellesley_temp = system.wellesley + n if wellesley_temp < 0: system.wellesley_empty += 1 return system.olin = olin_temp system.wellesley = wellesley_temp # - # ## Returning values # Here's a simple function that returns a value: def add_five(x): return x + 5 # And here's how we call it. y = add_five(3) y # If you run a function on the last line of a cell, Jupyter displays the result: add_five(5) # But that can be a bad habit, because usually if you call a function and don't assign the result in a variable, the result gets discarded. # # In the following example, Jupyter shows the second result, but the first result just disappears. add_five(3) add_five(5) # When you call a function that returns a variable, it is generally a good idea to assign the result to a variable. # + y1 = add_five(3) y2 = add_five(5) print(y1, y2) # - # **Exercise:** Write a function called `make_system` that creates a `System` object with the system variables `olin=10` and `wellesley=2`, and then returns the new `System` object. # # Write a line of code that calls `make_system` and assigns the result to a variable. # Solution goes here def make_system(): """ Returns a new Sytem object where Olin = 10 and Wellesley = 2 """ return System(olin=10, wellesley=2) # Solution goes here bikeshare = make_system() bikeshare # ## Running simulations # Before we go on, I want to update `run_steps` so it doesn't always plot the results. The new version takes an additional parameter, `plot_flag`, to indicate whether we want to plot. # # "flag" is a conventional name for a boolean variable that indicates whether or not a condition is true. 
# # This version of `run_steps` works even if `num_steps` is not an integer. It uses the `int` function to round down. See https://docs.python.org/3/library/functions.html#int def run_steps(system, num_steps=1, p1=0.5, p2=0.5, plot_flag=True): """Simulate the given number of time steps. `num_steps` should be an integer; if not, it gets rounded down. system: bikeshare System object num_steps: number of time steps p1: probability of an Olin->Wellesley customer arrival p2: probability of a Wellesley->Olin customer arrival plot_flag: boolean, whether to plot """ for i in range(int(num_steps)): step(system, p1, p2) if plot_flag: plot_system(system) # Now when we run a simulation, we can choose not to plot the results: bikeshare = System(olin=10, wellesley=2, olin_empty=0, wellesley_empty=0) run_steps(bikeshare, 60, 0.4, 0.2, plot_flag=False) # But after the simulation, we can still read the metrics. bikeshare.olin_empty # Let's wrap all that in a function. def run_simulation(): system = System(olin=10, wellesley=2, olin_empty=0, wellesley_empty=0) run_steps(system, 60, 0.4, 0.2, plot_flag=False) return system # And test it. system = run_simulation() print(system.olin_empty, system.wellesley_empty) # If we generalize `run_simulation` to take `p1` and `p2`, we can use it to run simulations with a range of values for the parameters. def run_simulation(p1=0.4, p2=0.2): bikeshare = System(olin=10, wellesley=2, olin_empty=0, wellesley_empty=0) run_steps(bikeshare, 60, p1, p2, plot_flag=False) return bikeshare # When `p1` is small, we probably don't run out of bikes at Olin. system = run_simulation(p1=0.2) system.olin_empty # When `p1` is large, we probably do. system = run_simulation(p1=0.6) system.olin_empty # **Exercise:** Write a version of `run_simulation` that takes all five model parameters as function parameters. # Solution goes here def run_simulation(p1=.4, p2=.2, olin=10, wellesley=2, num_steps=60): """ Runs simulation with all five model parameters as parameters. 
system : The system being modified in this case num_steps: The number of steps to run for p1: Probability of a bike going from Olin to Wellesley p2: Probability of a bike going from Wellesley to Olin olin: The number of bikes on Olin's campus. Wellesley: The number of bikes on Wellesley's campus. num_steps: The number of steps that the algorithm should run for. """ system = System(olin=olin, wellesley=wellesley, olin_empty=0, wellesley_empty=0) run_steps(system=system,num_steps=num_steps, p1=p1, p2=p2, plot_flag=False) return system # Solution goes here run_simulation() # ## More for loops # `linspace` creates a NumPy array of equally spaced numbers. p1_array = linspace(start=0, stop=1, num=5) p1_array # We can use an array in a `for` loop, like this: for p1 in p1_array: print(p1) # This will come in handy in the next section. # **Exercise:** The function `linspace` is part of NumPy. [You can read the documentation here](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html). # # Use `linspace` to make an array of 10 equally spaced numbers from 1 to 10 (including both). # Solution goes here p1_array = linspace(start = 1, stop = 10, num = 10) p1_array # **Exercise:** The `modsim` library provides a related function called `linrange`. You can view the documentation by running the following cell: help(linrange) # Use `linrange` to make an array of numbers from 1 to 11 with a step size of 2. # Solution goes here p1_array = linrange(start = 1, stop = 11, step = 2) p1_array # ## Sweeping parameters # The following example runs simulations with a range of values for `p1`; after each simulation, it prints the number of unhappy customers at the Olin station: p1_array = linspace(0, 1, 11) p1_array for p1 in p1_array: system = run_simulation(p1=p1) print(p1, system.olin_empty) # Now we can do the same thing, but plotting the results instead of printing them. 
# # newfig() for p1 in p1_array: system = run_simulation(p1=p1) plot(p1, system.olin_empty, 'rs', label='olin') # As always, we should decorate the figure. This version of `decorate_bikeshare` takes `xlabel` as a parameter, for reasons you will see soon. def decorate_bikeshare(xlabel): decorate(title='Olin-Wellesley Bikeshare', xlabel=xlabel, ylabel='Number of unhappy customers') decorate_bikeshare(xlabel='Arrival rate at Olin (p1 in customers/min)') # **Exercise:** Wrap this code in a function named `parameter_sweep` that takes an array called `p1_array` as a parameter. It should create a new figure, run a simulation for each value of `p1` in `p1_array`, and plot the results. # # Once you have the function working, modify it so it also plots the number of unhappy customers at Wellesley. Looking at the plot, can you estimate a range of values for `p1` that minimizes the total number of unhappy customers? # Solution goes here def parameter_sweep(p1_array): """ Creates a new figure and runs the simulation with different p1 values p1_array: An array of p values. """ newfig() for p1 in p1_array: system = run_simulation(p1=p1) plot(p1, system.olin_empty, 'rs', label='olin') plot(p1, system.wellesley_empty, 'bo', label="Wellesley") # # Solution goes here newfig() p1_array = linspace(0, 1, 50) parameter_sweep(p1_array) # **Exercise:** Write a function called `parameter_sweep2` that runs simulations with `p1=0.2` and a range of values for `p2`. # # Note: If you run `parameter_sweep2` a few times without calling `newfig`, you can plot multiple runs on the same axes, which will give you a sense of how much random variation there is from one run to the next. # Solution goes here # Solution goes here def parameter_sweep2(p2_array): """ Creates a new figure and runs the simulation with different p2 values, holding p1 at 0.2 p2_array: An array of p values. 
""" newfig() for p2 in p1_array: system = run_simulation(p1=0.2, p2=p2) plot(p2, system.olin_empty, 'rs', label='olin') plot(p2, system.wellesley_empty, 'bo', label="Wellesley") # Solution goes here newfig() p1_array = linspace(0, 1, 50) parameter_sweep2(p1_array) # + # Solution goes here # - # **Exercise:** Hold `p1=0.4` and `p2=0.2`, and sweep a range of values for `num_steps`. # # Hint: You will need a version of `run_simulation` that takes `num_steps` as a parameter. # # Hint: Because `num_steps` is supposed to be an integer use `range` rather than `linrange`. # Solution goes here def parameter_sweep_steps(step_array): """ Creates a new figure and runs the simulation with different numbers of steps. """ newfig() for num_steps in step_array: system = run_simulation(p1=0.2, p2=0.2, num_steps=num_steps) plot(num_steps, system.olin_empty, 'rs', label='olin') plot(num_steps, system.wellesley_empty, 'bo', label="Wellesley") # Solution goes here newfig() step_array = range(50) parameter_sweep_steps(step_array) # + # Solution goes here # - # **Exercise:** The code below runs a simulation with the same parameters 10 times and computes the average number of unhappy customers. # # 1. Wrap this code in a function called `run_simulations` that takes `num_runs` as a parameter. # # 2. Test `run_simulations`, and increase `num_runs` until the results are reasonably consistent from one run to the next. # # 3. Generalize `run_simulations` so it also takes the initial value of `olin` as a parameter. # # 4. Run the generalized version with `olin=12`. How much do the two extra bikes decrease the average number of unhappy customers. # # 5. Make a plot that shows the average number of unhappy customers as a function of the initial number of bikes at Olin. 
num_runs = 10 total = 0 for i in range(num_runs): system = run_simulation(p1=0.4, p2=0.2, olin=10, wellesley=2, num_steps=60) total += system.olin_empty + system.wellesley_empty total / num_runs # Solution goes here def run_simulations(num_runs): total = 0 for i in range(num_runs): system = run_simulation(p1=0.4, p2=0.2, olin=10, wellesley=2, num_steps=60) total += system.olin_empty + system.wellesley_empty return total / num_runs # Solution goes here print(run_simulations(1000)) # Solution goes here def run_simulations(num_runs, olin=10): """ Runs given amount of simulations and returns average number of unhappy customers. num_runs: The number of runs to try olin: The initial number of bikes. """ total = 0 for i in range(num_runs): system = run_simulation(p1=0.4, p2=0.2, olin=olin, wellesley=2, num_steps=60) total += system.olin_empty + system.wellesley_empty return total / num_runs # Solution goes here run_simulations(num_runs=1000, olin=12) # Solution goes here newfig() num_bikes = range(20) for number in num_bikes: average_value = run_simulations(num_runs=50, olin = number) plot(number, average_value, "bo-") xlabel("Number of bikes at Olin") ylabel("Number of unhappy customers") title("Relationship between number of bikes at Olin and number of happy customers")
code/chap02mine.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Zr5YJ5xSZqis" # # Email Classification # + [markdown] id="-npoQSyFTTHM" # ## Fetching data from MS-Sql # + id="X3AtNr2SaSCa" # !apt install unixodbc-dev # !pip install pyodbc # + id="BLRNUZ-Pb4iT" language="sh" # curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - # curl https://packages.microsoft.com/config/ubuntu/16.04/prod.list > /etc/apt/sources.list.d/mssql-release.list # sudo apt-get update # sudo ACCEPT_EULA=Y apt-get -q -y install msodbcsql17 # + id="nGEqTyX5a7w2" import os import pyodbc import urllib import pandas as pd from sqlalchemy import create_engine # + id="jVXQEhu4bJt5" driver = [item for item in pyodbc.drivers()][-1] conn_string = f'Driver={driver};Server=tcp:server.<domain>.com,<port>;Database=<db>;Uid=<userid>;Pwd=<<PASSWORD>>;Encrypt=yes;TrustServerCertificate=yes;Connection Timeout=30;' conn = pyodbc.connect(conn_string) cursor = conn.cursor() # + id="MPC2RpHco7C-" # params = urllib.parse.quote_plus(conn_string) # conn_str = 'mssql+pyodbc:///?odbc_connect={}'.format(params) # engine_feat = create_engine(conn_str, echo=True) # print(engine_feat.table_names()) # + id="CQN-9vldsTgA" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1597254541382, "user_tz": -330, "elapsed": 1563, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="831cb0d5-2123-47bc-aa7f-b2f85b2537ea" tname = 'tbl_Final_Lable_Data_18_n_19' query = f'select count(*) from {tname}' cursor.execute(query) cursor.fetchall() # + id="baJZZ5ww2eZm" colab={"base_uri": "https://localhost:8080/", "height": 442} executionInfo={"status": "ok", "timestamp": 1597254977341, "user_tz": -330, "elapsed": 1886, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": 
"13037694610922482904"}} outputId="08a74fb4-4460-4c75-e632-426b820b36de" query = f'select top 5 * from {tname}' df = pd.read_sql(query, conn) df.info() # + id="6Hy570Spsev7" # %reload_ext google.colab.data_table # + id="IsOsExH-txzH" df # + id="U9uhMDtV1GDk" colab={"base_uri": "https://localhost:8080/", "height": 119} executionInfo={"status": "ok", "timestamp": 1597255219329, "user_tz": -330, "elapsed": 1603, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="8a63c900-b40d-4bfc-8061-26eedcbb45d5" df.columns # + id="C09UEiqUt35P" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1597262681717, "user_tz": -330, "elapsed": 6654848, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="38143115-810a-4095-d27b-ed6aae238fb0" query = f'select tSubject, mMsgContent, QueryType, SubQueryType from {tname}' df = pd.read_sql(query, conn) df.info() # + id="thdlJgVO1Gxy" df.to_pickle('data.p') # + [markdown] id="8q8YbNDWTqo4" # ## Wrangling # + id="Lcbzjg0pCCyh" # wrangling.py import os import numpy as np import pandas as pd spath = '/content/email_class' df = pd.read_pickle(os.path.join(spath,'data','raw','data.p')) df.columns = ['subj','msg','qtype','stype'] df['type'] = df['qtype'] + ' | ' + df['stype'] df = df.replace(r'^\s*$', np.nan, regex=True) df = df.dropna(how='all') df = df.drop_duplicates() df = df.dropna(subset=['subj', 'msg'], how='all') df = df.replace(r'^\s*$', np.nan, regex=True) df = df.dropna(how='all') df = df.drop_duplicates() df = df.dropna(subset=['subj', 'msg'], how='all') df = df.fillna(' ') df['subj&msg'] = df['subj'] + ' sub_eos_token ' + df['msg'] df = df[['subj&msg','type']] df.columns = ['text','target'] df.sample(10000).to_pickle('df_raw_wrangled_sample_10k.p') # df.sample(10000).to_pickle(os.path.join(spath,'data','wrangled','df_raw_wrangled_sample_10k.p')) # 
df.to_pickle(os.path.join(spath,'data','wrangled','df_raw_wrangled_full.p')) # + id="hlb8-lExVmiw" import os import numpy as np import pandas as pd from tqdm import tqdm import seaborn as sns import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") tqdm.pandas() # %reload_ext autoreload # %autoreload 2 # %reload_ext google.colab.data_table # %config InlineBackend.figure_format = 'retina' plt.style.use('fivethirtyeight') plt.style.use('seaborn-notebook') # + id="w79QgqP_23__" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1597560238130, "user_tz": -330, "elapsed": 42402, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="15e2bc71-2d04-4454-fbf0-176b80bff19e" df = pd.read_pickle(os.path.join(spath,'data','raw','data.p')) df.info() # + id="7XK3KUYZ34tZ" df.sample(20) # + id="wYm1-URg7C66" df.columns # + id="gj2zOluv7wPt" df.columns = ['subj','msg','qtype','stype'] # + id="2n7iU3FO8AXW" df.qtype.nunique() # + id="o5tEpnms8KOY" df.qtype.value_counts()[:50] # + id="Hfqmljfx8bVB" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1597341029592, "user_tz": -330, "elapsed": 1346, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="070d2412-b788-4ed7-fea6-9a3e6e5afea3" df.stype.nunique() # + id="YdF-08448Peu" df.stype.value_counts()[:50] # + id="D5SalQHJz2yW" df['type'] = df['qtype'] + ' | ' + df['stype'] # + id="6gMwjYmp0C7W" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1597406059662, "user_tz": -330, "elapsed": 902, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="4bed2b4b-a399-482d-f714-2fb719ad8ee4" df['type'].nunique() # + id="V1WutEGB0PJf" df['type'].value_counts()[:50] # + id="TRGQM5TR9Qq-" colab={"base_uri": "https://localhost:8080/", "height": 
34} executionInfo={"status": "ok", "timestamp": 1597341249538, "user_tz": -330, "elapsed": 1786, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3a106282-634e-4ba7-fcd4-bb4892bc3d04" df.subj.nunique() # + id="t8mJgC5d8WBq" df.subj.value_counts()[:50] # + id="kSKsChwu9euw" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1597341313014, "user_tz": -330, "elapsed": 4615, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="eaaee5f2-4680-47ad-b9e1-7f0051dab6f9" df.msg.nunique() # + id="Ik-uzC1X9B-B" df.msg.value_counts()[:50] # + id="uEA7-SWw9ZMg" df[df.msg.isnull()].sample(10) # + id="j0Dg_-ii-CdT" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1597341729282, "user_tz": -330, "elapsed": 1832, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="2caf82a9-e0df-497d-ace5-b68fa7ca75ba" df[(df.msg.isnull()) & (df.subj.isnull())].info() # + id="a4h8ahQ1-v23" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1597560296004, "user_tz": -330, "elapsed": 38247, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="2ff96fdc-341a-4976-89e7-06f16f279ab4" df2 = df.replace(r'^\s*$', np.nan, regex=True) df2.info() # + id="0jrw87Xq_ftb" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1597560297220, "user_tz": -330, "elapsed": 12766, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="f7b8bfa3-f52e-4ce1-f2dd-75ea8960b797" df3 = df2.dropna(how='all') df3.info() # + id="SKOlzfUNAbXP" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1597560302536, "user_tz": -330, "elapsed": 14100, "user": 
{"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="a9526948-0b12-4312-c34b-4577e8cc3286" df4 = df3.drop_duplicates() df4.info() # + id="OuRnQAbyAlPB" df4[(df4.msg.isnull()) & (df4.subj.isnull())] # + id="uhzvQ4tHBWQh" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1597560302539, "user_tz": -330, "elapsed": 5413, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="7afb675a-2a10-43fb-f1fe-5a4bf657325a" df5 = df4.dropna(subset=['subj', 'msg'], how='all') df5.info() # + id="lRdLXC_XCBv_" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1597560308040, "user_tz": -330, "elapsed": 1665, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="a2c8c1f7-68bb-4157-a266-77f3bee8b906" df4.shape, df5.shape # + id="_XgU0TDhCMYR" sample = df5.sample(10) sample # + id="X1QhUkCUYRA9" # !pip install ekphrasis from ekphrasis.classes.preprocessor import TextPreProcessor from ekphrasis.classes.tokenizer import SocialTokenizer from ekphrasis.dicts.emoticons import emoticons text_processor = TextPreProcessor( normalize=['url', 'email', 'percent', 'money', 'phone', 'user', 'time', 'date', 'number'], # annotate={"hashtag", "allcaps", "elongated", "repeated", # 'emphasis', 'censored'}, fix_html=True, segmenter="twitter", corrector="twitter", unpack_hashtags=True, unpack_contractions=True, spell_correct_elong=False, tokenizer=SocialTokenizer(lowercase=False).tokenize, dicts=[emoticons] ) # + id="ZXHVLF4f1lSB" import re from bs4 import BeautifulSoup # + id="B8AeiZmRGy9M" # text_raw = sample.loc[[299500]].msg.tolist()[0] # text = text_raw # text = BeautifulSoup(text, "lxml").text # text = re.sub(r'<.*?>', ' ', text) # text = re.sub(r'\{[^{}]*\}', ' ', text) # text = re.sub(r'\s', ' ', text) # text = re.sub(r'.*\..*ID.*?(?=\s)', ' ', text) # text = 
re.sub(r'DIV..*?(?=\s)', ' ', text) # text = BeautifulSoup(text, "lxml").text # text = text.strip() # text = " ".join(text_processor.pre_process_doc(text)) # text # + id="s2aAG-BXEHSg" from itertools import groupby # + id="RhH8AaG4reu8" html_residual = 'P . ImprintUniqueID LI . ImprintUniqueID DIV . ImprintUniqueID TABLE . ImprintUniqueIDTable DIV . Section ' caution_residual = 'CAUTION This email originated from outside of the organization . Do not click links or open attachments unless you recognize the sender and know the content is safe . ' # + id="875RLvXEZfwj" def clean_text(text): text = ' ' + text + ' ' text = BeautifulSoup(text, "lxml").text text = re.sub(r'<.*?>', ' ', text) text = re.sub(r'\{[^{}]*\}', ' ', text) text = re.sub(r'\s', ' ', text) # text = re.sub(r'(?=\s).*\..*ID.*?(?=\s)', ' ', text) # text = re.sub(r'(?=\s)DIV..*?(?=\s)', ' ', text) text = re.sub(r'Forwarded message.*?(?=____)', ' ', text) text = BeautifulSoup(text, "lxml").text text = ' '.join(text_processor.pre_process_doc(text)) text = re.sub(r'[^A-Za-z<>. ]', ' ', text) text = ' '.join(text.split()) text = re.sub(html_residual, '', text) text = re.sub(caution_residual, '', text) text = re.sub(r'(?:\d+[a-zA-Z]+|[a-zA-Z]+\d+)', '<hash>', text) # text = re.sub(r'\b\w{1,2}\b', '', text) text = ' '.join(text.split()) text = ' '.join([k for k,v in groupby(text.split())]) return text # + id="ndCIDKuek8nj" # # text_raw = sample.loc[[75806]].msg.tolist()[0] # text = text_raw # text = BeautifulSoup(text, "lxml").text # text = re.sub(r'<.*?>', ' ', text) # text = re.sub(r'\{[^{}]*\}', ' ', text) # text = re.sub(r'\s', ' ', text) # # text = re.sub(r'.*\..*ID.*?(?=\s)', ' ', text) # # text = re.sub(r'DIV..*?(?=\s)', ' ', text) # text = re.sub(r'Forwarded message.*?(?=____)', ' ', text) # text = BeautifulSoup(text, "lxml").text # text = " ".join(text_processor.pre_process_doc(text)) # text = re.sub(r'[^A-Za-z0-9<>. 
]', ' ', text) # text = ' '.join(text.split()) # text # + id="ZNWWpQLGjfJB" sample = df5.sample(1000) # + id="doOt0vI3vnsq" sample['subj_clean'] = sample['subj'].fillna(' ').apply(clean_text) # + id="A_R-SBxY2aST" [(x,y) for x,y in zip(sample.subj.tolist()[:50],sample.subj_clean.tolist()[:50])] # + id="QB_k4RX7auVr" sample['msg_clean'] = sample['msg'].fillna(' ').apply(clean_text) sample.msg.tolist() # + id="gyp01dH0hwsL" sample.msg_clean.tolist() # + id="ZOq1zBOj4F8S" colab={"base_uri": "https://localhost:8080/", "height": 170} executionInfo={"status": "ok", "timestamp": 1597410713287, "user_tz": -330, "elapsed": 7056, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="6f75514a-fec3-408a-eae5-8fa6f454c7ea" sample = df5.sample(1000, random_state=40) sample['subj_clean'] = sample['subj'].fillna(' ').apply(clean_text) sample['msg_clean'] = sample['msg'].fillna(' ').apply(clean_text) sample['subj&msg'] = sample['subj_clean'] + ' | ' + sample['msg_clean'] sample = sample[['subj&msg','type']] sample.columns = ['text','target'] sample.info() # + id="vDePhF264w__" sample # + id="v7RXJnql5aAX" colab={"base_uri": "https://localhost:8080/", "height": 170} executionInfo={"status": "ok", "timestamp": 1597426275158, "user_tz": -330, "elapsed": 4538335, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="381b5dac-8a76-4fdc-b21c-00bcf535f970" sample = df5.copy() sample['subj_clean'] = sample['subj'].fillna(' ').apply(clean_text) sample['msg_clean'] = sample['msg'].fillna(' ').apply(clean_text) sample['subj&msg'] = sample['subj_clean'] + ' | ' + sample['msg_clean'] sample = sample[['subj&msg','type']] sample.columns = ['text','target'] sample.info() # + id="TVVTXU7kFM3a" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1597427222296, "user_tz": -330, "elapsed": 3555, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": 
"13037694610922482904"}} outputId="3d277c72-06cd-4deb-82fb-a482546d578d" sample.nunique() # + id="cAFQ27-hE8-X" colab={"base_uri": "https://localhost:8080/", "height": 170} executionInfo={"status": "ok", "timestamp": 1597427243634, "user_tz": -330, "elapsed": 4552, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="1f848bd6-6255-47d4-d999-c56df0e8caac" sample2 = sample.drop_duplicates() sample2.info() # + id="DbvywuvOFhso" colab={"base_uri": "https://localhost:8080/", "height": 170} executionInfo={"status": "ok", "timestamp": 1597427354490, "user_tz": -330, "elapsed": 11001, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="001d1e54-7f9b-4aea-a6a5-7ab3dfb544ec" sample3 = sample2.replace(r'^\s*$', np.nan, regex=True) sample3.info() # + id="OXUDri7LB8JK" df5.to_pickle('df_raw_wrangled.p') # + id="Eq78wXKwF-7d" colab={"base_uri": "https://localhost:8080/", "height": 170} executionInfo={"status": "ok", "timestamp": 1597427495497, "user_tz": -330, "elapsed": 1060, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="58727a8f-d9d7-4184-ccdc-739319a2d45f" sample2.info() # + id="31iN8-7PGSoh" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1597427506045, "user_tz": -330, "elapsed": 3731, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ee4bfc45-f0b1-4202-e08a-ff5341e55ef2" sample2.nunique() # + id="gxFziBUVGUmh" sample2.describe() # + [markdown] id="itwk9pEuGXU4" # ## Text Cleaning # + id="TMAiI_G3sDew" import os import re import numpy as np import pandas as pd from tqdm import tqdm import seaborn as sns import matplotlib.pyplot as plt from bs4 import BeautifulSoup import nltk nltk.download('stopwords') from nltk.corpus import stopwords stopwords = list(set(stopwords.words('english'))) from nltk.stem import WordNetLemmatizer 
nltk.download('wordnet') lemmatizer = WordNetLemmatizer() from nltk.stem import PorterStemmer ps = PorterStemmer() import warnings warnings.filterwarnings("ignore") tqdm.pandas() # %reload_ext autoreload # %autoreload 2 # %reload_ext google.colab.data_table # %config InlineBackend.figure_format = 'retina' plt.style.use('fivethirtyeight') plt.style.use('seaborn-notebook') # + id="nMk12EPzsd5B" colab={"base_uri": "https://localhost:8080/", "height": 170} executionInfo={"status": "ok", "timestamp": 1597897404475, "user_tz": -330, "elapsed": 6977, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c27caa87-def8-49df-8994-985726270da3" df = pd.read_pickle(os.path.join(path,'data','wrangled','df_raw_wrangled_sample_10k.p')) df.info() # + id="1gfvcYwqtVsJ" df.sample(5) # + id="JEFv-QYqyO_q" df[df.target=='<redacted>'].sample(5) # + id="Llthvl8wtYa2" df.target.value_counts() # + id="4pVEL8xlvVnp" # # cleaning pipe # - lowercase # - remove nonalpha # - stopword # - lemmatization # - stemming # - min occurence # - max occurence # - ngram # - misspell # - contraction # - encode plus tokenizer # + id="a_rpa62rG6O0" df = df.reset_index(drop=True) # + id="Ot-con96Ia5V" # label tokens caution_label = 'CAUTION: This email originated from outside of the organization. 
def clean_l1(text):
    """First-pass cleaning: swap known boilerplate for placeholder tokens,
    strip HTML, and reduce the text to letters plus light punctuation.

    Relies on module-level names: caution_label, confidential_label,
    html_regex, retransmit_label, alert_label, newline_token and
    BeautifulSoup.
    """
    # Known boilerplate -> stable placeholder tokens so later stages can key on them.
    text = re.sub(caution_label, ' cautionlabel ', text)
    text = re.sub(confidential_label, ' confidentiallabel ', text)
    text = html_regex.sub('htmltoken', text)
    text = re.sub(retransmit_label, ' retransmittoken ', text)
    text = re.sub(alert_label, ' alerttoken ', text)
    text = re.sub('sub_eos_token', ' bodytoken ', text)
    text = ' ' + text + ' '                      # pad for space-anchored patterns
    text = BeautifulSoup(text, "lxml").text      # unescape/strip HTML
    text = re.sub(r'<.*?>', ' ', text)           # leftover tags
    text = re.sub(r'\{[^{}]*\}', ' ', text)      # inline CSS/JS brace blocks
    # # text = re.sub(r'Forwarded message.*?(?=____)', ' ', text)
    text = BeautifulSoup(text, "lxml").text      # second pass for revealed entities
    text = re.sub(newline_token, ' newlinetoken ', text)
    text = ' '.join(text.split())
    # Keep letters and .,?'@ — every other character becomes a space.
    text = re.sub(r'[^A-Za-z.,?\'@]', ' ', text)
    # text = ' '.join(text.split())
    return text
def replace_words(s, words):
    """Return *s* with every key of *words* replaced by its value, in dict order."""
    for old, new in words.items():
        s = s.replace(old, new)
    return s


# Space-delimited abbreviation -> canonical word substitutions.
word_mapping = {' f o ':' fno ', ' a c ':' account ', ' a/c ':' account ', ' fw ':' forward ', ' fwd ':' forward ', ' forwarded ':' forward ', ' no. ':' number ', }


def clean_l2(text):
    """Second-pass cleaning: lowercase, expand abbreviations, isolate
    punctuation, cut the text at the first signature marker matched by
    the module-level set1_regex, then drop single-character words and
    immediate word repetitions.
    """
    text = ' ' + text + ' '
    text = ' '.join(text.lower().split())
    text = replace_words(text, word_mapping)
    # Surround sentence punctuation with spaces so it tokenises separately.
    for mark in ('.', ',', '?'):
        text = text.replace(mark, ' ' + mark + ' ')
    # Keep only words built from lowercase letters and light punctuation.
    token_re = re.compile(r"^[a-z.,?'\-\~#`!&*()]+$")
    text = ' '.join(w for w in text.split() if token_re.match(w))
    text = re.sub(r"[^a-z.,?']", ' ', text)
    # Everything from the first signature marker onwards is discarded;
    # the appended sentinel guarantees a cut point exists.
    text = set1_regex.sub('eostoken', text)
    text = text + ' eostoken '
    text = text.partition('eostoken')[0]
    # Drop single-character words.
    text = re.sub(r'\b\w{1,1}\b', '', text)
    # Collapse immediate repetitions and normalise spacing.
    text = ' '.join(k for k, _ in groupby(text.split()))
    text = ' '.join(text.split())
    return text
def clean_l3(text):
    """Third-pass cleaning: keep lowercase words, lemmatize (verb pass
    then default noun pass), remove NLTK and custom stopwords, and
    Porter-stem what remains.

    Relies on module-level names: lemmatizer (WordNetLemmatizer),
    stopwords, custom_stopwords and ps (PorterStemmer).
    """
    # BUG FIX: the original pattern r'[^a-z]' also deleted spaces, which
    # merged the whole document into a single token and made every
    # word-level step below a no-op. Keep spaces so words stay separate
    # (same class as the parallel TF-IDF clean_l2: r'[^a-z ]').
    text = re.sub(r'[^a-z ]', '', text)
    # Lemmatize verbs first, then the default (noun) pass.
    text = ' '.join([lemmatizer.lemmatize(w, 'v') for w in text.split()])
    text = ' '.join([lemmatizer.lemmatize(w) for w in text.split()])
    # Drop standard and domain-specific stopwords.
    text = ' '.join([w for w in text.split() if not w in stopwords])
    text = ' '.join([w for w in text.split() if not w in custom_stopwords])
    # seen = set()
    # seen_add = seen.add
    # text = ' '.join([x for x in text.split() if not (x in seen or seen_add(x))])
    text = ' '.join([ps.stem(w) for w in text.split()])
    return text
def top_tfidf_feats(row, features, top_n=20):
    """Return a DataFrame of the *top_n* highest-scoring features.

    row      -- 1-D array of tf-idf scores, aligned with *features*
    features -- sequence of feature names
    top_n    -- number of rows to return (default 20)

    Columns of the result are ['features', 'score'], ordered by
    descending score.
    """
    order = np.argsort(row)[::-1]       # indices by descending score
    best = order[:top_n]
    pairs = [(features[idx], row[idx]) for idx in best]
    return pd.DataFrame(pairs, columns=['features', 'score'])
# Placeholder tokens produced by earlier cleaning passes; stripped before modelling.
tokenlist = ['emailtoken', 'urltoken', 'newlinetoken', 'htmltoken', 'currencytoken', 'token', 'digittoken', 'numbertoken']


def preprocess(text):
    """Lowercase *text*, drop placeholder tokens, truncate to 50 words."""
    kept = [w for w in text.lower().split() if w not in tokenlist]
    return ' '.join(kept[:50])
df.target.apply(clean_l1).str.lower().str.split().apply(lambda x: OrderedDict.fromkeys(x).keys()).str.join(' ') minority_labels = df.target.value_counts()[df.target.value_counts()<100].index.tolist() df['target'] = df.target.replace(dict.fromkeys(minority_labels, 'other')) # + id="NpKHHoUwqhTu" df = df[df.target!='other'] df.target.value_counts()[:25] # + id="D1NARewJsTbD" target_map = {'<redacted>': '<redacted>'} df = df.replace({'target': target_map}) df.target.value_counts()[:25] # + id="KwyuDewhypDp" label_encoder = LabelEncoder() df['target'] = label_encoder.fit_transform(df['target']) # + id="gDNgxfml1c5P" df.head() # + id="kTtGC-_y0cGu" with open('label.csv', 'w') as f: wr = csv.writer(f,delimiter="\n") wr.writerow(df.target.unique().tolist()) # + id="iEzYr_2mt2-K" train, val = train_test_split(df, test_size=0.2, random_state=42) train.reset_index(drop=True).to_csv('train.csv') val.reset_index(drop=True).to_csv('val.csv') # + id="AFVdzpaM15bF" # !pip install -q fast-bert # + id="Bh7VEErg18sh" colab={"base_uri": "https://localhost:8080/", "height": 115, "referenced_widgets": ["dba40726134d460295836a4cb566a822", "<KEY>", "439858897eef4ebd80cb4eca13aa2da9", "bcc9350609a248578e6d4c66a4160c09", "92f940f1d71d4d6d9b9a0a49b59af1f0", "d354c421b44842348af67c137364a543", "623adbc81043448393e131e430d4b1e8", "4c0bd4b8e0dd47ad85d95faefeb1df15", "<KEY>", "<KEY>", "318d70924d4147a0b99dcd73595a48c2", "72297d4132fa4927ad35e4dec44b7ec2", "bcccbeea95db4985abf2512c8caf447a", "21fdf8c35de0432f96830a65ac761cc3", "f79a5a934626465987e6cdd559dbde46", "313d7079d5b74aee8d91e32a01408d7c"]} executionInfo={"status": "ok", "timestamp": 1597943546060, "user_tz": -330, "elapsed": 40641, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="63ba6376-1879-4570-d025-940107c38864" from fast_bert.data_cls import BertDataBunch databunch = BertDataBunch('/content', '/content', tokenizer='distilbert-base-uncased', train_file='train.csv', val_file='val.csv', 
label_file='label.csv', text_col='text', label_col='target', batch_size_per_gpu=16, max_seq_length=100, multi_gpu=False, multi_label=False, model_type='distilbert') # + id="EU_8JOp22G3j" from fast_bert.learner_cls import BertLearner from fast_bert.metrics import accuracy import logging import torch logger = logging.getLogger() device_cuda = 'cuda' if torch.cuda.is_available() else 'cpu' metrics = [{'name': 'accuracy', 'function': accuracy}] # + id="iifY-pOp2YU3" colab={"base_uri": "https://localhost:8080/", "height": 171, "referenced_widgets": ["dd6c528596744531b479084c32a7a67d", "929f27b840a24526a806ede845ac1b97", "b6d92920a2434c2f8840e515ec5a7877", "631f2559878c40719df681d781b229ae", "4d3f5288eb0c46c69d0923a2b19714cf", "bc29c3bdf3e84abc919ec6bbc09403c5", "09acb85110ef4ac6b604225f4f403541", "56354758183f463a9dffa4590149c6f2"]} executionInfo={"status": "ok", "timestamp": 1597943583584, "user_tz": -330, "elapsed": 77856, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="d1a59933-3bf1-49b9-8ed6-dfc5ef0c9572" learner = BertLearner.from_pretrained_model( databunch, pretrained_path='distilbert-base-uncased', metrics=metrics, device=device_cuda, logger=logger, output_dir='/content', finetuned_wgts_path=None, warmup_steps=500, multi_gpu=False, is_fp16=True, multi_label=False, logging_steps=50) # + id="ZbQQhEJ62dY2" colab={"base_uri": "https://localhost:8080/", "height": 450, "referenced_widgets": ["ab7e1e874d8e4173b50b54ae4a6f8f9e", "763432504f734cfeaf0bef1cd68e72e9", "4265c59eafcd4d8f962960cf8cc2ac0d", "1fa5de508fae4cc6be576f118d8440a7", "bac660ab018b42799d198ad93b94d0f0", "<KEY>", "cf33508e57d740a4b0eca9b9bbecee22", "958428cf14ab451fb3d111655e305154"]} executionInfo={"status": "ok", "timestamp": 1597943955981, "user_tz": -330, "elapsed": 450154, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="bb664b40-44ca-4e8b-991d-6da9f4821061" 
learner.lr_find(start_lr=1e-4,optimizer_type='lamb') # + id="v63xoVnp2hjx" colab={"base_uri": "https://localhost:8080/", "height": 401} executionInfo={"status": "ok", "timestamp": 1597944129491, "user_tz": -330, "elapsed": 2545, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="33dd6a6c-428f-4161-baa3-6a31f8c739f5" learner.plot(show_lr=2e-2) # + id="k-O9OQVT3Lk8" colab={"base_uri": "https://localhost:8080/", "height": 168} executionInfo={"status": "ok", "timestamp": 1597944555033, "user_tz": -330, "elapsed": 88988, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="066b343a-954e-4f9e-ae74-e38b0566a1c4" learner.fit(epochs=1, lr=2e-2, validate=True, schedule_type="warmup_cosine", optimizer_type="lamb", return_results=True) # + id="ujFx3Fxx3LiO" colab={"base_uri": "https://localhost:8080/", "height": 54} executionInfo={"status": "ok", "timestamp": 1597944461164, "user_tz": -330, "elapsed": 5574, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="88f57bfd-e0c8-4011-eed8-6131667b0d32" learner.validate() # + id="FEAcnoDn3Ld9" learner.save_model() # + id="-0VZJzq47Xzh" xx = val.sample(5); xx # + id="ID9Fvoqj71RN" colab={"base_uri": "https://localhost:8080/", "height": 505} executionInfo={"status": "ok", "timestamp": 1597944971417, "user_tz": -330, "elapsed": 1626, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="1e41fe26-2b25-418c-88e0-0db2241a6cef" predictions = learner.predict_batch(xx.text.tolist()) pd.DataFrame(predictions).T # + id="GkZWwZfZ3abu" # from fast_bert.prediction import BertClassificationPredictor # MODEL_PATH = '/content/model_out' # predictor = BertClassificationPredictor( # model_path='/content', # label_path='/content', # multi_label=False, # model_type='xlnet', # do_lower_case=True) # single_prediction = predictor.predict("just get me result for this text") # texts = ["this 
def f1_multiclass(labels, preds):
    """Micro-averaged F1 score; wrapper over sklearn's f1_score for
    passing as an extra metric to simpletransformers' eval_model."""
    return f1_score(labels, preds, average='micro')
from clean_v2 import clean_l1 # + id="BCxKJQN9D0HJ" import os import re import numpy as np import pandas as pd from tqdm import tqdm import seaborn as sns import matplotlib.pyplot as plt import nltk nltk.download('stopwords') from nltk.corpus import stopwords stopwords = list(set(stopwords.words('english'))) from nltk.stem import WordNetLemmatizer nltk.download('wordnet') lemmatizer = WordNetLemmatizer() from nltk.stem import PorterStemmer ps = PorterStemmer() from sklearn.feature_extraction.text import CountVectorizer import warnings warnings.filterwarnings("ignore") tqdm.pandas() # %reload_ext autoreload # %autoreload 2 # %reload_ext google.colab.data_table # %config InlineBackend.figure_format = 'retina' plt.style.use('fivethirtyeight') plt.style.use('seaborn-notebook') # + id="dHaDj6kED5dN" colab={"base_uri": "https://localhost:8080/", "height": 170} executionInfo={"status": "ok", "timestamp": 1598025748756, "user_tz": -330, "elapsed": 12885, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="9e0505ce-0f74-4989-9a0b-195a5eb9fd7d" df_raw = pd.read_pickle(os.path.join(path,'data_clean_v2.p')) df_raw.info() # + id="P1trnRboFlKa" df = df_raw.sample(10000, random_state=42) # + id="K_dD9JNuEYqI" def clean_l2(text): text = text.lower() text = re.sub(r'[^a-z ]', '', text) text = ' '.join([lemmatizer.lemmatize(w, 'v') for w in text.split()]) text = ' '.join([lemmatizer.lemmatize(w) for w in text.split()]) text = ' '.join([w for w in text.split() if not w in stopwords]) text = ' '.join([ps.stem(w) for w in text.split()]) return text # + id="OpcrR8EYE95N" from collections import OrderedDict df['target'] = df.target.apply(clean_l1).str.lower().str.split().apply(lambda x: OrderedDict.fromkeys(x).keys()).str.join(' ') # + id="BQFHM1LnGedJ" df['text'] = df['text'].apply(clean_l2) # + id="XUVjS-bYzTzX" df = df[df.text.apply(lambda x: len(x.split()))>3] # + id="WNi46YPuFpcZ" xx = df.sample(5, random_state=40) xx # + id="VMysCgg_8sg2" 
import gensim import nltk from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.metrics import accuracy_score, confusion_matrix # + id="xPDY-ViLxMWI" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1598026015093, "user_tz": -330, "elapsed": 1749, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3c0eeb62-e9a5-4c47-c95d-59918dea030b" df['text'].apply(lambda x: len(x.split(' '))).sum() # + id="YukuB5cqypYr" minority_labels = df.target.value_counts()[df.target.value_counts()<100].index.tolist() df['target'] = df.target.replace(dict.fromkeys(minority_labels, 'Other')) df = df[df.target!='Other'] # + id="gpe9jMv0xdnn" X = df.text y = df.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state = 42) # + id="3MLtHV2ExvSd" colab={"base_uri": "https://localhost:8080/", "height": 306} executionInfo={"status": "ok", "timestamp": 1598026355049, "user_tz": -330, "elapsed": 2131, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="a04a8225-c64f-4af9-d632-a3b1dc79bae3" from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline from sklearn.feature_extraction.text import TfidfTransformer nb = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB()), ]) nb.fit(X_train, y_train) # + id="vOK-cf2gyDCa" label_list = list(df.target.unique()) # + id="qcTbWA9Bx0pY" # %%time from sklearn.metrics import classification_report y_pred = nb.predict(X_test) print('accuracy %s' % accuracy_score(y_pred, y_test)) print(classification_report(y_test, y_pred,target_names=label_list)) # + id="OT2kXPSLx5RD" colab={"base_uri": "https://localhost:8080/", "height": 374} executionInfo={"status": "ok", "timestamp": 1598026475330, "user_tz": -330, "elapsed": 2058, "user": 
{"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="8138ec8a-5486-43db-e50f-b5be04373221" from sklearn.linear_model import SGDClassifier sgd = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', SGDClassifier(loss='hinge', penalty='l2',alpha=1e-3, random_state=42, max_iter=5, tol=None)), ]) sgd.fit(X_train, y_train) # + id="vqbX9-eRydlf" # %%time y_pred = sgd.predict(X_test) print('accuracy %s' % accuracy_score(y_pred, y_test)) print(classification_report(y_test, y_pred,target_names=label_list)) # + id="vR1em2v7zXvm" colab={"base_uri": "https://localhost:8080/", "height": 391} executionInfo={"status": "ok", "timestamp": 1598026559596, "user_tz": -330, "elapsed": 8725, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="1036625a-4632-4b94-9a74-048782a1c8e2" from sklearn.linear_model import LogisticRegression logreg = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', LogisticRegression(n_jobs=1, C=1e5)), ]) logreg.fit(X_train, y_train) # + id="XOs8bkx3zg24" # %%time y_pred = logreg.predict(X_test) print('accuracy %s' % accuracy_score(y_pred, y_test)) print(classification_report(y_test, y_pred,target_names=label_list)) # + id="Dt1107X2zkyj" from tqdm import tqdm tqdm.pandas(desc="progress-bar") from gensim.models import Doc2Vec from sklearn import utils import gensim from gensim.models.doc2vec import TaggedDocument import re # + id="3CWldrzcz4vf" def label_sentences(corpus, label_type): """ Gensim's Doc2Vec implementation requires each document/paragraph to have a label associated with it. We do this by using the TaggedDocument method. The format will be "TRAIN_i" or "TEST_i" where "i" is a dummy index of the post. 
""" labeled = [] for i, v in enumerate(corpus): label = label_type + '_' + str(i) labeled.append(TaggedDocument(v.split(), [label])) return labeled # + id="W-U6Th0yz8o3" X_train, X_test, y_train, y_test = train_test_split(df.text, df.target, random_state=0, test_size=0.3) X_train = label_sentences(X_train, 'Train') X_test = label_sentences(X_test, 'Test') all_data = X_train + X_test # + id="9zac3Ygi0DMc" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1598026783023, "user_tz": -330, "elapsed": 6872, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="573703f7-3589-4017-9d65-d6a0fd89f2ed" model_dbow = Doc2Vec(dm=0, vector_size=300, negative=5, min_count=1, alpha=0.065, min_alpha=0.065) model_dbow.build_vocab([x for x in tqdm(all_data)]) # + id="xg0Fbuyf0X2v" for epoch in range(30): model_dbow.train(utils.shuffle([x for x in tqdm(all_data)]), total_examples=len(all_data), epochs=1) model_dbow.alpha -= 0.002 model_dbow.min_alpha = model_dbow.alpha # + id="X3ENGKxc0caJ" def get_vectors(model, corpus_size, vectors_size, vectors_type): """ Get vectors from trained doc2vec model :param doc2vec_model: Trained Doc2Vec model :param corpus_size: Size of the data :param vectors_size: Size of the embedding vectors :param vectors_type: Training or Testing vectors :return: list of vectors """ vectors = np.zeros((corpus_size, vectors_size)) for i in range(0, corpus_size): prefix = vectors_type + '_' + str(i) vectors[i] = model.docvecs[prefix] return vectors # + id="hwAVgSAU0g0k" train_vectors_dbow = get_vectors(model_dbow, len(X_train), 300, 'Train') test_vectors_dbow = get_vectors(model_dbow, len(X_test), 300, 'Test') # + id="XdX8xFBb0kyY" colab={"base_uri": "https://localhost:8080/", "height": 102} executionInfo={"status": "ok", "timestamp": 1598026839795, "user_tz": -330, "elapsed": 3244, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} 
outputId="bd6e6da0-a839-4cd8-9dfe-f156008ca53c" logreg = LogisticRegression(n_jobs=1, C=1e5) logreg.fit(train_vectors_dbow, y_train) # + id="Edtpau890kwg" y_pred = logreg.predict(test_vectors_dbow) # + id="V5n-mojn0krx" print('accuracy %s' % accuracy_score(y_pred, y_test)) print(classification_report(y_test, y_pred,target_names=label_list)) # + id="oruSKku20wfa" import itertools import tensorflow as tf from sklearn.preprocessing import LabelBinarizer, LabelEncoder from sklearn.metrics import confusion_matrix from tensorflow import keras from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.preprocessing import text, sequence from keras import utils # + id="iNb9hzDI082w" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1598026935794, "user_tz": -330, "elapsed": 1487, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b279f9be-656d-454a-aef6-95e21cc92542" train_size = int(len(df) * .7) print ("Train size: %d" % train_size) print ("Test size: %d" % (len(df) - train_size)) # + id="Du9CVz-J0-eK" train_posts = df['text'][:train_size] train_tags = df['target'][:train_size] test_posts = df['text'][train_size:] test_tags = df['target'][train_size:] # + id="G_HFk3_Q1AiU" max_words = 1000 tokenize = text.Tokenizer(num_words=max_words, char_level=False) # + id="V2zpZfyo1H0N" tokenize.fit_on_texts(train_posts) # only fit on train x_train = tokenize.texts_to_matrix(train_posts) x_test = tokenize.texts_to_matrix(test_posts) # + id="Syys4hmy1Je8" encoder = LabelEncoder() encoder.fit(train_tags) y_train = encoder.transform(train_tags) y_test = encoder.transform(test_tags) # + id="j7SRaWY01LPo" num_classes = np.max(y_train) + 1 y_train = utils.to_categorical(y_train, num_classes) y_test = utils.to_categorical(y_test, num_classes) # + id="uJI2IDyk1Vf5" colab={"base_uri": "https://localhost:8080/", "height": 85} executionInfo={"status": "ok", 
"timestamp": 1598027039642, "user_tz": -330, "elapsed": 1878, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="cb9df911-b3c2-433d-9393-016aa556b95e" print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('y_train shape:', y_train.shape) print('y_test shape:', y_test.shape) # + id="qWIAD7V41Xut" batch_size = 32 epochs = 2 # + id="LYauGei31ZP9" # Build the model model = Sequential() model.add(Dense(512, input_shape=(max_words,))) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # + id="4ck3MgmI1axp" colab={"base_uri": "https://localhost:8080/", "height": 85} executionInfo={"status": "ok", "timestamp": 1598027064606, "user_tz": -330, "elapsed": 3661, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="eacabdaa-3eac-4bd4-ac88-4ea2ba142e0e" history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.1) # + id="sbqcsfWS1dZ9" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1598027074869, "user_tz": -330, "elapsed": 2953, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="72dcd089-e432-408b-a9d8-f19e9f6b30ad" score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1) print('Test accuracy:', score[1]) # + id="-8dqS1akb9lD" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1598104750040, "user_tz": -330, "elapsed": 2505, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="d9ef6a95-3c0d-4420-a7d6-317c770e61a5" def checkinside(V, T): lAB = ((V[1][0] - V[0][0])**2 + (V[1][1] - V[0][1])**2)**(0.5) lBC = ((V[2][0] - V[1][0])**2 + (V[2][1] - V[1][1])**2)**(0.5) 
uAB = ((V[1][0] - V[0][0]) / lAB, (V[1][1] - V[0][1]) / lAB) uBC = ((V[2][0] - V[1][0]) / lBC, (V[2][1] - V[1][1]) / lBC) BP = ((T[0][0] - V[1][0]), (T[0][1] - V[1][1])) SignedDistABP = BP[0] * uAB[1] - BP[1] * uAB[0] SignedDistBCP = - BP[0] * uBC[1] + BP[1] * uBC[0] result = 'inside' if ((SignedDistABP*SignedDistBCP > 0) and \ (abs(SignedDistABP) <= lBC) and \ abs(SignedDistBCP) <= lAB) \ else 'not inside' return result V = [(670273, 4879507), (677241, 4859302), (670388, 4856938), (663420, 4877144)] T = [(670831, 4867989), (675097, 4869543)] print(checkinside(V,[T[0]])) print(checkinside(V,[T[1]])) # + [markdown] id="twJPvJEGXyKk" # ## FastText model # + id="Y5VvWh-WaTne" # !pip install -q clean-text[gpl] && cp '/content/drive/My Drive/clean_v2.py' . from clean_v2 import clean_l1 # + id="b77ZKHQCRzuJ" import os import re import numpy as np import pandas as pd from tqdm import tqdm import seaborn as sns import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") tqdm.pandas() # %reload_ext autoreload # %autoreload 2 # %reload_ext google.colab.data_table # %config InlineBackend.figure_format = 'retina' plt.style.use('fivethirtyeight') plt.style.use('seaborn-notebook') # + id="k37X92FzSEwz" colab={"base_uri": "https://localhost:8080/", "height": 170} executionInfo={"status": "ok", "timestamp": 1597917161960, "user_tz": -330, "elapsed": 11302, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="55ed77cd-1275-46ea-e44f-a8cc98a73512" df_raw = pd.read_pickle(os.path.join(path,'data_clean_v2.p')) df_raw.info() # + id="mxCQwouYZ-Ql" df = df_raw.sample(100000, random_state=42) # + id="h7DefE4RSL0Y" # # !pip install fasttext # + id="h0Kgbtg5jFBu" # preprocessing # lowercase # remove tokens # truncate post-regards # lower th remove # upper th truncate # clean categories # collate categories # train test split # tokenlist = ' '.join([i for i in df['text']]).split() # tokenlist = list(set([w for w in tokenlist if 
# Placeholder tokens injected by the upstream cleaning step; they carry no
# signal for fasttext, so preprocess() strips them before training.
tokenlist = ['emailtoken', 'urltoken', 'htmltoken', 'currencytoken', 'token', 'digittoken', 'numbertoken']


def preprocess(text):
    """Lowercase *text*, drop placeholder tokens and keep the first 50 words."""
    kept = [word for word in text.lower().split() if word not in tokenlist]
    return ' '.join(kept[:50])
"user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="8f41991d-7542-491d-e11c-b3d188876e03" df.isna().any() # + id="quc1nKu1dlcG" df['target'] = ['__label__'+str(s) for s in df['target']] df = df[['target','text']] # + id="d04SXHjauhaK" from sklearn.model_selection import train_test_split train, test = train_test_split(df, test_size=0.2, random_state=42) # + id="6VlaKqzTfuI5" import csv train.to_csv('train.txt', index=False, sep=' ', header=False, quoting=csv.QUOTE_NONE, quotechar="", escapechar=" ") test.to_csv('test.txt', index=False, sep=' ', header=False, quoting=csv.QUOTE_NONE, quotechar="", escapechar=" ") # + id="CMAB78pAfS8v" # train.sample(5).to_csv('sample.txt', index=False, sep=' ', header=False, quoting=csv.QUOTE_NONE, quotechar="", escapechar=" ") # + id="f0yzMHb5gZIp" # import fasttext # model = fasttext.train_supervised(input='train.txt', epoch=50) --> (17874, 0.5248405505203089, 0.5248405505203089) # model = fasttext.train_supervised(input='train.txt', epoch=50, lr=0.5, wordNgrams=2, loss='hs') --> (17874, 0.46620789974264293, 0.46620789974264293) # model = fasttext.train_supervised(input='train.txt', --> (17874, 0.4858453619782925, 0.4858453619782925) # epoch=25, # lr=0.2, # loss='hs', # autotuneMetric='f1', # verbose=5, # minCount=10, # ) # model = fasttext.train_supervised(input='train.txt', --> (17874, 0.5262392301667226, 0.5262392301667226) # epoch=50, # lr=0.1, # loss='softmax', # autotuneMetric='f1', # verbose=5, # minCount=20, # ) model = fasttext.train_supervised(input='train.txt', epoch=50, lr=0.1, loss='softmax', autotuneMetric='f1', verbose=5, minCount=20, ) # + id="n9EgtOfghzi5" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1597927998097, "user_tz": -330, "elapsed": 215881, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="47f32006-ac94-4dd6-eed5-fe6697257057" model.test("test.txt", k=1) # + 
id="EqnP3RyAygzs" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1597926832763, "user_tz": -330, "elapsed": 2453, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0e024b71-cb25-4b70-fea6-392fb6508839" model.test("test.txt", k=5) # + id="t7juI-eVhRjo" colab={"base_uri": "https://localhost:8080/", "height": 110} executionInfo={"status": "ok", "timestamp": 1597927022572, "user_tz": -330, "elapsed": 1139, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0255e384-e78c-412e-9b4c-cb15ea786979" xx = df.sample(); xx # + id="CJFvEgHIhcG3" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1597927022574, "user_tz": -330, "elapsed": 891, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="06383cff-b69a-4306-a3a0-2c6d493bd09d" model.predict(xx.text.tolist()[0], k=5) # + [markdown] id="hHXMC73lYsr4" # ## Pipeline # + id="KiF2mUb9fAqS" # !pip install -q fast-bert # !pip install -q fasttext # !pip install -q clean-text[gpl] # + id="8kJPedUNcCAq" # setup import os import pickle import shutil import warnings import numpy as np import pandas as pd from tqdm import tqdm from pathlib import Path from collections import OrderedDict warnings.filterwarnings("ignore") Path(work_path).mkdir(parents=True, exist_ok=True) os.chdir(work_path) # + id="15As3RASYIYP" shutil.copyfile(os.path.join(save_path,'utils_clean.py'), os.path.join(work_path,'utils_clean.py')) from utils_clean import clean_l1, clean_l2 shutil.copyfile(os.path.join(save_path,'utils_preprocess.py'), os.path.join(work_path,'utils_preprocess.py')) from utils_preprocess import * shutil.copyfile(os.path.join(save_path,'label_encoder.p'), os.path.join(work_path,'label_encoder.p')) label_encoder = pickle.load(open('label_encoder.p', 'rb')) shutil.copyfile(os.path.join(save_path,'label_map.p'), 
os.path.join(work_path,'label_map.p')) label_map = pickle.load(open('label_map.p', 'rb')) import fasttext shutil.copyfile(os.path.join(save_path,'fasttext.bin'), os.path.join(work_path,'fasttext.bin')) model_fasttext = fasttext.load_model('fasttext.bin') from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import linear_kernel vectorizer = CountVectorizer(ngram_range=(1,1)) shutil.copyfile(os.path.join(save_path,'model_countvectorizer_large.p'), os.path.join(work_path,'model_countvectorizer.p')) model_countvectorizer = pickle.load(open('model_countvectorizer.p', 'rb')) dtmx = model_countvectorizer['dtm'] vectorizerx = model_countvectorizer['vectorizer'] target_categories = model_countvectorizer['target_categories'] target_labels = model_countvectorizer['target_labels'] shutil.copyfile(os.path.join(save_path,'model_tfidf_large.p'), os.path.join(work_path,'model_tfidf.p')) model_tfidf = pickle.load(open(os.path.join(work_path,'model_tfidf.p'), 'rb')) from fast_bert.prediction import BertClassificationPredictor shutil.unpack_archive(os.path.join(save_path,'fastbert_large_iter2.zip'), work_path, 'zip') MODEL_PATH = os.path.join(work_path, 'model_out') model_fastbert = BertClassificationPredictor( model_path=MODEL_PATH, label_path=work_path, multi_label=False, model_type='distilbert', do_lower_case=True) # + id="jyekm-vdHzSo" X = pd.DataFrame(label_map, index=[0]).T.reset_index() X.columns = ['Raw_data_labels','processed_labels'] X.to_csv('') # + id="UCqjgBOscTsn" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1598870887239, "user_tz": -330, "elapsed": 41252, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="fcc50ed7-60ef-450f-bd13-13ea1935cf72" df_raw = pd.read_pickle(os.path.join('/content/drive/My Drive/','data','raw','data_raw.p')) df_raw.info() # + id="ISpHU-TQIQhx" df_raw['labels_raw'] = df_raw['QueryType'] + ' | ' + 
df_raw['SubQueryType'] df_raw['labels_raw'].value_counts().to_csv('labels_rawdata.csv') # + id="KuiKRsQaXm8F" # test_set = df_raw.sample(50000, random_state=10) # def preprocess(X): # X = X.drop_duplicates() # X = X.dropna(subset=['tSubject', 'mMsgContent'], how='all') # X['type'] = X['QueryType'] + ' | ' + X['SubQueryType'] # X['type'] = X['type'].fillna(' ') # X['type_orig'] = X['type'] # X['type'] = X['type'].apply(clean_l1).str.lower().str.split().apply(lambda x: OrderedDict.fromkeys(x).keys()).str.join(' ').apply(clean_l1) # X = X[X['type'].isin(list(label_encoder.classes_))] # X['subj&msg'] = X['tSubject'] + ' sub_eos_token ' + X['mMsgContent'] # X = X[['subj&msg','type', 'type_orig']] # X.columns = ['text','target', 'type_orig'] # X = X.dropna() # return X # test_set = preprocess(test_set) # test_set.describe() # label_map = test_set[['type_orig','target']].set_index('type_orig').to_dict()['target'] # pickle.dump(label_map, open(os.path.join(#save_path, 'label_map.p'), 'wb')) # + id="pAZKv5cndsPA" # test_set = df_raw.sample(10000, random_state=10) # def preprocess(X): # X = X.drop_duplicates() # X = X.dropna(subset=['tSubject', 'mMsgContent'], how='all') # X['type'] = X['QueryType'] + ' | ' + X['SubQueryType'] # X['type'] = X['type'].fillna(' ') # X['type'] = X['type'].apply(clean_l1).str.lower().str.split().apply(lambda x: OrderedDict.fromkeys(x).keys()).str.join(' ').apply(clean_l1) # X = X[X['type'].isin(list(label_encoder.classes_))] # X['subj&msg'] = X['tSubject'] + ' sub_eos_token ' + X['mMsgContent'] # X = X[['subj&msg','type']] # X.columns = ['text','target'] # X = X.dropna() # return X # test_set = preprocess(test_set) # test_set.describe() # + id="gFI4fLzNeXrv" ##### <----- freezed backup -----> ##### # def predict_fasttext(text): # text = clean_l1(text) # text = preprocess_fasttext(text) # preds = model_fasttext.predict(text, k=-1) # label_names = label_encoder.inverse_transform([int(x.split('__')[-1]) for x in preds[0]]) # preds = [(x,y) for x,y 
# + id="bXERZM74br8l"
def predict_fasttext(text):
    """Rank all labels with the fasttext model.

    Returns a DataFrame indexed by integer label id with a single
    'rank_fasttext' column holding the reciprocal rank (1/position).
    """
    text = clean_l1(text)
    text = preprocess_fasttext(text)
    preds = model_fasttext.predict(text, k=-1)
    # fasttext labels look like '__label__<id>'; recover the integer id.
    preds = [int(x.split('__')[-1]) for x in preds[0]]
    preds = pd.DataFrame([(x, (1 / (i + 1))) for i, x in enumerate(preds)],
                         columns=['label', 'rank_fasttext']).set_index('label')
    return preds


def predict_fastbert(text):
    """Rank all labels with the distilbert (fast-bert) model.

    Returns a DataFrame indexed by integer label id with a 'rank_fastbert'
    reciprocal-rank column.
    """
    text = clean_l1(text)
    text = preprocess_fastbert(text)
    preds = model_fastbert.predict(text)
    preds = pd.DataFrame([(int(x[0]), (1 / (i + 1))) for i, x in enumerate(preds)],
                         columns=['label', 'rank_fastbert']).set_index('label')
    return preds


def predict_tfidf(text):
    """Rank all labels with the tf-idf classifier (reciprocal rank of class probability)."""
    text = clean_l1(text)
    text = clean_l2(text)
    preds = model_tfidf.predict_proba([text])[0]
    preds = [(int(x), y) for x, y in zip(model_tfidf.classes_, preds)]
    preds.sort(key=lambda x: x[1], reverse=True)
    preds = pd.DataFrame([(int(x[0]), (1 / (i + 1))) for i, x in enumerate(preds)],
                         columns=['label', 'rank_tfidf']).set_index('label')
    return preds


def predict_countvect(text):
    """Rank all labels by cosine similarity against the count-vectorizer DTM."""
    text = clean_l1(text)
    text = clean_l2(text)
    text = preprocess_countvectorizer(text)
    cosim = linear_kernel(vectorizerx.transform([text]), dtmx).flatten()
    preds = [(int(target_categories[i]), cosim[i]) for i in range(len(cosim))]
    preds.sort(key=lambda x: x[1], reverse=True)
    preds = pd.DataFrame([(int(x[0]), (1 / (i + 1))) for i, x in enumerate(preds)],
                         columns=['label', 'rank_cvt']).set_index('label')
    return preds


# Per-model weights applied to each model's reciprocal rank.
model_weight = {'fasttext': 10, 'fastbert': 5, 'tfidf': 3, 'cvt': 2}


def predict(text, use_fastbert=False):
    """Ensemble the per-model rank frames into one weighted score, best first.

    NOTE(fix): the notebook originally defined predict() twice in a row; the
    second definition (fasttext + tfidf + countvect only) silently shadowed
    the first (which also included fastbert). The surviving behavior is the
    default here; pass use_fastbert=True to re-enable the distilbert member
    with its original weight.
    """
    pred = predict_fasttext(text)
    if use_fastbert:
        pred = pred.join(predict_fastbert(text), on='label')
    pred = pred.join(predict_tfidf(text), on='label')
    pred = pred.join(predict_countvect(text), on='label')
    pred['score'] = (pred['rank_fasttext'] * model_weight['fasttext']) + \
                    (pred['rank_tfidf'] * model_weight['tfidf']) + \
                    (pred['rank_cvt'] * model_weight['cvt'])
    if use_fastbert:
        pred['score'] += pred['rank_fastbert'] * model_weight['fastbert']
    pred = pred.sort_values(by='score', ascending=False)
    return pred
sub_eos_token ' + X['mMsgContent'] X['label'] = X['OriginalQuery'] + ' | '+ X['OriginalSubQuery'] X['plabel'] = X['label'].apply(lambda x: label_map[x]) X['clabel'] = label_encoder.transform(X.plabel) X = X[['text','clabel']] print(X.info()) X = X.dropna().drop_duplicates() print(X.info()) X.head() # + id="wEtRLtiYirzF" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1598812687665, "user_tz": -330, "elapsed": 1808921, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="335db9db-1786-44db-dbd0-89c825ac5c23" from tqdm import tqdm tqdm.pandas() top1 = top2 = top3 = 0 for index, row in tqdm(X.iterrows(), total=X.shape[0]): text = row.text label = row.clabel preds = predict(text).index.tolist()[:3] if label==preds[0]: top1+=1 elif label==preds[1]: top2+=1 elif label==preds[2]: top3+=1 print(top1, top2, top3) # + id="muSU6h9Hqc3l" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1598813603171, "user_tz": -330, "elapsed": 1007, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c44255dd-0d5d-4af0-b17d-15bc6dd2694d" top1p = top1/X.shape[0] top2p = top1p + top2/X.shape[0] top3p = top2p + top3/X.shape[0] print(top1p, top2p, top3p) # + id="ay-dfCenbe8v" colab={"base_uri": "https://localhost:8080/", "height": 269} executionInfo={"status": "ok", "timestamp": 1598532374357, "user_tz": -330, "elapsed": 5335, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3d266c76-686e-47a5-f666-8c148aebc265" query = test_set.sample() # print('Text: ',query.text.values[0]) print('Actual Label: ',query.target.values[0]) print('Actual Label: ',label_encoder.transform([query.target.values[0]])) predict(query.text.values[0]).head() # + id="gajtJsfoyXVS" test_set['target_cat'] = label_encoder.transform(test_set.target) test_set.head() # + id="x_cU7vl8vDCe" 
colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1598389586728, "user_tz": -330, "elapsed": 7238727, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3b791ccf-c7b4-40dc-a163-6a75137d4afc" from tqdm import tqdm tqdm.pandas() top1 = top2 = top3 = 0 for index, row in tqdm(test_set.iterrows(), total=test_set.shape[0]): text = row.text label = row.target_cat preds = predict(text).index.tolist()[:3] if label==preds[0]: top1+=1 elif label==preds[1]: top2+=1 elif label==preds[2]: top3+=1 print(top1, top2, top3) # + id="QeHWkdVoz_ws" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1598389942491, "user_tz": -330, "elapsed": 1430, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0dcdae98-f886-4712-fb93-d74ce0da6fe9" top1p = top1/test_set.shape[0] top2p = top1p + top2/test_set.shape[0] top3p = top2p + top3/test_set.shape[0] print(top1p, top2p, top3p) # + id="leriRNFRdvIL" def func(subj, msg): text = str(subj) + ' sub_eos_token ' + str(msg) preds = predict(text).head(3) preds.index = label_encoder.inverse_transform(preds.index.tolist()) preds = preds.rename_axis('label').reset_index()[['label','score']] preds.label = preds.label.apply(lambda x: label_map[x]) preds = preds.T.to_dict() preds = str(preds) return preds # + id="-1gtjKrdYaUU" x = test_set.sample() x # + id="vl3T4yCKYmvJ" x.text.tolist()[0] # + id="WPfidXIiYyTC" subj = '<redacted>' msg = '<redacted>' # + id="Jm77CpiCC8QA" xx = func(subj, msg) xx # + id="z_RKqaTOZFH2" xx = func(subj, msg) xx # + id="G_66smUxZyH_" pd.Series(test_set['type_orig'].unique()).to_csv('label_list.csv', index=False) # + id="8DWczOcybExT" # !pip install -q gradio import gradio as gr # + id="ZL9uUzJ3cB68" gr.Interface(func, [ gr.inputs.Textbox(lines=2, label='Email Subject'), gr.inputs.Textbox(lines=10, label='Email Body'), ], 
gr.outputs.Textbox(label='Output Labels')).launch(); # + id="xKIgGftUdIs2" xx = pd.DataFrame(label_map, index=[0]).T.reset_index() xx.columns = ['plabel', 'olabel'] xx.head() # + id="tzqykAQ8Q6EI" xx.olabel.value_counts()
_notebooks/2022-01-02-email-classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _execution_state="idle" _cell_guid="befd77cb-4e21-4920-a150-e3c72eafb161" _uuid="58beef0b43c60cfbbc7c3af8f107c5f603abbbcf" # # Titanic: Machine Learning from Disaster # + [markdown] _execution_state="idle" _cell_guid="7e5bb065-67ad-41a8-a91b-c3656c3d2933" _uuid="2524341ed59d7d83dcc5dd9de387f0967f0b73a0" # This tutorial is basically aimed at beginners who just started their journey of Data Science. Even I wrote this tutorial with the knowledge that I gained in my first 2 months of Data Science experience. # # This notebook mainly talks about the "Feature Engineering". We will find different methods to fill missing data, combine features to create new relations and relationships. We will also show you how to create categorical values from continuous values. # # This notebook uses three ensembles - RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier and then the VotingClassifier on top of them to predict the 'Survival' of each passenger in the Test Data. # + [markdown] _execution_state="idle" _cell_guid="490278e8-f459-47aa-adf8-a2d968d6b41e" _uuid="f4d3aca148eeac07e3c4f2a4d1b7a22271ed02da" # ## 1. 
Importing Required Libraries # + _execution_state="idle" _cell_guid="ce7acd68-fca9-427a-be0e-73249e96e0cc" _uuid="a06709bddba29cf9293f88ae8698e6b51806d6c6" import os import warnings warnings.filterwarnings("ignore") import numpy as np import pandas as pd from sklearn import preprocessing from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn import ensemble from sklearn import model_selection from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LinearRegression import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns # + [markdown] _execution_state="idle" _cell_guid="1fb38a82-4783-48eb-8999-894d5e42fa05" _uuid="db4fc7b2b870f136d7c2259c23e4b8c39609620f" # ## 2. Basic Functions to create new category features # + [markdown] _execution_state="idle" _cell_guid="a69e96aa-6a2c-446d-a257-120ecd5db386" _uuid="e0084fbb785a0cb59824a29c6e2a408a6dd15860" # Below are the functions that we use to create new category features from continuous values. Below are the new category features that are created from the already available features: # # 1. Fare Category # 2. Pclass Fare Category # 3. Family Size Category # 4. Age Group Category # 5. 
# Name Length Category  (continuation of the feature list in the cell above)

# + [markdown]
# ### 2.1 Drop unnecessary columns

# +
def drop_col_not_req(df, cols):
    """Drop the given columns from *df* in place."""
    df.drop(cols, axis=1, inplace=True)


# + [markdown]
# ### 2.2 Create a Fare category

# +
def fare_category(fare):
    """Bucket a fare into one of five coarse fare bands."""
    for upper, label in ((4, 'Very_Low_Fare'), (10, 'Low_Fare'),
                         (30, 'Med_Fare'), (45, 'High_Fare')):
        if fare <= upper:
            return label
    return 'Very_High_Fare'


# + [markdown]
# ### 2.3 Create a PClass Fare category

# +
def pclass_fare_category(df, Pclass_1_mean_fare, Pclass_2_mean_fare, Pclass_3_mean_fare):
    """Label a row's fare as Low/High relative to the mean fare of its Pclass.

    *df* is a row (anything supporting df['Pclass'] / df['Fare'] lookup).
    Returns None for a Pclass outside {1, 2, 3}, matching the original's
    implicit fall-through.
    """
    class_means = {1: Pclass_1_mean_fare, 2: Pclass_2_mean_fare, 3: Pclass_3_mean_fare}
    pclass = df['Pclass']
    mean_fare = class_means.get(pclass)
    if mean_fare is None:
        return None
    side = 'Low' if df['Fare'] <= mean_fare else 'High'
    return 'Pclass_%d_%s_Fare' % (pclass, side)


# + [markdown]
# ### 2.4 Create a Family Size category

# +
def family_size_category(family_size):
    """Bucket SibSp+Parch+1 into Single / Small_Family / Large_Family."""
    if family_size <= 1:
        return 'Single'
    return 'Small_Family' if family_size <= 3 else 'Large_Family'


# + [markdown]
# ### 2.5 Create Age Group category

# +
def age_group_cat(age):
    """Map an age in years onto a coarse life-stage label."""
    for upper, label in ((1, 'Baby'), (4, 'Toddler'), (12, 'Child'),
                         (19, 'Teenager'), (30, 'Adult'), (50, 'Middle_Aged')):
        if age <= upper:
            return label
    # NOTE(review): the original uses a strict `< 60` here while every other
    # bound is inclusive, so exactly 60 lands in 'Old' — preserved as-is.
    return 'Senior_Citizen' if age < 60 else 'Old'


# + [markdown]
# ### 2.6 Create Name_Length_Category

# +
def name_len_category(name_len):
    """Bucket a passenger-name length into four bands."""
    for upper, label in ((20, 'Very_Short_Name'), (28, 'Short_Name'),
                         (45, 'Medium_Name')):
        if name_len <= upper:
            return label
    return 'Long_Name'


# + [markdown]
# ## 3. Function to Fill Missing Age Values

# + [markdown]
# As there are about 20% of Age values with NaN, instead of just filling them
# with the Mean or Mean based on their Age group we will use
# GradientBoostingRegressor and LinearRegression to fill the missing values.
# This function takes two parameters:
#
# - missing_age_train - This is the data corresponding to the Age with Non-Null values
# - missing_age_test - This data corresponds to that with the missing Age values
#
# This routine uses two ensemble methods to calculate the missing Age values and
# then use their mean to fill the Age values in the original data set.

# +
def fill_missing_age(missing_age_train, missing_age_test):
    """Predict missing 'Age' values and write them into *missing_age_test*.

    Fits a GradientBoostingRegressor and a LinearRegression (both tuned via
    GridSearchCV) on the rows where Age is known, predicts Age for the rows
    where it is missing, and fills 'Age' with the mean of the two predictions.
    Returns *missing_age_test* (mutated in place).
    """
    # Split the known-age frame into features and target; the unknown-age
    # frame keeps only its features.
    missing_age_X_train = missing_age_train.drop(['Age'], axis = 1)
    missing_age_y_train = missing_age_train['Age']
    missing_age_X_test = missing_age_test.drop(['Age'], axis = 1)

    # --- Model 1: gradient-boosted regressor (single-point "grid" — the
    # grid search here is effectively just cross-validated fitting).
    gbm_reg = ensemble.GradientBoostingRegressor(random_state = 42)
    gbm_reg_param_grid = {'n_estimators': [2000], 'max_depth': [3], 'learning_rate': [0.01], 'max_features': [3]}
    gbm_reg_grid = model_selection.GridSearchCV(gbm_reg, gbm_reg_param_grid, cv = 5, n_jobs = 25, verbose = 1, scoring = 'neg_mean_squared_error')
    gbm_reg_grid.fit(missing_age_X_train, missing_age_y_train)
    print("Age feature Best GB Params: " + str(gbm_reg_grid.best_params_))
    print("Age feature Best GB Score: " + str(gbm_reg_grid.best_score_))
    print("GB Train Error for 'Age' Feature Regressor: " + str(gbm_reg_grid.score(missing_age_X_train, missing_age_y_train)))
    missing_age_test['Age_GB'] = gbm_reg_grid.predict(missing_age_X_test)
    print(missing_age_test['Age_GB'][:4])

    # --- Model 2: plain linear regression.
    # NOTE(review): 'normalize' was removed from LinearRegression in recent
    # scikit-learn; this grid assumes an older sklearn — confirm version.
    lrf_reg = LinearRegression()
    lrf_reg_param_grid = {'fit_intercept': [True], 'normalize': [True]}
    lrf_reg_grid = model_selection.GridSearchCV(lrf_reg, lrf_reg_param_grid, cv = 5, n_jobs = 25, verbose = 1, scoring = 'neg_mean_squared_error')
    lrf_reg_grid.fit(missing_age_X_train, missing_age_y_train)
    print("Age feature Best LR Params: " + str(lrf_reg_grid.best_params_))
    print("Age feature Best LR Score: " + str(lrf_reg_grid.best_score_))
    print("LR Train Error for 'Age' Feature Regressor: " + str(lrf_reg_grid.score(missing_age_X_train, missing_age_y_train)))
    missing_age_test['Age_LRF'] = lrf_reg_grid.predict(missing_age_X_test)
    print(missing_age_test['Age_LRF'][:4])

    # Final imputed Age = mean of the two models' predictions; the helper
    # columns are then dropped before returning.
    missing_age_test['Age'] = missing_age_test[['Age_GB', 'Age_LRF']].mean(axis = 1)
    print(missing_age_test['Age'][:4])
    drop_col_not_req(missing_age_test, ['Age_GB', 'Age_LRF'])
    return missing_age_test


# + [markdown]
# ## 4. Function to Pick Top 'N' Features

# + [markdown]
# The below routine is used to pick the top 'N' features using three ensemble
# models - RandomForestClassifier, AdaBoostClassifier and ExtraTreesClassifier.
#
# Each of the ensemble models are used to get the top 'N' features based on the
# parameter sent to the function. Later all these features are Union'ed so that
# our final model picks the best features from the three ensembles.
# +
def get_top_n_features(titanic_train_data_X, titanic_train_data_y, top_n_features):
    """Return the union of the top-N features ranked by three tree ensembles.

    Each of RandomForest, AdaBoost and ExtraTrees is tuned with GridSearchCV,
    its feature_importances_ are sorted, and the top *top_n_features* names
    are taken; the three lists are concatenated and de-duplicated.
    """
    # --- Random forest ranking.
    rf_est = RandomForestClassifier(random_state = 42)
    rf_param_grid = {'n_estimators' : [500], 'min_samples_split':[2, 3], 'max_depth':[20]}
    rf_grid = model_selection.GridSearchCV(rf_est, rf_param_grid, n_jobs = 25, cv = 10, verbose = 1)
    rf_grid.fit(titanic_train_data_X, titanic_train_data_y)
    print("Top N Features Best RF Params: " + str(rf_grid.best_params_))
    print("Top N Features Best RF Score: " + str(rf_grid.best_score_))
    print("Top N Features RF Train Error: " + str(rf_grid.score(titanic_train_data_X, titanic_train_data_y)))
    feature_imp_sorted_rf = pd.DataFrame({'feature': list(titanic_train_data_X), 'importance': rf_grid.best_estimator_.feature_importances_}).sort_values('importance', ascending = False)
    features_top_n_rf = feature_imp_sorted_rf.head(top_n_features)['feature']
    print("Sample 25 Features from RF Classifier:")
    print(str(features_top_n_rf[:25]))

    # --- AdaBoost ranking.
    ada_est = ensemble.AdaBoostClassifier(random_state = 42)
    ada_param_grid = {'n_estimators' : [500], 'learning_rate': [0.5, 0.6]}
    ada_grid = model_selection.GridSearchCV(ada_est, ada_param_grid, n_jobs = 25, cv = 10, verbose = 1)
    ada_grid.fit(titanic_train_data_X, titanic_train_data_y)
    print("Top N Features Best Ada Params: " + str(ada_grid.best_params_))
    print("Top N Features Best Ada Score: " + str(ada_grid.best_score_))
    print("Top N Features Ada Train Error: " + str(ada_grid.score(titanic_train_data_X, titanic_train_data_y)))
    feature_imp_sorted_ada = pd.DataFrame({'feature': list(titanic_train_data_X), 'importance': ada_grid.best_estimator_.feature_importances_}).sort_values('importance', ascending = False)
    features_top_n_ada = feature_imp_sorted_ada.head(top_n_features)['feature']
    print("Sample 25 Features from Ada Classifier:")
    print(str(features_top_n_ada[:25]))

    # --- Extra-trees ranking.
    et_est = ensemble.ExtraTreesClassifier(random_state = 42)
    et_param_grid = {'n_estimators' : [500], 'min_samples_split':[3, 4], 'max_depth':[15]}
    et_grid = model_selection.GridSearchCV(et_est, et_param_grid, n_jobs = 25, cv = 10, verbose = 1)
    et_grid.fit(titanic_train_data_X, titanic_train_data_y)
    print("Top N Features Best ET Params: " + str(et_grid.best_params_))
    print("Top N Features Best ET Score: " + str(et_grid.best_score_))
    print("Top N Features ET Train Error: " + str(et_grid.score(titanic_train_data_X, titanic_train_data_y)))
    feature_imp_sorted_et = pd.DataFrame({'feature': list(titanic_train_data_X), 'importance': et_grid.best_estimator_.feature_importances_}).sort_values('importance', ascending = False)
    features_top_n_et = feature_imp_sorted_et.head(top_n_features)['feature']
    print("Sample 25 Features from ET Classifier:")
    print(str(features_top_n_et[:25]))

    #### Merge top_n_features from all three models
    features_top_n = pd.concat([features_top_n_rf, features_top_n_ada, features_top_n_et], ignore_index = True).drop_duplicates()
    return features_top_n


# + [markdown]
# ## 5.
# Train and Test Data  (continuation of the markdown heading above)

# + [markdown]
# ### 5.1 Read the Train and Test Data

# +
train_data_orig = pd.read_csv('../input/train.csv')
test_data_orig = pd.read_csv('../input/test.csv')

# + [markdown]
# ### 5.2 Basic info of Train data

# +
# Bare .shape / .describe() / .head() expressions only display in a notebook;
# they have no effect when run as a plain script.
train_data_orig.shape
train_data_orig.info()
train_data_orig.describe()
train_data_orig.head()

# + [markdown]
# ### 5.3 Basic info of Test data

# +
test_data_orig.shape
test_data_orig.info()
test_data_orig.describe()
test_data_orig.head()

# + [markdown]
# ### 5.4 Combine Train and Test data
#
# The basic reason to combine Train and Test data is to get better insights
# during Feature Engineering. We use the combined train and test data to fill
# the missing values with much accurate values.

# +
# Placeholder label so the test rows can be concatenated with train rows.
test_data_orig['Survived'] = 0
combined_train_test = train_data_orig.append(test_data_orig)
combined_train_test.shape
combined_train_test.info()
combined_train_test.describe()

# + [markdown]
# ## 6. Feature Engineering
# ### 6.1 Embarked
# Fill basic missing values for 'Embarked' feature and convert it in to dummy variable

# +
print(combined_train_test.groupby(['Survived', 'Embarked'])['Survived'].count())
print(combined_train_test['PassengerId'].groupby(by = combined_train_test['Embarked']).count().sort_values(ascending = False))
print(combined_train_test['Fare'].groupby(by = combined_train_test['Embarked']).mean().sort_values(ascending = False))
# Missing embarkation ports are filled with the most frequent port (the mode).
if (combined_train_test['Embarked'].isnull().sum() != 0):
    combined_train_test['Embarked'].fillna(combined_train_test['Embarked'].mode().iloc[0], inplace=True)
combined_train_test.info()
# One-hot encode Embarked (prefix 'Embarked') and append the dummy columns.
emb_dummies_df = pd.get_dummies(combined_train_test['Embarked'], prefix = combined_train_test[['Embarked']].columns[0])
combined_train_test = pd.concat([combined_train_test, emb_dummies_df], axis = 1)

# + [markdown]
# ### 6.2 Sex
# Convert feature
variable 'Sex' into dummy variable # + _execution_state="idle" _cell_guid="3709357c-31ed-4196-8e8b-4efb9a3a629f" _uuid="06cf8de8ea79c5d955d48632eac1b10e335de21f" print(combined_train_test['Sex'].groupby(by = combined_train_test['Sex']).count().sort_values(ascending = False)) print(combined_train_test.groupby(['Survived', 'Sex'])['Survived'].count()) ''' lb_sex = preprocessing.LabelBinarizer() lb_sex.fit(np.array(['male', 'female'])) combined_train_test['Sex'] = lb_sex.transform(combined_train_test['Sex']) ''' sex_dummies_df = pd.get_dummies(combined_train_test['Sex'], prefix = combined_train_test[['Sex']].columns[0]) combined_train_test = pd.concat([combined_train_test, sex_dummies_df], axis = 1) # + [markdown] _execution_state="idle" _cell_guid="9e3f1f5b-4fa3-42c7-95bd-980157a87c5d" _uuid="385d05d4fc54fb747154be1c7782f0707fda727e" # ### 6.3 Name # + [markdown] _execution_state="idle" _cell_guid="671ed4eb-0389-4899-b695-2232b89d847d" _uuid="e72a30f9f8426f61e44ab761341bf791b6d1bf60" # Extract Titles from Name feature and create a new column # + _execution_state="idle" _cell_guid="65805f90-1a8a-41f9-8973-b49c23290979" _uuid="f8160b4efd03f97603b93e4b0de906057510b7ba" combined_train_test['Title'] = combined_train_test['Name'].str.extract('.+,(.+)').str.extract('^(.+?)\.').str.strip() print(combined_train_test['Title'].unique()) print(combined_train_test['Title'].groupby(by = combined_train_test['Title']).count().sort_values(ascending = False)) # + [markdown] _execution_state="idle" _cell_guid="37de4e29-61ab-4a47-bd5f-c67cc2e14c08" _uuid="2457dc23bebe8119d2c5ba98b3791fc8c71ff22a" # Create a Dictionary to map the Title's # + _execution_state="idle" _cell_guid="8bb351ab-10d9-4c95-8fde-6844bbf87447" _uuid="7715711271e92079f75c4d9375c1fa6e7d2fe338" title_Dict = {} title_Dict.update(dict.fromkeys(["Capt", "Col", "Major", "Dr", "Rev"], "Officer")) title_Dict.update(dict.fromkeys(["Jonkheer", "Don", "Sir", "<NAME>", "Dona", "Lady"], "Royalty")) 
# Map raw titles onto a small set of buckets.
# FIX: the original also listed "Ms" under the "Mr" bucket, which overwrote
# the earlier "Ms" -> "Mrs" entry and mis-gendered that title; "Ms" now stays
# mapped to "Mrs".
title_Dict.update(dict.fromkeys(["Mme", "Ms", "Mrs"], "Mrs"))
title_Dict.update(dict.fromkeys(["Mlle", "Miss"], "Miss"))
title_Dict.update(dict.fromkeys(["Mr"], "Mr"))
title_Dict.update(dict.fromkeys(["Master"], "Master"))

# +
combined_train_test['Title'] = combined_train_test['Title'].map(title_Dict)
print(combined_train_test['Title'].groupby(by = combined_train_test['Title']).count().sort_values(ascending = False))
# One-hot encode the bucketed Title.
title_dummies_df = pd.get_dummies(combined_train_test['Title'], prefix = combined_train_test[['Title']].columns[0])
combined_train_test = pd.concat([combined_train_test, title_dummies_df], axis = 1)

# + [markdown]
# Create Name_Length

# +
combined_train_test['Name_Length'] = combined_train_test['Name'].str.len()
print(combined_train_test['Name_Length'].groupby(by = combined_train_test['Name_Length']).count().sort_values(ascending = False)[:5])

# + [markdown]
# Create Name_Length_Category

# +
combined_train_test['Name_Length_Category'] = combined_train_test['Name_Length'].map(name_len_category)
print(combined_train_test['Name_Length_Category'].groupby(by = combined_train_test['Name_Length_Category']).count().sort_values(ascending = False))
le_name_len = LabelEncoder()
# FIX: the original fit list contained 'Very_High_Fare' (copy-pasted from the
# fare encoder), adding a phantom class and shifting the integer codes; fit on
# the four real name-length labels only.
le_name_len.fit(np.array(['Very_Short_Name', 'Short_Name', 'Medium_Name', 'Long_Name']))
combined_train_test['Name_Length_Category'] = le_name_len.transform(combined_train_test['Name_Length_Category'])
print(combined_train_test[['Name_Length_Category', 'Survived']].corr())
name_len_cat_dummies_df = pd.get_dummies(combined_train_test['Name_Length_Category'], prefix = combined_train_test[['Name_Length_Category']].columns[0])
combined_train_test = pd.concat([combined_train_test, name_len_cat_dummies_df], axis = 1)

# + [markdown]
# First_Name

# +
# Everything before the first comma — actually the surname in this data set.
combined_train_test['First_Name'] = combined_train_test['Name'].str.extract('^(.+?),').str.strip()
print(combined_train_test['First_Name'].groupby(by = combined_train_test['First_Name']).count().sort_values(ascending = False)[:5])
first_name_dummies_df = pd.get_dummies(combined_train_test['First_Name'], prefix = combined_train_test[['First_Name']].columns[0])
combined_train_test = pd.concat([combined_train_test, first_name_dummies_df], axis = 1)

# + [markdown]
# Last_Name

# +
combined_train_test['Last_Name'] = combined_train_test['Name'].str.split("\.").str[1].str.strip()
# NOTE(review): str.strip treats its argument as a *set of characters*, not a
# regex — presumably meant to drop a trailing "(...)"; left as written.
combined_train_test['Last_Name'] = combined_train_test['Last_Name'].str.strip("\([^)]*\)")
# FIX: the original called .fillna(...) without assigning the result, so the
# intended fallback was silently discarded; keep the fill by reassigning.
combined_train_test['Last_Name'] = combined_train_test['Last_Name'].fillna(combined_train_test['Name'].str.split("\.").str[1].str.strip())
print(combined_train_test['Last_Name'].groupby(by = combined_train_test['Last_Name']).count().sort_values(ascending = False)[:5])
last_name_dummies_df = pd.get_dummies(combined_train_test['Last_Name'], prefix = combined_train_test[['Last_Name']].columns[0])
combined_train_test = pd.concat([combined_train_test, last_name_dummies_df], axis = 1)

# + [markdown]
# Original_Name

# +
# The maiden/birth name quoted in parentheses, if any.
combined_train_test['Original_Name'] = combined_train_test['Name'].str.split("\((.*?)\)").str[1].str.strip("\"").str.strip()
print(combined_train_test['Original_Name'].groupby(by = combined_train_test['Original_Name']).count().sort_values(ascending = False)[:5])
original_name_dummies_df = pd.get_dummies(combined_train_test['Original_Name'], prefix = combined_train_test[['Original_Name']].columns[0])
combined_train_test = pd.concat([combined_train_test, original_name_dummies_df], axis = 1)

# + [markdown]
# ### 6.4 Fare
# Fill basic missing values for 'Fare' feature

# +
# Missing fares are filled with the mean fare of the passenger's Pclass.
if (combined_train_test['Fare'].isnull().sum() != 0):
    combined_train_test['Fare'] = combined_train_test[['Fare']].fillna(combined_train_test.groupby('Pclass').transform('mean'))
combined_train_test.info()

# + [markdown]
# Divide Fare for those sharing the same Ticket

# +
# A group ticket's fare is the total for the whole group; convert to per-person.
combined_train_test['Group_Ticket'] = combined_train_test['Fare'].groupby(by = combined_train_test['Ticket']).transform('count')
combined_train_test['Fare'] = combined_train_test['Fare'] / combined_train_test['Group_Ticket']
combined_train_test.drop(['Group_Ticket'], axis = 1, inplace = True)

# + [markdown]
# Check if there are any unexpected values

# +
# Zero fares are treated as missing and refilled from the Pclass mean.
if (sum(n == 0 for n in combined_train_test.Fare.values.flatten()) > 0):
    combined_train_test.loc[combined_train_test.Fare == 0, 'Fare'] = np.nan
    combined_train_test['Fare'] = combined_train_test[['Fare']].fillna(combined_train_test.groupby('Pclass').transform('mean'))
combined_train_test['Fare'].describe()

# + [markdown]
# Create a new Fare_Category category variable from Fare feature

# +
combined_train_test['Fare_Category'] = combined_train_test['Fare'].map(fare_category)
le_fare = LabelEncoder()
le_fare.fit(np.array(['Very_Low_Fare', 'Low_Fare', 'Med_Fare', 'High_Fare', 'Very_High_Fare']))
combined_train_test['Fare_Category'] = le_fare.transform(combined_train_test['Fare_Category'])
fare_cat_dummies_df = pd.get_dummies(combined_train_test['Fare_Category'], prefix = combined_train_test[['Fare_Category']].columns[0])
combined_train_test = pd.concat([combined_train_test, fare_cat_dummies_df], axis = 1)
print(combined_train_test['Fare_Category'].groupby(by = combined_train_test['Fare_Category']).count().sort_values(ascending = False))

# + [markdown]
# ### 6.5 Pclass
_uuid="447b34980a5c2c220e0b5e11fb3aab2ee94e9377" print(combined_train_test['Fare'].groupby(by = combined_train_test['Pclass']).mean()) Pclass_1_mean_fare = combined_train_test['Fare'].groupby(by = combined_train_test['Pclass']).mean().get([1]).values[0] Pclass_2_mean_fare = combined_train_test['Fare'].groupby(by = combined_train_test['Pclass']).mean().get([2]).values[0] Pclass_3_mean_fare = combined_train_test['Fare'].groupby(by = combined_train_test['Pclass']).mean().get([3]).values[0] # + [markdown] _execution_state="idle" _cell_guid="c8215f88-f69b-4527-b7d8-4f695686c5ac" _uuid="c28f055a1f0ce10189a1e65ca665164df5c0b0d0" # Create a new Pclass_Fare_Category variable from Pclass and Fare features # + _execution_state="idle" _cell_guid="b3936e37-509d-4054-a50f-36fb3ffbb67f" _uuid="d29ed64acb19cdf4379396e213a45f8b79374bf1" combined_train_test['Pclass_Fare_Category'] = combined_train_test.apply(pclass_fare_category, args=(Pclass_1_mean_fare, Pclass_2_mean_fare, Pclass_3_mean_fare), axis = 1) print(combined_train_test['Pclass_Fare_Category'].groupby(by = combined_train_test['Pclass_Fare_Category']).count().sort_values(ascending = False)) le_fare = LabelEncoder() le_fare.fit(np.array(['Pclass_1_Low_Fare', 'Pclass_1_High_Fare', 'Pclass_2_Low_Fare', 'Pclass_2_High_Fare', 'Pclass_3_Low_Fare', 'Pclass_3_High_Fare'])) combined_train_test['Pclass_Fare_Category'] = le_fare.transform(combined_train_test['Pclass_Fare_Category']) # + [markdown] _execution_state="idle" _cell_guid="63905709-2762-4d2b-84db-836d1e89278b" _uuid="f1ad75d8dd96e27d6ae8a8a023cee2db6a4503cc" # As the chance of survival is more for Pclass 1, we change the numerical values so that more weightage is added to Pclass 1 instead of Pclass 3. 
# +
print(combined_train_test['Fare'].groupby(by = combined_train_test['Pclass']).mean().sort_values(ascending = True))
# Replace the ordinal class labels 1/2/3 with each class's mean fare, so the
# numeric value of Pclass grows with (rather than against) survival chance.
combined_train_test['Pclass'].replace([1, 2, 3],[Pclass_1_mean_fare, Pclass_2_mean_fare, Pclass_3_mean_fare], inplace = True)

# + [markdown]
# ### 6.6 Parch and SibSp

# +
# Family size = siblings/spouses + parents/children + the passenger.
combined_train_test['Family_Size'] = combined_train_test['Parch'] + combined_train_test['SibSp'] + 1
print(combined_train_test['Family_Size'].groupby(by = combined_train_test['Family_Size']).count().sort_values(ascending = False))
combined_train_test['Family_Size_Category'] = combined_train_test['Family_Size'].map(family_size_category)
print(combined_train_test['Family_Size_Category'].groupby(by = combined_train_test['Family_Size_Category']).count().sort_values(ascending = False))
print(combined_train_test.groupby(['Survived', 'Family_Size_Category'])['Survived'].count())
le_family = LabelEncoder()
le_family.fit(np.array(['Single', 'Small_Family', 'Large_Family']))
combined_train_test['Family_Size_Category'] = le_family.transform(combined_train_test['Family_Size_Category'])
fam_size_cat_dummies_df = pd.get_dummies(combined_train_test['Family_Size_Category'], prefix = combined_train_test[['Family_Size_Category']].columns[0])
combined_train_test = pd.concat([combined_train_test, fam_size_cat_dummies_df], axis = 1)

# + [markdown]
# ### 6.7 Age
# Fill Missing values for 'Age' using relevant features like Name, Sex, Parch, SibSp etc.
#
# Print the average age based on their Title before filling the missing values

# +
print(combined_train_test['Age'].groupby(by = combined_train_test['Title']).mean().sort_values(ascending = True))

# + [markdown]
# Create Age_Null columns to indicate NaN values

# +
# NOTE(review): despite the name, Age_Null is 1 when Age is PRESENT and 0 when
# it is missing — the flag is inverted relative to its name. Left as-is since
# downstream only consumes it as an opaque feature.
combined_train_test['Age_Null'] = combined_train_test['Age'].apply(lambda x: 1 if(pd.notnull(x)) else 0)

# + [markdown]
# Create the DataFrames to fill missing Age values

# +
# Feature frame for the age-imputation models; categorical inputs are one-hot
# encoded, then split into known-age (train) and missing-age (test) rows.
missing_age_df = pd.DataFrame(combined_train_test[['Age', 'Parch', 'Sex', 'SibSp', 'Family_Size', 'Family_Size_Category', 'Title', 'Fare']])
missing_age_df = pd.get_dummies(missing_age_df, columns = ['Title', 'Family_Size_Category', 'Sex'])
missing_age_df.shape
missing_age_df.info()
missing_age_train = missing_age_df[missing_age_df['Age'].notnull()]
missing_age_test = missing_age_df[missing_age_df['Age'].isnull()]

# + [markdown]
# Fill the missing Age values by calling the routine fill_missing_age()
# +
# NOTE(review): fill_missing_age() returns a whole DataFrame, which is being
# assigned into a single-column .loc slice — pandas will align on columns here;
# presumably ...['Age'] was intended. Confirm against the pandas version used.
combined_train_test.loc[(combined_train_test.Age.isnull()), 'Age'] = fill_missing_age(missing_age_train, missing_age_test)

# + [markdown]
# Check if there are any unexpected values

# +
# Any (impossible) negative predicted ages are nulled and refilled with the
# mean age for the passenger's Title.
if (sum(n < 0 for n in combined_train_test.Age.values.flatten()) > 0):
    combined_train_test.loc[combined_train_test.Age < 0, 'Age'] = np.nan
    combined_train_test['Age'] = combined_train_test[['Age']].fillna(combined_train_test.groupby('Title').transform('mean'))

# + [markdown]
# Print the average age based on their Title after filling the missing values

# +
print(combined_train_test['Age'].groupby(by = combined_train_test['Title']).mean().sort_values(ascending = True))

# + [markdown]
# Create a new Age_Category category variable from Age feature

# +
combined_train_test['Age_Category'] = combined_train_test['Age'].map(age_group_cat)
le_age = LabelEncoder()
le_age.fit(np.array(['Baby', 'Toddler', 'Child', 'Teenager', 'Adult', 'Middle_Aged', 'Senior_Citizen', 'Old']))
combined_train_test['Age_Category'] = le_age.transform(combined_train_test['Age_Category'])
age_cat_dummies_df = pd.get_dummies(combined_train_test['Age_Category'], prefix = combined_train_test[['Age_Category']].columns[0])
combined_train_test = pd.concat([combined_train_test, age_cat_dummies_df], axis = 1)

# + [markdown]
# ### 6.8 Ticket

# +
# Split the ticket into its alphabetic prefix (if any) and numeric part.
combined_train_test['Ticket_Letter'] = combined_train_test['Ticket'].str.split().str[0]
combined_train_test['Ticket_Letter'] = combined_train_test['Ticket_Letter'].apply(lambda x: np.NaN if x.isnumeric() else x)
combined_train_test['Ticket_Number'] = combined_train_test['Ticket'].apply(lambda x: pd.to_numeric(x, errors='coerce'))
combined_train_test['Ticket_Number'].fillna(0, inplace = True)
combined_train_test = pd.get_dummies(combined_train_test, columns = ['Ticket', 'Ticket_Letter'])
combined_train_test.shape

# + [markdown]
# ### 6.9 Cabin

# +
# Cabin deck = first character of the cabin code; NaN preserved when missing.
combined_train_test['Cabin_Letter'] = combined_train_test['Cabin'].apply(lambda x: str(x)[0] if(pd.notnull(x)) else x)
combined_train_test = pd.get_dummies(combined_train_test, columns = ['Cabin', 'Cabin_Letter'])
combined_train_test.shape

# + [markdown]
# ### 6.10 Normalize Age and Fare

# +
# Standardise the two continuous features to zero mean / unit variance.
scale_age_fare = preprocessing.StandardScaler().fit(combined_train_test[['Age', 'Fare']])
combined_train_test[['Age', 'Fare']] = scale_age_fare.transform(combined_train_test[['Age', 'Fare']])
_uuid="3a2abab474094055a3cff0fd079c7457ac568226" # ### 6.11 Drop columns that are not required # + _execution_state="idle" _cell_guid="44c4d99a-55e9-4432-a9e2-4eb600ae2baa" _uuid="b8ec93fb460fa493e3d5077df188cfcd39da2483" combined_train_test.drop(['Name', 'PassengerId', 'Embarked', 'Sex', 'Title', 'Fare_Category', 'Family_Size_Category', 'Age_Category', 'First_Name', 'Last_Name', 'Original_Name', 'Name_Length_Category'], axis = 1, inplace = True) # + [markdown] _execution_state="idle" _cell_guid="8bdbca2b-ab78-42f4-aace-8a0e67b2d4af" _uuid="fa21b0cdc2e5f934f46ef1f8c6d5db098c4c631c" # ## 6.12 Divide the Train and Test data # + _execution_state="idle" _cell_guid="58e129ff-b47b-42aa-a6cc-dd19da7a5cdc" _uuid="98f3fe9f0d12197fbb61a46520f14447406dd650" train_data = combined_train_test[:891] test_data = combined_train_test[891:] titanic_train_data_X = train_data.drop(['Survived'], axis = 1) titanic_train_data_y = train_data['Survived'] titanic_test_data_X = test_data.drop(['Survived'], axis = 1) # + [markdown] _execution_state="idle" _cell_guid="dd405bb0-37b4-435f-b73e-8d9beb394987" _uuid="96217f3277794de9d5019c96aacd21c1bb781d49" # ### 6.13 Use Feature Importance to drop features that may not add value # + _execution_state="idle" _cell_guid="f0c1dae1-01dd-41d5-abd9-616aadb1789c" _uuid="feffab8dcd181541994ff9e3ce011dca4cb0939a" features_to_pick = 150 features_top_n = get_top_n_features(titanic_train_data_X, titanic_train_data_y, features_to_pick) print("Total Features: " + str(combined_train_test.shape)) print("Picked Features: " + str(features_top_n.shape)) titanic_train_data_X = titanic_train_data_X[features_top_n] titanic_train_data_X.shape titanic_train_data_X.info() titanic_test_data_X = titanic_test_data_X[features_top_n] titanic_test_data_X.shape titanic_test_data_X.info() # + [markdown] _execution_state="idle" _cell_guid="7750aa4f-bcd0-4234-9b5d-76f0b12697b0" _uuid="77dc0ab9b8af50b6330974e78dad4f1d81069cc4" # ## 7. 
Model Building # + _execution_state="idle" _cell_guid="f1875792-a5f3-403b-b021-0c2db4ffa51b" _uuid="8b38d3298649eedb12ce3c8c43a2445f9897d40a" rf_est = ensemble.RandomForestClassifier(n_estimators = 750, criterion = 'gini', max_features = 'sqrt', max_depth = 3, min_samples_split = 4, min_samples_leaf = 2, n_jobs = 50, random_state = 42, verbose = 1) gbm_est = ensemble.GradientBoostingClassifier(n_estimators = 900, learning_rate = 0.0008, loss = 'exponential', min_samples_split = 3, min_samples_leaf = 2, max_features ='sqrt', max_depth = 3, random_state = 42, verbose = 1) et_est = ensemble.ExtraTreesClassifier(n_estimators = 750, max_features = 'sqrt', max_depth = 35, n_jobs = 50, criterion = 'entropy', random_state = 42, verbose = 1) voting_est = ensemble.VotingClassifier(estimators = [('rf', rf_est),('gbm', gbm_est),('et', et_est)], voting = 'soft', weights = [3,5,2], n_jobs = 50) voting_est.fit(titanic_train_data_X, titanic_train_data_y) print("VotingClassifier Score: " + str(voting_est.score(titanic_train_data_X, titanic_train_data_y))) print("VotingClassifier Estimators: " + str(voting_est.estimators_)) # + [markdown] _execution_state="idle" _cell_guid="5b0acb58-317f-4db6-9bba-f435b6a361fe" _uuid="e6d25d86539a77b3846f440893cfba021e7bedbf" # ## 8. Predict the output # + _execution_state="idle" _cell_guid="b1d1530a-4e63-44e0-8351-53dc697758bd" _uuid="410e32345e1c14a28d5446463c8e6c7613dcd6fc" titanic_test_data_X['Survived'] = voting_est.predict(titanic_test_data_X) # + [markdown] _execution_state="idle" _cell_guid="3e490a85-0296-4bad-a2ee-283a07f7a606" _uuid="5527d2f86c7461eebbee2d4e6b6e240bdd0203db" # ## 9. Prepare submission file # + _execution_state="idle" _cell_guid="c8319a22-0ba7-4c45-bddc-05704e779ec6" _uuid="2db70c9ae3ba545ea731ef8a4e425b801fe56c21" submission = pd.DataFrame({'PassengerId': test_data_orig.loc[:, 'PassengerId'], 'Survived': titanic_test_data_X.loc[:, 'Survived']}) submission.to_csv("../working/submission.csv", index = False)
titanic/beginner-tutorial-using-votingclassifier-82-27.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # CASE STUDY 7B: Sentiment Analysis of Text Documents # <img src="https://ithaka-labs.s3.amazonaws.com/static-files/images/tdm/tdmdocs/CC_BY.png" align=left alt="CC BY license logo" /><br /><br /> # Created by [<NAME>](http://manika-lamba.github.io) under [Creative Commons CC BY License](https://creativecommons.org/licenses/by/4.0/)<br /> # **For questions/comments, email <EMAIL>**<br /> # ____ #Load libraries library(syuzhet) library(tm) library(twitteR) #Load dataset data<- read.csv("https://raw.githubusercontent.com/textmining-infopros/chapter7/master/7b_dataset.csv") #Avoid error related to tolower() invalid multibyte string data[,sapply(data,is.character)] <- sapply( data[,sapply(data,is.character)], iconv,"WINDOWS-1252","UTF-8") #syuzhet package works only on vectors. So, the data was converted to a vector vector <- as.vector(t(data)) #Sentiment analysis emotion.data <- get_nrc_sentiment(vector) emotion.data2 <- cbind(data, emotion.data) sentiment.score <- get_sentiment(vector) sentiment.data = cbind(sentiment.score, emotion.data2) # + #Getting positive, negative, and neutral reviews with associated scores positive.reviews <- sentiment.data[which(sentiment.data$sentiment.score > 0),] write.csv(positive.reviews, "positive.reviews.csv") negative.reviews <- sentiment.data[which(sentiment.data$sentiment.score < 0),] write.csv(negative.reviews, "negative.reviews.csv") neutral.reviews <- sentiment.data[which(sentiment.data$sentiment.score == 0),] write.csv(neutral.reviews, "neutral.reviews.csv") # + #Plot1: Percentage-Based Means percent_vals <- get_percentage_values(sentiment.score, bins=20) plot(percent_vals, type="l", main="Amazon Book Reviews using Percentage-Based Means", xlab="Narrative Time", ylab="Emotional Valence", col="red") # + #Plot2: Discrete Cosine 
Transformation (DCT) dct_values <- get_dct_transform(sentiment.score, low_pass_size = 5, x_reverse_len = 100, scale_vals = F, scale_range = T) plot(dct_values, type ="l", main ="Amazon Book Reviews using Transformed Values", xlab = "Narrative Time", ylab = "Emotional Valence", col = "red") # - #Plot3: Emotions Graph barplot(sort(colSums(prop.table(emotion.data[, 1:8]))), horiz=TRUE, cex.names=0.7, las=1, main="Emotions in Amazon Book Reviews", xlab = "Percentage")
Case_Study_7B.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="xi9yUrxTWNUI" colab_type="text" # ### 1. 거리 # # - 유클리드 거리 # # $\|a-b\|$ : 벡터의 차의 norm # # $\|a-b\|^2 = \|a\|^2 + \|b\|^2 - 2a^Tb$ : 거리의 제곱 # + [markdown] id="OWMr0YFaWNUK" colab_type="text" # ### 2. 각도 # # - 직각삼각형 ($\theta$ 의 각도) # # - 삼각함수 # # $sin\theta = \frac{a}{h}$ # # $cos\theta = \frac{b}{h}$ # ---- # # - 직교 # # $a \perp b \rightarrow a^Tb = b^Ta = 0$ # # - 단위벡터 : 길이가 1인 벡터 # - 단위벡터인데 직교를 하는 것은 "정규직교(orthonormal)" # + [markdown] id="ygge1ce4WNUK" colab_type="text" # ### RANK # + id="oIIfC6fmWNUL" colab_type="code" colab={} outputId="244690c5-0d89-4f35-e42f-ce7e8a5192a6" x1 = np.array([[1], [1]]) np.linalg.matrix_rank(x1) # + id="QpSwblrXWNUQ" colab_type="code" colab={} outputId="96d5028d-8c0c-4589-9657-55e0102d3112" x1 = np.array([[1], [1]]) x2 = np.array([[1], [-1]]) x = np.hstack([x1, x2]) y = np.vstack([x1.T, x2.T]) m1 = x.T@y.T np.linalg.matrix_rank(m1) # + id="ykYlM0EsWNUT" colab_type="code" colab={} import matplotlib.pylab as plt gray = {"facecolor": "gray"} green = {"facecolor": "green"} red = {"facecolor": "red"} black = {"facecolor": "black"} blue = {"facecolor": "blue"} Lightgreen = {"facecolor": "Lightgreen"} # + id="Rylb_519WNUV" colab_type="code" colab={} outputId="80422d18-31d3-46c6-8d9c-e923253fea61" e1 = np.array([1, 0]) e2 = np.array([0, 1]) x = np.array([2, 2]) g1 = np.array([1, 1]) / np.sqrt(2) g2 = np.array([-1, 1]) / np.sqrt(2) plt.annotate('', xy=e1, xytext=(0, 0), arrowprops=green) plt.annotate('', xy=e2, xytext=(0, 0), arrowprops=green) plt.annotate('', xy=x, xytext=(0, 0), arrowprops=gray) plt.annotate('', xy=g1, xytext=(0, 0), arrowprops=red) plt.annotate('', xy=g2, xytext=(0, 0), arrowprops=red) plt.plot(0, 0, 'ro', ms=10) plt.plot(x[0], x[1], 'ro', ms=10) plt.text(1.05, 1.35, "$x$", 
fontdict={"size": 18}) plt.text(-0.3, 0.5, "$e_2$", fontdict={"size": 18}) plt.text(0.5, -0.2, "$e_1$", fontdict={"size": 18}) plt.text(0.2, 0.5, "$g_1$", fontdict={"size": 18}) plt.text(-0.6, 0.2, "$g_2$", fontdict={"size": 18}) plt.xticks(np.arange(-2, 4)) plt.yticks(np.arange(-1, 4)) plt.xlim(-1.5, 3.5) plt.ylim(-0.5, 3) plt.show() # + id="uHLexYRmWNUY" colab_type="code" colab={} import scipy as sp import scipy.misc import scipy.ndimage f = sp.misc.face(gray=False) # + id="OWUk-fa5WNUb" colab_type="code" colab={} outputId="66552e88-0cc1-4a70-828e-e3b632f66343" f # + id="FeykmXezWNUe" colab_type="code" colab={} outputId="46b60d24-398f-475f-e393-2402fd2b61d7" plt.imshow(f) plt.axis("off") plt.show() # + id="QW3ADd4IWNUg" colab_type="code" colab={} outputId="e0fbc6bb-8349-4dfc-dd99-b5643efc1756" import scipy as sp import scipy.misc import scipy.ndimage f = sp.misc.face(gray=True) e1 = np.array([0, 1]) e2 = np.array([1, 0]) E = np.vstack([e1, e2]).T g1 = np.array([1, 1]) / np.sqrt(2) g2 = np.array([-1, 1]) / np.sqrt(2) A = np.vstack([g1, g2]).T gc1 = E@g1 gc2 = E@g2 plt.subplot(121) plt.imshow(f, cmap=mpl.cm.bone, alpha=0.9) plt.annotate('', xy=500*e1, xytext=(0, 0), arrowprops=green) plt.annotate('$e_1$', xy=500*e1, xytext=500*e1+[-100, 0]) plt.annotate('', xy=500*e2, xytext=(0, 0), arrowprops=green) plt.annotate('$e_2$', xy=500*e2, xytext=500*e2+[0, -50]) plt.annotate('', xy=500*gc1, xytext=(0, 0), arrowprops=red) plt.annotate('$g_1$', xy=500*gc1, xytext=500*gc1+[50, -50]) plt.annotate('', xy=500*gc2, xytext=(0, 0), arrowprops=red) plt.annotate('$g_2$', xy=500*gc2, xytext=500*gc2+[50, 0]) plt.axis("off") plt.xlim(-200, 1000) plt.ylim(800, -500) plt.title("before") f1 = sp.ndimage.affine_transform(f, A) plt.subplot(122) plt.imshow(f1, cmap=mpl.cm.bone, alpha=0.8) plt.annotate('', xy=500*e1, xytext=(0, 0), arrowprops=red) plt.annotate('$g_1$', xy=500*e1, xytext=500*e1+[-100, 0]) plt.annotate('', xy=500*e2, xytext=(0, 0), arrowprops=red) plt.annotate('$g_2$', 
xy=500*e2, xytext=500*e2+[0, -50]) plt.axis("off") plt.xlim(-200, 1000) plt.ylim(800, -500) plt.title('after') plt.show() # + id="cNJZ2-aAWNUk" colab_type="code" colab={}
MATH/02_Linear_algebra_04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import xarray as xr import numpy as np import os import sys from dask.distributed import Client c = Client() c # + grd2_path = os.path.join(os.environ.get('rawdir'),'gdata','waom_frc','waom2_grd.nc') grd2 = xr.open_dataset(grd2_path) rst2_path = os.path.join(os.environ.get('rawdir'),'waom2','ocean_rst.nc') rst2 = xr.open_mfdataset(rst2_path).squeeze() # - rst2.ubar # %matplotlib notebook plt.close() rst2.ubar.isel(three=2).where(grd2.mask_u).plot(size=7) plt.show() plt.close() rst2.zeta.isel(three=2).where(grd2.mask_rho).plot(size=7,vmax=-1) plt.show() plt.close() (grd2.h-grd2.zice).where(grd2.mask_rho).isel(eta_rho=slice(1620,1790),xi_rho=slice(2320,2480)).plot(size=7) plt.show() plt.close() (grd2.h).where(grd2.mask_rho).isel(eta_rho=slice(1620,1790),xi_rho=slice(2320,2480)).plot(size=7) plt.show() plt.close() (-grd2.zice).where(grd2.mask_rho).isel(eta_rho=slice(1620,1790),xi_rho=slice(2320,2480)).plot(size=7) plt.show()
notebooks/exploratory/blowUpDiagnosis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PASCAL Context - Light-Weight Refinenet # # ## 59 semantic classes + background # # ### Light-Weight RefineNet based on ResNet-101/152 # + import six import sys sys.path.append('../../') from models.resnet import rf_lw101, rf_lw152 # - from utils.helpers import prepare_img # + # %matplotlib inline import glob import cv2 import matplotlib.pyplot as plt import numpy as np import torch from PIL import Image # - cmap = np.load('../../utils/cmap.npy') has_cuda = torch.cuda.is_available() img_dir = '../imgs/Context/' imgs = glob.glob('{}*.jpg'.format(img_dir)) n_classes = 60 # + # Initialise models model_inits = { 'rf_lw101_context' : rf_lw101, # key / constructor 'rf_lw152_context' : rf_lw152, } models = dict() for key,fun in six.iteritems(model_inits): net = fun(n_classes, pretrained=True).eval() if has_cuda: net = net.cuda() models[key] = net # + # Figure 1 from the supplementary n_cols = len(models) + 2 # 1 - for image, 1 - for GT n_rows = len(imgs) plt.figure(figsize=(16, 12)) idx = 1 with torch.no_grad(): for img_path in imgs: img = np.array(Image.open(img_path)) msk = np.array(Image.open(img_path.replace('jpg', 'png'))) orig_size = img.shape[:2][::-1] img_inp = torch.tensor(prepare_img(img).transpose(2, 0, 1)[None]).float() if has_cuda: img_inp = img_inp.cuda() plt.subplot(n_rows, n_cols, idx) plt.imshow(img) plt.title('img') plt.axis('off') idx += 1 plt.subplot(n_rows, n_cols, idx) plt.imshow(msk) plt.title('gt') plt.axis('off') idx += 1 for mname, mnet in six.iteritems(models): segm = mnet(img_inp)[0].data.cpu().numpy().transpose(1, 2, 0) segm = cv2.resize(segm, orig_size, interpolation=cv2.INTER_CUBIC) segm = cmap[segm.argmax(axis=2).astype(np.uint8)] plt.subplot(n_rows, n_cols, idx) plt.imshow(segm) plt.title(mname) plt.axis('off') idx += 
1 # -
examples/notebooks/Context.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np, pandas as pd from glob import glob from astropy.table import Table from matplotlib import pyplot as plt, colors as colors, colorbar as cbar from astroquery.simbad import Simbad import astropy.coordinates as coord import astropy.units as u # %matplotlib inline # - good = pd.read_csv('../data/bright_sample/massive.csv.gz') len(good),good.columns # + #We need a list of LBVs because SIMBAD is bad at classifying LBVs #Let's go with only confirmed LBVs from Richardson & Mehner (2018, RNAAS) lbv_commonname1 = ['HD 90177','* eta Car','V* AG Car','MR 35','[GKM2012] WS1', 'WRAY 16-137','EM* VRMF 55','Cl* Westerlund 1 W 243','[GKF2010] MN48', 'HD 160529','[GKF2010] MN58','GCIRS 34W','[NHS93] 22','[GMC99] D6', 'HD 168607','EM* MWC 930','V* V481 Sct','IRAS 18576+0341','* P Cyg', 'HD 5980','HD 6884','HD 269006','HD 269216','V* S Dor','HD 269582', 'HD 269662','HD 269700','HD 269858','CPD-69 463'] lbv_commonname2 = ['V* HR Car','HD 93308','HD 94910','V* V432 Car', 'UCAC2 3729120','2MASS J13501536-6148552','[GKF2010] MN44', '2MASS J16470749-4552290','2MASS J16493770-4535592','V* V905 Sco', '2MASS J17374754-3137333','WR 101db','V* V4998 Sgr', 'V* V4650 Sgr','V* V4029 Sgr','V* V446 Sct','G24.73+0.69', 'V* V1672 Aql','HD 193237','RMC 14','RMC 40','RMC 71', '2MASS J05133077-6932236','HD 35343','SV* HV 5495', '2MASS J05305147-6902587','RMC 116','RMC 127','RMC 143'] lbv_gaia = [5255045082580350080,5350358580171706624,5338220285385672064,5337309477433273728, 5864989022016713728,5865577604279098112,5940576971716473344,5940105830990286208, 5940216130049700480,4053887521876855808,4055062727939772800, 0, 0, 0,4097791502146559872,4159973866869462784, 4255908794692238848, 0,2061242908036996352,4690516883290136832, 
4687436704549343488,4654621505126284288,4658204053297963392,4658193814095915776, 4658481718680657792,4658431553453824256,4658474743652257664,4657655435693883776, 4657679551902223616] #Note: [GKF2010] MN58 is close to Gaia DR2 4055062727939772800 = 2MASS J17374754-3137333, #so we're calling them the same source # - #Let's write a decision tree that classifies things, and does a very coarse classification because #SIMBAD sucks classifications= [] coarse_class = [] isbinary = [] for i,row in good.iterrows(): # Pull out lbvs if (row['source_id'] in lbv_gaia) | (row['CommonName'] in lbv_commonname1) | (row['CommonName'] in lbv_commonname2): classifications.append('LBV') coarse_class.append('EM') isbinary.append(0) continue # Pull out WRs. Note, we also get the Ofpe/WN9 LBV candidate HD 269445 elif ('W' in str(row['SpT'])) | (row['SimbadOType'] == 'WR*'): classifications.append('WR') coarse_class.append('EM') #Cool stars elif (str(row['SpT'])[0] in ['K','M']): if 'III' in row['SpT']: classifications.append('C/S/Giant') coarse_class.append('Contaminant') else: classifications.append('RSG') coarse_class.append('Cool') #Warm stars elif (str(row['SpT'])[0] in ['F','G']): #this grabs some hot stars if ('O' in str(row['MKType'])) or ('B' in str(row['MKType'])) or ('A' in str(row['MKType'])): if 'e' in str(row['MKType']): classifications.append('OBAe') coarse_class.append('EM') else: coarse_class.append('Hot') if ('IV' in str(row['MKType']))|('III' in str(row['MKType'])): classifications.append('EvolvedOBA') #OBA dwarfs elif 'V' in str(row['MKType']): classifications.append('MainSequenceOBA') #OBA bright giants and supergiants elif ('I' in str(row['MKType'])): classifications.append('SupergiantOBA') #Generic OBA else: classifications.append('OBA') else: if ('III' in str(row['SpT'])) or ('V' in str(row['SpT'])) or ('III' in str(row['MKType'])) or ('V' in str(row['MKType'])): classifications.append('Yellow Dwarf') coarse_class.append('Contaminant') else: 
classifications.append('YSG') coarse_class.append('Cool') #O/B [e] stars elif '[e]' in str(row['SpT']): classifications.append('OB[e]') coarse_class.append('EM') #O/B/A e stars elif np.any(['e' in foo for foo in str(row['SpT']).split('pec')]) | ('Em' in str(row['SimbadOType'])): classifications.append('OBAe') coarse_class.append('EM') elif (str(row['SpT'])[0] in ['O','B','A']): #OBA subgiants and giants coarse_class.append('Hot') if ('IV' in str(row['SpT']))|('III' in str(row['SpT'])): classifications.append('EvolvedOBA') #OBA dwarfs elif 'V' in str(row['SpT']): classifications.append('MainSequenceOBA') #OBA bright giants and supergiants elif ('I' in str(row['SpT'])) | ('SG' in str(row['SimbadOType'])): classifications.append('SupergiantOBA') #Generic OBA else: classifications.append('OBA') #C and S stars elif ('C' in str(row['SpT'])) | (str(row['SimbadOType']) == 'C*'): classifications.append('C/S/Giant') coarse_class.append('Contaminant') elif 'S' in str(row['SpT']): classifications.append('C/S/Giant') coarse_class.append('Contaminant') #Everything else. This includes 'Candidate' other types, as well as LPVs, semi-regular Variables, etc. elif 'V*' in str(row['SimbadOType']): classifications.append('Misc. 
Variable') coarse_class.append('Unknown/Candidate') else: classifications.append('Unknown/Candidate') coarse_class.append('Unknown/Candidate') #finally, get binaries if ('EB' in str(row['SimbadOType'])) | ('SB' in str(row['SimbadOType'])) | ('Ellip' in str(row['SimbadOType'])) | ('HMXB' in str(row['SimbadOType'])) | ('+' in str(row['SpT'])[1:-2]) | (str(row['SpT']) == 'B+K'): isbinary.append(1)#classifications.append('Binary') else: isbinary.append(0) print(len(classifications),len(isbinary),sum(isbinary),len(coarse_class)) classification_df = pd.DataFrame({'source_id':good['source_id'], 'CommonName':good['CommonName'], 'Class':classifications, 'CoarseClass':coarse_class, 'IsBinary':isbinary}) classification_df.to_csv('classifications.csv',index=False) #How many giants are contaminating our sample? contaminant = 0 rsg = 0 for i,row in good.iterrows(): clas = classification_df['Class'][classification_df['source_id']==row['source_id']].values if (clas == 'RSG'): rsg += 1 if ('II' in str(row['SpT'])) or ('V' in str(row['SpT'])): contaminant += 1 print(row['SpT']) print(rsg,contaminant) #How many yellow stars are contaminating our sample? contaminant = 0 ysg = 0 for i,row in good.iterrows(): clas = classification_df['Class'][classification_df['source_id']==row['source_id']].values if (clas == 'YSG'): ysg += 1 if ('II' in str(row['SpT'])) or ('V' in str(row['SpT'])): contaminant += 1 print(row['SpT']) print(ysg,contaminant) # + classes = ['MainSequenceOBA','EvolvedOBA','SupergiantOBA','OBA','OBAe','OB[e]','WR','LBV','YSG', 'RSG','C/S/Giant','Yellow Dwarf','Misc. 
Variable','Unknown/Candidate'] num_output = [] for cl in classes: num = len(classification_df[classification_df['Class'].values == cl]) num_output.append(num) plt.figure(dpi=300,figsize=(8.5,6)) plt.bar(np.arange(len(classes)),num_output) """unique_classes, num = np.unique(classifications, return_counts=True) unique_coarse, cnum = np.unique(coarse_class, return_counts=True) unique_classes = unique_classes[np.argsort(num)] num = np.sort(num) unique_coarse = unique_coarse[np.argsort(cnum)] cnum = np.sort(cnum) fig = plt.figure(dpi=300,figsize=(8.5,6)) plt.bar(np.arange(len(unique_classes)),num)""" plt.yscale('log') plt.xticks(ticks=np.arange(len(classes)),labels=classes,rotation=45,ha="right", rotation_mode="anchor") plt.xlabel('Class',fontsize=20) plt.ylabel('$N$',fontsize=20) plt.ylim(1,5000) ax = plt.gca() for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(20) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(20) plt.tight_layout() plt.savefig('../plots/sample_makeup.pdf') # + m_dict = {'MainSequenceOBA':'o','EvolvedOBA':'v','SupergiantOBA':'^','OBA':'s', 'OBAe':'P','OB[e]':'*','WR':'X','LBV':'D','YSG':'8','RSG':'p','C/S/Giant':'H','Yellow Dwarf':'d'} c_dict = {'MainSequenceOBA':'slateblue','EvolvedOBA':'cornflowerblue','SupergiantOBA':'b', 'OBA':'dodgerblue','OBAe':'lightseagreen','OB[e]':'darkgreen','WR':'C4', 'LBV':'mediumorchid','YSG':'y','RSG':'lightcoral','C/S/Giant':'lightsalmon','Yellow Dwarf':'goldenrod'} fig=plt.figure(figsize=(16,12),dpi=150) joined = good.merge(classification_df,on='source_id') for cl in c_dict.keys(): plt.scatter(joined['G-J'][joined['Class']==cl],joined['M_G'][joined['Class']==cl],s=10,label=cl,marker=m_dict[cl],c=c_dict[cl],rasterized=True) plt.ylim(-3,-10) plt.xlim(-2,4.5) plt.legend(ncol=2,markerscale=5,fontsize=20) plt.xlabel('$G-J$') plt.ylabel('$M_G$') ax= plt.gca() ax.xaxis.label.set_size(28) ax.yaxis.label.set_size(28) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(24) for tick in 
ax.yaxis.get_major_ticks(): tick.label.set_fontsize(24) plt.savefig('../plots/classCMD.pdf') # - YSGs = joined[joined['Class']=='YSG'] len(YSGs),len(YSGs[YSGs['G-J']<1]),len(YSGs[YSGs['G-J']<1])/len(YSGs) # + fig=plt.figure(figsize=(16,12),dpi=150) joined = good.merge(classification_df,on='source_id') for cl in ['Hot','Cool','EM','Contaminant']: if cl == 'EM': label = 'Emission' else: label = cl plt.scatter(joined['G-J'][joined['CoarseClass']==cl],joined['M_G'][joined['CoarseClass']==cl],s=5,label=label) plt.ylim(-3,-10) plt.xlim(-2,4.5) plt.legend(ncol=2,markerscale=5,fontsize=20) plt.xlabel('$G-J$') plt.ylabel('$M_G$') ax= plt.gca() ax.xaxis.label.set_size(28) ax.yaxis.label.set_size(28) for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(24) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(24) plt.savefig('../plots/coarse_classCMD.pdf') # - joined = good.merge(classification_df,on='source_id') joined.columns # + header = r"""\begin{deluxetable*}{lccccccc} \tabletypesize{\scriptsize} \tablecaption{Common names, coordinates, host galaxies, and \Gaia measurements of 6,484 putative massive stars. $r_{est}$ from \citet{bailerjones18} is given for Galactic stars. Listed values of $G$ and $G_{BP}-G_{RP}$ are uncorrected for extinction.\label{tab:sample}} \tablehead{\colhead{Common Name} & \colhead{R.A. 
[deg]} & \colhead{Dec [deg]} & \colhead{Host Galaxy} & \colhead{$r_{est}$ [kpc]} & \colhead{$G$ [mag]} & \colhead{$A_G$ [mag]} & \colhead{$G_{BP}-G_{RP}$ [mag]}} \startdata """ j=0 for i,row in joined.sort_values('ra').iterrows(): if j == 10: break else: j+=1 name = row['CommonName_x'].lstrip('V*').replace('[','{[').replace(']',']}').replace('Cl*','') if name[0:2] == 'SV': name = name.lstrip('SV*') if name[:4] == 'WISE': name = name[:4] + ' ' + name[4:] ra = f"{row['ra']:.8f}" dec = f"{row['dec']:.8f}" gal = row['Galaxy'] if 'MC' in gal: rest = ' - ' else: rest = f"{row['r_est']*1e-3:.3f}" g = row['phot_g_mean_mag'] ag = row['a_g_val'] bmr = f"{row['phot_bp_mean_mag'] - row['phot_rp_mean_mag']:.8f}" entry = list(map(str,[name,ra,dec,gal,rest,g,ag,bmr])) sep = ' & ' header += sep.join(entry) header += ' \\\\ \n' header.rstrip() header.rstrip('\\') header.rstrip() header += r"""\enddata \tablecomments{The first ten rows are shown to illustrate the content of the table. A complete machine-readable version will be made available online via Vizier.} \end{deluxetable*} """ with open("sample_table.txt", "w") as text_file: text_file.write(header) # -
WISE/code/classify.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tensor_py3_env # language: python # name: tensor_py3_env # --- # + [markdown] deletable=true editable=true # # Session 3: Unsupervised and Supervised Learning # # <p class="lead"> # <NAME><br /> # <a href="https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info">Creative Applications of Deep Learning w/ Tensorflow</a><br /> # <a href="https://www.kadenze.com/partners/kadenze-academy">Kadenze Academy</a><br /> # <a href="https://twitter.com/hashtag/CADL">#CADL</a> # </p> # # # <a name="learning-goals"></a> # # Learning Goals # # * Build an autoencoder w/ linear and convolutional layers # * Understand how one hot encodings work # * Build a classification network w/ linear and convolutional layers # # <!-- MarkdownTOC autolink=true autoanchor=true bracket=round --> # # - [Introduction](#introduction) # - [Unsupervised vs. Supervised Learning](#unsupervised-vs-supervised-learning) # - [Autoencoders](#autoencoders) # - [MNIST](#mnist) # - [Fully Connected Model](#fully-connected-model) # - [Convolutional Autoencoder](#convolutional-autoencoder) # - [Denoising Autoencoder](#denoising-autoencoder) # - [Variational Autoencoders](#variational-autoencoders) # - [Predicting Image Labels](#predicting-image-labels) # - [One-Hot Encoding](#one-hot-encoding) # - [Using Regression for Classification](#using-regression-for-classification) # - [Fully Connected Network](#fully-connected-network) # - [Convolutional Networks](#convolutional-networks) # - [Saving/Loading Models](#savingloading-models) # - [Checkpoint](#checkpoint) # - [Protobuf](#protobuf) # - [Wrap Up](#wrap-up) # - [Reading](#reading) # # <!-- /MarkdownTOC --> # # <a name="introduction"></a> # # Introduction # # In the last session we created our first neural network. 
# # We saw that in order to create a neural network, we needed to define a cost function which would allow gradient descent to optimize all the parameters in our network <TODO: Insert animation of gradient descent from previous session>. We also saw how neural networks become much more expressive by introducing series of linearities followed by non-linearities, or activation functions. <TODO: Insert graphic of activation functions from previous session>. # # We then explored a fun application of neural networks using regression to learn to paint color values given x, y positions. This allowed us to build up a sort of painterly like version of an image. # # In this session, we'll see how to use some simple deep nets with about 3 or 4 layers capable of performing unsupervised and supervised learning, and I'll explain those terms in a bit. The components we learn here will let us explore data in some very interesting ways. # # <a name="unsupervised-vs-supervised-learning"></a> # # Unsupervised vs. Supervised Learning # # Machine learning research in deep networks performs one of two types of learning. You either have a lot of data and you want the computer to reason about it, maybe to encode the data using less data, and just explore what patterns there might be. That's useful for clustering data, reducing the dimensionality of the data, or even for generating new data. That's generally known as unsupervised learning. In the supervised case, you actually know what you want out of your data. You have something like a label or a class that is paired with every single piece of data. In this first half of this session, we'll see how unsupervised learning works using something called an autoencoder and how it can be extended using convolution.. Then we'll get into supervised learning and show how we can build networks for performing regression and classification. By the end of this session, hopefully all of that will make a little more sense. Don't worry if it doesn't yet! 
Really the best way to learn is to put this stuff into practice in the homeworks. # # <a name="autoencoders"></a> # # Autoencoders # # <TODO: Graphic of autoencoder network diagram> # # An autoencoder is a type of neural network that learns to encode its inputs, often using much less data. It does so in a way that it can still output the original input with just the encoded values. For it to learn, it does not require "labels" as its output. Instead, it tries to output whatever it was given as input. So in goes an image, and out should also go the same image. But it has to be able to retain all the details of the image, even after possibly reducing the information down to just a few numbers. # # We'll also explore how this method can be extended and used to cluster or organize a dataset, or to explore latent dimensions of a dataset that explain some interesting ideas. For instance, we'll see how with handwritten numbers, we will be able to see how each number can be encoded in the autoencoder without ever telling it which number is which. # # <TODO: place teaser of MNIST video learning> # # But before we get there, we're going to need to develop an understanding of a few more concepts. # # First, imagine a network that takes as input an image. The network can be composed of either matrix multiplications or convolutions to any number of filters or dimensions. At the end of any processing, the network has to be able to recompose the original image it was input. # # In the last session, we saw how to build a network capable of taking 2 inputs representing the row and column of an image, and predicting 3 outputs, the red, green, and blue colors. Instead if having 2 inputs, we'll now have an entire image as an input, the brightness of every pixel in our image. And as output, we're going to have the same thing, the entire image being output. 
# # <a name="mnist"></a> # ## MNIST # # Let's first get some standard imports: # + deletable=true editable=true # imports # %matplotlib inline # # %pylab osx import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib.cm as cmx # Some additional libraries which we'll use just # to produce some visualizations of our training from libs.utils import montage from libs import gif import IPython.display as ipyd plt.style.use('ggplot') # Bit of formatting because I don't like the default inline code style: from IPython.core.display import HTML HTML("""<style> .rendered_html code { padding: 2px 4px; color: #c7254e; background-color: #f9f2f4; border-radius: 4px; } </style>""") # + [markdown] deletable=true editable=true # Then we're going to try this with the MNIST dataset, which I've included a simple interface for in the `libs` module. # + deletable=true editable=true from libs.datasets import MNIST ds = MNIST() # + [markdown] deletable=true editable=true # Let's take a look at what this returns: # + deletable=true editable=true # ds.<tab> # + [markdown] deletable=true editable=true # So we can see that there are a few interesting accessors. ... we're not going to worry about the labels until a bit later when we talk about a different type of model which can go from the input image to predicting which label the image is. But for now, we're going to focus on trying to encode the image and be able to reconstruct the image from our encoding. let's take a look at the images which are stored in the variable `X`. Remember, in this course, we'll always use the variable `X` to denote the input to a network. and we'll use the variable `Y` to denote its output. # + deletable=true editable=true print(ds.X.shape) # + [markdown] deletable=true editable=true # So each image has 784 features, and there are 70k of them. If we want to draw the image, we're going to have to reshape it to a square. 28 x 28 is 784. 
So we're just going to reshape it to a square so that we can see all the pixels arranged in rows and columns instead of one giant vector. # + deletable=true editable=true plt.imshow(ds.X[0].reshape((28, 28))) # + deletable=true editable=true # Let's get the first 1000 images of the dataset and reshape them imgs = ds.X[:1000].reshape((-1, 28, 28)) # Then create a montage and draw the montage plt.imshow(montage(imgs), cmap='gray') # + [markdown] deletable=true editable=true # Let's take a look at the mean of the dataset: # + deletable=true editable=true # Take the mean across all images mean_img = np.mean(ds.X, axis=0) # Then plot the mean image. plt.figure() plt.imshow(mean_img.reshape((28, 28)), cmap='gray') # + [markdown] deletable=true editable=true # And the standard deviation # + deletable=true editable=true # Take the std across all images std_img = np.std(ds.X, axis=0) # Then plot the std image. plt.figure() plt.imshow(std_img.reshape((28, 28))) # + [markdown] deletable=true editable=true # So recall from session 1 that these two images are really saying whats more or less contant across every image, and what's changing. We're going to try and use an autoencoder to try to encode everything that could possibly change in the image. # # <a name="fully-connected-model"></a> # ## Fully Connected Model # # To try and encode our dataset, we are going to build a series of fully connected layers that get progressively smaller. So in neural net speak, every pixel is going to become its own input neuron. And from the original 784 neurons, we're going to slowly reduce that information down to smaller and smaller numbers. It's often standard practice to use other powers of 2 or 10. I'll create a list of the number of dimensions we'll use for each new layer. 
# + deletable=true editable=true dimensions = [512, 256, 128, 64] # + [markdown] deletable=true editable=true # So we're going to reduce our 784 dimensions down to 512 by multiplyling them by a 784 x 512 dimensional matrix. Then we'll do the same thing again using a 512 x 256 dimensional matrix, to reduce our dimensions down to 256 dimensions, and then again to 128 dimensions, then finally to 64. To get back to the size of the image, we're going to just going to do the reverse. But we're going to use the exact same matrices. We do that by taking the transpose of the matrix, which reshapes the matrix so that the rows become columns, and vice-versa. So our last matrix which was 128 rows x 64 columns, when transposed, becomes 64 rows x 128 columns. # # So by sharing the weights in the network, we're only really learning half of the network, and those 4 matrices are going to make up the bulk of our model. We just have to find out what they are using gradient descent. # # We're first going to create `placeholders` for our tensorflow graph. We're going to set the first dimension to `None`. This is something special for placeholders which tells tensorflow "let this dimension be any possible value". 1, 5, 100, 1000, it doesn't matter. We're going to pass our entire dataset in minibatches. So we'll send 100 images at a time. But we'd also like to be able to send in only 1 image and see what the prediction of the network is. That's why we let this dimension be flexible in the graph. # + deletable=true editable=true # So the number of features is the second dimension of our inputs matrix, 784 n_features = ds.X.shape[1] # And we'll create a placeholder in the tensorflow graph that will be able to get any number of n_feature inputs. 
X = tf.placeholder(tf.float32, [None, n_features]) # + [markdown] deletable=true editable=true # Now we're going to create a network which will perform a series of multiplications on `X`, followed by adding a bias, and then wrapping all of this in a non-linearity: # + deletable=true editable=true # let's first copy our X placeholder to the name current_input current_input = X n_input = n_features # We're going to keep every matrix we create so let's create a list to hold them all Ws = [] # We'll create a for loop to create each layer: for layer_i, n_output in enumerate(dimensions): # just like in the last session, # we'll use a variable scope to help encapsulate our variables # This will simply prefix all the variables made in this scope # with the name we give it. with tf.variable_scope("encoder/layer/{}".format(layer_i)): # Create a weight matrix which will increasingly reduce # down the amount of information in the input by performing # a matrix multiplication W = tf.get_variable( name='W', shape=[n_input, n_output], initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02)) # Now we'll multiply our input by our newly created W matrix # and add the bias h = tf.matmul(current_input, W) # And then use a relu activation function on its output current_input = tf.nn.relu(h) # Finally we'll store the weight matrix so we can build the decoder. Ws.append(W) # We'll also replace n_input with the current n_output, so that on the # next iteration, our new number inputs will be correct. n_input = n_output # + [markdown] deletable=true editable=true # So now we've created a series of multiplications in our graph which take us from our input of batch size times number of features which started as `None` x `784`, and then we're multiplying it by a series of matrices which will change the size down to `None` x `64`. 
# + deletable=true editable=true print(current_input.get_shape()) # + [markdown] deletable=true editable=true # In order to get back to the original dimensions of the image, we're going to reverse everything we just did. Let's see how we do that: # + deletable=true editable=true # We'll first reverse the order of our weight matrices Ws = Ws[::-1] # then reverse the order of our dimensions # appending the last layers number of inputs. dimensions = dimensions[::-1][1:] + [ds.X.shape[1]] print(dimensions) # + deletable=true editable=true for layer_i, n_output in enumerate(dimensions): # we'll use a variable scope again to help encapsulate our variables # This will simply prefix all the variables made in this scope # with the name we give it. with tf.variable_scope("decoder/layer/{}".format(layer_i)): # Now we'll grab the weight matrix we created before and transpose it # So a 3072 x 784 matrix would become 784 x 3072 # or a 256 x 64 matrix, would become 64 x 256 W = tf.transpose(Ws[layer_i]) # Now we'll multiply our input by our transposed W matrix h = tf.matmul(current_input, W) # And then use a relu activation function on its output current_input = tf.nn.relu(h) # We'll also replace n_input with the current n_output, so that on the # next iteration, our new number inputs will be correct. n_input = n_output # + [markdown] deletable=true editable=true # After this, our `current_input` will become the output of the network: # + deletable=true editable=true Y = current_input # + [markdown] deletable=true editable=true # Now that we have the output of the network, we just need to define a training signal to train the network with. 
To do that, we create a cost function which will measure how well the network is doing: # + deletable=true editable=true # We'll first measure the average difference across every pixel cost = tf.reduce_mean(tf.squared_difference(X, Y), 1) print(cost.get_shape()) # + [markdown] deletable=true editable=true # And then take the mean again across batches: # + deletable=true editable=true cost = tf.reduce_mean(cost) # + [markdown] deletable=true editable=true # We can now train our network just like we did in the last session. We'll need to create an optimizer which takes a parameter `learning_rate`. And we tell it that we want to minimize our cost, which is measuring the difference between the output of the network and the input. # + deletable=true editable=true learning_rate = 0.001 optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) # + [markdown] deletable=true editable=true # Now we'll create a session to manage the training in minibatches: # + deletable=true editable=true # %% # We create a session to use the graph sess = tf.Session() sess.run(tf.global_variables_initializer()) # + [markdown] deletable=true editable=true # Now we'll train: # + deletable=true editable=true # Some parameters for training batch_size = 100 n_epochs = 5 # We'll try to reconstruct the same first 100 images and show how # The network does over the course of training. 
examples = ds.X[:100] # We'll store the reconstructions in a list imgs = [] fig, ax = plt.subplots(1, 1) for epoch_i in range(n_epochs): for batch_X, _ in ds.train.next_batch(): sess.run(optimizer, feed_dict={X: batch_X - mean_img}) recon = sess.run(Y, feed_dict={X: examples - mean_img}) recon = np.clip((recon + mean_img).reshape((-1, 28, 28)), 0, 255) img_i = montage(recon).astype(np.uint8) imgs.append(img_i) ax.imshow(img_i, cmap='gray') fig.canvas.draw() print(epoch_i, sess.run(cost, feed_dict={X: batch_X - mean_img})) gif.build_gif(imgs, saveto='ae.gif', cmap='gray') # + deletable=true editable=true ipyd.Image(url='ae.gif?{}'.format(np.random.rand()), height=500, width=500) # + [markdown] deletable=true editable=true # <a name="convolutional-autoencoder"></a> # ## Convolutional Autoencoder # # To get even better encodings, we can also try building a convolutional network. Why would a convolutional network perform any different to a fully connected one? Let's see what we were doing in the fully connected network. For every pixel in our input, we have a set of weights corresponding to every output neuron. Those weights are unique to each pixel. Each pixel gets its own row in the weight matrix. That really doesn't make a lot of sense, since we would guess that nearby pixels are probably not going to be so different. And we're not really encoding what's happening around that pixel, just what that one pixel is doing. # # In a convolutional model, we're explicitly modeling what happens around a pixel. And we're using the exact same convolutions no matter where in the image we are. But we're going to use a lot of different convolutions. # # Recall in session 1 we created a Gaussian and Gabor kernel and used this to convolve an image to either blur it or to accentuate edges. Armed with what you know now, you could try to train a network to learn the parameters that map an untouched image to a blurred or edge filtered version of it. 
What you should find is the kernel will look sort of what we built by hand. I'll leave that as an excercise for you. # # But in fact, that's too easy really. That's just 1 filter you would have to learn. We're going to see how we can use many convolutional filters, way more than 1, and how it will help us to encode the MNIST dataset. # # To begin we'll need to reset the current graph and start over. # + deletable=true editable=true from tensorflow.python.framework.ops import reset_default_graph reset_default_graph() # + deletable=true editable=true # And we'll create a placeholder in the tensorflow graph that will be able to get any number of n_feature inputs. X = tf.placeholder(tf.float32, [None, n_features]) # + [markdown] deletable=true editable=true # Since `X` is currently `[batch, height*width]`, we need to reshape it to a # 4-D tensor to use it in a convolutional graph. Remember back to the first session that in order to perform convolution, we have to use 4-dimensional tensors describing the: # # `N x H x W x C` # # We'll reshape our input placeholder by telling the `shape` parameter to be these new dimensions. However, since our batch dimension is `None`, we cannot reshape without using the special value `-1`, which says that the size of that dimension should be computed so that the total size remains constant. Since we haven't defined the batch dimension's shape yet, we use `-1` to denote this # dimension should not change size. # + deletable=true editable=true X_tensor = tf.reshape(X, [-1, 28, 28, 1]) # + [markdown] deletable=true editable=true # We'll now setup the first convolutional layer. Remember from Session 2 that the weight matrix for convolution should be # # `[height x width x input_channels x output_channels]` # # Think a moment about how this is different to the fully connected network. In the fully connected network, every pixel was being multiplied by its own weight to every other neuron. 
With a convolutional network, we use the extra dimensions to allow the same set of filters to be applied everywhere across an image. This is also known in the literature as weight sharing, since we're sharing the weights no matter where in the input we are. That's unlike the fully connected approach, which has unique weights for every pixel. What's more is after we've performed the convolution, we've retained the spatial organization of the input. We still have dimensions of height and width. That's again unlike the fully connected network which effectively shuffles or takes int account information from everywhere, not at all caring about where anything is. That can be useful or not depending on what we're trying to achieve. Often, it is something we might want to do after a series of convolutions to encode translation invariance. Don't worry about that for now. With MNIST especially we won't need to do that since all of the numbers are in the same position. # # Now with our tensor ready, we're going to do what we've just done with the fully connected autoencoder. Except, instead of performing matrix multiplications, we're going to create convolution operations. To do that, we'll need to decide on a few parameters including the filter size, how many convolution filters we want, and how many layers we want. I'll start with a fairly small network, and let you scale this up in your own time. # + deletable=true editable=true n_filters = [16, 16, 16] filter_sizes = [4, 4, 4] # + [markdown] deletable=true editable=true # Now we'll create a loop to create every layer's convolution, storing the convolution operations we create so that we can do the reverse. # + deletable=true editable=true current_input = X_tensor # notice instead of having 784 as our input features, we're going to have # just 1, corresponding to the number of channels in the image. 
# We're going to use convolution to find 16 filters, or 16 channels of information in each spatial location we perform convolution at. n_input = 1 # We're going to keep every matrix we create so let's create a list to hold them all Ws = [] shapes = [] # We'll create a for loop to create each layer: for layer_i, n_output in enumerate(n_filters): # just like in the last session, # we'll use a variable scope to help encapsulate our variables # This will simply prefix all the variables made in this scope # with the name we give it. with tf.variable_scope("encoder/layer/{}".format(layer_i)): # we'll keep track of the shapes of each layer # As we'll need these for the decoder shapes.append(current_input.get_shape().as_list()) # Create a weight matrix which will increasingly reduce # down the amount of information in the input by performing # a matrix multiplication W = tf.get_variable( name='W', shape=[ filter_sizes[layer_i], filter_sizes[layer_i], n_input, n_output], initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02)) # Now we'll convolve our input by our newly created W matrix h = tf.nn.conv2d(current_input, W, strides=[1, 2, 2, 1], padding='SAME') # And then use a relu activation function on its output current_input = tf.nn.relu(h) # Finally we'll store the weight matrix so we can build the decoder. Ws.append(W) # We'll also replace n_input with the current n_output, so that on the # next iteration, our new number inputs will be correct. n_input = n_output # + [markdown] deletable=true editable=true # Now with our convolutional encoder built and the encoding weights stored, we'll reverse the whole process to decode everything back out to the original image. 
# + deletable=true editable=true # We'll first reverse the order of our weight matrices Ws.reverse() # and the shapes of each layer shapes.reverse() # and the number of filters (which is the same but could have been different) n_filters.reverse() # and append the last filter size which is our input image's number of channels n_filters = n_filters[1:] + [1] print(n_filters, filter_sizes, shapes) # + deletable=true editable=true # and then loop through our convolution filters and get back our input image # we'll enumerate the shapes list to get us there for layer_i, shape in enumerate(shapes): # we'll use a variable scope to help encapsulate our variables # This will simply prefix all the variables made in this scope # with the name we give it. with tf.variable_scope("decoder/layer/{}".format(layer_i)): # Create a weight matrix which will increasingly reduce # down the amount of information in the input by performing # a matrix multiplication W = Ws[layer_i] # Now we'll convolve by the transpose of our previous convolution tensor h = tf.nn.conv2d_transpose(current_input, W, tf.stack([tf.shape(X)[0], shape[1], shape[2], shape[3]]), strides=[1, 2, 2, 1], padding='SAME') # And then use a relu activation function on its output current_input = tf.nn.relu(h) # + [markdown] deletable=true editable=true # Now we have the reconstruction through the network: # + deletable=true editable=true Y = current_input Y = tf.reshape(Y, [-1, n_features]) # + [markdown] deletable=true editable=true # We can measure the cost and train exactly like before with the fully connected network: # + deletable=true editable=true cost = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(X, Y), 1)) learning_rate = 0.001 # pass learning rate and cost to optimize optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) # Session to manage vars/train sess = tf.Session() sess.run(tf.global_variables_initializer()) # Some parameters for training batch_size = 100 n_epochs = 5 # We'll try to 
reconstruct the same first 100 images and show how # The network does over the course of training. examples = ds.X[:100] # We'll store the reconstructions in a list imgs = [] fig, ax = plt.subplots(1, 1) for epoch_i in range(n_epochs): for batch_X, _ in ds.train.next_batch(): sess.run(optimizer, feed_dict={X: batch_X - mean_img}) recon = sess.run(Y, feed_dict={X: examples - mean_img}) recon = np.clip((recon + mean_img).reshape((-1, 28, 28)), 0, 255) img_i = montage(recon).astype(np.uint8) imgs.append(img_i) ax.imshow(img_i, cmap='gray') fig.canvas.draw() print(epoch_i, sess.run(cost, feed_dict={X: batch_X - mean_img})) gif.build_gif(imgs, saveto='conv-ae.gif', cmap='gray') # + deletable=true editable=true ipyd.Image(url='conv-ae.gif?{}'.format(np.random.rand()), height=500, width=500) # + [markdown] deletable=true editable=true # <a name="denoising-autoencoder"></a> # ## Denoising Autoencoder # # The denoising autoencoder is a very simple extension to an autoencoder. Instead of seeing the input, it is corrupted, for instance by masked noise. but the reconstruction loss is still measured on the original uncorrupted image. What this does is lets the model try to interpret occluded or missing parts of the thing it is reasoning about. It would make sense for many models, that not every datapoint in an input is necessary to understand what is going on. Denoising autoencoders try to enforce that, and as a result, the encodings at the middle most layer are often far more representative of the actual classes of different objects. # # In the resources section, you'll see that I've included a general framework autoencoder allowing you to use either a fully connected or convolutional autoencoder, and whether or not to include denoising. If you interested in the mechanics of how this works, I encourage you to have a look at the code. 
# # <a name="variational-autoencoders"></a> # ## Variational Autoencoders # # A variational autoencoder extends the traditional autoencoder by using an additional layer called the variational layer. It is actually two networks that are cleverly connected using a simple reparameterization trick, to help the gradient flow through both networks during backpropagation allowing both to be optimized. # # We dont' have enough time to get into the details, but I'll try to quickly explain: it tries to optimize the likelihood that a particular distribution would create an image, rather than trying to optimize simply the L2 loss at the end of the network. Or put another way it hopes that there is some distribution that a distribution of image encodings could be defined as. This is a bit tricky to grasp, so don't worry if you don't understand the details. The major difference to hone in on is that instead of optimizing distance in the input space of pixel to pixel distance, which is actually quite arbitrary if you think about it... why would we care about the exact pixels being the same? Human vision would not care for most cases, if there was a slight translation of our image, then the distance could be very high, but we would never be able to tell the difference. So intuitively, measuring error based on raw pixel to pixel distance is not such a great approach. # # Instead of relying on raw pixel differences, the variational autoencoder tries to optimize two networks. One which says that given my pixels, I am pretty sure I can encode them to the parameters of some well known distribution, like a set of Gaussians, instead of some artbitrary density of values. And then I can optimize the latent space, by saying that particular distribution should be able to represent my entire dataset, and I try to optimize the likelihood that it will create the images I feed through a network. So distance is somehow encoded in this latent space. 
Of course I appreciate that is a difficult concept so forgive me for not being able to expand on it in more details. # # But to make up for the lack of time and explanation, I've included this model under the resources section for you to play with! Just like the "vanilla" autoencoder, this one supports both fully connected, convolutional, and denoising models. # # This model performs so much better than the vanilla autoencoder. In fact, it performs so well that I can even manage to encode the majority of MNIST into 2 values. The following visualization demonstrates the learning of a variational autoencoder over time. # # <mnist visualization> # # There are of course a lot more interesting applications of such a model. You could for instance, try encoding a more interesting dataset, such as CIFAR which you'll find a wrapper for in the libs/datasets module. # # <TODO: produce GIF visualization madness> # # Or the celeb faces dataset: # # <celeb dataset> # # Or you could try encoding an entire movie. We tried it with the copyleft movie, "Sita Sings The Blues". Every 2 seconds, we stored an image of this movie, and then fed all of these images to a deep variational autoencoder. This is the result. # # <show sita sings the blues training images> # # And I'm sure we can get closer with deeper nets and more train time. But notice how in both celeb faces and sita sings the blues, the decoding is really blurred. That is because of the assumption of the underlying representational space. We're saying the latent space must be modeled as a gaussian, and those factors must be distributed as a gaussian. This enforces a sort of discretization of my representation, enforced by the noise parameter of the gaussian. In the last session, we'll see how we can avoid this sort of blurred representation and get even better decodings using a generative adversarial network. # # For now, consider the applications that this method opens up. 
Once you have an encoding of a movie, or image dataset, you are able to do some very interesting things. You have effectively stored all the representations of that movie, although it's not perfect of course. But, you could for instance, see how another movie would be interpreted by the same network. That's similar to what <NAME> did for his project on reconstructing blade runner and a scanner darkly, though he made use of both the variational autoencoder and the generative adversarial network. We're going to look at that network in more detail in the last session.
#
# We'll also look at how to properly handle very large datasets like celeb faces or the one used here to create the sita sings the blues autoencoder. Taking every 60th frame of Sita Sings The Blues gives you about 300k images. And that's a lot of data to try and load in all at once. We had to size it down considerably, and make use of what's called a tensorflow input pipeline. I've included all the code for training this network, which took about 1 day on a fairly powerful machine, but I will not get into the details of the image pipeline bits until session 5 when we look at generative adversarial networks. I'm delaying this because we'll need to learn a few things along the way before we can build such a network.
#
# <a name="predicting-image-labels"></a>
# # Predicting Image Labels
#
# We've just seen a variety of types of autoencoders and how they are capable of compressing information down to its inner most layer while still being able to retain most of the interesting details. Considering that the CelebNet dataset was nearly 200 thousand images of 64 x 64 x 3 pixels, and we're able to express those with just an inner layer of 50 values, that's just magic basically. Magic.
#
# Okay, let's move on now to a different type of learning often called supervised learning.
Unlike what we just did, which is work with a set of data and not have any idea what that data should be *labeled* as, we're going to explicitly tell the network what we want it to be labeled by saying what the network should output for a given input. In the previous case, we just had a set of `Xs`, our images. Now, we're going to have `Xs` and `Ys` given to us, and use the `Xs` to try and output the `Ys`.
#
# With MNIST, the outputs of each image are simply what numbers are drawn in the input image. The wrapper for grabbing this dataset from the libs module takes an additional parameter which I didn't talk about called `one_hot`.

# + deletable=true editable=true
from libs import datasets
# ds = datasets.MNIST(one_hot=True)

# + [markdown] deletable=true editable=true
# To see what this is doing, let's compare setting it to false versus true:

# + deletable=true editable=true
ds = datasets.MNIST(one_hot=False)
# let's look at the first label
print(ds.Y[0])
# okay and what does the input look like
plt.imshow(np.reshape(ds.X[0], (28, 28)), cmap='gray')
# great it is just the label of the image

# + deletable=true editable=true
plt.figure()
# Let's look at the next one just to be sure
print(ds.Y[1])
# Yea the same idea
plt.imshow(np.reshape(ds.X[1], (28, 28)), cmap='gray')

# + [markdown] deletable=true editable=true
# And now let's look at what the one hot version looks like:

# + deletable=true editable=true
ds = datasets.MNIST(one_hot=True)
plt.figure()
plt.imshow(np.reshape(ds.X[0], (28, 28)), cmap='gray')
print(ds.Y[0])
# array([ 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.])
# Woah a bunch more numbers. 10 to be exact, which is also the number
# of different labels in the dataset.
plt.imshow(np.reshape(ds.X[1], (28, 28)), cmap='gray')
print(ds.Y[1])
# array([ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])

# + [markdown] deletable=true editable=true
# So instead of having a number from 0-9, we have 10 numbers corresponding to the digits, 0-9, and each value is either 0 or 1.
Whichever digit the image represents is the one that is 1. # # To summarize, we have all of the images of the dataset stored as: # `n_observations` x `n_features` tensor (n-dim array) # + deletable=true editable=true print(ds.X.shape) # + [markdown] deletable=true editable=true # And labels stored as `n_observations` x `n_labels` where each observation is a one-hot vector, where only one element is 1 indicating which class or label it is. # + deletable=true editable=true print(ds.Y.shape) print(ds.Y[0]) # + [markdown] deletable=true editable=true # <a name="one-hot-encoding"></a> # ## One-Hot Encoding # # Remember in the last session, we saw how to build a network capable of taking 2 inputs representing the row and column of an image, and predicting 3 outputs, the red, green, and blue colors. Just like in our unsupervised model, instead of having 2 inputs, we'll now have 784 inputs, the brightness of every pixel in our image. And instead of 3 outputs, like in our painting network from last session, or the 784 outputs we had in our unsupervised MNIST network, we'll now have 10 outputs representing the one-hot encoding of its label. # # So why don't we just have 1 output? A number from 0-9? Wouldn't having 10 different outputs instead of just 1 be harder to learn? Consider how we normally train the network. We have to give it a cost which it will use to minimize. What could our cost be if our output was just a single number, 0-9? We would still have the true label, and the predicted label. Could we just take the subtraction of the two values? e.g. the network predicted 0, but the image was really the number 8. Okay so then our distance could be: # + deletable=true editable=true # cost = tf.reduce_sum(tf.abs(y_pred - y_true)) # + [markdown] deletable=true editable=true # But in this example, the cost would be 8. If the image was a 4, and the network predicted a 0 again, the cost would be 4... 
but isn't the network still just as wrong, not half as much as when the image was an 8? In a one-hot encoding, the cost would be 1 for both, meaning they are both just as wrong. So we're able to better measure the cost, by separating each class's label into its own dimension. # # <a name="using-regression-for-classification"></a> # ## Using Regression for Classification # # The network we build will be trained to output values between 0 and 1. They won't output exactly a 0 or 1. But rather, they are able to produce any value. 0, 0.1, 0.2, ... and that means the networks we've been using are actually performing regression. In regression, the output is "continuous", rather than "discrete". The difference is this: a *discrete* output means the network can only output one of a few things. Like, 0, 1, 2, or 3, and that's it. But a *continuous* output means it can output any real number. # # In order to perform what's called classification, we're just simply going to look at whichever value is the highest in our one hot encoding. In order to do that a little better, we're actually going interpret our one hot encodings as probabilities by scaling the total output by their sum. What this does is allows us to understand that as we grow more confident in one prediction, we should grow less confident in all other predictions. We only have so much certainty to go around, enough to add up to 1. If we think the image might also be the number 1, then we lose some certainty of it being the number 0. # # It turns out there is a better cost function that simply measuring the distance between two vectors when they are probabilities. 
It's called cross entropy: # # \begin{align} # \Large{H(x) = -\sum{y_{\text{t}}(x) * \log(y_{\text{p}}(x))}} # \end{align} # # What this equation does is measures the similarity of our prediction with our true distribution, by exponentially increasing error whenever our prediction gets closer to 1 when it should be 0, and similarly by exponentially increasing error whenever our prediction gets closer to 0, when it should be 1. I won't go into more detail here, but just know that we'll be using this measure instead of a normal distance measure. # # <a name="fully-connected-network"></a> # ## Fully Connected Network # # ### Defining the Network # # Let's see how our one hot encoding and our new cost function will come into play. We'll create our network for predicting image classes in pretty much the same way we've created previous networks: # # We will have as input to the network 28 x 28 values. # + deletable=true editable=true import tensorflow as tf from libs import datasets ds = datasets.MNIST(split=[0.8, 0.1, 0.1]) n_input = 28 * 28 # + [markdown] deletable=true editable=true # As output, we have our 10 one-hot-encoding values # + deletable=true editable=true n_output = 10 # + [markdown] deletable=true editable=true # We're going to create placeholders for our tensorflow graph. We're going to set the first dimension to `None`. Remember from our unsupervised model, this is just something special for placeholders which tells tensorflow "let this dimension be any possible value". 1, 5, 100, 1000, it doesn't matter. Since we're going to pass our entire dataset in batches we'll need this to be say 100 images at a time. But we'd also like to be able to send in only 1 image and see what the prediction of the network is. That's why we let this dimension be flexible. 
# + deletable=true editable=true X = tf.placeholder(tf.float32, [None, n_input]) # + [markdown] deletable=true editable=true # For the output, we'll have `None` again, since for every input, we'll have the same number of images that have outputs. # + deletable=true editable=true Y = tf.placeholder(tf.float32, [None, n_output]) # + [markdown] deletable=true editable=true # Now we'll connect our input to the output with a linear layer. Instead of `relu`, we're going to use `softmax`. This will perform our exponential scaling of the outputs and make sure the output sums to 1, making it a probability. # + deletable=true editable=true # We'll use the linear layer we created in the last session, which I've stored in the libs file: # NOTE: The lecture used an older version of this function which had a slightly different definition. from libs import utils Y_pred, W = utils.linear( x=X, n_output=n_output, activation=tf.nn.softmax, name='layer1') # + [markdown] deletable=true editable=true # And then we write our loss function as the cross entropy. And then we'll give our optimizer the `cross_entropy` measure just like we would with GradientDescent. The formula for cross entropy is: # # \begin{align} # \Large{H(x) = -\sum{\text{Y}_{\text{true}} * log(\text{Y}_{pred})}} # \end{align} # + deletable=true editable=true # We add 1e-12 because the log is undefined at 0. cross_entropy = -tf.reduce_sum(Y * tf.log(Y_pred + 1e-12)) optimizer = tf.train.AdamOptimizer(0.001).minimize(cross_entropy) # + [markdown] deletable=true editable=true # To determine the correct class from our regression output, we have to take the maximum index. # + deletable=true editable=true predicted_y = tf.argmax(Y_pred, 1) actual_y = tf.argmax(Y, 1) # + [markdown] deletable=true editable=true # We can then measure the accuracy by seeing whenever these are equal. Note, this is just for us to see, and is not at all used to "train" the network! 
# + deletable=true editable=true correct_prediction = tf.equal(predicted_y, actual_y) accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) # + [markdown] deletable=true editable=true # ### Training the Network # # The rest of the code will be exactly the same as before. We chunk the training dataset into `batch_size` chunks, and let these images help train the network over a number of iterations. # + deletable=true editable=true sess = tf.Session() sess.run(tf.global_variables_initializer()) # Now actually do some training: batch_size = 50 n_epochs = 5 for epoch_i in range(n_epochs): for batch_xs, batch_ys in ds.train.next_batch(): sess.run(optimizer, feed_dict={ X: batch_xs, Y: batch_ys }) valid = ds.valid print(sess.run(accuracy, feed_dict={ X: valid.images, Y: valid.labels })) # Print final test accuracy: test = ds.test print(sess.run(accuracy, feed_dict={ X: test.images, Y: test.labels })) # + [markdown] deletable=true editable=true # What we should see is the accuracy being printed after each "epoch", or after every run over the entire dataset. Since we're using batches, we use the notion of an "epoch" to denote whenever we've gone through the entire dataset. # # <a name="inspecting-the-network"></a> # ### Inspecting the Trained Network # # Let's try and now inspect *how* the network is accomplishing this task. We know that our network is a single matrix multiplication of our 784 pixel values. The weight matrix, `W`, should therefore have 784 rows. As outputs, it has 10 values. So the matrix is composed in the `linear` function as `n_input` x `n_output` values. So the matrix is 784 rows x 10 columns. # # <TODO: graphic w/ wacom showing network and matrix multiplication and pulling out single neuron/column> # # In order to get this matrix, we could have had our `linear` function return the `tf.Tensor`. 
But since everything is part of the tensorflow graph, and we've started using nice names for all of our operations, we can actually find this tensor using tensorflow: # + deletable=true editable=true # We first get the graph that we used to compute the network g = tf.get_default_graph() # And can inspect everything inside of it [op.name for op in g.get_operations()] # + [markdown] deletable=true editable=true # Looking at the names of the operations, we see there is one `linear/W`. But this is the `tf.Operation`. Not the `tf.Tensor`. The tensor is the result of the operation. To get the result of the operation, we simply add ":0" to the name of the operation: # + deletable=true editable=true W = g.get_tensor_by_name('layer1/W:0') # + [markdown] deletable=true editable=true # We can use the existing session to compute the current value of this tensor: # + deletable=true editable=true W_arr = np.array(W.eval(session=sess)) print(W_arr.shape) # + [markdown] deletable=true editable=true # And now we have our tensor! Let's try visualizing every neuron, or every column of this matrix: # + deletable=true editable=true fig, ax = plt.subplots(1, 10, figsize=(20, 3)) for col_i in range(10): ax[col_i].imshow(W_arr[:, col_i].reshape((28, 28)), cmap='coolwarm') # + [markdown] deletable=true editable=true # We're going to use the `coolwarm` color map, which will use "cool" values, or blue-ish colors for low values. And "warm" colors, red, basically, for high values. So what we begin to see is that there is a weighting of all the input values, where pixels that are likely to describe that number are being weighted high, and pixels that are not likely to describe that number are being weighted low. By summing all of these multiplications together, the network is able to begin to predict what number is in the image. This is not a very good network though, and the representations it learns could still do a much better job. 
We were only right about 93% of the time according to our accuracy. State of the art models will get about 99.9% accuracy. # # <a name="convolutional-networks"></a> # ## Convolutional Networks # # To get better performance, we can build a convolutional network. We've already seen how to create a convolutional network with our unsupervised model. We're going to make the same modifications here to help us predict the digit labels in MNIST. # # ### Defining the Network # # I'll first reset the current graph, so we can build a new one. We'll use tensorflow's nice helper function for doing this. # + deletable=true editable=true from tensorflow.python.framework.ops import reset_default_graph reset_default_graph() # + [markdown] deletable=true editable=true # And just to confirm, let's see what's in our graph: # + deletable=true editable=true # We first get the graph that we used to compute the network g = tf.get_default_graph() # And can inspect everything inside of it [op.name for op in g.get_operations()] # + [markdown] deletable=true editable=true # Great. Empty. # # Now let's get our dataset, and create some placeholders like before: # + deletable=true editable=true # We'll have placeholders just like before which we'll fill in later. ds = datasets.MNIST(one_hot=True, split=[0.8, 0.1, 0.1]) X = tf.placeholder(tf.float32, [None, 784]) Y = tf.placeholder(tf.float32, [None, 10]) # + [markdown] deletable=true editable=true # Since `X` is currently `[batch, height*width]`, we need to reshape to a # 4-D tensor to use it in a convolutional graph. Remember, in order to perform convolution, we have to use 4-dimensional tensors describing the: # # `N x H x W x C` # # We'll reshape our input placeholder by telling the `shape` parameter to be these new dimensions and we'll use `-1` to denote this dimension should not change size. 
# + deletable=true editable=true X_tensor = tf.reshape(X, [-1, 28, 28, 1]) # + [markdown] deletable=true editable=true # We'll now setup the first convolutional layer. Remember that the weight matrix for convolution should be # # `[height x width x input_channels x output_channels]` # # Let's create 32 filters. That means every location in the image, depending on the stride I set when we perform the convolution, will be filtered by this many different kernels. In session 1, we convolved our image with just 2 different types of kernels. Now, we're going to let the computer try to find out what 32 filters helps it map the input to our desired output via our training signal. # + deletable=true editable=true filter_size = 5 n_filters_in = 1 n_filters_out = 32 W_1 = tf.get_variable( name='W', shape=[filter_size, filter_size, n_filters_in, n_filters_out], initializer=tf.random_normal_initializer()) # + [markdown] deletable=true editable=true # Bias is always `[output_channels]` in size. # + deletable=true editable=true b_1 = tf.get_variable( name='b', shape=[n_filters_out], initializer=tf.constant_initializer()) # + [markdown] deletable=true editable=true # Now we can build a graph which does the first layer of convolution: # We define our stride as `batch` x `height` x `width` x `channels`. This has the effect of resampling the image down to half of the size. # + deletable=true editable=true h_1 = tf.nn.relu( tf.nn.bias_add( tf.nn.conv2d(input=X_tensor, filter=W_1, strides=[1, 2, 2, 1], padding='SAME'), b_1)) # + [markdown] deletable=true editable=true # And just like the first layer, add additional layers to create a deep net. 
# + deletable=true editable=true n_filters_in = 32 n_filters_out = 64 W_2 = tf.get_variable( name='W2', shape=[filter_size, filter_size, n_filters_in, n_filters_out], initializer=tf.random_normal_initializer()) b_2 = tf.get_variable( name='b2', shape=[n_filters_out], initializer=tf.constant_initializer()) h_2 = tf.nn.relu( tf.nn.bias_add( tf.nn.conv2d(input=h_1, filter=W_2, strides=[1, 2, 2, 1], padding='SAME'), b_2)) # + [markdown] deletable=true editable=true # 4d -> 2d # + deletable=true editable=true # We'll now reshape so we can connect to a fully-connected/linear layer: h_2_flat = tf.reshape(h_2, [-1, 7 * 7 * n_filters_out]) # + [markdown] deletable=true editable=true # Create a fully-connected layer: # + deletable=true editable=true # NOTE: This uses a slightly different version of the linear function than the lecture! h_3, W = utils.linear(h_2_flat, 128, activation=tf.nn.relu, name='fc_1') # + [markdown] deletable=true editable=true # And one last fully-connected layer which will give us the correct number of outputs, and use a softmax to expoentially scale the outputs and convert them to a probability: # + deletable=true editable=true # NOTE: This uses a slightly different version of the linear function than the lecture! Y_pred, W = utils.linear(h_3, n_output, activation=tf.nn.softmax, name='fc_2') # + [markdown] deletable=true editable=true # <TODO: Draw as graphical representation> # # ### Training the Network # # The rest of the training process is the same as the previous network. 
We'll define loss/eval/training functions: # + deletable=true editable=true cross_entropy = -tf.reduce_sum(Y * tf.log(Y_pred + 1e-12)) optimizer = tf.train.AdamOptimizer().minimize(cross_entropy) # + [markdown] deletable=true editable=true # Monitor accuracy: # + deletable=true editable=true correct_prediction = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float')) # + [markdown] deletable=true editable=true # And create a new session to actually perform the initialization of all the variables: # + deletable=true editable=true sess = tf.Session() sess.run(tf.global_variables_initializer()) # + [markdown] deletable=true editable=true # Then we'll train in minibatches and report accuracy: # + deletable=true editable=true batch_size = 50 n_epochs = 10 for epoch_i in range(n_epochs): for batch_xs, batch_ys in ds.train.next_batch(): sess.run(optimizer, feed_dict={ X: batch_xs, Y: batch_ys }) valid = ds.valid print(sess.run(accuracy, feed_dict={ X: valid.images, Y: valid.labels })) # Print final test accuracy: test = ds.test print(sess.run(accuracy, feed_dict={ X: test.images, Y: test.labels })) # + [markdown] deletable=true editable=true # <TODO: Fun timelapse of waiting> # # ### Inspecting the Trained Network # # Let's take a look at the kernels we've learned using the following montage function, similar to the one we've been using for creating image montages, except this one is suited for the dimensions of convolution kernels instead of 4-d images. So it has the height and width first, unlike images which have batch then height then width. We'll use this function to visualize every convolution kernel in the first and second layers of our network. 
# + deletable=true editable=true from libs.utils import montage_filters W1 = sess.run(W_1) plt.figure(figsize=(10, 10)) plt.imshow(montage_filters(W1), cmap='coolwarm', interpolation='nearest') # + [markdown] deletable=true editable=true # What we're looking at are all of the convolution kernels that have been learned. Compared to the previous network we've learned, it is much harder to understand what's happening here. But let's try and explain these a little more. The kernels that have been automatically learned here are responding to edges of different scales, orientations, and rotations. It's likely these are really describing parts of letters, or the strokes that make up letters. Put another way, they are trying to get at the "information" in the image by seeing what changes. # # That's a pretty fundamental idea. That information would be things that change. Of course, there are filters for things that aren't changing as well. Some filters may even seem to respond to things that are mostly constant. However, if our network has learned a lot of filters that look like that, it's likely that the network hasn't really learned anything at all. The flip side of this is if the filters all look more or less random. That's also a bad sign. # # Let's try looking at the second layer's kernels: # + deletable=true editable=true W2 = sess.run(W_2) plt.imshow(montage_filters(W2 / np.max(W2)), cmap='coolwarm') # + [markdown] deletable=true editable=true # It's really difficult to know what's happening here. There are many more kernels in this layer. They've already passed through a set of filters and an additional non-linearity. How can we really know what the network is doing to learn its objective function? The important thing for now is to see that most of these filters are different, and that they are not all constant or uniformly activated. 
That means it's really doing something, but we aren't really sure yet how to see how that effects the way we think of and perceive the image. In the next session, we'll learn more about how we can start to interrogate these deeper representations and try to understand what they are encoding. Along the way, we'll learn some pretty amazing tricks for producing entirely new aesthetics that eventually led to the "deep dream" viral craze. # # <a name="savingloading-models"></a> # # Saving/Loading Models # # Tensorflow provides a few ways of saving/loading models. The easiest way is to use a checkpoint. Though, this really useful while you are training your network. When you are ready to deploy or hand out your network to others, you don't want to pass checkpoints around as they contain a lot of unnecessary information, and it also requires you to still write code to create your network. Instead, you can create a protobuf which contains the definition of your graph and the model's weights. Let's see how to do both: # # <a name="checkpoint"></a> # ## Checkpoint # # Creating a checkpoint requires you to have already created a set of operations in your tensorflow graph. Once you've done this, you'll create a session like normal and initialize all of the variables. After this, you create a `tf.train.Saver` which can restore a previously saved checkpoint, overwriting all of the variables with your saved parameters. # + deletable=true editable=true import os sess = tf.Session() init_op = tf.global_variables_initializer() saver = tf.train.Saver() sess.run(init_op) if os.path.exists("model.ckpt"): saver.restore(sess, "model.ckpt") print("Model restored.") # + [markdown] deletable=true editable=true # Creating the checkpoint is easy. After a few iterations of training, depending on your application say between 1/10 of the time to train the full model, you'll want to write the saved model. 
You can do this like so: # + deletable=true editable=true save_path = saver.save(sess, "./model.ckpt") print("Model saved in file: %s" % save_path) # + [markdown] deletable=true editable=true # <a name="protobuf"></a> # ## Protobuf # # The second way of saving a model is really useful for when you don't want to pass around the code for producing the tensors or computational graph itself. It is also useful for moving the code to deployment or for use in the C++ version of Tensorflow. To do this, you'll want to run an operation to convert all of your trained parameters into constants. Then, you'll create a second graph which copies the necessary tensors, extracts the subgraph, and writes this to a model. The summarized code below shows you how you could use a checkpoint to restore your models parameters, and then export the saved model as a protobuf. # + deletable=true editable=true path='./' ckpt_name = './model.ckpt' fname = 'model.tfmodel' dst_nodes = ['Y'] g_1 = tf.Graph() with tf.Session(graph=g_1) as sess: x = tf.placeholder(tf.float32, shape=(1, 224, 224, 3)) # Replace this with some code which will create your tensorflow graph: net = create_network() sess.run(tf.global_variables_initializer()) saver.restore(sess, ckpt_name) graph_def = tf.python.graph_util.convert_variables_to_constants( sess, sess.graph_def, dst_nodes) g_2 = tf.Graph() with tf.Session(graph=g_2) as sess: tf.train.write_graph( tf.python.graph_util.extract_sub_graph( graph_def, dst_nodes), path, fname, as_text=False) # + [markdown] deletable=true editable=true # When you wanted to import this model, now you wouldn't need to refer to the checkpoint or create the network by specifying its placeholders or operations. 
Instead, you'd use the `import_graph_def` operation like so: # + deletable=true editable=true with open("model.tfmodel", mode='rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) tf.import_graph_def(net['graph_def'], name='model') # + [markdown] deletable=true editable=true # <a name="wrap-up"></a> # # Wrap Up # # In the next session, we'll learn some very powerful techniques for exploring the representations learned by these kernels, and how we can better understand what they are learning. We'll look at state of the art deep networks for image recognition and interrogate what they've learned using techniques that led the public to Deep Dream. # # <a name="reading"></a> # # Reading # # <NAME>.; <NAME>. (1988). "Auto-association by multilayer perceptrons and singular value decomposition". Biological Cybernetics 59 (4–5): 291–294. # # <NAME>, <NAME>. Reducing the Dimensionality of Data with Neural Networks. Science, 28 Jul 2006. Vol. 313, Issue 5786, pp. 504-507. # DOI: 10.1126/science.1127647. http://science.sciencemag.org/content/313/5786/504.abstract # # <NAME>. (2009). "Learning Deep Architectures for AI". Foundations and Trends in Machine Learning 2. doi:10.1561/2200000006 # # <NAME>; <NAME>; <NAME>; <NAME>; <NAME> (2010). "Stacked Denoising Autoencoders: Learning Useful Representations in a Deep Network with a Local Denoising Criterion". The Journal of Machine Learning Research 11: 3371–3408. # # Auto-Encoding Variational Bayes, <NAME>. and <NAME>., ArXiv e-prints, 2013 http://arxiv.org/abs/1312.6114
session-3/.ipynb_checkpoints/lecture-3-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="1NWFlHbpcOYS" executionInfo={"status": "ok", "timestamp": 1618425561495, "user_tz": 180, "elapsed": 1196, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgmdBe4rTCFWx5-agZ7yNxvLwlX2BCyDHL0Xnak=s64", "userId": "17557421270393561228"}} outputId="db77ca56-ee10-47ac-8010-88f69ca277f0" from google.colab import drive drive.mount('/content/drive') # + id="uSpxtKTJcYB-" import pandas as pd import numpy as np # Machine Learning: split, grid search e cross validation from sklearn.model_selection import train_test_split from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import cross_val_score from sklearn.model_selection import cross_validate from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import GridSearchCV from sklearn import svm from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import precision_score from sklearn.metrics import roc_auc_score from sklearn.neighbors import KNeighborsClassifier # + id="WtqboY7tcx-M" pd.options.display.float_format= "{:.5f}".format pd.options.display.max_colwidth = 5000 # + colab={"base_uri": "https://localhost:8080/", "height": 436} id="y54lTU4idHNZ" executionInfo={"status": "ok", "timestamp": 1618425567686, "user_tz": 180, "elapsed": 716, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgmdBe4rTCFWx5-agZ7yNxvLwlX2BCyDHL0Xnak=s64", "userId": "17557421270393561228"}} outputId="9cfc3079-1b15-4353-f988-494513853768" df = 
pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Trabalho em grupo/Limpa_G4") df = df.iloc[:,1:] df # + colab={"base_uri": "https://localhost:8080/", "height": 80} id="X9yeEv40dHhE" executionInfo={"status": "ok", "timestamp": 1618425571093, "user_tz": 180, "elapsed": 647, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgmdBe4rTCFWx5-agZ7yNxvLwlX2BCyDHL0Xnak=s64", "userId": "17557421270393561228"}} outputId="e49aa0ea-4b8d-42f8-fcd1-ecda14c695b9" # Separando X e y X = df.iloc[:,:-1] y = df.iloc[:,-1] #Separando os dados em teste e treino X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, stratify = y, random_state= 6) #Separando os dados de treinamento em treinamento (final) e validação X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, stratify = y_train, random_state= 6) # Calculando as proporções do valor 1 entre os splits prop_real = df[df["Made Donation in March 2007"] == 1]["Made Donation in March 2007"].count()/df["Made Donation in March 2007"].count() prop_treino = y_train[y_train == 1].count()/y_train.count() prop_teste = y_test[y_test == 1].count()/y_test.count() prop_validação = y_val[y_val == 1].count()/y_val.count() # Criando um Data Frame para avaliar se a proporção de valor 1 (doou sangue em março de 2007) é identica nos datasets. 
df_train_test_prop = pd.DataFrame([prop_real,prop_treino,prop_teste, prop_validação]).T
df_train_test_prop.rename(columns = {0:"Proporção Real", 1: "Proporção de Treinamento", 2: "Proporção de Teste", 3: "Proporção de Validação"}, inplace = True)
df_train_test_prop

# + id="X4qKBWTldLC8"
# Normalize X_train (the scaler is fit on the training split only, to avoid leakage)
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)

# Normalize the remaining splits with the training-set scaler
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)

# + colab={"base_uri": "https://localhost:8080/"} id="1w9-W3nodkGG" executionInfo={"status": "ok", "timestamp": 1618425578365, "user_tz": 180, "elapsed": 1352, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgmdBe4rTCFWx5-agZ7yNxvLwlX2BCyDHL0Xnak=s64", "userId": "17557421270393561228"}} outputId="d28825fc-58b8-4cb4-a915-e238dcfb6931"
# Scoring metrics: one grid search will be run per metric
métricas = ["accuracy", "precision", "recall", "f1_macro", "roc_auc"]
range(len(métricas))

# + id="_yNT1TOkdlMs"
# Create the model
scv = svm.SVC(random_state = 6)

# Hyper-parameter grid
hiperparametros = {"kernel": ["linear", "poly", "rbf", "sigmoid"], "degree": [1,2,3,4,5], "C": [10,1,0.1,0.05, 0.01], "gamma": ["scale", "auto"]}

# Run the GridSearch.
# BUG FIX: the estimator passed to GridSearchCV was `LR`, a name that is never
# defined in this notebook (a leftover from a logistic-regression version).
# The SVC created above (`scv`) is the model being tuned.
resultados = {}
for i in range(len(métricas)):
    GS = GridSearchCV(scv, hiperparametros, scoring = métricas[i])
    GS.fit(X_train, y_train)
    resultados[i] = pd.DataFrame(GS.cv_results_)

# + id="3I_l2EPzeQj1"
# Re-key the results dict from positional indices to metric names
resultados["accuracy"] = resultados.pop(0)
resultados["precision"] = resultados.pop(1)
resultados["recall"] = resultados.pop(2)
resultados["f1"] = resultados.pop(3)
resultados["roc_auc"] = resultados.pop(4)

# + id="m8tEyHgTehQ6"
# Best hyper-parameter combination per metric.
# NOTE(review): .iloc[0, 8] assumes the "params" column of cv_results_ is at
# position 8 — fragile across sklearn versions; verify if upgrading.
melhores_modelos = {"accuracy": resultados["accuracy"].sort_values("mean_test_score", ascending = False).iloc[0,8],
                    "precision": resultados["precision"].sort_values("mean_test_score", ascending = False).iloc[0,8],
                    "recall": resultados["recall"].sort_values("mean_test_score", ascending = False).iloc[0,8],
                    "f1": resultados["f1"].sort_values("mean_test_score", ascending = False).iloc[0,8],
                    "roc_auc": resultados["roc_auc"].sort_values("mean_test_score", ascending = False).iloc[0,8]}

# + colab={"base_uri": "https://localhost:8080/"} id="TRokHKRHerti" executionInfo={"status": "ok", "timestamp": 1618422579043, "user_tz": 180, "elapsed": 1114, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgmdBe4rTCFWx5-agZ7yNxvLwlX2BCyDHL0Xnak=s64", "userId": "17557421270393561228"}} outputId="8d67cb6a-94b7-486d-c075-59cde3f33a71"
melhores_modelos

# + id="ojBwaV59etpa"
# Train the SVM with the "best" hyper-parameters
SVC = svm.SVC(C = 1, degree = 3, gamma = "scale", kernel = "sigmoid", random_state = 6).fit(X_train, y_train)

# Predict y on the validation split
y_pred = SVC.predict(X_val)

# + id="5b1oXEO5cyTz"
# Validation metrics
Acurácia = accuracy_score(y_val, y_pred)
Precisão = precision_score(y_val, y_pred)
Recall = recall_score(y_val, y_pred)
F1 = f1_score(y_val, y_pred, average = "macro")
ROC = roc_auc_score(y_val, y_pred)

# Collect the metrics in a DataFrame
resultados_val = pd.DataFrame({"Acurácia":Acurácia, "Precisão": Precisão, "Recall": Recall, "F1": F1, "ROC": ROC}, index = [0])

# + colab={"base_uri": "https://localhost:8080/", "height": 80} id="Fzx-T223g3ir" executionInfo={"status": "ok", "timestamp": 1618422765394, "user_tz": 180, "elapsed": 640, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgmdBe4rTCFWx5-agZ7yNxvLwlX2BCyDHL0Xnak=s64", "userId": "17557421270393561228"}} outputId="b5a37617-ab0b-42e6-a188-f438190d4cb8"
resultados_val

# + colab={"base_uri": "https://localhost:8080/", "height": 80} id="HmocHox6czc7" executionInfo={"status": "ok", "timestamp": 1618422865128, "user_tz": 180, "elapsed": 694, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgmdBe4rTCFWx5-agZ7yNxvLwlX2BCyDHL0Xnak=s64", "userId": "17557421270393561228"}} outputId="8db7b292-9f98-47fe-f992-53516a3057c8"
# Train the SVM with an alternative set of "best" hyper-parameters and re-evaluate
SVC = svm.SVC(C = 10, degree = 2, gamma = "scale", kernel = "sigmoid", random_state = 6).fit(X_train, y_train)

# Predict y on the validation split
y_pred = SVC.predict(X_val)

# Validation metrics
Acurácia = accuracy_score(y_val, y_pred)
Precisão = precision_score(y_val, y_pred)
Recall = recall_score(y_val, y_pred)
F1 = f1_score(y_val, y_pred, average = "macro")
ROC = roc_auc_score(y_val, y_pred)

# Collect the metrics in a DataFrame
resultados_val = pd.DataFrame({"Acurácia":Acurácia, "Precisão": Precisão, "Recall": Recall, "F1": F1, "ROC": ROC}, index = [0])
resultados_val
Grid Search/SVC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from neo4j import GraphDatabase from tqdm.notebook import tqdm import pandas as pd # # "Populer" une base Neo4j # Pré-requis : # 1. Installation de Neo4j Browser: https://neo4j.com/download/ # 2. Création du Base de données Neo4j en local depuis le Neo4j browser :https://neo4j.com/developer/neo4j-desktop/ #/!\ Penser à changer les credentials db_params = { "uri":"bolt://localhost:7687", "user":"neo4j", "password":"<PASSWORD>" } ranks = ['kingdom','phylum','class','family','order','genus','species'] df = pd.read_csv("../data/gbif_extract.csv",index_col=0) df.head() # ### Préparation des données # restriction du tree of life au phylum Rotifera data = df.loc[ (df['rank']=='SPECIES') & (df.kingdom == 'Animalia') & (df.phylum == 'Rotifera'), ranks].dropna() # ### Générations des éléments du graphes # + def get_edges(df): '''génère l'esemble des couples distincts neoud noeud qui définissent une arête''' edges = [] for i in range(len(ranks[:-1])): rank_pair = ranks[i:i+2] rank_edges = df[rank_pair].dropna(how='any').drop_duplicates().apply(tuple,axis=1).values edges.extend(rank_edges) return edges def get_nodes(df): '''génère la liste des différents taxons (noeuds) contenu dans le dataset''' nodes = [] for rank in ranks: rank_nodes = list(zip(df[rank].unique(), [rank for i in range(len(df[rank].unique()))])) nodes.extend(rank_nodes) return nodes # - edges = get_edges(data) nodes = get_nodes(data) nodes[:3] edges[:3] # # Instanciation d'un driver Neo4j # cf : https://neo4j.com/docs/api/python-driver/current/ # # /!\Vérifier préalablement depuis le Neo4j Browser que votre Bdd est bien en "running " # driver = GraphDatabase.driver( db_params['uri'], auth=(db_params['user'], db_params['password']) ) # ### "Helper functions"pour créer le graphe # cf Cypher 
cheat-sheet : https://mpolinowski.github.io/neo-4-j-cypher-cheat-sheet # + def create_node(tx, name, rank): '''créer un Noeud de type `Entity` ayant pour attributs un nom et un rang taxonomique''' return tx.run( "CREATE (a:Entity {name: $name, rank:$rank}) RETURN id(a)", name=name, rank=rank ).single().value() def add_children(tx, name, child): '''lie 2 noeuds type `Entity` par une relation `HAS_CHILD` (arête orientée)''' tx.run('''MATCH (a: Entity) MATCH (b: Entity) WHERE a.name = $name AND b.name =$child AND a.name <> b.name CREATE (a)-[:HAS_CHILD]->(b)''', name=name, child=child) # - # ### Ecriture en Base (Un peu long) with driver.session() as session: for node in tqdm(nodes, desc='NODES '): session.write_transaction(create_node, node[0], node[1]) for edge in tqdm(edges, desc ="EDGES "): session.write_transaction(add_children, edge[0], edge[1]) driver.close() # ### Requetage avec py2neo # cf: https://py2neo.org/2020.0/ from py2neo import Graph, Node, Relationship from py2neo.matching import * from py2neo.ogm import Model, Property graph = Graph(db_params['uri'], user=db_params['user'], password=db_params['password']) graph.nodes.match("Node", name="Animalia").first()
notebooks/populate_neoDB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #hide #default_exp ingest from nbdev.showdoc import * from fastcore.test import * # + #export import re import datetime import sc2reader import pymongo import sc2reaper.init_ingest as ings # Python std_lib dependencies from typing import Generator, Tuple from dataclasses import dataclass, asdict from pathlib import Path from datetime import datetime # Import dependencies from this package from sc2trainingg.load_config import * # - # # The `ingest` Module # # > This module contains the tools needed to extract game-play data from a set of StarCraft II replays. This process entails extracting various features that describe each players' performance during a 1v1 online match and organising all the information in a set of collections that compose a MongoDB database. # # # `ingest` uses three modules to carry on its principal functions. # # 1. It uses [MongoDB](https://www.mongodb.com/) and [pymongo](https://pymongo.readthedocs.io/en/stable/) to configure a document-based database where it stores the information of the replays. # 2. It uses [sc2reaper (<NAME> & <NAME>, 2019)](https://github.com/miguelgondu/sc2reaper) to extract and load various default collections of information to the database (i.e. actions, players, replays, scores, and states). # 3. It uses [sc2reader (Leung, 2020)](https://pypi.org/project/sc2reader/#history) to extract information about the players' username so that the replays can be grouped by players later. In this case, `ingest` stores this extra information in an additional collection (i.e. replays_info) which extends the database where the previous collections exist. # # This module organises the gameplay data into the following collections: # * actions # * players # * replays = summary of the replays that have been processed. 
# * states
# * scores
# * replays_info = summary of the replays that have been processed that includes usernames.
#
# This database is built using two functions defined in this module; `build_replay_info` and `build_reaper_collections` (see below).

# ## Requirements for Proper Functioning

# Before anything else, this module looks for the local `config.json` file and loads its information into a `Config_settings` object using the `load_configurations` function from the `load_config` module.
#
# With this information at hand, the module configures the MongoDB client it needs to store the replay data.

# +
#exports
# Definition of the initial data needed to function
# NOTE(review): machine-specific absolute path — confirm/parameterise before
# running this notebook on another machine.
CONFIG_PATH = Path("/Users/david/Documents/phdcode/sc2trainingg") / "config.json"
CONFIG = load_configurations(CONFIG_PATH)

# Define the client and data base to work with MongoDB
DB_Client = pymongo.MongoClient(CONFIG.port_address, CONFIG.port_number)
DB = DB_Client[CONFIG.db_name]
# Collection holding the replay summaries produced by this module.
replays_info = DB['replays_info']

# Define the dependency the default to sc2reader
# Fail fast if the configured replay directory does not exist.
assert Path(CONFIG.replay_path).exists(), "Invalid replay path"
REPLAY_GEN = sc2reader.load_replays(CONFIG.replay_path)
# -

# ### Information Management

# Following a functional programming approach in the development of this project, two frozen dataclasses are defined to ensure immutability during the processing of the information; `Replay_data` and `Player_data`.

#
#export
@dataclass(frozen=True)
class Player_data:
    """
    Immutable dataclass that contains information that describes a player's
    attributes in a match.

    *Attributes:*
    - name (str): The player's user name.
    - number (int): Player number in the match. In a 1v1 match there would be a Player 1 and 2.
    - race (str): The game race (Protoss, Terran, Zerg) with which the player played this match.
    - result (str): Variable describing whether the player was the match's winner ('Win') or loser ('Loss').
    """
    name: str
    number: int
    race: str
    result: str


show_doc(Player_data, title_level=4)

# #
#export
@dataclass(frozen=True)
class Replay_data:
    """
    Immutable dataclass that contains information summarising a match's main
    attributes.

    *Attributes:*
    - replay_name (str): Absolute path of where the Replay was stored when uploaded.
    - replay_id (str): Name of the SC2Replay file.
    - date_time (datetime): Date and time when the match was played and recorded.
    - match_type (str): Describes the team configuration of the match (eg '1v1', '2v2').
    - game_release (str): Version and patch number for the game release where the match played.
    - map_name (str): Name of the match's map.
    - category (str): Describes if the match was 'Ladder' or other type of match.
    - winner (str): User name of the match's winner
    - players (Tuple[Player_data, ...]): Summarised information of the match's players (see Player_data class).
    """
    replay_name: str
    replay_id: str
    date_time: datetime
    match_type: str
    game_release: str
    map_name: str
    category: str
    winner: str
    players: Tuple[Player_data, ...]


show_doc(Replay_data, title_level=4)

# #
# ## Module's Functions
#
# As explained above, the module uses the functions `build_replay_info` and `build_reaper_collections` to construct a document-based database. Of these functions, the former is a custom function, inspired by `sc2reaper`'s information extraction and organisation processes, that selects rough information from the replay file using `sc2reader` and organises it inside the replay_info collection. The latter uses `sc2reaper` to create the rest of the data collections in the database.

# ### Auxiliary Functions

# Apart from `build_replay_info` and `build_reaper_collections`, this module defines a number of auxiliary functions that are used within those primary functions.
#
# Here is a brief summary of those functions.

#export
""" These auxiliar functions exists to assist the module's main functions.
""" # Functions that format data according to the dataclasses def extend_player_info(participant: sc2reader.objects.Participant) -> Player_data: ''' Extracts the players' data from a Participant Object, into a Player_data instance. *Args:* - participant (sc2reader.objects.Participant): Participant object containing all data related to a SC2Player *Returns:* - Player_data: Summary of a player's attributes on a match. ''' return Player_data( participant.name, participant.pid, participant.play_race, participant.result ) show_doc(extend_player_info, title_level=4) #export def get_replay_info(replay: sc2reader.resources.Replay) -> Replay_data: ''' Replay_data dataclass instance with a replay's general information. *Args:* - replay (sc2reader.resources.Replay): Replay object to be analysed. *Returns:* - Replay_data Summary of a matches main descriptive information. ''' file_name_regex = re.compile(r'[^\\]*[.]SC2Replay$') # Collect information about the match in a document. return Replay_data( replay.filename, file_name_regex.search(replay.filename).group(), replay.start_time, replay.type, replay.release_string, replay.map_name, replay.category, replay.winner.players[0].name, tuple(extend_player_info(player) for player in tuple(replay.players)) ) show_doc(get_replay_info, title_level=4) #export # Functions that build the Collections within the database def not_replay_duplicate(replay: sc2reader.resources.Replay, collection_: pymongo.collection.Collection = replays_info) -> bool: ''' Verify that the replay does not exist in a collection. *Args:* - replay (sc2reader.resources.Replay): The replay being cheked - collection_ (pymongo.collection.Collection): The collection where the existance check is being performed. *Returns:* - bool: True if the replay is not in the collection, False if it is. 
''' if not collection_.count_documents( {'replay_name': replay.filename}, limit = 1 ): print(f'New replay found: {Path(replay.filename).name} \n adding to replay_info collection.') return True else: print(f'{replay.filename} already exists in the replay_info collection.') return False show_doc(not_replay_duplicate, title_level=4) #export def get_replays_data_set(rp_gen: Generator, collection_: pymongo.collection.Collection) -> Generator: ''' Build a generator that can yield a group of Replay_data instances that represent a the descriptive information of a set of replays that are found by an sc2reader replay generator and that have not been already added to a specific collection. *Args:* - rp_gen (Generator): a sc2reader.resources.Replay generator that yields the replays found in the CONFIG.replay_path. - collection_ (pymongo.collection.Collection): the database collection that could contain the replays Returns: - Generator: Yields Replay_data instances. ''' return (get_replay_info(replay) for replay in rp_gen if not_replay_duplicate(replay, collection_)) show_doc(get_replays_data_set, title_level=4) # ### Main Functions `build_replay_info` and `build_reaper_collections` #export def build_replay_info( rp_gen: Generator = REPLAY_GEN, db_collection:pymongo.collection.Collection = replays_info ) -> bool: ''' Triggers the search for new replays at CONFIG.replay_path. Adds the information description of the replays to the a data collection within a MongoDB data base, if they are not in the database already. *Args:* - rp_gen (Generator = REPLAY_GEN): sc2reader.resources.Replay generator that yields the replays found in the CONFIG.replay_path. - db_collection (pymongo.collection.Collection = replays_info): the database where the function adds the new documents. *Returns:* - bool: True if new replays were found and added to the replay_info collection, False otherwise. 
''' replays_data_set = [asdict(replay_data) for replay_data in get_replays_data_set(rp_gen, db_collection) if replay_data != None] if replays_data_set: db_collection.insert_many(replays_data_set) return True else: print(f'No new replays at {CONFIG.replay_path}') return False show_doc(build_replay_info, title_level=4) #export def build_reaper_collections() -> bool: """ Calls the ingest function on the sc2reaper package. Make sure you install the package from https://github.com/miguelgondu/sc2reaper in your environment before running. *Returns:* - bool: True if new replays were found and added to the multiple collections defined by sc2reaper, False otherwise. *Raises:* - ImportError: If sc2reaper is not installed in the environment. """ try: ings.ingest(CONFIG.replay_path, 4) return True except ImportError as ime: print("This program needs sc2reaper to be installed before running.") print("Check install instructions at https://github.com/miguelgondu/sc2reaper") raise ime except ValueError as vale: # If ing.ingest raises a ValueError asume reaper did not find new replays. print("No new .SC2Replays found by sc2reaper") return False show_doc(build_reaper_collections, title_level=4) # ## Execution Example # test_eq(build_replay_info(), True) #hide import sys sys.argv = sys.argv[:1] test_eq(build_reaper_collections(), True) #hide from nbdev.export import notebook2script notebook2script()
02_ingest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # $\color{red}{Axiom}$ # > ## axiom, postulate, assumption # >> ### is a statement(true or false) that is taken to be true # - # # $\color{red}{Theorem}$ # > ## is a statement that has been proved # # Norm # > ### distance from origin # >> ### $ ||x|| := |x|$ # >> ### $ ||x|| := \sqrt{{x_1}^2 + .. + {x_n}^2}$ # >> ### $ ||v|| = \sqrt{v\cdot v}$ # >> ### $ ||q|| = \sqrt{qq^{\star}} = \sqrt{q^{\star}q} = \sqrt{a^2+b^2+c^2+d^2}$ # >> ### $ ||z|| = \sqrt{zz^{\star}} = \sqrt{z^{\star}z} = \sqrt{a^2+b^2}$ # > ### $ # ^{f: \mathbb{V} \to \mathbb{R}}_{}{}^{}_{} # $ # # $\color{red}{\text{vector space}}$ # > ## = Linear space # >> ## a set of objects must satisfy vector axioms # > # vector # >> ## an element of a vector space # >> ## with both magnitude(or length) and direction # >> ## notation # >>> ### point A to point B : $ \vec{AB} = \vec{a}$ # # > ### A vector space $V$ is $\color{magenta}{any}$ set of objects $P,Q,R,...$ satifying: # >> ### $\color{magenta}{\text{ Scalar multiplication } sP}$ and $\color{magenta}{\text { vector addition } P+Q}$ are defined. # >> ### there is a special object 0. # > ### addition # >> ### $ # P + Q = Q + P, (P + Q) + R = P + (Q + R), P + I = P, P + P^{-1} = 0$ # > ### scalar multiplication # >> ### $ # 0P = 0, IP = P, a(bQ) = (ab)Q$ # > ### super position # >> ### $ # (a+b)P = aP + bP # $ # # $\color{red}{multivectors}$ # > ### Four kinds in G3 # >> ### 1. scalars # >> ### 2. vectors # >> ### 3. bivectors # >> ### 4. trivectors # > ### The objects in $\mathbb G^3$ # >> ### form # >>> ### $ M = s + v + B + T$ # >>>> ### where s is a scalar, v a vector, B a bivector, T a trivectr. # >>>> ### M is called a $\color{magenta}{multivector}$.(v is a vector and multivector.) 
# > ### The multivectors in $\mathbb G^3$ form a vector space # > ## 1. scalar # >> ### an oriented $\color{magenta}{\text{ magnitude}}$ # >> ### are not elements of $\mathbb R^3$ # >>> ### but scalars multiply vectors in $\mathbb R^3$ # > ## 2. Vectors # >> ### an oriented $\color{red}{segment}$ of $\color {magenta}{ \text {a line }}$ # >> ### size # >>> ### length $|V|$ # >> ### Orientation: arrow head # >> ### 1 D object # # > ## 3. Bivector # >> ### an oriented $\color{red}{segment}$ of $\color{magenta}{\text{ a plane}}$ # >>> ### Size: area $|B|$. # >>> ### Orientation: arrow head. # >> ### considered unchanged if: # >>> ### moved paralled to it self; # >>> ### rotated in its plane; # >>> ### reshaped, retaining $|B|$ # >>>> ### have No Shape # >> ### scalar multiplication # >> ### also add vivectors # >>> form a vector space # > ## 4. Triavector # >> ### an oriented segment of $\color{magenta}{space}$ # >> ### Size: volumn $|T|$ # >> ### Orientation: left/right handed. # >> ### T considered unchanged if: # >>> ### moved/rotated/reshaped, retaining $|T|$ # >> ### can define $cT, T_1 + T_2.$ # >>> ### T's form a vector space($\color{magenta}{Dimention 1}$). 
# >>> ### Every trivector T is a scalar multiple of a given trivector $T_0 \neq 0$
# >> ### Called pseudoscalars
#
# # Basis
# > ### $\{e_1,\; e_2,\; e_3\}$ is an orthonormal basis for $\mathbb R^3$
# > ### A $\mathbb{G}^3$ Basis
# >> ### $\{1\}$ is a scalar basis: 1
# >> ### $\{e_1,\; e_2,\; e_3\}$ is a vector basis : 3
# >> ### $\{e_1 \wedge e_2,\; e_1 \wedge e_3,\; e_2 \wedge e_3\}$ is a bivector basis : 3
# >> ### $\{e_1 \wedge e_2 \wedge e_3\}$ is a trivector basis : 1
# > ### Dimension in $\mathbb G^3$
# >> ### 1 + 3 + 3 + 1 = 8
#
# # Inner Product ($u \cdot v$)
# > ### definition
# >> ### $ u \cdot v = |u||v| cos \theta $
# >>> ### this is a scalar
# > ### properties
# >> ### $ u \parallel v \implies u \cdot v = |u||v|.\\
# \quad u \parallel u \implies u \cdot u = |u|^2.\\
# u \perp v \implies u \cdot v = 0.\\
# v \cdot u = u \cdot v \text{ (Commute)}$
#
# # Outer Product ($u \wedge v$)
# > ### definition
# >> ### right hand rule
# >> ### $ |u \wedge v| = |u||v|sin\theta$
# > ### properties
# >> ### $
# u \parallel v \implies u \wedge v = 0. \\
# \quad u \parallel u \implies u \wedge u = 0.
\\
# u \perp v \implies |u \wedge v| = |u||v|.\\
# u \wedge v = - \big(v \wedge u\big) \text{ :Anticommute}
# $
#
# # Wedge product vs Cross product
# > ### $ a \wedge b = \left | \begin{array}{}e_{23} & e_{31} & e_{12} \\
# a_1 & a_2 & a_3 \\
# b_1 & b_2 & b_3 \\ \end{array}\right |
# \iff a \times b = \left | \begin{array}{}i & j & k \\
# a_1 & a_2 & a_3 \\
# b_1 & b_2 & b_3 \\ \end{array}\right |$

# ---

# # 2D $\color{red}{complex}$ number
# > # rotation of $\theta$ angle
# >> ## $\color{red}{v' = e^{i\theta} v} $
# > ### $
# u = a + bi = r_u(cos\alpha + i\, sin\alpha) = r_ue^{i\alpha} \\
# v = c + di = r_v(cos\beta + i\, sin\beta) = r_ve^{i\beta} \\
# \\ uv = r_ur_v \Big[
# (cos\alpha + i\; sin\alpha ) (cos\beta + i\; sin\beta) \Big]
# = r_ur_v\Big[ \big( \; cos\alpha\; cos\beta - \; sin\alpha\; sin\beta \big) + \big( cos\alpha\; sin\beta + sin\alpha\; cos\beta \big)i \Big]
# \\
# uv = (a + bi) (c + di) = (ac - bd) + (ad + bc)i
# \\
# \therefore uv = r_ur_v\Big[
# (cos\alpha\; cos\beta - sin\alpha \; sin\beta) +
# (cos\alpha\; sin\beta + cos\beta \; sin\alpha)i \Big]
# \\ =
# r_ur_v\Big[\big(cos(\alpha + \beta)\big) + \big( sin(\alpha + \beta)\big)i \Big]
# $
# >> ### Geometric Algebra
# $$\require{mhchem}$$
# $\color{magenta}{\large \ce{^{i}_{2d}R^{e^{\theta i}(a + bi)}} = \ce{ e^{\theta i} \;q^{a + bi}}}
# \\
# =
# \color{magenta}{\large \ce{^{i}_{2d}R^{(a + bi)e^{\theta i}}} = \ce{ q^{a + bi} \;q^{e^{i\theta}}}}
# \\
# \large \ce{^{ga}_{2d}R^{(a + bI)e^{\theta I}}} = \ce{q^{a e_1 + b e_2}\; e^{\theta I}_{e_1e_2}}
# $
# > ## $v' = e^{i\theta}v = (cos\theta + sin\theta i )(a + bi) = (a cos\theta -bsin\theta) + (bcos\theta + asin\theta)i $
# > ## $v' = v\;e^{i\theta} = (a+bi)(cos\theta + sin\theta i ) = (a cos\theta -bsin\theta) + (bcos\theta + asin\theta)i $
# >> ## $ \therefore e^{i\theta}\,v = v\,e^{i\theta}$
#
# # 2D GA
# > ### $\require{mhchem}
# u = u_1e_1 + u_2e_2,\quad v = v_1e_1 + v_2e_2
# \\
# uv = u \cdot v + u \wedge v
# = |u||v|cos \theta + |u||v|sin
\theta e_1e_2 # = |u||v|\big(cos \theta + sin\theta e_1e_2 \big) # = |u||v|e^{e_1e_2\theta} # = |u||v|e^{I\theta} # \\ # vu = v \cdot u + v \wedge u # = |u||v|cos \theta + |u||v|sin \theta e_2e_1 # = |u||v|cos \theta + |u||v|sin (-\theta (e_1e_2)) # = |u||v|\big(cos (-\theta) + sin(-\theta e_1e_2) \big) # = |u||v|e^{-(e_1e_2\theta)} # = |u||v|e^{-I\theta} # \\ # \color{red}{\therefore} uv = -vu \iff # ve^{I\theta} = e^{-I\theta}v \iff \ce{^{Rot}_{ga_2}g^{ve^{I\theta}}} = - \ce{^{Rot}_{ga_2}g^{$e^{I\theta}v$}} # $ # # --- # # 3D $\color{red}{quaternion}$ # > ## rotation of $\theta$ # >> ## special # >>> ## $ # cos \theta \; q^v_{\flat{(v',v)}} + sin\theta \; q^{\perp}_{(\hat{\uparrow}\times v)}\\ # cos \theta \; q^v_{\flat{(v',v)}} + sin\theta \; q^{\perp}_{\hat{\uparrow}v}\\ # \therefore cos \theta \; \big( q^v \big) + sin\theta \; \big(q^{\hat{\uparrow}}\times q^{v} \big)\\ # \therefore cos \theta \; \big( q^v \big) + sin\theta \; \big( q^{\hat{\uparrow}} q^{v} \big)\\ # \therefore q^{v'} = e^{(\hat{\uparrow}\theta)}q^v # $ # >> ## general # >>> ## $ \large q^{v'} = e^{\hat{\uparrow}\frac{\theta}{2}} \, q^{v} \, e^{-\hat{\uparrow}\frac{\theta}{2}} $ # >>> ## $ q^{v'} = # q^{\parallel}_{\flat(v,\uparrow)} + # cos\theta \, q^{\perp}_{(v-\parallel)} + # sin\theta \, q^{\perp'}_{(\hat{\parallel}\times \perp)} \\ # q^{\parallel}_{\flat(v,\uparrow)} + # cos\theta \, q^{\perp}_{(v-\parallel)} + # sin\theta \, q^{\perp'}_{(\hat{\parallel}\times v)} \\ # q^{\parallel}_{\flat{(v,\hat{\uparrow}})} + e^{\hat{\uparrow}\theta}\; q^{\perp}_{v-\parallel}\\ # e^{\hat{\uparrow}\frac{\theta}{2}}\; e^{-\hat{\uparrow}\frac{\theta}{2}}\; q^{\parallel}_{\flat{(v,\hat{\uparrow}})} + # e^{\hat{\uparrow}\frac{\theta}{2}} e^{\hat{\uparrow}\frac{\theta}{2}}\; q^{\perp}_{v-\parallel} # \\ # e^{\hat{\uparrow}\frac{\theta}{2}} q^{\parallel}_{\hat{\uparrow}\flat{(v,\hat{\uparrow}})} e^{-\hat{\uparrow}\frac{\theta}{2}} + # e^{\hat{\uparrow}\frac{\theta}{2}} 
q^{\perp}_{(v-\parallel)}e^{-\hat{arrow}\frac{\theta}{2}} # \\ # \color{red}{e^{\hat{\uparrow}\frac{\theta}{2}} \big( q^{\parallel} +q^{\perp} \big) # e^{-\hat{\uparrow}\frac{\theta}{2}}} \\ # \color{red}{e^{\hat{\uparrow}\frac{\theta}{2}} \big( q^{v} \big) # e^{-\hat{\uparrow}\frac{\theta}{2}}} # $ # --- # # Geometric Product ($uv$) # > ### definiton # >> ### $ uv = u \cdot v + u \wedge v = |u||v|e^{i\theta}$ # >>> ### in 2D complex number # >>>> ### $ uv = a_1 b_1 - a_2 b_2 + (a_1 b_2 + a_2b_1)i $ # >>> ### in quaternion # >>>> ### $ uv = a_1 b_1 - (a_v \cdot b_v) + a_1 b_v + b_1 a_v + (a_v \times b_v) \\ # \text{if } a_1, b_1 = 0 \text{ then } \\ # \therefore q^v_aq^v_b = - (q^v_a \cdot q^v_b) + (q^v_a \times q^v_b)$ # > # >> ### $ # vu = v \cdot u + v \wedge u.\\ # vu = u \cdot v - u \wedge v.\\ # \because uv + vu = 2(u \cdot v) \\ # \therefore u \cdot v = \frac{uv + vu}{2} \\ # \because uv - vu = 2(u \wedge v) \\ # \therefore u \wedge v = \frac{uv - vu}{2}$ # >> ### $uv = |u||v|e^{i\theta}$ # >>> ### $\because u \cdot v = |u||v|cos\theta \\ # \because u \wedge v = |u||v|sin\theta \,e_1e_2\\ # \therefore uv = |u||v|(cos\theta + sin\theta\, e_1 e_2) \\ # \therefore uv = |u||v|e^{i\theta}$ # >>>> ### $\color{magenta}{\text{ if } |u| = |v|}$ # >>>>> ### $ # uv = |u||v|e^{i\theta} \\ # uuv = u|u||v|e^{i\theta} \\ # u^2v = u|u||v|e^{i\theta} \\ # (u\cdot u)v = u|u||v|e^{i\theta} \\ # |u||u|v = u|u||v|e^{i\theta} \\ # $ # > ### properties # >> ### $ # u \parallel v \implies uv = u \cdot v = |u||v|. \\ # \quad uv = vu \\ # u \perp v \implies uv = u \wedge v.\\ # \quad vu = -uv. # $ # # Pseudoscalars $ = I = q^{\hat{\uparrow}}$ # > ### Highest dimensional object in $\mathbb G^3:$ # >> ### Trivectors. Form a 1-dimentional vector space. # > ### Highest dimensional objects in $\mathbb G^2:$ # >> ### Bivectors. Form a 1-dimentional vector space. # > ### Highest dimensional objets in $\mathbb R^{2Ds} (\text{ GA of 2D subspace of $\mathbb R^n)$}$ # >> ### Bivectors. 
Form a 1-dimensional vector space.
#
# # I(i)
# > ### definition(2D Unit Pseudoscalar i)
# >> ### $\{e_1, e_2\}:$ an orthonormal basis for a 2D subspace of $\mathbb R^n$
# >>> ### $i = e_1e_2 = e_1 \wedge e_2, \text{ a bivector}\\
# \because (e_1e_2)^2 = e_1e_2e_1e_2 = -e_1e_1e_2e_2 = -1$
#
# # Bivector Angle $i\theta$
# > ### definition (Bivector Angle $i\theta$)
# >> ### Let $\theta$ be an angle in a plane i.
# >> ### Call the bivector $i\theta$ an angle also.
# >>> ### $i\theta$ specifies both the plane i in which it resides and its size $\theta$
# > ### definition (Exponential $e^{i\theta}$)
# >> ### $ e^{i\theta} = cos\theta + i\, sin\theta$
#
#
# # uv
# > ### $
# uv = u \cdot v + u \wedge v = |u||v|cos\theta + |u||v|sin\theta I \\
# uv = |u||v|e^{I\theta} = |u||v|e^{\hat{\uparrow}\theta}
# $
# >> ### $\text{if } |u| = |v| \text { then} \\
# u(uv) = u(|u||v|e^{I\theta})\\
# (u \cdot u)v = u|u||v|e^{I\theta} \\
# |u||u|v = u|u||v|e^{I\theta} \\
# |u||v|v = u|u||v|e^{I\theta} \\
# v = ue^{I\theta} \text{ in special}\\
# \color{red}{ \therefore u' = ue^{I\theta}} \text{ in special}\\
# \color{red}{ \therefore u' = e^{i\theta}u^{(2D)}} \\
# \color{red}{ \therefore u' = e^{I\frac{\theta}{2}} u^{(3D)} e^{-I\frac{\theta}{2}}}\\
# v = ue^{I\theta} \text{ in general}\\
# $

# +
# uv commutate
python/GeometryAG/Terms.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Common external libraries import pandas as pd import numpy as np import sklearn # scikit-learn import requests from bs4 import BeautifulSoup from time import sleep # + # Visualization libraries # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots plt.rcParams['figure.figsize'] = [15, 5] from IPython import display from ipywidgets import interact, widgets # + # Common standard libraries import datetime import time import os # + # Setting plot appearance # See here for more options: https://matplotlib.org/users/customizing.html # %config InlineBackend.figure_format='retina' sns.set() # Revert to matplotlib defaults plt.rcParams['figure.figsize'] = (9, 6) plt.rcParams['axes.labelpad'] = 10 sns.set_style("darkgrid") # sns.set_context("poster", font_scale=1.0) # - # Managing maps import folium from folium import plugins from folium.plugins import MarkerCluster # + # Ignore warnings (don't display stderr) import warnings warnings.filterwarnings('ignore') # + # if local use, then check presence of local data file (default = 0) LOCAL = 1 # check for local data file and use it if it exists #LOCAL = 0 # don't check for local data file, but download from Zenodo # - # # Glass Submarine Project # ## Mortality Rate Analysis in Italy (2015-2020) # ## Time and Geographical plots generator # # This notebook uses the mortality rates data file from a subset of municipalities in the ANPR system</br> # and the municipality census information to produce a set of customizable time plots # # Version: 1.0-20200504 # ## Step 1: Load the data from Zenodo # + # if local is specified, then try to use a local file if it exists, else call the 
generator to produce a fresh dataset

import os.path

if LOCAL:
    if not os.path.isfile('mortalita_giornaliero_comune_20200515.xlsx'):
        # %run ./Italy_mortality_rates_dataset_generator_20200515.ipynb
else:
    # %run ./Italy_mortality_rates_dataset_generator_20200515.ipynb

print('Loading aggregated data file...')
df_2020 = pd.read_excel(io='mortalita_giornaliero_comune_20200515.xlsx')
# Drop the unnamed index column written into the Excel file.
df_2020 = df_2020.drop(labels='Unnamed: 0', axis=1)
print('Loaded %d records' %len(df_2020))
df_2020.head()
# -

# ## Step 2: Reduced data set (debugging)

# Keep only the columns used downstream: location names, age class, day,
# the six yearly death-count columns T_15..T_20, population and coordinates.
cols = ['NOME_REGIONE','NOME_PROVINCIA','NOME_COMUNE','CL_ETA','GE','T_15','T_16','T_17','T_18','T_19','T_20','POPULATION','LONGITUDE','LATITUDE']
comuni = df_2020.loc[:,cols]
comuni.head()

# ## Step 3: Consolidate at regional level (debugging)

# Time plot

# +
### Get Regionwise Data
def countrydata(df_cleaned,varia):
    # Sum the death counts of column `varia` per (region, day) pair and
    # return them indexed and sorted by ('NOME_REGIONE', 'GE').
    df_country=df_cleaned.groupby(['NOME_REGIONE','GE'])[varia].sum().reset_index()
    df_country=df_country.set_index(['NOME_REGIONE','GE'])
    df_country.index=df_country.index.set_levels([df_country.index.levels[0], df_country.index.levels[1]])
    df_country=df_country.sort_values(['NOME_REGIONE','GE'],ascending=True)
    # df_country=df_country.rename(columns={oldname:newname})
    return df_country

# One regional aggregation per year column (2015..2020).
dfbyregion15=countrydata(comuni,'T_15')
dfbyregion16=countrydata(comuni,'T_16')
dfbyregion17=countrydata(comuni,'T_17')
dfbyregion18=countrydata(comuni,'T_18')
dfbyregion19=countrydata(comuni,'T_19')
dfbyregion20=countrydata(comuni,'T_20')

# Left-join the six yearly series on the shared (region, day) index.
temp1=pd.merge(dfbyregion15,dfbyregion16,how='left',left_index=True,right_index=True)
temp2=pd.merge(temp1,dfbyregion17,how='left',left_index=True,right_index=True)
temp3=pd.merge(temp2,dfbyregion18,how='left',left_index=True,right_index=True)
temp4=pd.merge(temp3,dfbyregion19,how='left',left_index=True,right_index=True)
RegionConsolidated=pd.merge(temp4,dfbyregion19,how='left',left_index=True,right_index=True) if False else pd.merge(temp4,dfbyregion20,how='left',left_index=True,right_index=True)
#RegionConsolidated.head()

# National totals per day (sum over regions).
GlobalTotals=RegionConsolidated.reset_index().groupby('GE').sum()

fig = go.Figure()
fig.add_trace(go.Scatter(x=GlobalTotals.index,y=GlobalTotals[ 'T_15'],
                mode='markers',
                name='Deaths 2015',
                line=dict(color='royalblue',width=2)))
fig.add_trace(go.Scatter(x=GlobalTotals.index,y=GlobalTotals[ 'T_16'],
                mode='markers',
                name='Deaths 2016',
                line=dict(color='yellow',width=2)))
fig.add_trace(go.Scatter(x=GlobalTotals.index,y=GlobalTotals[ 'T_17'],
                mode='markers',
                name='Deaths 2017',
                line=dict(color='black',width=2)))
fig.add_trace(go.Scatter(x=GlobalTotals.index,y=GlobalTotals[ 'T_18'],
                mode='markers',
                name='Deaths 2018',
                line=dict(color='green',width=2)))
fig.add_trace(go.Scatter(x=GlobalTotals.index,y=GlobalTotals[ 'T_19'],
                mode='markers',
                name='Deaths 2019',
                line=dict(color='magenta',width=2)))
fig.add_trace(go.Scatter(x=GlobalTotals.index,y=GlobalTotals['T_20'],
                mode='markers',
                name='Deaths 2020',
                line=dict(color='red',width=2)))
fig.update_layout(showlegend=True)
# Log scale so the six yearly curves stay comparable.
fig.update_layout(yaxis_type="log")
# -

# ## Step 4: Top 10 Regions comparison 2019-2020

# +
# 2019 totals per region, sorted descending; drop aggregate rows if present.
TotalCasesCountry=RegionConsolidated.sum(level=0)['T_19'].reset_index().set_index('NOME_REGIONE')
TotalCasesCountry=TotalCasesCountry.sort_values(by='T_19',ascending=False)
TotalCasesCountry=TotalCasesCountry[~TotalCasesCountry.index.isin(['ALL','Others'])]
Top10countriesbycases=TotalCasesCountry.head(10)
TotalCasesCountrytop10=TotalCasesCountry.head(10)

# Same computation for 2020.
TotalCasesCountry20=RegionConsolidated.sum(level=0)['T_20'].reset_index().set_index('NOME_REGIONE')
TotalCasesCountry20=TotalCasesCountry20.sort_values(by='T_20',ascending=False)
TotalCasesCountry20=TotalCasesCountry20[~TotalCasesCountry20.index.isin(['ALL','Others'])]
Top10countriesbycases20=TotalCasesCountry20.head(10)
TotalCasesCountrytop1020=TotalCasesCountry20.head(10)

fig = go.Figure()
fig.add_trace(go.Bar(x=Top10countriesbycases20.index,
                     y=Top10countriesbycases20['T_20'],
                     text=Top10countriesbycases20['T_20'],
                     textposition='outside',
                     name='2020'))
fig.add_trace(go.Bar(x=Top10countriesbycases.index,
                     y=Top10countriesbycases['T_19'],
                     text=Top10countriesbycases['T_19'],
                     textposition='outside',
                     name='2019'))
fig.update_layout(title_text='Top 10 Regions by Deaths in 2019 and in 2020')
fig.update_yaxes(showticklabels=False)
fig.show()
# -

# ## Step 5: Top 10 Provinces comparison 2019-2020

# +
### Get Provincewise Data
def provinciadata(df_cleaned,varia):
    # Same aggregation as countrydata, one level deeper:
    # (region, province, day).
    df_country=df_cleaned.groupby(['NOME_REGIONE','NOME_PROVINCIA','GE'])[varia].sum().reset_index()
    df_country=df_country.set_index(['NOME_REGIONE','NOME_PROVINCIA','GE'])
    df_country.index=df_country.index.set_levels([df_country.index.levels[0], df_country.index.levels[1],df_country.index.levels[2]])
    df_country=df_country.sort_values(['NOME_REGIONE','NOME_PROVINCIA','GE'],ascending=True)
    # df_country=df_country.rename(columns={oldname:newname})
    return df_country

dfbyprovincia15=provinciadata(comuni,'T_15')
dfbyprovincia16=provinciadata(comuni,'T_16')
dfbyprovincia17=provinciadata(comuni,'T_17')
dfbyprovincia18=provinciadata(comuni,'T_18')
dfbyprovincia19=provinciadata(comuni,'T_19')
dfbyprovincia20=provinciadata(comuni,'T_20')

temp1=pd.merge(dfbyprovincia15,dfbyprovincia16,how='left',left_index=True,right_index=True)
temp2=pd.merge(temp1,dfbyprovincia17,how='left',left_index=True,right_index=True)
temp3=pd.merge(temp2,dfbyprovincia18,how='left',left_index=True,right_index=True)
temp4=pd.merge(temp3,dfbyprovincia19,how='left',left_index=True,right_index=True)
ProvinciaConsolidated=pd.merge(temp4,dfbyprovincia20,how='left',left_index=True,right_index=True)

# Province-level totals for 2019 (sum over days), top 10 by deaths.
TotalCasesProvince=ProvinciaConsolidated.sum(level=1)['T_19'].reset_index().set_index('NOME_PROVINCIA')
TotalCasesProvince=TotalCasesProvince.sort_values(by='T_19',ascending=False)
TotalCasesProvince=TotalCasesProvince[~TotalCasesProvince.index.isin(['ALL','Others'])]
Top10Provincebycases=TotalCasesProvince.head(10)
TotalCasesProvincetop10=TotalCasesProvince.head(10)

TotalCasesProvince20=ProvinciaConsolidated.sum(level=1)['T_20'].reset_index().set_index('NOME_PROVINCIA')
TotalCasesProvince20=TotalCasesProvince20.sort_values(by='T_20',ascending=False)
TotalCasesProvince20=TotalCasesProvince20[~TotalCasesProvince20.index.isin(['ALL','Others'])]
Top10Provincebycases20=TotalCasesProvince20.head(10)
TotalCasesProvincetop1020=TotalCasesProvince20.head(10)

fig = go.Figure()
fig.add_trace(go.Bar(x=Top10Provincebycases20.index,
                     y=Top10Provincebycases20['T_20'],
                     text=Top10Provincebycases20['T_20'],
                     textposition='outside',
                     name='2020'))
fig.add_trace(go.Bar(x=Top10Provincebycases.index,
                     y=Top10Provincebycases['T_19'],
                     text=Top10Provincebycases['T_19'],
                     textposition='outside',
                     name='2019'))
fig.update_layout(title_text='Top 10 Provinces by Deaths in 2019 and in 2020')
fig.update_yaxes(showticklabels=False)
fig.show()
# -

# ## Step 6: Local data for geographical representation

# +
### Get Comunewise Data
def comunedata(df_cleaned,varia):
    # Finest granularity: (region, province, comune, coords, population, day).
    df_country=df_cleaned.groupby(['NOME_REGIONE','NOME_PROVINCIA','NOME_COMUNE','LONGITUDE','LATITUDE','POPULATION','GE'])[varia].sum().reset_index()
    df_country=df_country.set_index(['NOME_REGIONE','NOME_PROVINCIA','NOME_COMUNE','LONGITUDE','LATITUDE','POPULATION','GE'])
    df_country.index=df_country.index.set_levels([df_country.index.levels[0], df_country.index.levels[1],df_country.index.levels[2],df_country.index.levels[3],df_country.index.levels[4],df_country.index.levels[5],df_country.index.levels[6]])
    df_country=df_country.sort_values(['NOME_REGIONE','NOME_PROVINCIA','NOME_COMUNE','LONGITUDE','LATITUDE','POPULATION','GE'],ascending=True)
    # df_country=df_country.rename(columns={oldname:newname})
    return df_country

dfbycomune15=comunedata(comuni,'T_15')
dfbycomune16=comunedata(comuni,'T_16')
dfbycomune17=comunedata(comuni,'T_17')
dfbycomune18=comunedata(comuni,'T_18')
dfbycomune19=comunedata(comuni,'T_19')
dfbycomune20=comunedata(comuni,'T_20')

temp1=pd.merge(dfbycomune15,dfbycomune16,how='left',left_index=True,right_index=True)
temp2=pd.merge(temp1,dfbycomune17,how='left',left_index=True,right_index=True)
temp3=pd.merge(temp2,dfbycomune18,how='left',left_index=True,right_index=True) temp4=pd.merge(temp3,dfbycomune19,how='left',left_index=True,right_index=True) ComuniConsolidated=pd.merge(temp4,dfbycomune20,how='left',left_index=True,right_index=True) # df_new = comuni.groupby(['NOME_REGIONE','NOME_PROVINCIA','NOME_COMUNE','LONGITUDE','LATITUDE','POPULATION']).agg({'T_15':sum}) df_new = comuni.groupby(['NOME_REGIONE','NOME_PROVINCIA','NOME_COMUNE','LONGITUDE','LATITUDE','POPULATION']).agg({'T_15':sum,'T_16':sum,'T_17':sum,'T_18':sum,'T_19':sum,'T_20':sum}) # ComuniConsolidated.head() df_new.head() # - # ## Step 7: Prepare to plot maps # + df_geo = df_new.reset_index() # df_geo.head() df_clean = df_geo[(df_geo.LONGITUDE > 0) & (df_geo.LATITUDE > 0)] long19 = list(df_clean.iloc[:,3]) lat19 = list(df_clean.iloc[:,4]) plt.plot(long19,lat19, 'o') plt.title('Italian places') plt.xlabel('Longitude') plt.ylabel('Latitude') plt.axis('equal') plt.show() # - # ## Step 8: First map # + # BBox = ((df_clean.LONGITUDE.min(), df_clean.LONGITUDE.max(), # df_clean.LATITUDE.min(), df_clean.LATITUDE.max())) # print(BBox) my_map1 = folium.Map(location = [42,12], zoom_start = 6 ) coordinates =[] # for la,lo in zip(df_clean.LATITUDE,df_clean.LONGITUDE): coordinates.append([la,lo]) locations = list(zip(df_clean.LATITUDE,df_clean.LONGITUDE)) icons = [folium.Icon(icon="car", prefix="fa") for _ in range(len(locations))] for index, row in df_clean.iterrows(): perc_deaths = 100*(row["T_20"]/row["POPULATION"]) if row["T_20"]>0: increase = 100*((row["T_20"]-row["T_19"])/row["T_20"]) if increase>0 : radio = row["T_20"]/1000. else: radio = 0 else: increase = 0.00 radio = 0 # generate the popup message that is shown on click. 
# --- continuation of the Step 8 per-comune loop opened above ---
    # Popup shown when the marker is clicked.
    # NOTE(review): the backslashes in \( \%\) are not valid HTML/LaTeX escapes
    # and will show up literally in the popup -- confirm intended.
    popup_text = "{}<br> Population: {}<br> Total Deaths 2020: {} \({} \%\) <br> Increase 2019-20 : {} \%"
    popup_text = popup_text.format(row["NOME_COMUNE"],
                                   row["POPULATION"],
                                   row["T_20"],
                                   format(round(perc_deaths,2)),
                                   format(round(increase,2)))
    # radius of circles
    #radio = row["T_20"]

    # choose the color of the marker: tangerine when 2020 deaths exceed 1% of
    # the population, teal otherwise.
    if perc_deaths>1.:
        # color="#FFCE00" # orange
        # color="#007849" # green
        color="#E37222" # tangerine
    else:
        # color="#0375B4" # blue
        # color="#FFCE00" # yellow
        color="#0A8A9F" # teal

    # add marker to the map
    folium.CircleMarker(location=(row["LATITUDE"], row["LONGITUDE"]),
                        radius=radio,
                        color=color,
                        popup=popup_text,
                        fill=True).add_to(my_map1)

my_map1
# -

# ## Step 9: Second map with groups

# +
m = folium.Map(location = [42,12], zoom_start = 6 )

coordinates =[]
# for la,lo in zip(df_clean.LATITUDE,df_clean.LONGITUDE): coordinates.append([la,lo])
locations = list(zip(df_clean.LATITUDE,df_clean.LONGITUDE))
icons = [folium.Icon(icon="car", prefix="fa") for _ in range(len(locations))]

# Parent feature group with one toggleable sub-group per year (2019 / 2020).
fg = folium.FeatureGroup(name='groups')
m.add_child(fg)
g1 = plugins.FeatureGroupSubGroup(fg, '2019')
m.add_child(g1)
g2 = plugins.FeatureGroupSubGroup(fg, '2020')
m.add_child(g2)

for index, row in df_clean.iterrows():
    # Same per-row statistics as in Step 8.
    perc_deaths = 100*(row["T_20"]/row["POPULATION"])
    if row["T_20"]>0:
        increase = 100*((row["T_20"]-row["T_19"])/row["T_20"])
        if increase>0 :
            radio = row["T_20"]/1000.
        else:
            radio = 0
    else:
        increase = 0.00
        radio = 0
    radio2 = row["T_19"]/1000  # radius for the 2019 marker
    # generate the popup message that is shown on click.
popup_text = "{}<br> Population: {}<br> Total Deaths 2020: {} \({} \%\) <br> Increase 2019-20 : {} \%" popup_text = popup_text.format(row["NOME_COMUNE"], row["POPULATION"], row["T_20"], format(round(perc_deaths,2)), format(round(increase,2))) popup_text2 = "{}<br> Population: {}<br> Total Deaths 2019: {} \({} \%\) <br> Increase 2019-20 : {} \%" popup_text2 = popup_text2.format(row["NOME_COMUNE"], row["POPULATION"], row["T_19"], format(round(perc_deaths,2)), format(round(increase,2))) # radius of circles #radio = row["T_20"] # choose the color of the marker if perc_deaths>1.: # color="#FFCE00" # orange # color="#007849" # green color="#E37222" # tangerine else: # color="#0375B4" # blue # color="#FFCE00" # yellow color="#0A8A9F" # teal # add marker to the map folium.CircleMarker(location=(row["LATITUDE"], row["LONGITUDE"]), radius=radio, color=color, popup=popup_text, fill=True).add_to(g2) folium.CircleMarker(location=(row["LATITUDE"], row["LONGITUDE"]), radius=radio2, color='red', popup=popup_text2, fill=True).add_to(g1) folium.LayerControl(collapsed=False).add_to(m) # m.save(os.path.join('results', 'Plugins_8.html')) m # my_map1 # + from folium.plugins import HeatMap my_map2 = folium.Map(location = [42,12], zoom_start = 6 ) coordinates =[] pesi = [] # for la,lo in zip(df_clean.LATITUDE,df_clean.LONGITUDE): coordinates.append([la,lo]) for la,lo,peso in zip(df_clean.LATITUDE,df_clean.LONGITUDE,df_clean.T_20/df_clean.POPULATION): pesi.append([la,lo,peso]) popuplist = [] locations = list(zip(df_clean.LATITUDE,df_clean.LONGITUDE)) icons = [folium.Icon(icon="ambulance", prefix="fa") for _ in range(len(locations))] for index, row in df_clean.iterrows(): popup_text10 = "{}<br> Population: {}<br> Total Deaths 2020: {} \({} \%\) <br> Increase 2019-20 : {} \%" popup_text10 = popup_text10.format(row["NOME_COMUNE"], row["POPULATION"], row["T_20"], format(round(perc_deaths,2)), format(round(increase,2))) popuplist.append(popup_text10) for index, row in df_clean.iterrows(): 
# --- body of the final per-comune loop opened above ---
    perc_deaths = 100*(row["T_20"]/row["POPULATION"])  # 2020 deaths as % of population
    if row["T_20"]>0:
        # NOTE(review): denominator is T_20, not the 2019 baseline -- confirm intended.
        increase = 100*((row["T_20"]-row["T_19"])/row["T_20"])
        if increase>0 :
            radio = row["T_20"]/1000.
        else:
            radio = 0
    else:
        increase = 0.00
        radio = 0
    # generate the popup message that is shown on click.
    popup_text = "{}<br> Population: {}<br> Total Deaths 2020: {} \({} \%\) <br> Increase 2019-20 : {} \%"
    popup_text = popup_text.format(row["NOME_COMUNE"], row["POPULATION"], row["T_20"], format(round(perc_deaths,2)), format(round(increase,2)))
    # radius of circles
    #radio = row["T_20"]
    # choose the color of the marker
    # NOTE(review): popup_text and color computed in this loop are never used --
    # the CircleMarker code below is commented out, so the loop currently has no
    # visible effect on the map.
    if perc_deaths>1.:
        # color="#FFCE00" # orange
        # color="#007849" # green
        color="#E37222" # tangerine
    else:
        # color="#0375B4" # blue
        # color="#FFCE00" # yellow
        color="#0A8A9F" # teal

# # add marker to the map
# folium.CircleMarker(location=(row["LATITUDE"],
#                               row["LONGITUDE"]),
#                     radius=1,
#                     color="#0A8A9F",
#                     popup=popup_text,
#                     fill=True).add_to(my_map2)

# plugins.MarkerCluster(data, popups=popups).add_to(m)
# Cluster all comune markers, each with its popup from popuplist.
cluster = MarkerCluster(locations=locations, popups=popuplist, icons=icons)
my_map2.add_child(cluster)

# Maximum mortality rate, used to normalise the heatmap intensity.
max_amount = float((df_clean['T_20']/df_clean['POPULATION']).max())
print(max_amount)

# hm_wide = HeatMap( list(zip(df_clean.LONGITUDE, df_clean.LONGITUDE, df_clean.T_20))).add_to(my_map2)#,
hm_wide = HeatMap(pesi,
                  min_opacity=0.2,
                  max_val=max_amount,
                  radius=10, blur=5,
                  max_zoom=1,
                 ).add_to(my_map2)
#my_map2.add_child(hm_wide)
my_map2
CERGAS/Notebooks/Italy_mortality_rates_correlations-Geo.ipynb