code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="8fTYpzdVqDh2" import keras import numpy as np import pickle import tensorflow as tf from keras import backend as K, regularizers, optimizers from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D, InputLayer from keras.layers.normalization import BatchNormalization from keras.preprocessing.image import ImageDataGenerator from matplotlib import pyplot as plt from sklearn import svm from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix, classification_report from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier # + colab={} colab_type="code" id="pSz0JZK2qDil" with open(PATH + "train_image.pkl", 'rb') as f: X = np.array(pickle.load(f)) with open(PATH + 'train_label.pkl', 'rb') as f: Y = np.array(pickle.load(f)) X = X/255 # + colab={} colab_type="code" id="ENSRzkNyqDi8" x_train, x_val, y_train, y_val = train_test_split(X, Y) # + [markdown] colab_type="text" id="T2He7QZQqDjV" # ## Data Exploration # # Now, let's look at what we've got here. # + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="A_82qTmBqDjf" outputId="2a53aa7b-d12c-436f-adf2-15b0c69f8651" print("Dimensions of training data: ") print(x_train.shape) print(y_train.shape) print("Dimensions of validation data: ") print(x_val.shape) print(y_val.shape) # + [markdown] colab_type="text" id="-2HV8QROqDkE" # `784`, interesting - That is a (28, 28) image. Let's look at some of these below. 
# + colab={"base_uri": "https://localhost:8080/", "height": 589} colab_type="code" id="TLT1KBqcqDkP" outputId="9b30297e-9c7f-4160-9187-04fc3ffe8fbe" random_index = np.random.randint(0, 6001, size=(25,)) plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.imshow(x_train[random_index[i]].reshape((28, 28))) plt.xlabel(y_train[random_index[i]]) plt.show() # + [markdown] colab_type="text" id="hSTWEvFqqDkj" # #### Fashion MNIST # # Ah, the classic Fashion MNIST set. This is gonna be interesting. Let's try some basic machine learning techniques first. # # Let's first create a helper function so that we can assess the performance of a model easily. # + colab={} colab_type="code" id="SEef0wy6qDko" def evaluate_model(model): y_pred = model.predict(x_val) print("Classifciation Report:") print(classification_report(y_val, y_pred)) print("Confusion Matrix:\n") print(confusion_matrix(y_val, y_pred)) print("\nAccuracy of the model on validation set: {}".format( model.score(x_val, y_val))) print("Accuracy of the model on Training set: {}".format( model.score(x_train, y_train))) # + [markdown] colab_type="text" id="NLDVw1_xqDk6" # #### Let's try SVM first # + colab={"base_uri": "https://localhost:8080/", "height": 374} colab_type="code" id="vEdiYL07qDk7" outputId="ee55cab4-17e4-4382-ce87-fd382914d67c" SVC = svm.SVC(gamma='scale').fit(x_train, y_train) evaluate_model(SVC) # + [markdown] colab_type="text" id="xrPx8yq7qDlM" # 83% accuracy on first go. We're upto a great start. Let's try some other things now # + colab={"base_uri": "https://localhost:8080/", "height": 374} colab_type="code" id="tGXbhm1rqDlT" outputId="2503aba3-4fcb-4dd3-aed8-2bc86734e660" dtree_model = DecisionTreeClassifier(max_depth = 17).fit(x_train, y_train) evaluate_model(dtree_model) # + [markdown] colab_type="text" id="vv4R2ZNbqze0" # This shows clear signs of overfitting. Let's try a different `max_depth`. 
# + colab={"base_uri": "https://localhost:8080/", "height": 374} colab_type="code" id="YFSMxtubqDlt" outputId="02947be9-6cfa-4671-9e86-8d068f8a47b2" dtree_model = DecisionTreeClassifier(max_depth=25).fit(x_train, y_train) evaluate_model(dtree_model) # + colab={"base_uri": "https://localhost:8080/", "height": 374} colab_type="code" id="hO-IpibxqDmE" outputId="76f12830-3c9b-4035-974a-f8d45b190109" knn = KNeighborsClassifier(n_neighbors = 7).fit(x_train, y_train) evaluate_model(knn) # + colab={"base_uri": "https://localhost:8080/", "height": 443} colab_type="code" id="Nkz1gv_-qDmV" outputId="1c2268b7-700e-4742-b34a-bbd83fc5ed27" rfc = RandomForestClassifier(min_samples_leaf=20).fit(x_train, y_train) evaluate_model(rfc) # + colab={"base_uri": "https://localhost:8080/", "height": 438} colab_type="code" id="zNNS5j62qDms" outputId="3af3f4fa-e469-426a-daf0-a5f9d0834b83" rfc = RandomForestClassifier(min_samples_leaf=10).fit(x_train, y_train) evaluate_model(rfc) # + colab={"base_uri": "https://localhost:8080/", "height": 438} colab_type="code" id="_AFNbfZPqDnG" outputId="df745392-d95d-4547-d2bc-6435233bb9ff" rfc = RandomForestClassifier(min_samples_leaf=5).fit(x_train, y_train) evaluate_model(rfc) # + colab={"base_uri": "https://localhost:8080/", "height": 443} colab_type="code" id="jdRgica2qDng" outputId="bbcd98f2-fcee-4b18-8be8-e11650f0c4dd" rfc = RandomForestClassifier(min_samples_leaf=4).fit(x_train, y_train) evaluate_model(rfc) # + [markdown] colab_type="text" id="zuIatK4bqDoA" # Hmm, SVM seems to be the winner here. Let's try a grid search for parameters in this case. 
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="NMTLP7zIqDoD" outputId="cf79295c-f60b-43bb-bca2-7b8afba0dd47" parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} svc = svm.SVC(gamma="scale") clf = GridSearchCV(svc, parameters, cv=5) clf.fit(x_train, y_train) # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="08MdtHRMqDoh" outputId="bd983347-0b8d-40ef-dc59-36c8dd4d3ce6" clf.best_estimator_ # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="nxclKuk4qDo0" outputId="41ad50ae-e3b3-438c-f978-0f4ed6b0a767" clf.score(x_val, y_val) # + colab={"base_uri": "https://localhost:8080/", "height": 374} colab_type="code" id="rMfXhPBFqDpQ" outputId="944b5882-16e2-4257-9b40-8e5d6bdf2eeb" evaluate_model(clf) # + [markdown] colab_type="text" id="qTwGK9j3qDpz" # Woohoo, `85%` is decent enough accuracy for submission. Our results seem match the [standard benchmark results]. # # [standard benchmark results]: http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/ # + [markdown] colab_type="text" id="PXpm4BmxqDp6" # ### Let's try a basic Convolutional Neural Network # # We've already reached a pretty high accuracy bar, but according to [zalando research], many CNN based architectures have had also been pretty successful. Since, I do not have as much data as in the original Fashion MNIST, I should train on a relatively simpler architecture. 
# # [zalando research]: [https://github.com/zalandoresearch/fashion-mnist#benchmark] # + colab={} colab_type="code" id="3j_R5EcrqDqB" batch_size = 128 num_classes = 7 epochs = 20 img_rows, img_cols = 28, 28 # + colab={} colab_type="code" id="6J_dEIC4qDqT" # Since K.image_data_format == 'channels_last': x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_val = x_val.reshape(x_val.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) # + colab={"base_uri": "https://localhost:8080/", "height": 449} colab_type="code" id="t1innwI3qDqe" outputId="bddc7a2f-3fc8-401d-8d9d-5928452e1b32" print(y_train.shape) print(y_val.shape) plt.hist(y_train) # + colab={} colab_type="code" id="Qfo-r69CqDqt" y_train = keras.utils.to_categorical(y_train) y_val = keras.utils.to_categorical(y_val) # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="YniE8-4YqDq8" outputId="d19ad5e0-667e-4c63-a77d-c5e2209245bc" print(y_train.shape) print(y_val.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 154} colab_type="code" id="pLzLsRewqDrO" outputId="664768db-c037-48a0-fd62-24786f54bbe4" cnn = Sequential() cnn.add(Conv2D(32, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal', input_shape=(28, 28, 1))) cnn.add(MaxPooling2D((2, 2))) cnn.add(Dropout(0.25)) cnn.add(Conv2D(64, kernel_size=(3, 3), activation='relu')) cnn.add(MaxPooling2D(pool_size=(2, 2))) cnn.add(Dropout(0.25)) cnn.add(Conv2D(128, (3, 3), activation='relu')) cnn.add(Dropout(0.4)) cnn.add(Flatten()) cnn.add(Dense(128, activation='relu')) cnn.add(Dropout(0.3)) cnn.add(Dense(7, activation='softmax')) cnn.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy']) # + colab={"base_uri": "https://localhost:8080/", "height": 812} colab_type="code" id="V64kzhQ4qDrb" outputId="5cf97244-b28a-4973-d2ad-57b8e7abd01b" cnn.fit(x_train, y_train, batch_size=batch_size, epochs=20, verbose=1, validation_data=(x_val, y_val)) 
# + colab={"base_uri": "https://localhost:8080/", "height": 306} colab_type="code" id="I3P9NinJqDrv" outputId="1ab6ecbe-556d-4c82-a02e-40d6017e600f" y_pred = cnn.predict_classes(x_val) y_val_confusion = [np.argmax(encoded_label) for encoded_label in y_val] print("Classification Report: \n", classification_report(y_val_confusion, y_pred)) print("Confusion Matrix: \n", confusion_matrix(y_val_confusion, y_pred)) # + [markdown] colab_type="text" id="BitJxpXOxEXA" # ### Let's try Data Augmentation # # We augment additional examples in the `x/y_aug_train` and `x/y_aug_val` by augmenting data present in `x/y_train`. The data from `x/y_val` shall serve as a test data set here. # + colab={} colab_type="code" id="jsjKIrKWqDsG" # Defines the options for augmentation datagen = ImageDataGenerator( rotation_range=10, horizontal_flip=True, # fill_mode='nearest' ) number_of_augmentation = 2 def image_augmentation(image): ''' Generates new images. ''' images = [] # Take a 28, 28 image and transform into (1, 28, 28, 1) image = image.reshape(1, img_rows, img_cols, 1) i = 0 for x_batch in datagen.flow(image, batch_size=1): images.append(x_batch) i += 1 if i >= number_of_augmentation: break return images # + colab={} colab_type="code" id="4zDhwmyLwm6u" def augment_data(x, y): no_of_training_ex = x.shape[0] x_aug = [] y_aug = [] for i in range(no_of_training_ex): image = x[i] # Image is (28, 28) label = y[i] aug_images = image_augmentation(image) for aug_image in aug_images: x_aug.append(aug_image.reshape((28, 28))) y_aug.append(label) # Now, also save the original image x_aug.append(image) y_aug.append(label) x_aug = np.array(x_aug) y_aug = np.array(y_aug) return x_aug, y_aug # + x_train, x_test, y_train, y_test = train_test_split(X, Y) x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols) print(x_train.shape) print(y_train.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 86} colab_type="code" id="pWTfKidWxrpn" outputId="4c82199d-2566-4422-e79f-53a930f718d0" 
x_aug_data, y_aug_data = augment_data(x_train, y_train) print(x_aug_data.shape) print(y_aug_data.shape) # - x_aug_train, x_aug_val, y_aug_train, y_aug_val = train_test_split(x_aug_data, y_aug_data) # + colab={"base_uri": "https://localhost:8080/", "height": 86} colab_type="code" id="1kaIzKA0x4et" outputId="20cecd8f-0bbf-4d38-8679-4fef39c4bca8" x_aug_train = x_aug_train.reshape(x_aug_train.shape[0], 28, 28 , 1).astype('float32') x_aug_val = x_aug_val.reshape(x_aug_val.shape[0], 28, 28 , 1).astype('float32') # y_aug_train = keras.utils.to_categorical(y_aug_train) # y_aug_val = keras.utils.to_categorical(y_aug_val) print(x_aug_train.shape) print(y_aug_train.shape) print(x_aug_val.shape) print(y_aug_val.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 164} colab_type="code" id="h92hTCysyU4K" outputId="0c383ff4-8123-4776-d516-2741d9537d8e" # Let's visualise our transforms i = 3455 fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4) ax1.imshow(x_train[i].reshape(28, 28)) ax2.imshow(x_aug_data[3*(i-1) + 3].reshape(28, 28)) ax3.imshow(x_aug_data[3*(i-1) + 4].reshape(28, 28)) ax4.imshow(x_aug_data[3*(i-1) + 5].reshape(28, 28)) print(y_train[i]) # + colab={} colab_type="code" id="-Ot0oBQMyk_f" cnn_2 = Sequential() cnn_2.add(InputLayer(input_shape=(img_rows, img_cols, 1))) cnn_2.add(BatchNormalization()) cnn_2.add(Conv2D(64, (4, 4), padding='same', activation='relu')) cnn_2.add(MaxPooling2D(pool_size=(2, 2))) cnn_2.add(Dropout(0.1)) cnn_2.add(Conv2D(64, (4, 4), activation='relu')) cnn_2.add(MaxPooling2D(pool_size=(2, 2))) cnn_2.add(Dropout(0.3)) cnn_2.add(Flatten()) cnn_2.add(Dense(256, activation='relu')) cnn_2.add(Dropout(0.5)) cnn_2.add(Dense(64, activation='relu')) cnn_2.add(BatchNormalization()) cnn_2.add(Dense(7, activation='softmax')) cnn_2.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(), metrics=['accuracy']) # - y_aug_train = keras.utils.to_categorical(y_aug_train) y_aug_val = keras.utils.to_categorical(y_aug_val) # + 
colab={"base_uri": "https://localhost:8080/", "height": 200} colab_type="code" id="prFA22qty-WP" outputId="e8ab1a11-ee5a-45a7-b3b6-f564d5e7a7cc" cnn_2.fit(x_aug_train, y_aug_train, batch_size=batch_size, epochs=40, verbose=1, validation_data=(x_aug_val, y_aug_val)) # + colab={} colab_type="code" id="KUcmZS4Tz8Ag" score = cnn_2.evaluate(x_aug_val, y_aug_val, verbose=0) print("Accuracy on the validation set (augmented): ", score[1]) # - x_val = x_val.reshape(x_val.shape[0], 28, 28, 1) y_val = keras.utils.to_categorical(y_val) score = cnn_2.evaluate(x_val, y_val, verbose=0) print("Accuracy on the test set: ", score[1]) # `95.5%` - We are good to go. \o/ # + PATH = "../input/test-images-midas-assignment/" with open(PATH + "test_image.pkl", 'rb') as f: x_final = np.array(pickle.load(f)) x_final = x_final / 255 # - x_final = x_final.reshape(x_final.shape[0], 28, 28, 1) y_pred = cnn_2.predict_classes(x_final) print(y_pred.shape) # + import csv with open('Rahul_Jha.csv', mode='w') as f: writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) writer.writerow(['image_index', 'class']) for i in range(len(x_final)): writer.writerow([i, y_pred[i]]) # -
CV_problem/CV Problem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from sklearn import datasets from sklearn.preprocessing import MinMaxScaler from sklearn.cluster import KMeans import pandas as pd from sklearn_model.export import Model # - # Load the iris dataset iris = datasets.load_iris() X = iris.data y = iris.target target_names = iris.target_names feature_names = iris.feature_names # Since it is unsupervised learning Y is not needed df = pd.DataFrame(X, columns = feature_names) df # Perform MinMax scaling scaler = MinMaxScaler() X = scaler.fit_transform(df) # Create and fit the model model = KMeans(n_clusters=3, random_state=1) model.fit(X) model.labels_ # + mdl = Model() mdl.add_fields(df) # Since it is unsupervised learning the cluster ids are further studied and assigned labels # In this case the three labels are the species # 'versicolor' (cluster id 0), 'setosa' (cluster id 1) and 'virginica' (cluster id 2) # which were determined after further study. # These labels are now assigned using add_output_field method mdl.add_output_field("species", "category", values = ['versicolor', 'setosa', 'virginica' ]) # MinMax transformer is added mdl.add_transformer(scaler, list(df.columns)) # Kmeans model is added mdl.add_model(model) # - # View the exported model print(mdl.exportJSON()) # Save the model in a file mdl.exportJSON('kmeans.json')
examples/02-KMeans-scoring-model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: open-mmlab # language: python # name: open-mmlab # --- from PIL import Image import os from tqdm import tqdm # !pwd path = '/home/gridsan/kxu/mmsegmentation/data/dataset_mmseg/ann_dir_old_labels/val/' new_path = '/home/gridsan/kxu/mmsegmentation/data/dataset_mmseg/ann_dir_new/val/' label_map = {0: 10, 1: 9, 2: -1, 3: -1, 4: 19, 5: -1, 6: -1, 7: -1, 8: 11, 9: 3, 10: -1, 11: 1, 12: 15, 13: 2, 14: 12, 15: -1, 16: -1, 17: 8, 18: -1, 19: -1, 20: 17, 21: 5, 22: -1, 23: -1, 24: -1, 25: -1, 26: -1, 27: -1, 28: -1, 29: -1, 30: -1, 31: -1, 32: 13, 33: -1, 34: 6, 35: -1, 36: -1, 37: -1, 38: -1, 39: -1, 40: -1, 41: -1, 42: -1, 43: -1, 44: -1, 45: -1, 46: 4, 47: -1, 48: -1, 49: -1, 50: -1, 51: -1, 52: -1, 53: -1, 54: -1, 55: -1, 56: -1, 57: -1, 58: -1, 59: -1, 60: -1, 61: -1, 62: -1, 63: -1, 64: 22, 65: -1, 66: -1, 67: -1, 68: -1, 69: -1, 70: -1, 71: -1, 72: -1, 73: -1, 74: -1, 75: -1, 76: -1, 77: -1, 78: -1, 79: -1, 80: -1, 81: -1, 82: -1, 83: -1, 84: -1, 85: -1, 86: -1, 87: -1, 88: -1, 89: 20, 90: -1, 91: -1, 92: -1, 93: 14, 94: -1, 95: -1, 96: -1, 97: -1, 98: -1, 99: -1, 100: -1, 101: -1, 102: -1, 103: -1, 104: -1, 105: -1, 106: -1, 107: -1, 108: -1, 109: 7, 110: -1, 111: -1, 112: -1, 113: -1, 114: -1, 115: -1, 116: -1, 117: -1, 118: -1, 119: -1, 120: -1, 121: -1, 122: -1, 123: 23, 124: 21, 125: -1, 126: 16, 127: 18, 128: -1, 129: -1, 130: -1, 131: -1, 132: -1, 133: -1, 134: 0, 135: -1, 136: -1, 137: -1, 138: -1, 139: -1, 140: -1, 141: -1, 142: -1, 143: -1, 144: -1, 145: -1, 146: -1, 147: -1, 148: -1, 149: -1} aerial_to_ade20k_label_map = dict() for key, val in label_map.items(): if val != -1: aerial_to_ade20k_label_map[val] = key print(aerial_to_ade20k_label_map) print(len(aerial_to_ade20k_label_map)) # + dirs = os.listdir(path) for item in tqdm(dirs): if os.path.isfile(path+item): im = 
Image.open(path+item) image = im.load() width, height = im.size new_im = Image.new(mode="L", size=(width, height)) new_image = new_im.load() # im_pixels = set() for x in range(width): for y in range(height): pixel = image[x, y] if pixel > 23: pixel = 23 # im_pixels.add(pixel) new_pixel = aerial_to_ade20k_label_map[pixel] new_image[x, y] = new_pixel new_im.save(new_path+item) # print(im_pixels) # break new_image.show() # -
notebooks/change_annotation_ids.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="SwRSqXsjA4kw" # # Practice with 2D lists! # **Remember:** # * Lists can be used to group different values together - it's just a collection of things. # * You can make a list in Python by putting different things in a box of brackets `[]` separated by commas. # * 2D lists are lists of lists # # First, make a list of land animals including cat, dog, elephant, and any other land animals you want to include. Store it in the variable `land`: # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 329, "status": "ok", "timestamp": 1563150257306, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-WQsOgn6cXFg/AAAAAAAAAAI/AAAAAAAAAVA/ol7X28qyOHc/s64/photo.jpg", "userId": "07259925140867310896"}, "user_tz": 240} id="KUnOPBIwA4k0" outputId="9976fc8b-186c-46a2-f7f5-5f8262609481" # command Python to make a list of land animals including dog, cat, and elephant. Store it in land land = ['cat','dog','elephant'] # command Python to print land print(land) # + [markdown] colab_type="text" id="sIp8HuloA4lO" # Second, make a list of aquatic animals including fish, seahorse, and whale. Store it in the variable `aquatic`: # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 647, "status": "ok", "timestamp": 1563150257655, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-WQsOgn6cXFg/AAAAAAAAAAI/AAAAAAAAAVA/ol7X28qyOHc/s64/photo.jpg", "userId": "07259925140867310896"}, "user_tz": 240} id="39oxa1gLA4lP" outputId="177d6169-0788-4195-d55c-2cc0c4f1060e" # command Python to make a list of aquatic animals including fish, seahorse, and whale. 
Store it in aquatic aquatic = ['fish','seahorse','whale'] # command Python to print aquatic print(aquatic) # + [markdown] colab_type="text" id="TvVceu0SA4lU" # How many things are in your `land` list? How about your `aquatic` list? # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 625, "status": "ok", "timestamp": 1563150257660, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-WQsOgn6cXFg/AAAAAAAAAAI/AAAAAAAAAVA/ol7X28qyOHc/s64/photo.jpg", "userId": "07259925140867310896"}, "user_tz": 240} id="52IlwPohA4lV" outputId="08f898c2-b45e-4b61-8bc8-8a5fdaeaa57c" # command Python to print the length of land print(len(land)) # command Python to print the length of aquatic print(len(aquatic)) # + [markdown] colab_type="text" id="hq_B96OCA4lb" # Okay, time to make a 2D list! Combine land and aquatic into a 2D list called `animals`: # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 616, "status": "ok", "timestamp": 1563150257666, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-WQsOgn6cXFg/AAAAAAAAAAI/AAAAAAAAAVA/ol7X28qyOHc/s64/photo.jpg", "userId": "07259925140867310896"}, "user_tz": 240} id="LzDn6fTaA4le" outputId="4e2ec016-ed76-4c7f-e293-7706f6af5acd" # command Python to make a 2D list of animals that contains land and aquatic animals = [land, aquatic] # commadn Python to print animals print(animals) # + [markdown] colab_type="text" id="KG4Xj_3yA4lu" # Now, create another list called `air` and include robin, cardinal, and bat. # + colab={} colab_type="code" id="kLA6ZapvA4ly" # command Python to make a list called air air = ['robin','cardinal','bat'] # + [markdown] colab_type="text" id="zKq_ToCUA4l1" # Add air to your 2D animals list. 
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 698, "status": "ok", "timestamp": 1563150257764, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-WQsOgn6cXFg/AAAAAAAAAAI/AAAAAAAAAVA/ol7X28qyOHc/s64/photo.jpg", "userId": "07259925140867310896"}, "user_tz": 240} id="mSccI1ZuA4l2" outputId="351c3095-f784-4096-df63-c457ce0e4fca" # command Python to add air to your animals list animals.append(air) # command Python to print animals print(animals) # command Python to find the length of animals len(animals) # + [markdown] colab_type="text" id="6GIbjm7VA4l6" # **Challenge:** Make a 2D list of numbers from 1 to 20 where the odd numbers are in the first list and the even numbers are in the second list # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 682, "status": "ok", "timestamp": 1563150257766, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-WQsOgn6cXFg/AAAAAAAAAAI/AAAAAAAAAVA/ol7X28qyOHc/s64/photo.jpg", "userId": "07259925140867310896"}, "user_tz": 240} id="Rf-rpG2xA4l8" outputId="09a3c771-b509-408c-90ed-682d6955d286" # make 2D list of numbers odd = [1,3,5,7,9,11,13,15,17,19] even = [2,4,6,8,10,12,14,16,18,20] all_nums = [odd,even] # print out 2D list of numbers to see if you did it right! print(all_nums) # + [markdown] colab_type="text" id="qK0Qzzo9A4mA" # Now add your animals list and your numbers list together, print it out, and find the length! 
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 661, "status": "ok", "timestamp": 1563150257768, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-WQsOgn6cXFg/AAAAAAAAAAI/AAAAAAAAAVA/ol7X28qyOHc/s64/photo.jpg", "userId": "07259925140867310896"}, "user_tz": 240} id="Og26DlSiA4mC" outputId="c164ce21-2828-42c4-e94d-3e22a7d539fc" # add your animals list and your numbers list together both_lists = animals + all_nums # print new list print(both_lists) # find length of new list len(both_lists) # + [markdown] colab_type="text" id="RTEYuE4iA4mG" # Nice job! You just practiced: # * Making a 2D list (a list of lists) # * Finding the length of a 2D list (len) # * Adding 2D lists together (+) # * Adding things to a 2D list (.append)
Practices/_Keys/KEY_Practice06_2D_Lists_Intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np # Number of training iterations NUM_ITERATIONS = 50 # Threshold for 0/1 classification THRESHOLD = 0.0 # Learning rate LEARNING_RATE = 0.01 # Data (both training and testing) input_data = np.linspace(0, 5, 1000) def output(n): return train_value_function(n[0]) + 0.5 * train_value_function(n[1]) - 1.5 * train_value_function(n[2]) def train_value_function(t): return np.sin(10 * np.sin(t) * t) def sigmoid(x): return np.power((1 + np.exp(-x)), -1) def tanh(x): return 2 / (1 + np.exp(-2 * x)) - 1 # Create perceptron weights (random weights) weights = np.random.rand(4) # Train perceptron for i in range(2, len(input_data)): # Calculate predictions with current weights current_input = np.concatenate((input_data[i - 2:i + 1], [1])) predictions = tanh(np.dot(current_input, weights)) output_value = tanh(output(current_input)) # Calculate accuracy (not needed for training, but to track the learning progress) error = abs(predictions - output_value) # Print the error print("Iteration %d: Error %f \t" % (i, error)) # Update weights according to update rule weights = weights + LEARNING_RATE * (output_value - predictions) * current_input # Print weights for inspection print(weights) # -
neural-networks/ipynb/Ex2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.9 64-bit # language: python # name: python3 # --- # # Limpieza y Arreglo de Datos # # En este cuaderno se detalla el proceso de limpieza y arreglo de los dos conjuntos de datos: *products.csv* y *products_categories.csv*. El objetivo es preparar los datasets, para poder manejarlos posteriormente sin errores. # ## Índice # # 1. Importación paquetes # 2. Carga de Datos # 3. Categorías # 4. Productos # 5. Nombres # 6. Conclusiones # # Importación de paquetes # # Como siempre importamos los paquetes necesarios para realizar la limpieza import pandas as pd import re import spacy from collections import Counter, OrderedDict # Semilla para realizar experimentos reproducibles seed = 125 # # Carga de Datos # # Cargamos en memoria los dos ficheros y comprobamos que lo hayan hecho correctamente. df_products = pd.read_csv('../Data/products.csv') df_products.sample(5,random_state=seed) df_categories = pd.read_csv('../Data/products_categories.csv') df_categories.sample(5,random_state=seed) # # Categorías # # Empezaremos con el dataset de categorías, ya que es el *"menos"* problemático y también porque nos servirá de ayuda para completar datos en el otro dataframe. # Si recordamos del Análisis Exploratorio, vimos que hay ciertos número de valores nulos en las subcategorías(**cat2** y **cat3**). Así que sustituiremos esos valores nulos por un simple guión que indique que carece de tal categoría, ya que carecemos de fuentes de datos que nos puedan dar indicios sobre la subcategoría del producto df_categories.isna().sum() df_categories.fillna('-', inplace=True) df_categories.isna().sum() # Lo siguiente será estandarizar todos los campos, ya que se identificaron *"erratas"* a la hora de imputar los nombres. Para ello, se eliminan los espacios innecesarios e inonsitencias de mayúsculas y minúsculas. 
df_categories.loc[:, ["cat1", "cat2", "cat3"]] = df_categories[["cat1", "cat2", "cat3"]].apply(lambda r: [str(n).strip().capitalize() for n in r], axis=1, result_type="broadcast") df_categories # Finalmente, dado que no se encontraron más inconvenientes con estos datos, se guardan en un nuevo fichero que será el que se use a partir de ahora. df_categories.to_csv('./clean_categories.csv',index=False) # # Productos # # Este dataset presenta varias *"trabas"*, así que veamos como darles solución. El primer gran incoveniente es que había un gran porcentaje de productos sin **analytic_category**. Viendo que los valores de la columna **cat1** coinciden con las de esta, se intentará rescatar su valores a partir de la clave **sku** # df_products.isna().sum() #Cantidad de valores que faltan # Antes de realizar el *merge* comprobamos que los nombres de categorías sean exactamente iguales, para que no aparezcan categorías duplicadas. df_categories['cat1'].value_counts() df_products['analytic_category'].value_counts() # Vemos que hay algunas categorías que llevarían a errores. Vamos a prevenirlos. df_categories.loc[df_categories['cat1'] == 'Higiene y cuidado personal', 'cat1'] = 'Higiene' df_categories.loc[df_categories['cat1'] == 'Cosmética y belleza', 'cat1'] = 'Cosmética y Belleza' recoverable = df_products.loc[df_products["analytic_category"].isna(), ["sku"]].reset_index().merge(df_categories[["sku", "cat1"]], on="sku") # Comprobamos que se ha ejecutado correctamente df_products.loc[recoverable["index"], "analytic_category"] # Asignamos los valores recuperados df_products.loc[recoverable["index"], "analytic_category"] = recoverable["cat1"].values # Comprobamos que ha funcionado correctamente df_products.loc[recoverable["index"].values, "analytic_category"] df_products.isna().sum() # Conseguimos reducir sustancialmente los valores perdidos de categorías de cerca de *3500* pasan a alrededor de *2700*. 
Aun así sigue siendo un recuento bastante grande respecto al total del conjunto de datos. # **IDEA FELIZ**. Por lo general una marca se suele enmarcar dentro de una misma categoría de productos, ¿por qué no restaurar los valores que quedan de la columna **analytic_category** a través de las categorías de las marcas? df_marcas = df_products[['marca_value','analytic_category']].drop_duplicates(ignore_index=True) df_marcas.sample(5,random_state=seed) # Eliminamos aquellas filas que tengan nula el campo de categoría, ya que no son válidas si queremos recuperar los valores perdidos. df_marcas.shape df_marcas.dropna(inplace=True) df_marcas.shape # Agrupamos las marcas y nos quedamos con las que se asocian solamente a una única categoría, ya que de otro modo no se puede adivinar a que categoría pertenece el producto de la marca. df_marcas_count = df_marcas.groupby(['marca_value'],as_index=False).agg(count=('analytic_category','count'),cat=('analytic_category','first')) df_marcas_count = df_marcas_count[df_marcas_count['count'] == 1] df_marcas_count.sample(5,random_state=seed) # Recuperamos los valores perdidos en el dataframe de productos recoverable = df_marcas_count[["marca_value", "cat"]].merge(df_products.loc[df_products["analytic_category"].isna(), ["marca_value"]].reset_index(), on="marca_value") recoverable df_products.loc[recoverable["index"], "analytic_category"] = recoverable["cat"].values df_products.shape df_products.isna().sum() # Se ha conseguido reducir de nuevo el recuento de categorías perdidas, pero sigue siendo un número cuantioso. # # **IDEA FELIZ** Dado que tenemos las descripciones de los productos, podemos crear un diccionario con palabras clave que se suelen repetir. De esta manera puede que consigamos imputar más valores perdidos, sin perder rigor en los datos. # Dado que las descripciones tienen etiquetas de HTML, estas deben ser eliminadas si queremos tokenizar correctamente los textos. 
df_products.loc[:,('short_description')] = df_products['short_description'].str.replace(r'<[^<>]*>', '', regex=True) df_products.sample(5,random_state=seed) # Por otro lado, también se vio la presencia de carácteres de salto de línea. Estos deben ser eliminados, así como normalizar las palabras para que las mayúsculas no afecten a la hora de crear el diccionario de palabras. df_products.loc[:,'short_description'] = df_products['short_description'].replace(to_replace=[r"\\t|\\n|\\r", "\t|\n|\r"], value='', regex=True) df_products.loc[:,'short_description'] = df_products['short_description'].str.lower() df_products['short_description'].fillna('-',inplace=True) df_products.sample(5,random_state=seed) # Nos quedamos con las descripciones y sus índices para que sean más fácil después realizar el update del dataframe de productos descriptions = df_products[df_products['analytic_category'].isna()]['short_description'].to_list() indexes = df_products[df_products['analytic_category'].isna()]['short_description'].index descriptions[:10] # Se ha hecho uso de un paquete especial para problemas de procesamiento de lenguajes para tokenizar el texto. # + # Se carga el paquete de español nlp = spacy.load("es_core_news_sm") # Juntamos en un texto todas las descripciones comb_desc = ' '.join(descriptions) doc = nlp(comb_desc) # Extraemos las palabras relevantes y contamos su aparición en el doc words = [token.text for token in doc if not token.is_stop and not token.is_punct] word_freq = Counter(words) # - # Nos hemos quedado con las **150** palabras más relevantes, de las cuales hemos extraído las que hemos considerado menos ambiguas. Con esto nos referimos a palabras que unívocamente podríamos relacionar a un tipo de categoría. 
common_words = word_freq.most_common(150) print (common_words) # Creamos nuestro diccionario de palabras en orden de frecuencia para evitar solapamientos key_words = ["bebé","chupete","tetina","bebés","suavinex","biberón","niños","gafas","higiene","cabello","chicco","encías","chupetes","rostro","baño","vitaminas","jabón","infantil","champú","gafa","perfume","mam"] value_cat = ["Infantil","Infantil","Infantil","Infantil","Infantil","Infantil","Infantil","Óptica","Higiene","Cosmética y Belleza","Infantil","Higiene","Infantil","Cosmética y Belleza","Higiene","Nutrición","Higiene","Infantil","Higiene","Óptica","Perfumeria","Infantil"] cat_dict = OrderedDict(zip(key_words,value_cat)) print(f'Nº palabras clave: {len(key_words)}') print(f'Categoría más repetida: {Counter(value_cat).most_common(1)}') # Función para asignar la categoría dependiendo de la descripcion def word_in_catalog(desc,catalog): for k,w in catalog.items(): if k in desc: return w for i in indexes: df_products.loc[i,'analytic_category'] = word_in_catalog(df_products.loc[i,'short_description'],cat_dict) df_products.isna().sum() # Con esto hemos logrado rebajar en *otros mil* las categorías perdidas, algo verdaderamente impresionante con algo tan simple. Podríamos seguir intentando recuperar valores a partir de las descripciones con algún tipo de modelo predictivo o viendo el contexto de las palabras, pero ello requiere mucho tiempo del que no se dispone. # # Dicho esto, el resto de nulos que permanecen serán eliminados, porque ya no suponen un gran porcentaje de valores perdidos. percent_missing = df_products.isnull().sum() * 100 / df_products.shape[0] df_missing_values = pd.DataFrame({ 'column_name': df_products.columns, 'percent_missing': percent_missing }) df_missing_values.sort_values(by='percent_missing',ascending=False,inplace=True) print(df_missing_values.to_string(index=False)) # Se ha pasado de **12%** a un **4%** lo cual consideramos un número razonable. 
# En cuanto al resto de variables con valores perdidos, la de *marca* es la única que podemos salvar algunos registros, dado que al echar un vistazo nos hemos percatado de que algunos nombres de producto contiene también el de la marca.

df_products[df_products['marca_value'].isna()].sample(10,random_state=seed)

# Recuperamos los nombres de productos
product_names = df_products[df_products['marca_value'].isna()]['name'].to_list()
indexes = df_products[df_products['marca_value'].isna()]['name'].index
product_names[:20]

# +
# Juntamos en un texto todas las descripciones
prods = ' '.join(product_names)
doc = nlp(prods)

# Extraemos las palabras relevantes y contamos su aparición en el doc
words = [token.text for token in doc if not token.is_stop and not token.is_punct]
word_freq = Counter(words)
# -

common_words = word_freq.most_common()
print (common_words)

# Se ha hecho una búsqueda en Internet para comprobar que existen
key_words = ['Dunner', 'Medela', 'Interapothek', 'MiRebotica', 'Rene', 'Furterer', 'Lierac', 'Roche', 'Posay', 'Farlane']
marcas_value = ['Dr Dunner', 'Medela', 'Interapothek', 'MiRebotica', '<NAME>', '<NAME>', 'Lierac','La Roche Posay', 'La Roche Posay', 'Farline']

# Comprobamos que esas marcas existan dentro de nuestro catálogo
df_products[df_products['marca_value'].isin(marcas_value)]['marca_value'].value_counts()

# Vemos que algunas marcas no aparecen, esto puede deberse a que estén escritas de otra manera. Comprobémoslo!
df_products.loc[df_products['marca_value'].str.contains('Dr', na=False),'marca_value'].value_counts()

df_products.loc[df_products['marca_value'].str.contains('Inter', na=False),'marca_value'].value_counts()

df_products.loc[df_products['marca_value'].str.contains('Mi', na=False),'marca_value'].value_counts()

# Vemos que efectivamente se trataba de eso, por tanto cambiamos las palabras clave

# BUGFIX: this corrected list previously had only 9 entries while `key_words`
# has 10, so zip() silently dropped the last pair and shifted the mapping:
# 'Lierac' mapped to 'La Roche Posay', 'Posay' to 'Farline' and 'Farlane' was
# lost entirely. 'Lierac' is restored so both lists align position-by-position
# (cf. the original 10-entry list above).
marcas_value = ['Dr. Dunner', 'Medela', 'InterApothek', 'Mi Rebotica', '<NAME>', '<NAME>', 'Lierac', 'La Roche Posay', 'La Roche Posay', 'Farline']

# keyword -> brand, kept in insertion order so the first matching keyword wins
marcas_dict = OrderedDict(zip(key_words,marcas_value))

for i in indexes:
    df_products.loc[i,'marca_value'] = word_in_catalog(df_products.loc[i,'name'],marcas_dict)

df_products.isna().sum()

# Se ha reducido considerablemente el número de marcas que faltan. Por tanto, ya podemos dar por finalizada la recuperación de valores y eliminaremos todas aquellas filas que contienen valores nulos.

df_products.dropna(how='any',inplace=True)
df_products.isna().sum()

# Ya para finalizar, hemos considerado que la columna de *pictures* no aporta ningún valor, ya que los links no funcionan y además lo que pretendemos desarrollar posteriormente no implica el uso de estas supuestas imágenes.

df_products.drop(columns=['picture'],inplace=True)

# ## Nombres de productos
#

p_id = df_products[['product_id']].duplicated(keep='first').sum()
p_id_sku = df_products[['product_id','sku']].duplicated(keep='first').sum()
p_id_sku_n = df_products[['product_id','sku','name']].duplicated(keep='first').sum()
print(f'IDs de productos repetidos: {p_id}')
print(f'Combinación de IDs y SKUs de productos repetidos: {p_id_sku}')
print(f'Productos repetidos: {p_id_sku_n}')

# Normalizamos el nombre de los productos para ver si hay algun duplicado.

df_products.loc[:, 'name'] = df_products['name'].apply(lambda r: str(r).strip().lower())
df_products.sample(5,random_state=seed)

df_products[df_products[['product_id','sku','name']].duplicated(keep='first')]

# Efectivamente vemos que hay algun duplicado, así que los eliminamos.
df_products = df_products.drop_duplicates(subset=['product_id','sku','name'] ,keep='first') # Al igual que el otros dataframe, lo guardamos para que pueda ser utilizado df_products.to_csv('./clean_products.csv',index=False) # # Conclusiones # # De todo este proceso sacamos en claro las siguientes conclusiones: # # - En ambos datasets se han regularizado los campos de texto # - Se ha recuperado un **8%** de valores perdidos para el campo *analytic_category* # - Se han suprimido registros una vez que se ha reducido los nulos y necesitamos de técnicas mucho más complejas # - Se ha prescindido de la columna *picture* dado que no aporta valor # - Se ha normalizado los nombre de productos
Clean Data/Data Cleaning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Modelos Sequence-to-Sequence # Hasta el momento hemos trabajado con dos tipos de procesamiento de secuencia: el caso donde a cada elemento de la secuencia se le asigna una etiqueta y el caso donde a una secuencia se le asigna un valor, ya sea categórico o contínuo. Estos son casos útiles y comunes. Sin embargo, falta un caso general. Y este es encontrar una secuencia a partir de otra secuencia, las dos de tamaño arbitrario. A estos modelos se les llama sequence-to-sequence o seq2seq. # # En este ejercicio vamos a implementar dos variantes del mismo. Para ejemplificar su uso, ¡vamos a usar un dataset multilingue! Le enseñaremos a nuestro modelo a conjugar palabras. En el ejemplo lo haremos en tres idiomas, pero el dataset original contiene 100 idiomas con los cuáles se puede probar. Para mas información puede consultar el Sharedtask Sigmorphon en la sigueinte dirección: # # https://github.com/sigmorphon/conll2018 # # # ## Preparar los datos # Tomaremos el ejemplo de español primero. Dado que ya entendemos el concepto de preparar datos de ocaciones anteriores pondemos todo el proceso en una clase, que puede ser reusada de manera genérica para diferentes idiomas. Sin embargo, los datos esta vez son diferentes a lo que ya habíamos visto. # + import csv import pandas as pd import numpy as np import io import os import time from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, GRU, Dense from tensorflow.keras.utils import to_categorical, get_file import tensorflow as tf # - # Descargamos los conjuntos de datos que vamos a usar. En este caso están accesibles libremente. 
# +
# Download the file
path_to_train_low = get_file(
    'spanish-train-low',
    origin='https://raw.githubusercontent.com/sigmorphon/conll2018/master/task1/all/spanish-train-low')

path_to_train_medium = get_file(
    'spanish-train-medium',
    origin='https://raw.githubusercontent.com/sigmorphon/conll2018/master/task1/all/spanish-train-medium')

path_to_dev = get_file(
    'spanish-dev',
    origin='https://raw.githubusercontent.com/sigmorphon/conll2018/master/task1/all/spanish-dev')

path_to_test = get_file(
    'spanish-test',
    origin='https://raw.githubusercontent.com/sigmorphon/conll2018/master/task1/all/spanish-test')
# -

# Nuevamente preparamos los datos para que puedan ser ingresados a la red neuronal.

# +
# NOTE: these two dicts are placeholders only; both are rebuilt from the
# training data by get_index() below (which uses '<PAD>'/'<OOV>'-style keys).
input_to_index = {"PAD":0,"EOS":1, "BOS":2, "OOV":3}
output_to_index = {"PAD":0,"EOS":1, "BOS":2,"OOV":3}

def load_data(path):
    """Read a SIGMORPHON task-1 file (lemma TAB form TAB tag;tag;...).

    Returns two parallel tuples: space-separated input strings of
    "<TAG>" tokens followed by lemma characters, and output strings of
    form characters, both wrapped in "<BOS> ... <EOS>" markers.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original version leaked it).
    with io.open(path, encoding='UTF-8') as f:
        data = f.read().strip().splitlines()
    final_data = list()
    for line in data:
        line = line.split('\t')
        line[2] = line[2].split(";")   # morphological tag bundle
        line[1] = list(line[1])        # inflected form -> character list
        line[0] = list(line[0])        # lemma -> character list
        tags = ["<"+tag+">" for tag in line[2]]
        input_string = "<BOS> " + " ".join(tags) +" " + " ".join(line[0]) + " <EOS>"
        output_string = "<BOS> " + " ".join(line[1]) + " <EOS>"
        final_data.append((input_string, output_string))
    # zip(*pairs) transposes [(in, out), ...] into (ins, outs); unpacking the
    # two-element zip below yields one tuple per side.
    return zip(*final_data)

train_input_med, train_output_med = load_data(path_to_train_medium)
train_input_low, train_output_low = load_data(path_to_train_low)
dev_input, dev_output = load_data(path_to_dev)
test_input, test_output = load_data(path_to_test)

print(train_input_med[0])
print(train_output_med[0])
# -

# Para este problema en particular agregamos una serie de tags que van a caracterizar la información morfológica con la cual queremos conjugar nuestra raíz verbal. Es de notar que también usamos los tags de begin of sentence y end of sentence. Para el modelo seq2seq esto es importante, dado que va a iniciar a generar salida a partir de un BOS.
# Ahora vamos a extraer nuevamente nuestro vocabulario para poder darle un índice a cada tag o caracter. # + def get_index(data): word_to_index = {"<PAD>":0, "<OOV>":1} index_to_word = {0:"<PAD>", 1:"<OOV>"} voc = list() for line in data: voc += line.split() voc = list(set(voc)) for word in voc: word_to_index[word] = len(word_to_index) index_to_word[len(index_to_word)] = word return word_to_index, index_to_word input_to_index, index_to_input = get_index(train_input_med) output_to_index, index_to_output = get_index(train_output_med) print(input_to_index) print(output_to_index) # - # Y convertimos todas nuestras entradas a enteros. # + def to_ints(data, token_to_int): new_data = list() for line in data: new_line = list() for word in line.split(): if not word in token_to_int.keys(): new_line.append(token_to_int["<OOV>"]) print("not seen", word) else: new_line.append(token_to_int[word]) new_data.append(new_line) return(new_data) train_med_X = to_ints(train_input_med, input_to_index) train_med_Y = to_ints(train_output_med, output_to_index) dev_X = to_ints(dev_input, input_to_index) dev_Y = to_ints(dev_output, output_to_index) train_med_X = tf.keras.preprocessing.sequence.pad_sequences(train_med_X, padding='post') train_med_Y = tf.keras.preprocessing.sequence.pad_sequences(train_med_Y, padding='post') print(train_input_med[0]) print(train_med_X[0]) # - # Preparamos nuestros datos para ser usados por el tipo de datos de tensorflow Fataset y definimos nuestros hyper parámetros. # + BUFFER_SIZE = len(train_med_X) BATCH_SIZE = 20 MAX_LENGTH = 40 steps_per_epoch = len(train_med_X)//BATCH_SIZE embedding_dim = 100 units = 1024 vocab_inp_size = len(input_to_index) vocab_tar_size = len(output_to_index) SAVE_EACH = 2 dataset = tf.data.Dataset.from_tensor_slices((train_med_X, train_med_Y)).shuffle(BUFFER_SIZE) dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) # - # ## El modelo encoder-decoder # Definamos el encoder. 
# En este caso para tener mayor control sobre el comportamiento del encoder vamos usar

class Encoder(tf.keras.Model):
    """GRU encoder: embeds input token ids and returns the full output
    sequence plus the final hidden state."""
    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.enc_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')

    def call(self, x, hidden):
        # x: batch of token ids; hidden: initial GRU state.
        x = self.embedding(x)
        output, state = self.gru(x, initial_state = hidden)
        return output, state

    def initialize_hidden_state(self):
        # Zero initial state of shape (batch_sz, enc_units).
        return tf.zeros((self.batch_sz, self.enc_units))

# Ahora usamos el modelo de attención de Bahdanau, tal como hemos discutido en la parte teórica.

class BahdanauAttention(tf.keras.Model):
    """Additive (Bahdanau) attention over the encoder outputs."""
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, query, values):
        # query: decoder hidden state; values: encoder output sequence.
        hidden_with_time_axis = tf.expand_dims(query, 1)
        # score shape: (batch, seq_len, 1); softmax over the time axis.
        score = self.V(tf.nn.tanh(self.W1(values) + self.W2(hidden_with_time_axis)))
        attention_weights = tf.nn.softmax(score, axis=1)
        # Weighted sum of encoder outputs -> context vector.
        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights

# Ahora definiremos el decoder

class Decoder(tf.keras.Model):
    """GRU decoder with Bahdanau attention; emits logits over the *output*
    vocabulary one timestep at a time."""
    def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.dec_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc = tf.keras.layers.Dense(vocab_size)
        self.attention = BahdanauAttention(self.dec_units)

    def call(self, x, hidden, enc_output):
        # Attend over the encoder outputs with the current decoder state.
        context_vector, attention_weights = self.attention(hidden, enc_output)
        x = self.embedding(x)
        # Concatenate context vector with the embedded input token.
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        output, state = self.gru(x)
        output = tf.reshape(output, (-1, output.shape[2]))
        x = self.fc(output)
        return x, state, attention_weights

# Definimos nuestra función de pérdida personalizada.

# +
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')

def loss_function(real, pred):
    """Sparse categorical cross-entropy that masks out padding (id 0)."""
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_mean(loss_)
# -

# Creamos los objetos de encoder y decoder.

encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)

# Guardamos los mejores modelos.

checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                 encoder=encoder,
                                 decoder=decoder)

# Definimos una función de distancia que será útil en la evaluación

# +
def distance(str1, str2):
    """Simple Levenshtein implementation for evalm."""
    m = np.zeros([len(str2)+1, len(str1)+1])
    for x in range(1, len(str2) + 1):
        m[x][0] = m[x-1][0] + 1
    for y in range(1, len(str1) + 1):
        m[0][y] = m[0][y-1] + 1
    for x in range(1, len(str2) + 1):
        for y in range(1, len(str1) + 1):
            if str1[y-1] == str2[x-1]:
                dg = 0
            else:
                dg = 1
            m[x][y] = min(m[x-1][y] + 1, m[x][y-1] + 1, m[x-1][y-1] + dg)
    return int(m[len(str2)][len(str1)])
# -

# Predecimos y calculamos accuracy

def predict(inp, targ, max_samples=10):
    """Greedy-decode up to max_samples sequences and print accuracy and
    mean Levenshtein distance against the gold targets.

    Returns a list of (decoded_string, predicted_ids, gold_ids) tuples.
    """
    output_data = list()
    i = 0
    for inputs, outputs in zip(inp, targ):
        if i >= max_samples:
            break
        i += 1
        inputs = tf.expand_dims(inputs,0)
        result = ''
        predicted_ids = list()
        hidden = [tf.zeros((1, units))]
        enc_out, enc_hidden = encoder(inputs, hidden)
        dec_hidden = enc_hidden
        # Decoding starts from the output-vocabulary <BOS> token.
        dec_input = tf.expand_dims([output_to_index['<BOS>']], 0)
        for t in range(MAX_LENGTH):
            predictions, dec_hidden, attention_weights = decoder(dec_input,
                                                                 dec_hidden,
                                                                 enc_out)
            predicted_id = tf.argmax(predictions[0]).numpy()
            # BUGFIX: record the chosen id (this list was never filled before,
            # so the returned tuples always carried an empty id sequence).
            predicted_ids.append(predicted_id)
            result += index_to_output[predicted_id]
            if index_to_output[predicted_id] == '<EOS>':
                break
            dec_input = tf.expand_dims([predicted_id], 0)
        output_data.append((result, predicted_ids, outputs))
        #print(result)
    correct = 0
    total = 0
    dist = 0
    for pred, pred_ids, gold_ids in output_data:
        # Skip the leading <BOS> when reconstructing the gold string.
        gold = "".join([index_to_output[i] for i in gold_ids[1:]])
        if pred == gold:
            correct += 1
            print(pred, "==", gold)
        else:
            print(pred, "!=", gold)
        total += 1
        dist += distance(pred, gold)
    print("Accuracy:", correct/total)
    print("Distance:", dist/total)
    return output_data

# Definimos nuestra función de entrenamiento.

@tf.function
def train_step(inp, targ, enc_hidden):
    """One teacher-forced training step over a batch; returns the mean
    per-timestep loss."""
    loss = 0
    with tf.GradientTape() as tape:
        enc_output, enc_hidden = encoder(inp, enc_hidden)
        dec_hidden = enc_hidden
        # BUGFIX: the decoder consumes *output*-vocabulary tokens, so the
        # initial <BOS> id must come from output_to_index (as predict() does),
        # not input_to_index — the two vocabularies index <BOS> differently.
        dec_input = tf.expand_dims([output_to_index['<BOS>']] * BATCH_SIZE, 1)
        for t in range(1, targ.shape[1]):
            # Teacher forcing: feed the ground-truth token at each step.
            predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
            loss += loss_function(targ[:, t], predictions)
            dec_input = tf.expand_dims(targ[:, t], 1)
    batch_loss = (loss / int(targ.shape[1]))
    variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return batch_loss

# Y entrenamos nuestro modelo!

# +
EPOCHS = 20

# BUGFIX: a stray `print("Epoch", epoch)` stood here, before the loop, where
# `epoch` is not yet defined — it raised NameError on a fresh run. Removed;
# the loop already reports the loss per epoch.
for epoch in range(EPOCHS):
    enc_hidden = encoder.initialize_hidden_state()
    total_loss = 0

    for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
        batch_loss = train_step(inp, targ, enc_hidden)
        total_loss += batch_loss

    # saving (checkpoint) the model every 2 epochs
    if (epoch + 1) % SAVE_EACH == 0:
        checkpoint.save(file_prefix = checkpoint_prefix)

    predict(dev_X, dev_Y, max_samples=40)
    print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / steps_per_epoch))
# -

# Como hemos podido ver, los modelos sequence to sequence pueden modelar una gran variedad de problemas.
Gran parte de ellos pueden aprovechar el poder de estas redes únicamente adecuando los datos de entrada para obtener los resultados deseamos. # # Para mejorar el rendimiento de nuestra red es posible mejorar su rendimiento con diferentes técnicas: multi task training o transfer learning. # # Modificaciones sugeridas para el futuro: # * Implementar transfer learning usando datasets parecidos al español, cómo intaliano o portugués para poder mejorar el rendimiento de español. #
notebooks/5a_seq2seq.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # General info on the fullCyc dataset (as it pertains to SIPSim validation) # # * Simulating 12C gradients # * Determining if simulated taxon abundance distributions resemble the true distributions # * Simulation parameters to infer from dataset: # * Infer total richness of bulk soil community # * richness of starting community # * Infer abundance distribution of bulk soil community # * NO: distribution fit # * INSTEAD: using relative abundances of bulk soil community # * Get distribution of total OTU abundances per fraction # * Number of sequences per sample # # User variables # + init_cell=true # %load_ext rpy2.ipython # + init_cell=true language="R" # workDir = '/home/nick/notebook/SIPSim/dev/fullCyc/' # physeqDir = '/home/nick/notebook/SIPSim/dev/fullCyc_trim/' # # physeqBulkCore = 'bulk-core_trm' # physeqSIP = 'SIP-core_unk_trm' # - ampFragFile = '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde.pkl' # # Init import os # + init_cell=true language="R" # library(ggplot2) # library(dplyr) # library(tidyr) # library(phyloseq) # library(fitdistrplus) # library(sads) # + init_cell=true language="R" # dir.create(workDir, showWarnings=FALSE) # - # # Loading phyloseq list datasets # + init_cell=true language="R" # # bulk core samples # F = file.path(physeqDir, physeqBulkCore) # physeq.bulk = readRDS(F) # #physeq.bulk.m = physeq.bulk %>% sample_data # physeq.bulk %>% names # + init_cell=true language="R" # # SIP core samples # F = file.path(physeqDir, physeqSIP) # physeq.SIP = readRDS(F) # #physeq.SIP.m = physeq.SIP %>% sample_data # physeq.SIP %>% names # - # # Infer abundance distribution of each bulk soil community # # * distribution fit # + language="R" # # physeq2otu.long = function(physeq){ # df.OTU = physeq %>% # 
transform_sample_counts(function(x) x/sum(x)) %>% # otu_table %>% # as.matrix %>% # as.data.frame # # df.OTU$OTU = rownames(df.OTU) # df.OTU = df.OTU %>% # gather('sample', 'abundance', 1:(ncol(df.OTU)-1)) # return(df.OTU) # } # # df.OTU.l = lapply(physeq.bulk, physeq2otu.long) # df.OTU.l %>% names # # #df.OTU = do.call(rbind, lapply(physeq.bulk, physeq2otu.long)) # #df.OTU$Day = gsub('.+\\.D([0-9]+)\\.R.+', '\\1', df.OTU$sample) # #df.OTU %>% head(n=3) # + magic_args="-w 450 -h 400" language="R" # # lapply(df.OTU.l, function(x) descdist(x$abundance, boot=1000)) # + language="R" # fitdists = function(x){ # fit.l = list() # #fit.l[['norm']] = fitdist(x$abundance, 'norm') # fit.l[['exp']] = fitdist(x$abundance, 'exp') # fit.l[['logn']] = fitdist(x$abundance, 'lnorm') # fit.l[['gamma']] = fitdist(x$abundance, 'gamma') # fit.l[['beta']] = fitdist(x$abundance, 'beta') # # # plotting # plot.legend = c('exponential', 'lognormal', 'gamma', 'beta') # par(mfrow = c(2,1)) # denscomp(fit.l, legendtext=plot.legend) # qqcomp(fit.l, legendtext=plot.legend) # # # fit summary # gofstat(fit.l, fitnames=plot.legend) %>% print # # return(fit.l) # } # # fits.l = lapply(df.OTU.l, fitdists) # fits.l %>% names # + language="R" # # getting summaries for lognormal fits # get.summary = function(x, id='logn'){ # summary(x[[id]]) # } # # fits.s = lapply(fits.l, get.summary) # fits.s %>% names # + language="R" # # listing estimates for fits # df.fits = do.call(rbind, lapply(fits.s, function(x) x$estimate)) %>% as.data.frame # df.fits$Sample = rownames(df.fits) # df.fits$Day = gsub('.+D([0-9]+)\\.R.+', '\\1', df.fits$Sample) %>% as.numeric # df.fits # + magic_args="-w 650 -h 300" language="R" # ggplot(df.fits, aes(Day, meanlog, # ymin=meanlog-sdlog, # ymax=meanlog+sdlog)) + # geom_pointrange() + # geom_line() + # theme_bw() + # theme( # text = element_text(size=16) # ) # + language="R" # # mean of estimaates # apply(df.fits, 2, mean) # - # # Relative abundance of most abundant taxa # + 
magic_args="-w 800" language="R" # df.OTU = do.call(rbind, df.OTU.l) %>% # mutate(abundance = abundance * 100) %>% # group_by(sample) %>% # mutate(rank = row_number(desc(abundance))) %>% # ungroup() %>% # filter(rank < 10) # # ggplot(df.OTU, aes(rank, abundance, color=sample, group=sample)) + # geom_point() + # geom_line() + # labs(y = '% rel abund') # - # # Making a community file for the simulations # + magic_args="-w 800 -h 300" language="R" # df.OTU = do.call(rbind, df.OTU.l) %>% # mutate(abundance = abundance * 100) %>% # group_by(sample) %>% # mutate(rank = row_number(desc(abundance))) %>% # group_by(rank) %>% # summarize(mean_abundance = mean(abundance)) %>% # ungroup() %>% # mutate(library = 1, # mean_abundance = mean_abundance / sum(mean_abundance) * 100) %>% # rename('rel_abund_perc' = mean_abundance) %>% # dplyr::select(library, rel_abund_perc, rank) %>% # as.data.frame # # df.OTU %>% nrow %>% print # # ggplot(df.OTU, aes(rank, rel_abund_perc)) + # geom_point() + # geom_line() + # labs(y = 'mean % rel abund') # - # ### Adding reference genome taxon names # ret = !SIPSim KDE_info -t /home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde.pkl ret = ret[1:] ret[:5] # + language="R" # # F = '/home/nick/notebook/SIPSim/dev/fullCyc_trim//ampFrags_kde_amplified.txt' # ret = read.delim(F, sep='\t') # ret = ret$genomeID # ret %>% length %>% print # ret %>% head # + language="R" # ret %>% length %>% print # df.OTU %>% nrow # + magic_args="-i ret" language="R" # # # randomize # ret = ret %>% sample %>% sample %>% sample # # # adding to table # df.OTU$taxon_name = ret[1:nrow(df.OTU)] # df.OTU = df.OTU %>% # dplyr::select(library, taxon_name, rel_abund_perc, rank) # df.OTU %>% head # + language="R" # #-- debug -- # # df.gc = read.delim('~/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_parsed_kde_info.txt', # sep='\t', row.names=) # top.taxa = df.gc %>% # filter(KDE_ID == 1, median > 1.709, median < 1.711) %>% # dplyr::select(taxon_ID) %>% # 
mutate(taxon_ID = taxon_ID %>% sample) %>% # head # # top.taxa = top.taxa$taxon_ID %>% as.vector # top.taxa # + language="R" # #-- debug -- # # p1 = df.OTU %>% # filter(taxon_name %in% top.taxa) # p2 = df.OTU %>% # head(n=length(top.taxa)) # p3 = anti_join(df.OTU, rbind(p1, p2), c('taxon_name' = 'taxon_name')) # # df.OTU %>% nrow %>% print # p1 %>% nrow %>% print # p2 %>% nrow %>% print # p3 %>% nrow %>% print # # p1 = p2$taxon_name # p2$taxon_name = top.taxa # # df.OTU = rbind(p2, p1, p3) # df.OTU %>% nrow %>% print # df.OTU %>% head # - # ### Writing file # + language="R" # # F = file.path(workDir, 'fullCyc_12C-Con_trm_comm.txt') # write.table(df.OTU, F, sep='\t', quote=FALSE, row.names=FALSE) # cat('File written:', F, '\n') # - # ### parsing amp-Frag file to match comm file # !tail -n +2 /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm.txt | \ # cut -f 2 > /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm_taxa.txt # + outFile = os.path.splitext(ampFragFile)[0] + '_parsed.pkl' # !SIPSim KDE_parse \ # $ampFragFile \ # /home/nick/notebook/SIPSim/dev/fullCyc/fullCyc_12C-Con_trm_comm_taxa.txt \ # > $outFile print 'File written {}'.format(outFile) # !SIPSim KDE_info -n $outFile
ipynb/bac_genome/fullCyc/trimDataset/dataset_info.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Install dependencies for this example # Note: This does not include itkwidgets, itself import sys # !{sys.executable} -m pip install itk-io # + try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve import os import itk from itkwidgets import checkerboard # - # Download data proton_density_file_name = 'BrainProtonDensitySliceBorder20.png' if not os.path.exists(proton_density_file_name): url = 'https://data.kitware.com/api/v1/file/5c115d388d777f2179e5ad7a/download' urlretrieve(url, proton_density_file_name) t1_file_name = 'BrainT1SliceBorder20.png' if not os.path.exists(t1_file_name): url = 'https://data.kitware.com/api/v1/file/5c1080238d777f2179e42f2a/download' urlretrieve(url, t1_file_name) proton_density = itk.imread(proton_density_file_name, itk.ctype('unsigned char')) t1 = itk.imread(t1_file_name, itk.ctype('unsigned char')) checkerboard(proton_density, t1, pattern=5)
examples/Checkerboard.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CNN Training # # Target of this code is to train a CNN network to extract the needle position of an analog needle device. # # ### Preparing the training # * First all libraries are loaded # * It is assumed, that they are installed during the Python setup # * matplotlib is set to print the output inline in the jupyter notebook # + import os import tensorflow as tf import matplotlib.pyplot as plt import glob import numpy as np from sklearn.utils import shuffle from tensorflow.python import keras from tensorflow.python.keras import Sequential from tensorflow.python.keras.layers import Dense, InputLayer, Conv2D, MaxPool2D, Flatten, BatchNormalization from tensorflow.keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split import tensorflow.keras.backend as K from tensorflow.keras.callbacks import History import math from PIL import Image loss_ges = np.array([]) val_loss_ges = np.array([]) # %matplotlib inline np.set_printoptions(precision=4) np.set_printoptions(suppress=True) # - # ### Load training data # * The data is expected in the "Input_dir" # * Picture size must be 32x32 with 3 color channels (RGB) # * The filename contains the informations needed for training in the first 3 digits:: # * Typical filename: # * x.y-zzzz.jpg # * e.g. 
"4.6_Lfd-1406_zeiger3_2019-06-02T050011" # # |Place holder | Meaning | Usage | # |------------- |-----------------------------|--------------| # | **x.y** | readout value | **to be learned** | # | zzzz | additional information | not needed | # # * The images are stored in the x_data[] # * The expected output for each image in the corresponding y_data[] # * The periodic nature is reflected in a **sin/cos coding**, which allows to restore the angle/counter value with an arctan later on. # # * The last step is a shuffle (from sklearn.utils) as the filenames are on order due to the encoding of the expected analog readout in the filename # + Input_dir='data_resize_all' files = glob.glob(Input_dir + '/*.*') x_data = [] y_data = [] for aktfile in files: test_image = Image.open(aktfile) test_image = np.array(test_image, dtype="float32") test_image = np.reshape(test_image, (32,32,3)) base = os.path.basename(aktfile) target_number = (float(base[0:3])) / 10 target_sin = math.sin(target_number * math.pi * 2) target_cos = math.cos(target_number * math.pi * 2) x_data.append(test_image) zw = np.array([target_sin, target_cos]) y_data.append(zw) x_data = np.array(x_data) y_data = np.array(y_data) print(x_data.shape) print(y_data.shape) x_data, y_data = shuffle(x_data, y_data) X_train, X_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.1) # - # ### Define the model # # The layout of the network ist a typcial CNN network with alternating **Conv2D** and **MaxPool2D** layers. Finished after **flattening** with additional **Dense** layer. 
# # #### Important # * Shape of the input layer: (32, 32, 3) # * Shape of the output layer: (2) - sin and cos # + model = Sequential() model.add(BatchNormalization(input_shape=(32,32,3))) model.add(Conv2D(32, (5, 5), input_shape=(32,32,3), padding='same', activation="relu")) model.add(MaxPool2D(pool_size=(4,4))) model.add(Conv2D(32, (5, 5), padding='same', activation="relu")) model.add(MaxPool2D(pool_size=(4,4))) model.add(Conv2D(32, (3, 3), padding='same', activation="relu")) model.add(MaxPool2D(pool_size=(2,2))) model.add(Flatten()) model.add(Dense(128,activation="relu")) model.add(Dense(64,activation="relu")) model.add(Dense(2)) model.summary() model.compile(loss=keras.losses.mean_squared_error, optimizer=tf.keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95), metrics = ["accuracy"]) # - # # Training # The input pictures are randomly scattered for brightness and pixel shift variations. These is implemented with a ImageDataGenerator. # # # The training is splitted into two steps: # 1. Variation of the brightness only # 2. 
Variation of brightness and Pixel Shift # ### Step 1: Brigthness scattering only # + Batch_Size = 8 Epoch_Anz = 30 Shift_Range = 0 Brightness_Range = 0.3 datagen = ImageDataGenerator(width_shift_range=[-Shift_Range,Shift_Range], height_shift_range=[-Shift_Range,Shift_Range],brightness_range=[1-Brightness_Range,1+Brightness_Range]) train_iterator = datagen.flow(X_train, y_train, batch_size=Batch_Size) validation_iterator = datagen.flow(X_test, y_test, batch_size=Batch_Size) history = model.fit_generator(train_iterator, validation_data = validation_iterator, epochs = Epoch_Anz) # - # ### Step 1: Learing result # # * Visualization of the training and validation results # + loss_ges = np.append(loss_ges, history.history['loss']) val_loss_ges = np.append(val_loss_ges, history.history['val_loss']) plt.semilogy(history.history['loss']) plt.semilogy(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train','eval'], loc='upper left') plt.show() # - # ### Step 2: Brigthness and Pixel Shift scattering # Here a higher number of epochs is used to reach the minimum loss function # + Batch_Size = 8 Epoch_Anz = 160 Shift_Range = 3 Brightness_Range = 0.3 datagen = ImageDataGenerator(width_shift_range=[-Shift_Range,Shift_Range], height_shift_range=[-Shift_Range,Shift_Range],brightness_range=[1-Brightness_Range,1+Brightness_Range]) train_iterator = datagen.flow(X_train, y_train, batch_size=Batch_Size) validation_iterator = datagen.flow(X_test, y_test, batch_size=Batch_Size) history = model.fit_generator(train_iterator, validation_data = validation_iterator, epochs = Epoch_Anz) # - # ### Overall Learing results (Step 1 & Step 2) # + loss_ges = np.append(loss_ges, history.history['loss']) val_loss_ges = np.append(val_loss_ges, history.history['val_loss']) plt.semilogy(loss_ges) plt.semilogy(val_loss_ges) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train','eval'], loc='upper left') plt.show() # - # ### 
Check the model by hand # # * The following code uses the trained model to check the deviation for each picture. # * The evaluation takes the periodic character of the results into account (dev1 ... dev2). # * Images, that have a bigger deviation as the parameter "deviation_max_list" are printed in a list to check the picture and labeling itself # + Input_dir='data_resize_all' #Input_dir='test_result' files = glob.glob(Input_dir + '/*.*') res = [] i = 0 deviation_max_list = 0.05 for aktfile in files: base = os.path.basename(aktfile) target = (float(base[0:3])) / 10 target_sin = math.sin(target * math.pi * 2) target_cos = math.cos(target * math.pi * 2) test_image = Image.open(aktfile) test_image = np.array(test_image, dtype="float32") img = np.reshape(test_image,[1,32,32,3]) classes = model.predict(img) out_sin = classes[0][0] out_cos = classes[0][1] out_target = (np.arctan2(out_sin, out_cos)/(2*math.pi)) % 1 dev_sin = target_sin - out_sin dev_cos = target_cos - out_cos dev_target = target - out_target if abs(dev_target + 1) < abs(dev_target): out_target = out_target - 1 dev_target = target - out_target else: if abs(dev_target - 1) < abs(dev_target): out_target = out_target + 1 dev_target = target - out_target res.append(np.array([target, out_target, dev_target, out_sin, out_cos, i])) if abs(dev_target) > deviation_max_list: print(aktfile + " " + str(target) + " " + str(out_target) + " " + str(dev_target)) i+=1 res = np.asarray(res) res_step_1 = res # - # ## Results plt.plot(res[:,3]) plt.plot(res[:,4]) plt.title('Result') plt.ylabel('value') plt.xlabel('#Picture') plt.legend(['sin', 'cos'], loc='lower left') plt.show() plt.plot(res[:,0]) plt.plot(res[:,1]) plt.title('Result') plt.ylabel('Counter Value') plt.xlabel('#Picture') plt.legend(['Orginal', 'Prediction'], loc='upper left') plt.show() # ### Deviation from Expected Value # + plt.plot(res[:,2]) plt.title('Deviation') plt.ylabel('Deviation from expected value') plt.xlabel('#Picture') plt.legend(['Deviation'], 
loc='upper left') #plt.ylim(-0.3, 0.3) plt.show() statistic = np.array([np.mean(res[:,2]), np.std(res[:,2]), np.min(res[:,2]), np.max(res[:,2])]) print(statistic) # - # ### Save the model # # * Save the model to the file with the "h5" file format # + # model.save("CNN_Analog-Readout_Version-6.2.0.h5") converter = tf.lite.TFLiteConverter.from_keras_model(model) tflite_model = converter.convert() open("CNN_Analog-Readout_Version-6.3.0-Small1.tflite", "wb").write(tflite_model)
.ipynb_checkpoints/Train_CNN_Analog-Readout_Version-Small1_old-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Unsupervised Dictionary Methods

import pandas as pd
import numpy as np
from nltk import *  # NOTE(review): wildcard import kept for compatibility; only wordnet is used below
import string
import pickle
import re
from nltk.corpus import wordnet
from sklearn.model_selection import train_test_split

# +
# Load the tokenised words and their gold-standard language tags
with open('modified_data/just_tags.txt', 'rb') as f:
    just_tags = pickle.load(f)
with open('modified_data/just_words.txt', 'rb') as f:
    just_words = pickle.load(f)
# -

words_train, words_test, tags_train, tags_test = train_test_split(
    just_words, just_tags, random_state=42, test_size=0.2)

# Build a Hindi dictionary from the training data because there is no
# phonetically-typed Hindi dictionary available anywhere.
# Stored as a set so the membership tests below are O(1) instead of O(n).
hin_dict = {w for i, w in enumerate(words_train) if tags_train[i] == 'HI'}

# How wordnet's dictionary works:
# Case insensitive so it's good for our data
if wordnet.synsets('glass'):
    print('ENGLISH')
else:
    print('NOT ENGLISH')

# Just check if emoticons with alphabets are present in the training set
':P' in just_words

# How do emoticons look?
[w for i, w in enumerate(just_words) if just_tags[i] == 'EMT']

# The 4 possible tags we need to classify for
np.unique(tags_train)

# ## Wordnet and Hindi Dictionary

# Classifier that uses wordnet's dictionary
def classify_dictionary(words):
    """Tag each word as EN / EMT / HI / UN.

    EN  -- found in WordNet
    EMT -- contains no ASCII letters (emoticons, symbol runs)
    HI  -- found in the training-derived Hindi dictionary
    UN  -- unknown to all dictionaries
    """
    preds = []
    for w in words:
        if wordnet.synsets(w):
            preds.append('EN')
        elif re.search('[a-zA-Z]', w) is None:  # idiomatic `is None` (was `== None`)
            preds.append('EMT')
        elif w in hin_dict:
            preds.append('HI')
        else:
            preds.append('UN')
    return preds

preds_dict_train = classify_dictionary(words_train)
preds_dict_test = classify_dictionary(words_test)

def classification_acc(true_y, pred_y):
    """Return the percentage of positions where true_y and pred_y agree."""
    matches = sum(1 for t, p in zip(true_y, pred_y) if t == p)
    return (matches / len(true_y)) * 100

print ('Training classification accuracy using just dictionaries is:',
       classification_acc(tags_train, preds_dict_train), '%')
print ('Test classification accuracy using just dictionaries is:',
       classification_acc(tags_test, preds_dict_test), '%')

# ## Checking with the Twitter Dictionary

# Load the twitter generated dictionary
with open('dictionaries/dictionary_twitter.txt', 'rb') as f:
    twitter_dict = pickle.load(f)
just_keys = list(twitter_dict.keys())

# Clean the keys of punctuation and links; keep as a set for fast lookup
invalid_chars = set(string.punctuation)
twitter_dict_eng = {w for w in just_keys if not any(ch in invalid_chars for ch in w)}

# Classifier that uses the Twitter dictionary
def classify_dictionary_twitter(words):
    """Same tagging scheme as classify_dictionary, but English membership is
    decided by the Twitter-derived dictionary instead of WordNet."""
    preds = []
    for w in words:
        if w in twitter_dict_eng:
            preds.append('EN')
        elif re.search('[a-zA-Z]', w) is None:
            preds.append('EMT')
        elif w in hin_dict:
            preds.append('HI')
        else:
            preds.append('UN')
    return preds

preds_dict_train_tw = classify_dictionary_twitter(words_train)
preds_dict_test_tw = classify_dictionary_twitter(words_test)

print ('Training classification accuracy using Twitter dictionaries is:',
       classification_acc(tags_train, preds_dict_train_tw), '%')
print ('Test classification accuracy using Twitter dictionaries is:',
       classification_acc(tags_test, preds_dict_test_tw), '%')

# Accuracy has slightly improved with a dictionary more suitable for the task.
.ipynb_checkpoints/dictionary_checking-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jvishnuvardhan/Keras_Examples/blob/master/Save_EntireModel.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="kmd-wcqoaLbY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 674} outputId="4cd6ee85-42e6-4354-e6eb-0c207da9c08a"
# !pip install tensorflow==2.0.0rc1

# + id="MeaQZct8aRkg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="131adbef-6428-43dc-ee80-4d64d11082aa"
import tensorflow as tf
from tensorflow import keras

# Fetch MNIST and scale pixel intensities into [0, 1]
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0

def create_model():
    """Build and compile a small dense classifier for 28x28 MNIST digits."""
    net = tf.keras.models.Sequential()
    net.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
    net.add(tf.keras.layers.Dense(512, activation=tf.nn.relu))
    net.add(tf.keras.layers.Dropout(0.2))
    net.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
    net.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
    return net

# Train a fresh model and record its held-out accuracy
model = create_model()
model.fit(x_train, y_train, epochs=5)
loss, acc = model.evaluate(x_test, y_test, verbose=1)
print("Original model, accuracy: {:5.2f}%".format(100*acc))

# Save entire model (architecture + weights + optimizer state) to a HDF5 file
model.save('my_model.h5')

# + id="Rp4zTGGpOZ7c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="0f01a3e8-d507-4f4e-a373-58790b007ef9"
# Recreate the exact same model, including weights and optimizer,
# then confirm the restored accuracy matches the original.
new_model = keras.models.load_model('my_model.h5')
loss, acc = new_model.evaluate(x_test, y_test)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
Save_EntireModel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Source : https://github.com/CVxTz/Recommender_keras
# dataset : https://grouplens.org/datasets/movielens/
# dataset file : ml-latest-small.zip
# dataset readme : http://files.grouplens.org/datasets/movielens/ml-latest-small-README.html

# This code is a combination of utils.py, recommend.py, and plot_movies.py.
# And, it also runs and validates in python 3.5 and keras 2.0.

import pandas as pd
import numpy as np

from keras.models import Sequential, Model
from keras.layers import Embedding, Reshape, Activation, Input, Dense, Flatten, Dropout
from keras.layers.merge import Dot, multiply, concatenate
from keras.utils import np_utils
from keras.utils.data_utils import get_file
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import skipgrams

from collections import defaultdict

### ---
# from utils import *  # utils.py
from sklearn.metrics import mean_absolute_error
import pickle

### ---
#from utils import *  # utils.py
#import pickle
import pandas as pd
from sklearn.manifold import TSNE
import numpy as np
import plotly.plotly as py  # conda install -c plotly plotly
                            # NOTE(review): unused below, and plotly.plotly moved to
                            # chart_studio in plotly>=4 -- confirm before upgrading
import plotly
import plotly.graph_objs as go
import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
from keras.models import load_model  # NEW

### --- start of Recommender_keras/utils.py

def get_mapping(series):
    """Map each distinct value in `series` to a dense 1-based integer id,
    in first-appearance order. Returns the {original_id: dense_id} dict."""
    occurances = defaultdict(int)
    for element in series:
        occurances[element] += 1
    mapping = {}
    i = 0
    for element in occurances:
        i += 1
        mapping[element] = i
    return mapping

def get_data():
    """Load MovieLens ratings, densify ids, and split train/test by time.

    The split point is the 80th percentile of the rating timestamps, so the
    test set is strictly "in the future" relative to the training set.

    Returns: (train, test, max_user, max_work, mapping_work)
    """
    #data = pd.read_csv("data/ratings.csv")
    data = pd.read_csv("ml-latest-small/ratings.csv")

    mapping_work = get_mapping(data["movieId"])
    data["movieId"] = data["movieId"].map(mapping_work)

    # BUG FIX: the original built the second mapping from "movieId" and mapped
    # "movieId" a second time, which scrambled the movie ids and left "userId"
    # undensified. The user ids must be remapped here instead.
    mapping_users = get_mapping(data["userId"])
    data["userId"] = data["userId"].map(mapping_users)

    percentil_80 = np.percentile(data["timestamp"], 80)

    print(percentil_80)

    print(np.mean(data["timestamp"]<percentil_80))
    print(np.mean(data["timestamp"]>percentil_80))

    cols = ["userId", "movieId", "rating"]

    train = data[data.timestamp<percentil_80][cols]
    print(train.shape)  #(80668, 3)

    test = data[data.timestamp>=percentil_80][cols]
    print(test.shape)  #(20168, 3)

    max_user = max(data["userId"].tolist())
    max_work = max(data["movieId"].tolist())

    return train, test, max_user, max_work, mapping_work

def get_model_1(max_work, max_user):
    """Pure matrix-factorization model: dot-style interaction of two embeddings."""
    dim_embedddings = 30
    # inputs
    w_inputs = Input(shape=(1,), dtype='int32')
    w = Embedding(max_work+1, dim_embedddings, name="work")(w_inputs)

    # context
    u_inputs = Input(shape=(1,), dtype='int32')
    u = Embedding(max_user+1, dim_embedddings, name="user")(u_inputs)
    o = multiply([w, u])
    o = Dropout(0.5)(o)
    o = Flatten()(o)
    o = Dense(1)(o)

    rec_model = Model(inputs=[w_inputs, u_inputs], outputs=o)
    #rec_model.summary()
    rec_model.compile(loss='mae', optimizer='adam', metrics=["mae"])

    return rec_model

def get_model_2(max_work, max_user):
    """Matrix factorization plus per-user and per-movie bias embeddings."""
    dim_embedddings = 30
    bias = 1
    # inputs
    w_inputs = Input(shape=(1,), dtype='int32')
    w = Embedding(max_work+1, dim_embedddings, name="work")(w_inputs)
    w_bis = Embedding(max_work + 1, bias, name="workbias")(w_inputs)

    # context
    u_inputs = Input(shape=(1,), dtype='int32')
    u = Embedding(max_user+1, dim_embedddings, name="user")(u_inputs)
    u_bis = Embedding(max_user + 1, bias, name="userbias")(u_inputs)
    o = multiply([w, u])
    o = concatenate([o, u_bis, w_bis])
    o = Dropout(0.5)(o)
    o = Flatten()(o)
    o = Dense(1)(o)

    rec_model = Model(inputs=[w_inputs, u_inputs], outputs=o)
    #rec_model.summary()
    rec_model.compile(loss='mae', optimizer='adam', metrics=["mae"])

    return rec_model

def get_model_3(max_work, max_user):
    """Like model 2, but with a small nonlinear (ReLU) head on the interaction."""
    dim_embedddings = 30
    bias = 1
    # inputs
    w_inputs = Input(shape=(1,), dtype='int32')
    w = Embedding(max_work+1, dim_embedddings, name="work")(w_inputs)
    w_bis = Embedding(max_work + 1, bias, name="workbias")(w_inputs)

    # context
    u_inputs = Input(shape=(1,), dtype='int32')
    u = Embedding(max_user+1, dim_embedddings, name="user")(u_inputs)
    u_bis = Embedding(max_user + 1, bias, name="userbias")(u_inputs)
    o = multiply([w, u])
    o = Dropout(0.5)(o)
    o = concatenate([o, u_bis, w_bis])
    o = Flatten()(o)
    o = Dense(10, activation="relu")(o)
    o = Dense(1)(o)

    rec_model = Model(inputs=[w_inputs, u_inputs], outputs=o)
    #rec_model.summary()
    rec_model.compile(loss='mae', optimizer='adam', metrics=["mae"])

    return rec_model

def get_array(series):
    """Wrap each scalar of a pandas Series into its own 1-element row,
    the (n, 1) shape the Embedding inputs above expect."""
    return np.array([[element] for element in series])

### --- end of Recommender_keras/utils.py

### --- start of Recommender_keras/recommend.py

train, test, max_user, max_work, mapping_work = get_data()

pickle.dump(mapping_work, open('mapping_work.pkl', 'wb'))  # create pkl file

####################
model = get_model_1(max_work, max_user)
history = model.fit([get_array(train["movieId"]), get_array(train["userId"])],
                    get_array(train["rating"]), epochs=10, validation_split=0.2, verbose=1)
model.save_weights("model_1.h5")  # create hdf5 file
predictions = model.predict([get_array(test["movieId"]), get_array(test["userId"])])
test_performance = mean_absolute_error(test["rating"], predictions)
print(" Test Mae model 1 : %s " % test_performance)

####################
model = get_model_2(max_work, max_user)
history = model.fit([get_array(train["movieId"]), get_array(train["userId"])],
                    get_array(train["rating"]), epochs=10, validation_split=0.2, verbose=1)
predictions = model.predict([get_array(test["movieId"]), get_array(test["userId"])])
test_performance = mean_absolute_error(test["rating"], predictions)
print(" Test Mae model 2 : %s " % test_performance)

####################
model = get_model_3(max_work, max_user)
history = model.fit([get_array(train["movieId"]), get_array(train["userId"])],
                    get_array(train["rating"]), epochs=10, validation_split=0.2, verbose=1)
predictions = model.predict([get_array(test["movieId"]), get_array(test["userId"])])
test_performance = mean_absolute_error(test["rating"], predictions)
print(" Test Mae model 3 : %s " % test_performance)

### --- End of Recommender_keras/recommend.py

"""
from keras.models import load_model

model.save('my_model.h5')  # creates a HDF5 file 'my_model.h5'
del model  # deletes the existing model

# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
"""

### --- start of Recommender_keras/plot_movies.py

train, test, max_user, max_work, _ = get_data()

#movies = pd.read_csv("data/movies.csv")
movies = pd.read_csv("ml-latest-small/movies.csv")
movie_title = dict(zip(movies["movieId"], movies["title"]))

model = get_model_1(max_user=max_user, max_work=max_work)
model.load_weights("model_1.h5")  # load hdf5

embedding_work = model.get_layer("work").get_weights()[0]
print(embedding_work)

mapping_work = pickle.load(open("mapping_work.pkl", "rb"))  # load pkl (pickle) file

# reverse_mapping = dict((v,k) for k,v in mapping_work.iteritems())
# Error: " 'dict' object has no attribute 'iteritems' "
# Removed dict.iteritems(), dict.iterkeys(), and dict.itervalues()
# Instead: use dict.items(), dict.keys(), and dict.values() respectively.
reverse_mapping = dict((v,k) for k,v in mapping_work.items())  # iteritems -> items

# Collect the learned embedding row for every movie we know a title for
embedding = {}
for id in movie_title:
    if id in mapping_work:
        embedding[id] = embedding_work[mapping_work[id], :]

list_titles = []
list_embeddings = []

for id in embedding:
    list_titles.append(movie_title[id])
    list_embeddings.append(embedding[id])

# Project the 30-d movie embeddings into 2-d for visualisation
matrix_embedding = np.array(list_embeddings)
X_embedded = TSNE(n_components=2).fit_transform(matrix_embedding)

vis_x = X_embedded[:, 0]
vis_y = X_embedded[:, 1]

data = [
    go.Scatter(
        x=vis_x,
        y=vis_y,
        mode='markers',
        text=list_titles
    )
]
layout = go.Layout(
    title='Movies'
)
fig = go.Figure(data=data, layout=layout)
plotly.offline.plot(fig, filename='movies.html')
# -
New-Movielens-reco-Keras.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.1 64-bit (system)
#     name: python3
# ---

# # Fancy indexing

# +
# # !pip install numpy
# -

import numpy as np

# Seed the global RNG so the random example values below are reproducible
# on every Restart-and-Run-All (best practice for any stochastic notebook).
np.random.seed(42)

# Fancy indexing means passing an array of indices to access multiple array elements at once.
x = np.random.randint(100, size=10)
x

# Suppose we want to access four different values. We could do it like this:
[x[3], x[6], x[9], x[4]]

# Using fancy indexing:
ind = [3, 6, 9, 4]
x[ind]

# Fancy indexing also works with multiple dimensions
M = np.arange(12).reshape((3, 4))
M

row = np.array([0, 2, 1])
col = np.array([2, 1, 3])
# Picks elements (0,2), (2,1) and (1,3) in a single operation
M[row, col]

# Without fancy indexing
[M[0, 2], M[2, 1], M[1, 3]]

# For even more powerful operations, fancy indexing can be combined with the other indexing schemes:
M[2, [2, 0, 1]]

# Without fancy indexing:
[M[2, 2], M[2, 0], M[2, 1]]

# With fancy indexing:
M[1:, [2, 0, 1]]

# Modifying values with fancy indexing
x = np.arange(10)
x

idx = np.array([0, 3, 7])
x[idx] = 100
x

# Changing the values at those indices in place
x[idx] += 20
x

x[idx] -= 125
x

# Create a filter array that will return only even elements from the original array:
filter1 = []
for elem in x:
    if elem % 2 == 0:  # if elem is even
        filter1.append(True)
    else:
        filter1.append(False)

print(filter1)
print(x[filter1])

# Create a filter that returns only values greater than 5:
filter2 = x > 5
print(filter2)
print(x[filter2])

print(M)

filter3 = M > 4
filter3

M[filter3]

# ## NumPy unique
# Find the unique elements of an array.
#
# Returns the sorted unique elements of an array.
#
# There are three optional outputs in addition to the unique elements:
# - the indices of the input array that give the unique values
# - the indices of the unique array that reconstruct the input array
# - the number of times each unique value comes up in the input array

arr = ['d', 'a', 'b', 'd', 'c', 'c', 'b', 'a', 'b', 'a']
np.unique(arr)

# Using return_counts:
unique, counts = np.unique(arr, return_counts=True)
print(dict(zip(unique, counts)))

unique
counts

# Using return_index: it returns the first index where each value appears
unique, idx = np.unique(arr, return_index=True)
print(dict(zip(unique, idx)))

idx

unique, inv = np.unique(arr, return_inverse=True)
inv

# Recovering the original array:
unique[inv]
03-Introduction to Numpy/05-Fancy indexing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (py37)
#     language: python
#     name: py37
# ---

import numpy as np
from matplotlib import pyplot as plt
import netCDF4 as nc
import re
import shutil

# %matplotlib inline

# Copy the restart file, then edit the copy so the M. rubrum (MYRI) tracer is
# folded into the general phytoplankton (PHY) tracer and zeroed out, for both
# the "now" (TRN*) and "before" (TRB*) time levels of the restart.
src='/ocean/eolson/MEOPAR/northernNO3PaperCalcs/bioModel/runFiles/01may15/SalishSea_01153440_restart_trc.nc'
dst='/ocean/eolson/MEOPAR/northernNO3PaperCalcs/bioModel/runFiles/01may15/SalishSea_01153440_restart_trc_noMrubrum.nc'
shutil.copy(src, dst)

f=nc.Dataset(dst,'r+')

print(f.variables.keys())

# Sanity check: ranges before editing
np.max(f.variables['TRNPHY']), np.min(f.variables['TRNPHY'])

np.max(f.variables['TRNMYRI']), np.min(f.variables['TRNMYRI'])

# "now" fields: add MYRI biomass into PHY, then zero MYRI
f.variables['TRNPHY'][:]=f.variables['TRNPHY'][:]+f.variables['TRNMYRI'][:]
f.variables['TRNMYRI'][:]=0.0

# "before" fields: same operation on the TRB* time level.
# BUG FIX: the original zeroed TRNMYRI a second time here, so TRBMYRI kept its
# biomass even though it had already been added into TRBPHY (double counting).
f.variables['TRBPHY'][:]=f.variables['TRBPHY'][:]+f.variables['TRBMYRI'][:]
f.variables['TRBMYRI'][:]=0.0

f.close()

# Re-open read-only and verify the edit took effect
f=nc.Dataset(dst)

np.max(f.variables['TRNPHY']), np.min(f.variables['TRNPHY'])

np.max(f.variables['TRNMYRI']), np.min(f.variables['TRNMYRI'])

f.close()
notebooks/revisions/createIC_noMrubrum.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="0Hwyb4fS6l-J" # # Understanding Advance RNN units # # In this Implemetation, we will be comparing some of the advanced RNN units, like Long Short term memory (LSTM) and Gated Recurrent Units (GRU). # # # LSTM units have very intuitive structure. It has two internal states, whereas vanilla RNN has only one hidden state. The cell state in the LSTM is like a conveyor belt which runs on the top of the unit as shown in the diagram. The cell state is highly regulated by gates attached to it. Gates are the way to let the information through. LSTM has three gates to control the information flow. # # ![](figures/LSTM.png) # # # Figure: Showing various Gates present in LSTM. # # # **Forget gate**: It regulates the information flow. A sigmoid gate looks at the input and previous hidden state . The sigmoid output value of 1 means let everything go through and 0 means nothing to get through. # # $$ f_t = \sigma_g (W_f[W_{t-1},x_t] + b_f) $$ # # To keep or not is gradually learned by weights and bias attached to forget gate. # # **Input gate: **Next is the input gate that decides what information we are going to keep in the cell state. The input gate has two inputs one is controlled by sigmoid and another is controlled by tanh. The input gate is defined by below-given equations. # # $$i_t = \sigma_g (W_i\bullet [h_{t-1}, x_t] + b_i) \\ # \widetilde{C}_t = tanh(W_c\bullet [h_{t-1}, x_t] + b_c) $$ # # Output gate: It decides what information to let through according to cell state and hidden state. A sigmoid gate decides what information from the hidden state goes to output. Tanh decides what information from cell state goes to output gate. 
Output gate can be mathematically represented as follows: # # $$o_t = \sigma_g (W_o[h_{t-1}, x_t]+ b_o) \\ # h_t = o_t * tanh(C_t) $$ # # The information controlled by gate then merges into the cell state as shown in the below-given equation. # # $$c_t = f_t \circ c_{t-1} + i_t \circ \widetilde C_t $$ # # LSTM can be very simply implemented using Pytorch. Pytorch has a function LSTM and it takes similar input shape as described in case of vanilla RNN, it can be used as follow. # # + [markdown] colab_type="text" id="DqQ1RzJ08OWU" # # Importing Requirements # + colab={"base_uri": "https://localhost:8080/", "height": 833} colab_type="code" id="6TEFf4WfvLRW" outputId="2393f08d-fffb-454b-851b-e07ed17f2bca" import json import os import random import tarfile import urllib import zipfile import matplotlib.pyplot as plt import nltk import torch from torch import nn, optim from torchtext import data from torchtext import vocab from tqdm import tqdm nltk.download('popular') SEED = 1234 torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # + [markdown] colab_type="text" id="2FdXgOS6nkHF" # # Downloading required datasets # To demonstrate how embeddings can help, we will be conducting an experiment on sentiment analysis task. I have used movie review dataset having 5331 positive and 5331 negative processed sentences. The entire experiment is divided into 5 sections. # # Downloading Dataset: Above discussed dataset is available at http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz. 
#
#

# + colab={} colab_type="code" id="JtpoPziv8GJO"
# Download and unpack the movie-review dataset only when it is not already on disk.
data_exists = os.path.isfile('data/rt-polaritydata.tar.gz')
if not data_exists:
    urllib.request.urlretrieve("http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz", "data/rt-polaritydata.tar.gz")
    tar = tarfile.open("data/rt-polaritydata.tar.gz")
    tar.extractall(path='data/')

# + [markdown] colab_type="text" id="pG6zlPuBnkHI"
# # Downloading embedding
# The pre-trained embeddings are available and can be easily used in our model. we will be using the FastText vector trained on the wiki news corpus.

# + colab={} colab_type="code" id="j7YAlpTCP6gy"
# BUG FIX: the original condition was inverted (`if embed_exists:`), so the
# archive was re-downloaded when it already existed and never downloaded when
# it was missing. Download only when absent; otherwise just report it exists.
embed_exists = os.path.isfile('../embeddings/wiki-news-300d-1M.vec.zip')
if not embed_exists:
    urllib.request.urlretrieve("https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip","../embeddings/wiki-news-300d-1M.vec.zip")
    zip_ref = zipfile.ZipFile("../embeddings/wiki-news-300d-1M.vec.zip", 'r')
    zip_ref.extractall("../embeddings/")
    zip_ref.close()
else:
    print("FastText embeddings exists, if not downloaded properly, then delete the `../embeddings/wiki-news-300d-1M.vec.zip")

# + [markdown] colab_type="text" id="13BKiVJhnkHM"
# # Preprocessing
# I am using TorchText to preprocess downloaded data. The preprocessing includes following steps:
#
# - Reading and parsing data
# - Defining sentiment and label fields
# - Dividing data into train, valid and test subset
# - forming the train, valid and test iterators

# + colab={} colab_type="code" id="k3sTlJzyvLRb"
SEED = 1
split = 0.80  # fraction of examples written to the training file

# + colab={} colab_type="code" id="85Uzm_t7vLRe"
# Read both polarity files; label negatives 0 and positives 1.
data_block = []
negative_data = open('data/rt-polaritydata/rt-polarity.neg',encoding='utf8',errors='ignore').read().splitlines()
for i in negative_data:
    data_block.append({"sentiment":str(i.strip()),"label" : 0})
positve_data = open('data/rt-polaritydata/rt-polarity.pos',encoding='utf8',errors='ignore').read().splitlines()
for i in positve_data:
    data_block.append({"sentiment":str(i.strip()),"label" : 1})

# + colab={} colab_type="code" id="SzntAD2CvLRi"
# Seed before shuffling so the train/test file split is reproducible
# (SEED was defined above but never applied in the original).
random.seed(SEED)
random.shuffle(data_block)
# Use context managers so the JSON files are flushed and closed deterministically
# (the original left both file handles open).
with open('data/train.json', 'w') as train_file:
    for i in range(0,int(len(data_block)*split)):
        train_file.write(str(json.dumps(data_block[i]))+"\n")
with open('data/test.json', 'w') as test_file:
    for i in range(int(len(data_block)*split),len(data_block)):
        test_file.write(str(json.dumps(data_block[i]))+"\n")

# + colab={} colab_type="code" id="m2xfhN6avLRl"
def tokenize(sentiments):
    """Identity tokenizer: torchtext expects a callable, the data is already tokenized."""
    # print(sentiments)
    return sentiments

def pad_to_equal(x):
    """Pad with '<pad>' (or truncate) every token list to a fixed length of 61."""
    if len(x) < 61:
        return x + ['<pad>' for i in range(0, 61 - len(x))]
    else:
        return x[:61]

def to_categorical(x):
    """One-hot encode the binary label: 0 -> [1, 0], 1 -> [0, 1]."""
    if x == 1:
        return [0,1]
    if x == 0:
        return [1,0]

# + colab={} colab_type="code" id="a8JZt1mBvLRp"
SENTIMENT = data.Field(sequential=True , preprocessing =pad_to_equal , use_vocab = True, lower=True)
LABEL = data.Field(is_target=True,use_vocab = False, sequential=False, preprocessing =to_categorical)
fields = {'sentiment': ('sentiment', SENTIMENT), 'label': ('label', LABEL)}

# + colab={} colab_type="code" id="fq8L7GjZvLRu"
train_data , test_data = data.TabularDataset.splits(
                            path = 'data',
                            train = 'train.json',
                            test = 'test.json',
                            format = 'json',
                            fields = fields
)

# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="3WzzpHkXvLRy" outputId="03d3f95b-d6ea-4bdb-bda8-287dbc22e731"
print("Printing an example data : ",vars(train_data[1]))

# + [markdown] colab_type="text" id="sUYeX3eenkHi"
# **Splitting data in to test and train**

# + colab={} colab_type="code" id="FEZRHuB7vLR3"
# NOTE(review): random.seed(SEED) returns None, so this is effectively
# random_state=None; kept as-is to preserve the original behaviour.
train_data, valid_data = train_data.split(random_state=random.seed(SEED))

# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="3CFyFa5_vLR7" outputId="744e9de0-82bf-4469-f48d-b37a70675b26"
print('Number of training examples: ', len(train_data))
print('Number of validation examples: ', len(valid_data))
print('Number of testing examples: ',len(test_data))

# + [markdown] colab_type="text" id="iWod-X4lnkHp"
# **Loading Embedding to vocab**

# + colab={} colab_type="code" id="LUF_n6AivLSY"
# NOTE(review): the prose above mentions FastText, but the vectors loaded here
# are GloVe ("glove.840B.300d.txt") -- confirm which embedding is intended.
vec = vocab.Vectors(name = "glove.840B.300d.txt",cache = "../embeddings/")

# + colab={} colab_type="code" id="_J_s0L0ovLSd"
SENTIMENT.build_vocab(train_data, valid_data, test_data, max_size=100000, vectors=vec)

# + [markdown] colab_type="text" id="mqWn6jGWnkHy"
# **Constructing Iterators**

# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="qZKthaRvvLSj" outputId="052822da-5515-4b0c-9042-1e8fa0ddf296"
train_iter, val_iter, test_iter = data.Iterator.splits(
        (train_data, valid_data, test_data),
        sort_key=lambda x: len(x.sentiment),
        batch_sizes=(32,32,32),
        device=-1,)

# + colab={} colab_type="code" id="rLTLVlHQvLSr"
sentiment_vocab = SENTIMENT.vocab

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="X46F3yZE0Gx9" outputId="80748d07-5caa-4a10-ff38-b912e05d0dff"
sentiment_vocab.vectors.shape

# + [markdown] colab_type="text" id="zlpB4gbKnkIB"
# # Training
# Training will be conducted for two models one with Vanilla RNN pre-trained embedding and one with LSTM. I am using FastText embeddings trained on wikipedia corpus with a vector size of 300.
# + colab={} colab_type="code" id="p8sGtYZ1-Bkl"
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8.

    Works on raw logits as well as probabilities, because argmax is invariant
    under monotonic transformations such as softmax/sigmoid.
    """
    rounded_preds = torch.argmax(preds, dim=1)
    # print(rounded_preds)
    correct = (rounded_preds == torch.argmax(y, dim=1)).float() #convert into float for division
    acc = correct.sum()/len(correct)
    return acc

# + [markdown] colab_type="text" id="QqPGWHX5B9Gj"
# ## Training using Vanilla RNN

# + colab={} colab_type="code" id="cHhzpI4D41ZL"
class VANILA_RNN(nn.Module):
    """Bidirectional vanilla-RNN sentence classifier; forward() returns raw logits."""

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers,
                 bidirectional, dropout, sentiment_vocab):
        super(VANILA_RNN, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.rnn = nn.RNN(embedding_dim,
                          hidden_dim,
                          num_layers=n_layers,
                          bidirectional=bidirectional,
                          dropout=dropout)
        self.fc = nn.Linear(hidden_dim * 2, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        embedded = self.dropout(self.embedding(x))
        output, hidden = self.rnn(embedded)
        # concat the final forward (hidden[-2,:,:]) and backward (hidden[-1,:,:]) hidden layers
        # and apply dropout
        hidden = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))
        # BUG FIX: return raw logits instead of softmax probabilities. The loss
        # used below is nn.BCEWithLogitsLoss, which applies a sigmoid internally,
        # so the original's `torch.softmax(...)` squashed the outputs twice and
        # crippled the gradients. This also makes the model consistent with
        # LSTM_RNN below, which already returns logits.
        return self.fc(hidden.squeeze(0))

# + colab={} colab_type="code" id="CRl3m_aN9mQN"
INPUT_DIM = len(SENTIMENT.vocab)
EMBEDDING_DIM = 300
HIDDEN_DIM = 256
OUTPUT_DIM = 2
BATCH_SIZE = 32
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.5

vanila_rnn = VANILA_RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM,
                        N_LAYERS, BIDIRECTIONAL, DROPOUT, sentiment_vocab)
vanila_rnn = vanila_rnn.to(device)

# + colab={} colab_type="code" id="rcUokNSt9yGl"
optimizer = optim.SGD(vanila_rnn.parameters(), lr=0.1)
criterion = nn.BCEWithLogitsLoss()
criterion = criterion.to(device)

# + colab={} colab_type="code" id="mSUXx7X6-EOK"
def train(vanila_rnn, iterator, optimizer, criterion):
    """Run one epoch over `iterator`; return (mean loss, mean accuracy)."""
    epoch_loss = 0
    epoch_acc = 0

    vanila_rnn.train()

    for batch in iterator:
        optimizer.zero_grad()
        predictions = vanila_rnn(batch.sentiment.to(device)).squeeze(1)
        loss = criterion(predictions.type(torch.FloatTensor), batch.label.type(torch.FloatTensor))
        acc = binary_accuracy(predictions.type(torch.FloatTensor), batch.label.type(torch.FloatTensor))
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()
        epoch_acc += acc.item()

    return epoch_loss / len(iterator), epoch_acc / len(iterator)

# + colab={"base_uri": "https://localhost:8080/", "height": 1734} colab_type="code" id="qkqjMC8e-Xzm" outputId="2d19c6c9-f14c-47bc-985d-089ee730afb6"
rnn_loss = []
rnn_accuracy = []
for i in tqdm(range(0,100)):
    loss, accuracy = train(vanila_rnn, train_iter, optimizer, criterion)
    print("Loss : ",loss, "Accuracy : ", accuracy )
    rnn_loss.append(loss)
    rnn_accuracy.append(accuracy)

# + [markdown] colab_type="text" id="aPqBr_3LCFTp"
# ## Training using LSTM

# + colab={} colab_type="code" id="iAEv9frrjlMN"
class LSTM_RNN(nn.Module):
    """Bidirectional LSTM sentence classifier; forward() returns raw logits."""

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers,
                 bidirectional, dropout, sentiment_vocab):
        super(LSTM_RNN, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.rnn = nn.LSTM(embedding_dim,
                           hidden_dim,
                           num_layers=n_layers,
                           bidirectional=bidirectional,
                           dropout=dropout)
        self.fc = nn.Linear(hidden_dim * 2, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        embedded = self.dropout(self.embedding(x))
        output, (hidden, cell) = self.rnn(embedded)
        # concat the final forward (hidden[-2,:,:]) and backward (hidden[-1,:,:]) hidden layers
        # and apply dropout
        hidden = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))
        return self.fc(hidden.squeeze(0))

# + colab={} colab_type="code" id="z2GsTDmPjlMS"
INPUT_DIM = len(SENTIMENT.vocab)
EMBEDDING_DIM = 300
HIDDEN_DIM = 256
OUTPUT_DIM = 2
BATCH_SIZE = 32
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.5

lstm_rnn = LSTM_RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM,
                    N_LAYERS, BIDIRECTIONAL, DROPOUT, sentiment_vocab)
lstm_rnn = lstm_rnn.to(device)

# + colab={} colab_type="code" id="<KEY>"
optimizer = optim.SGD(lstm_rnn.parameters(), lr=0.1)
criterion = nn.BCEWithLogitsLoss()
criterion = criterion.to(device)

# + colab={"base_uri": "https://localhost:8080/", "height": 1734} colab_type="code" id="TLdlbaND-bvv" outputId="a752aa6b-84b0-4792-eb95-fd510dee988f"
lstm_loss = []
lstm_accuracy = []
for i in tqdm(range(0,100)):
    loss, accuracy = train(lstm_rnn, train_iter, optimizer, criterion)
    print("Loss : ",loss, "Accuracy : ", accuracy )
    lstm_loss.append(loss)
    lstm_accuracy.append(accuracy)

# + [markdown] colab_type="text" id="CFT-Uv9UCOcm"
# ## Comparision
# When the sentiment analysis test was run for 100 epochs. I found that the performance of the LSTM is recommendable.
#
# ![](figures/LSTM_RNN.png)
# Figure: Showing Difference between accuracy when LSTM and RNN used for text classification
#
# The accuracy of train data was 95+% with LSTM and was around 70% with RNN.
#

# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" id="mkMZnGsRDgO0" outputId="ce0b86fc-af53-4bfb-f89a-7bd5f278c5cf"
plt.plot(rnn_accuracy , label = "RNN Accuracy")
plt.plot(lstm_accuracy , label = "LSTM Accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(loc='upper left')
plt.show()

# + colab={} colab_type="code" id="sAqzXv9WLRfh"
Chapter04/lstm_and_rnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import torch import pickle from torchvision import transforms # # %matplotlib widget import matplotlib.pyplot as plt import numpy as np import sys sys.path.append("..") # Adds higher directory to python modules path. from dynamic_simple.models import dynamicVAE32, beta_from_normalized_beta,prediction_loss from data.dspritesbT import dSpriteBackgroundDatasetTime # + normalized_beta_values = np.logspace(np.log(.001), np.log(5), 6, base=np.e) N = 1 * 32 * 32 M = 10 beta = beta_from_normalized_beta(normalized_beta_values, N = N, M = M) for ii,nb in enumerate(normalized_beta_values): print(['betanorm = %0.3f / beta = %0.1f' % (normalized_beta_values[ii],beta[ii])]) # - vae = list() training_stats = list() for ii,beta_norm in enumerate(normalized_beta_values): # beta values in the file name of checkpoints have varying number of decimal points (not so smart) beta_norm_str = '%0.4f' % (beta_norm) while beta_norm_str[-1] == '0' and beta_norm_str[-2] != '.': beta_norm_str = beta_norm_str[:-1] trainedpath = 'trained/dynamicvae32_nlatent=10_betanorm=%s_dspritesT_circle_last_500K' % (beta_norm_str) trainstatspath = 'trained/dynamicvae32_nlatent=10_betanorm=%s_dspritesT_circle_500K.pkl' % (beta_norm_str) vae.append(dynamicVAE32(n_latent = 10)) # checkpoint = torch.load(trainedpath) # vae[ii].load_state_dict(checkpoint['model_states']['net']) # # training_stats.append(pickle.load(open(trainstatspath, 'rb'))) vae[0] dt = dSpriteBackgroundDatasetTime(transform=transforms.Resize((32,32)), shapetype='circle',data_dir='data/dsprites-dataset/') dt.img[0].shape from data.dspritesb import show_images_grid show_images_grid(dt[3][0]) vae[0].n_frames from torch.utils.data import Dataset, DataLoader dataloader = DataLoader(dt, batch_size=25,shuffle=True, num_workers=4) 
for i,[samples,latents] in enumerate(dataloader): print(samples.shape) show_images_grid(samples.view(25*10,1,32,32)) break x = samples whos x.shape x.view(vae[0].n_frames*x.size(0),vae[0].img_channels,32,32).shape x1=vae[0].conv1(x.view(vae[0].n_frames*x.size(0),vae[0].img_channels,32,32)) x1.shape x4=vae[0].conv4(vae[0].conv3(vae[0].conv2(x1))) x4.shape x1show = x1[:,0,:,:].view(-1,1,16,16).detach().numpy() show_images_grid(x1show) x4show = x4[:,0,:,:].view(-1,1,2,2).detach().numpy() show_images_grid(x4show) vae[0].fc_enc_mu(x4.view(-1,256)).shape vae[0].fc_enc_mu_pred(x4.view(-1,256)) # + firstone = vae[0].fc_enc_mu(x4.view(-1,256)).view(25,10,10) firstone.shape second = torch.zeros_like(firstone) pred = torch.zeros_like(firstone) second[:,:-1,:] = firstone[:,1:,:] # second pred[:,2:,:] = second[:,:-2,:] + (second[:,:-2,:]-firstone[:,:-2,:]) # pred[:,1:,:] = np.append(np.zeros((25,2,10)),pred[:,:-2,:],1) pred plt.figure(figsize=(10,5)) plt.plot(firstone.detach().numpy().reshape(-1,10)) plt.plot(second.detach().numpy().reshape(-1,10)) plt.plot(pred.detach().numpy().reshape(-1,10)) # - mu,_ = vae[0].encode(x) mu1 = mu.view(-1,vae[0].n_frames,vae[0].n_latent) mu1.size(0) recon,mu,logvar,mu_pred = vae[0].forward(x) recon.size(),mu.size(),logvar.size(),mu_pred.size() plt.imshow(recon[0][4][0].detach().numpy()) mu = mu.view_as(mu_pred) # mu = mu[2:,:].contiguous().view(-1,10) # mu_pred=mu_pred[2:,:].contiguous().view_as(mu) mu_pred.size() diff= mu-mu_pred diff.size() torch.matmul(diff[:2:],diff[:2,:].t()) # + # torch.sum(diff[:2,:]*diff[:2,:].t(),0) # - torch.sum(diff[:8:]**2,1) torch.sum(diff[2,:]*diff[2,:]) torch.sum(diff[2,:]**2) prediction_loss(mu,mu_pred) mu.size(),mu_pred.size() 0.5*torch.sum((mu[:2,:]-mu_pred[:2,:])**2) # + # from main.py from solver import Solver import argparse def str2bool(v): # codes from : https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif 
v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') parser = argparse.ArgumentParser(description='dynamic beta VAE') parser.add_argument('--model', default='dynamicVAE32', type=str, help='which model to train (dynamicVAE32, dynamicVAE64))') parser.add_argument('--seed', default=1, type=int, help='random seed') parser.add_argument('--cuda', default=True, type=str2bool, help='enable cuda') parser.add_argument('--batch_size', default=64, type=int, help='batch size') parser.add_argument('--shuffle', default=True, type=str2bool, help='shuffle training data') parser.add_argument('--max_iter', default=500000, type=int, help='number of training iterations') parser.add_argument('--lr', default=1e-4, type=float, help='learning rate') parser.add_argument('--n_latent', default=4, type=int, help='dimension of the latent code') parser.add_argument('--img_channels', default=1, type=int, help='number of image channels') parser.add_argument('--beta', default=0.1, type=float, help='beta for the beta VAE') parser.add_argument('--beta_is_normalized', default=True, type=str2bool, help='flag whether input beta should be interpreted as normalized beta (default) or as unnormalized beta') parser.add_argument('--gamma', default=1, type=float, help='gamma hyperparameter for the prediction loss') parser.add_argument('--dset_dir', default='data', type=str, help='dataset directory') parser.add_argument('--dataset', default='dsprites_circle', type=str, help='dataset name') parser.add_argument('--image_size', default=32, type=int, help='image size. 
now only (32,32) is supported') parser.add_argument('--num_workers', default=6, type=int, help='dataloader num_workers') parser.add_argument('--trainstats_gather_step', default=100, type=int, help='numer of iterations after which training stats are gathered and stored') parser.add_argument('--trainstats_dir', default='trainstats', type=str, help='training statistics directory') parser.add_argument('--display_step', default=100, type=int, help='number of iterations after which loss data is printed and visdom is updated') parser.add_argument('--save_step', default=2000, type=int, help='number of iterations after which a checkpoint is saved') parser.add_argument('--ckpt_dir', default='checkpoints', type=str, help='checkpoint directory') parser.add_argument('--load_last_checkpoint', default=True, type=str2bool, help='load previous checkpoint if it exists') # - args = parser.parse_known_args()[0] seed = args.seed torch.manual_seed(seed) torch.cuda.manual_seed(seed) np.random.seed(seed) net = Solver(args) net.train(plotmode=True) whos show_images_grid(dt[200000][0])
src/dynamic_simple/prototyping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np from collections import defaultdict from tqdm import tqdm from Levenshtein import distance from sklearn.model_selection import KFold # + def find_duplicates(liste): # put in dict dico = dict() for i in tqdm(liste): dico[i] = dico.get(i, 0) + 1 output_dict = {} for i in tqdm(dico): if dico[i] > 1: # for dico's element where value >= 2 output_dict[i] = [a+1 for a, b in enumerate(liste) if b == i] return output_dict, dico def load_citation_sentiment_corpus(filepath): texts = [] polarities = [] n_num = 0 o_num = 0 p_num = 0 with open(filepath) as f: for line in f: line = line.strip() # look for invalid lines if (len(line) is 0): continue if line.startswith('#'): continue # divide the line into columns pieces = line.split('\t') if (len(pieces) != 4): print("Warning: incorrect number of fields in the data file for line:", line) continue text = pieces[3] # remove start/end quotes text = text[1:len(text) - 1] # create the labels and count them if pieces[2] is 'n': n_num += 1 polarities.append(0) if pieces[2] is 'o': o_num += 1 polarities.append(1) if pieces[2] is 'p': p_num += 1 polarities.append(2) texts.append(text) print("o_num= ", o_num) print("p_num= ", p_num) print("n_num= ", n_num) return np.asarray(texts), np.asarray(polarities) def add_to_dict(dictonary,key,value): if key in dictonary: dictonary[key].add(value) else: dictonary[key] = {value} def prepare_labels_dict(text_list,labels): d1 = defaultdict(set) # create data + label dict for text,label in tqdm(zip(text_list,labels)): add_to_dict(d1,text,label) #find all the text with more then 1 labels assigned to it. 
key_list=[] for i in d1: if len(d1[i])>1: key_list.append(i) return d1,key_list def check_multi_label(text_list,label_dict): for text in text_list: if len(label_dict[text])>1: print("ERROR", text) return print("No Multi label text found") # - # Reads the complete corpus and counts the labels. text_list,labels_list=load_citation_sentiment_corpus('../data/complete_corpus.txt') # Cleans the corpus by removing all incorrect labeled instances. # + # finds duplicates duplicates_dic,complete_count_dict=find_duplicates(text_list) # prepare the label dictionary complete_labels_dict, multi_label_text_list=prepare_labels_dict(text_list,labels_list) # setup the required lists of data and label dicts duplicate_texts=list(duplicates_dic.keys()) complete_texts=list(complete_count_dict.keys()) duplicates_removed=list(set(complete_texts).intersection(multi_label_text_list)) final_texts_list=list(set(complete_texts)^set(multi_label_text_list)) if len(duplicates_removed)!= len(multi_label_text_list): print("ERROR! Something is wrong!! The number of removed samples should be equal to the number of samples with multiple labels.") # check if a text has multiple labels check_multi_label(final_texts_list,complete_labels_dict) # create final label list final_labels_list=[] for text in final_texts_list: if len(complete_labels_dict[text])==1: label_set=complete_labels_dict[text] label=next(iter(label_set)) final_labels_list.append(label) else: print("ERROR Muilti label",text,complete_labels_dict[text]) # - # Create the duplicates handling file that covers information about the removed text samples and their labels. # + def get_labels_string(label_set): # creates the label string by converting the label set to the corresponding string. # used to see the different labels for each sample. 
result='' for label in label_set: if label == 0: result=result+' NEGATIVE ' elif label ==1: result=result+' NEUTRAL ' else: result=result+' POSITIVE ' return result # collects the text for the duplicate handling file text_to_write='\n\n' text_to_write=text_to_write+'========DUPLICATES REMOVED========\n\n' for text in duplicates_removed: text_to_write=text_to_write+text+'\n LABELS:' label_set=complete_labels_dict[text]; text_to_write=text_to_write+get_labels_string(label_set)+'\n\n\n' text_to_write=text_to_write+'==========ALL DUPLICATES=========\n' for text in duplicate_texts: text_to_write=text_to_write+text+'\n LABELS:' label_set=complete_labels_dict[text]; text_to_write=text_to_write+get_labels_string(label_set)+'\n\n\n' text_to_write=text_to_write+'==========COMPLETE DATASET AFTER HANDLING DUPLICATES=========\n' for text in final_texts_list: text_to_write=text_to_write+text+'\n LABELS:' label_set=complete_labels_dict[text]; text_to_write=text_to_write+get_labels_string(label_set)+'\n\n\n' # save the duplicate handling files text_file = open("Duplicates_handling.txt", "w") n = text_file.write(text_to_write) text_file.close() # - # Statistics for the data that is left. # + print("Text length after removing duplicates",len(final_texts_list)) print("Labels length after removing duplicates",len(final_labels_list)) if(len(final_texts_list)!=len(final_labels_list)): print('Something is not right check again! The number of labels and data samples is not the same.') # + def get_labels_letter(label): # convert the labels to the corresponding character. 
if label == 0: return "n"+"\t" elif label ==1: return "o"+"\t" elif label ==2: return "p"+"\t" else: print("CONTROL SHOULD NEVER COME HERE!") def write_data_txt(text_list,test=False,prefix=''): # method to write the output files that include additional columns to process them using XLNet with the imdb processor file_name="" if test==True: # test folds print('For Test data with',prefix,) file_name='../data/output/'+prefix+'test.txt' else: # train data print('For Train data with',prefix,) file_name='../data/output/'+prefix+'train.txt' # count instances negative_count = 0 neutral_count = 0 positive_count = 0 text_to_write='' for line in text_list: # add dummy columns text_to_write=text_to_write+"AA"+"\t"+"AA"+"\t" label='' if len(complete_labels_dict[line])==1: label_set=complete_labels_dict[line] label=next(iter(label_set)) #print(label) if label == 0: negative_count=negative_count+1 elif label ==1: neutral_count=neutral_count+1 elif label ==2: positive_count=positive_count+1 else: print("CONTROL SHOULD NEVER COME HERE! Wrong label detected") else: print("ERROR! THIS SHOULD NOT HAPPEN! Wrong number of labels e.g. multi label.") text_to_write=text_to_write+get_labels_letter(label)+line+"\n" # statistics print("Number of POSITIVE examples:",positive_count) print("Number of NEGATIVE examples:",negative_count) print("Number of NEUTRAL examples:",neutral_count) # save the fold text_file = open(file_name, "w") n = text_file.write(text_to_write) text_file.close() # - # Complete data computed using cosine similarity. write_data_txt(final_texts_list,prefix='cosine') # Compute the 10 folds for cross validation. # split into ten folds. 
kf = KFold(n_splits=10) kf.get_n_splits(final_labels_list) final_texts_list=np.array(final_texts_list) print(kf) count=1 for train_index, test_index in kf.split(final_texts_list): # split into trian and test X_train, X_test = final_texts_list[train_index], final_texts_list[test_index] file_name='Fold_'+str(count) count+=1 # process fold write_data_txt(X_train,prefix=file_name) write_data_txt(X_test,test=True,prefix=file_name)
src/dataset_cleaner.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: i3 # language: python # name: i3 # --- # + from threeML import * import numpy as np from threeML.plugins import * from skylab.datasets import Datasets from astropy import units as u import os, sys, glob, abc def read(filelist): data = [] for f in sorted(filelist): x = np.load(f) if len(data) == 0: data = x.copy() else: data = np.concatenate([data, x]) return data # Where is the dataset stored? dataset = "/data/i3store/users/mjlarson/student_data" # Read in all of the data files data_files = dataset + "/IC86_*exp.npy" exp = read(glob.glob(data_files)) # Read in all of the MC files sim_files = dataset + "/IC86*MC*npy" mc = read(glob.glob(sim_files)) # Set the angular error floor to 0.2 degrees #exp['angErr'][exp['angErr']<np.deg2rad(0.2)] = np.deg2rad(0.2) #mc['angErr'][mc['angErr']<np.deg2rad(0.2)] = np.deg2rad(0.2) exp['ra']=np.random.uniform(0,2*np.pi,size=len(exp)) grl = np.load("/data/i3store/users/mjlarson/student_data/GRL/IC86_2012_exp.npy") livetime = np.sum(grl['livetime']) # + from imp import reload reload(IceCubeLike) import warnings warnings.filterwarnings("ignore") #data = np.random.choice(exp, 10000).copy() #jl._data_list['test'].llh_model.update_data(data) IceCube=IceCubeLike.IceCubeLike("test",exp,mc,exp,verbose=True,background_time_profile= (56224,56324)) del mc # - source1_sp = Powerlaw() #source1_sp._set_units(u.GeV,u.cm**-2 / u.s / u.GeV) source1_sp.K=1e-13 source1_neutrino = Powerlaw() source1_neutrino.K=1e-10 source1_neutrino.index=-2 #source1_sp._set_units(u.GeV,u.cm**-2 / u.s / u.GeV) source1 = PointSource("source1", ra=83.63, dec=22.0145, spectral_shape=source1_sp) source1_nu = IceCubeLike.NeutrinoPointSource("source1_nu",ra=83.63, dec=22.0145,spectral_shape=source1_neutrino) model = Model(source1,source1_nu) model.source1.spectrum.main.Powerlaw.K.fix = True 
model.source1.spectrum.main.Powerlaw.index.fix = True #model.source1_nu.spectrum.main.Powerlaw.index.fix = True model.source1_nu.spectrum.main.Powerlaw.K.bounds = (1e-30, 1e-5) model.source1_nu.spectrum.main.Powerlaw.index.bounds = (-4, -1) IceCubedata = DataList(IceCube) jl = JointLikelihood(model, IceCubedata) # + from imp import reload reload(IceCubeLike) IceCube=IceCubeLike.IceCubeLike("test",exp,mc,exp,background_time_profile=(0,1)) source1_sp = Powerlaw() #source1_sp._set_units(u.GeV,u.cm**-2 / u.s / u.GeV) source1_sp.K=1e-13 source1_neutrino = Powerlaw() source1_neutrino.K=1e-6 #source1_sp._set_units(u.GeV,u.cm**-2 / u.s / u.GeV) source1 = PointSource("source1", ra=83.63, dec=22.0145, spectral_shape=source1_sp) source1_nu = IceCubeLike.NeutrinoPointSource("source1_nu",ra=83.63, dec=22.0145,spectral_shape=source1_neutrino) model = Model(source1,source1_nu) model.source1.spectrum.main.Powerlaw.K.fix = True model.source1.spectrum.main.Powerlaw.index.fix = True model.source1_nu.spectrum.main.Powerlaw.index.bounds = (-4,-1) #model.source1_nu.spectrum.main.Powerlaw.K.bounds = (1e-5 * 1 / (u.cm**2 * u.s * u.TeV),10.0 * 1 / (u.cm**2 * u.s * u.TeV)) IceCubedata = DataList(IceCube) jl = JointLikelihood(model, IceCubedata) data=np.load("testdata.npy") data['time']=56225 jl._data_list['test'].llh_model.update_data(data) # - jl._data_list.values() # %timeit best_fit_parameters, likelihood_values = jl.fit()
test/test3ml.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction # # **[Connect Four](https://en.wikipedia.org/wiki/Connect_Four)** is a game where two players alternate turns dropping colored discs into a vertical grid. Each player uses a different color (usually red or yellow), and the objective of the game is to be the first player to get four discs in a row. # # <center> # <img src="https://i.imgur.com/40B1MGc.png"><br/> # </center> # # In this course, you will build your own intelligent agents to play the game. # - In the first lesson, you'll learn how to set up the game environment and create your first agent. # - The next two lessons focus on traditional methods for building game AI. These agents will be smart enough to defeat many novice players! # - In the final lesson, you'll experiment with cutting-edge algorithms from the field of reinforcement learning. The agents that you build will come up with gameplay strategies much like humans do: gradually, and with experience. # # # Join the competition # # Throughout the course, you'll test your agents' performance by competing against agents that other users have created. # # To join the competition, open a new window with **[the competition page](https://www.kaggle.com/c/connectx/overview)**, and click on the **"Join Competition"** button. (_If you see a "Submit Agent" button instead of a "Join Competition" button, you have already joined the competition, and don't need to do so again._) # # <center> # <img src="https://i.imgur.com/dDX1YVW.png" width=80%><br/> # </center> # # This takes you to the rules acceptance page. You must accept the competition rules in order to participate. These rules govern how many submissions you can make per day, the maximum team size, and other competition-specific details. 
Then, click on **"I Understand and Accept"** to indicate that you will abide by the competition rules. # # # Getting started # # The game environment comes equipped with agents that have already been implemented for you. To see a list of these default agents, run: # + from kaggle_environments import make, evaluate # Create the game environment # Set debug=True to see the errors if your agent refuses to run env = make("connectx", debug=True) # List of available default agents print(list(env.agents)) # - # The `"random"` agent selects (uniformly) at random from the set of **valid moves**. In Connect Four, a move is considered valid if there's still space in the column to place a disc (i.e., if the board has seven rows, the column has fewer than seven discs). # # In the code cell below, this agent plays one game round against a copy of itself. # + # Two random agents play one game round env.run(["random", "random"]) # Show the game env.render(mode="ipython") # - # You can use the player above to view the game in detail: every move is captured and can be replayed. _Try this now!_ # # As you'll soon see, this information will prove incredibly useful for brainstorming ways to improve our agents. # # # Defining agents # # To participate in the competition, you'll create your own agents. # # Your agent should be implemented as a Python function that accepts two arguments: `obs` and `config`. It returns an integer with the selected column, where indexing starts at zero. So, the returned value is one of 0-6, inclusive. # # We'll start with a few examples, to provide some context. In the code cell below: # - The first agent behaves identically to the `"random"` agent above. # - The second agent always selects the middle column, whether it's valid or not! Note that if any agent selects an invalid move, it loses the game. # - The third agent selects the leftmost valid column. 
#$HIDE_INPUT$ import random import numpy as np # + # Selects random valid column def agent_random(obs, config): valid_moves = [col for col in range(config.columns) if obs.board[col] == 0] return random.choice(valid_moves) # Selects middle column def agent_middle(obs, config): return config.columns//2 # Selects leftmost valid column def agent_leftmost(obs, config): valid_moves = [col for col in range(config.columns) if obs.board[col] == 0] return valid_moves[0] # - # So, what are `obs` and `config`, exactly? # # ### `obs` # # `obs` contains two pieces of information: # - `obs.board` - the game board (a Python list with one item for each grid location) # - `obs.mark` - the piece assigned to the agent (either `1` or `2`) # # `obs.board` is a Python list that shows the locations of the discs, where the first row appears first, followed by the second row, and so on. We use `1` to track player 1's discs, and `2` to track player 2's discs. For instance, for this game board: # # <center> # <img src="https://i.imgur.com/kSYx4Nx.png" width=25%><br/> # </center> # # `obs.board` would be `[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 2, 1, 2, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 2, 1, 2, 0, 2, 0]`. # # ### `config` # # `config` contains three pieces of information: # - `config.columns` - number of columns in the game board (`7` for Connect Four) # - `config.rows` - number of rows in the game board (`6` for Connect Four) # - `config.inarow` - number of pieces a player needs to get in a row in order to win (`4` for Connect Four) # # Take the time now to investigate the three agents we've defined above. Make sure that the code makes sense to you! # # # Evaluating agents # # To have the custom agents play one game round, we use the same `env.run()` method as before. 
# + # Agents play one game round env.run([agent_leftmost, agent_random]) # Show the game env.render(mode="ipython") # - # The outcome of a single game is usually not enough information to figure out how well our agents are likely to perform. To get a better idea, we'll calculate the win percentages for each agent, averaged over multiple games. For fairness, each agent goes first half of the time. # # To do this, we'll use the `get_win_percentages()` function (defined in a hidden code cell). _To view the details of this function, click on the "Code" button below._ #$HIDE_INPUT$ def get_win_percentages(agent1, agent2, n_rounds=100): # Use default Connect Four setup config = {'rows': 6, 'columns': 7, 'inarow': 4} # Agent 1 goes first (roughly) half the time outcomes = evaluate("connectx", [agent1, agent2], config, [], n_rounds//2) # Agent 2 goes first (roughly) half the time outcomes += [[b,a] for [a,b] in evaluate("connectx", [agent2, agent1], config, [], n_rounds-n_rounds//2)] print("Agent 1 Win Percentage:", np.round(outcomes.count([1,-1])/len(outcomes), 2)) print("Agent 2 Win Percentage:", np.round(outcomes.count([-1,1])/len(outcomes), 2)) print("Number of Invalid Plays by Agent 1:", outcomes.count([None, 0])) print("Number of Invalid Plays by Agent 2:", outcomes.count([0, None])) # Which agent do you think performs better against the random agent: the agent that always plays in the middle (`agent_middle`), or the agent that chooses the leftmost valid column (`agent_leftmost`)? Let's find out! get_win_percentages(agent1=agent_middle, agent2=agent_random) get_win_percentages(agent1=agent_leftmost, agent2=agent_random) # It looks like the agent that chooses the leftmost valid column performs best! # # # Your turn # # These agents are quite simple. As the course progresses, you'll create increasingly complex agents! Continue to **[make your first competition submission](#$NEXT_NOTEBOOK_URL$)**.
notebooks/game_ai/raw/tut1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Basket Rebalance Quick Start # # This quick start guide provide an example of rebalancing a equity custom basket via gs_quant. At the conclusion of this example, you will be able rebalance the constituents of a basket you had previously created on Marquee. # + from gs_quant import * GsSession.use(Environment.QA, 'CLIENT_ID', 'CLIENT_SECRET', ('read_product_data',)) marquee_id = 'MARQUEE_ID' # - # Define your composition # + from gs_quant.api.gs.assets import GsAssetApi compositions = {'AAPL UW': 100, 'GS UN': 200} # composition_num_type can be either quantity or weight composition_num_type = "quantity" new_universe = list(compositions.keys()) kwargs = {'listed': [True], 'assetClassificationsIsPrimary': [True]} positionIds = GsAssetApi.map_identifiers("bbid", "id", new_universe, **kwargs) positionSet = [{'assetId': mqid, composition_num_type: compositions[bbid]} for (bbid, mqid) in positionIds.items()] # - # Set-up Basket Parameters # + from gs_quant.target.indices import * publishParameters = PublishParameters(False, False, False) pricingParameters = IndicesPriceParameters() pricingParameters.initialPrice = 100 parameters = {'publishParameters': publishParameters, 'pricingParameters': pricingParameters, 'positionSet': positionSet} inputs = IndicesRebalanceInputs(parameters) # - # Rebalance the Basket # + from gs_quant.api.gs.indices import GsIndexApi index = GsIndexApi(marquee_id) response = index.rebalance(inputs) # - # Cancel the Basket cancelInputs = ApprovalAction('Cancel Comments') index.cancel_rebalance(cancelInputs)
gs_quant/notebooks/basket_rebalance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.6 64-bit (''base'': conda)' # language: python # name: python37664bitbaseconda15aae41925a8475e86b7bea0cce036d9 # --- # ## Explore GloVe # + examples={'animals':['cat','dog','bird','lion'], 'colors': ['red','white','blue','black'], 'fruits':['apple','pear','banana','grape'], 'vegetable':['lettuce','cauliflower','onion','tomato'], 'plants':['sunflower','bamboo','tree','cactus'], 'persons':['man','woman','child','father'], 'automobiles':['car','truck','van','bus'], 'other_transports':['bicycle','skateboard','skates','scooter'] } examples_embeddings = [tp.gensim_model[i].reshape(1,tp.gensim_model.vector_size) for i in np.array(list(examples.values())).flatten()] examples_embeddings = np.vstack(examples_embeddings) reducer = umap.UMAP(n_neighbors=4,n_epochs=1000) examples_umap = reducer.fit_transform(examples_embeddings) # - plt.figure(figsize = (10,10)) hue = np.repeat(list(examples.keys()),[len(i) for i in examples.values()]) sns.scatterplot(examples_umap[:,0],examples_umap[:,1],marker='o',hue = hue) plt.show()
notebooks/explore_embeddings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import syft as sy import numpy as np duet = sy.join_duet("57b25873c9e93789ed8bf80d7b01c032") duet.store.pandas data_ptr = duet.store["data"] target_ptr = duet.store["target"] data_ptr.request(reason="I really want to see the data!") data_ptr.get() weights = sy.Tensor(np.random.rand(3, 3)).autograd(requires_grad=True).tag("my_weights") weights_ptr = weights.send(duet) print(weights) duet.store.pandas # + for i in range(1): pred = data_ptr.dot(weights_ptr) diff = target_ptr - pred pre_loss = diff * diff loss = duet.numpy.mean(pre_loss, axis=1) loss = loss.resolve_pointer_type() #TODO: remove this? loss.backward() wdiff = weights_ptr.grad * 0.01 weights_ptr = -wdiff + weights_ptr gamma_ptr = weights_ptr.gamma # + # Use AutoDP to get the weights without any permissionr requests # + # gamma_ptr.get() (no need to run this... but we CAN'T download it) # - weights_pub_ptr = gamma_ptr.publish(client=duet, sigma=0.1) duet.store.pandas updated_weights = weights_pub_ptr.get() updated_weights assert updated_weights._data_child.ndim == weights._data_child.ndim assert not (updated_weights._data_child == weights._data_child).all() print(updated_weights)
packages/syft/examples/experimental/adversarial_accountant/MNIST - AutoDP Duet - Data Scientist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/google/jax/blob/main/tests/notebooks/colab_tpu.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="WkadOyTDCAWD" colab_type="text"
# # JAX Colab TPU Test
#
# This notebook is meant to be run in a [Colab](http://colab.research.google.com) TPU runtime as a basic check for JAX updates.

# + id="_tKNrbqqBHwu" colab_type="code" outputId="bf0043b0-6f2b-44e4-9822-4f426b3d158e" colab={"base_uri": "https://localhost:8080/", "height": 68}
# Report the runtime hostname and installed jax/jaxlib versions so a failing
# run can be tied to a specific release.
import jax
import jaxlib

# !cat /var/colab/hostname
print(jax.__version__)
print(jaxlib.__version__)

# + [markdown] id="DzVStuLobcoG" colab_type="text"
# ## TPU Setup

# + colab_type="code" id="IXF0_gNCRH08" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Attach JAX to the Colab TPU backend; must run before any computation.
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()

# + [markdown] id="oqEG21rADO1F" colab_type="text"
# ## Confirm Device

# + colab_type="code" id="8BwzMYhKGQj6" outputId="d51b7f21-d300-4420-8c5c-483bace8617d" colab={"base_uri": "https://localhost:8080/", "height": 34}
from jaxlib import tpu_client_extension
import jax

# Allocate a small array and verify that it landed on a TPU device;
# the assert makes a CPU/GPU fallback an explicit test failure.
key = jax.random.PRNGKey(1701)
arr = jax.random.normal(key, (1000,))
device = arr.device_buffer.device()
print(f"JAX device type: {device}")
assert isinstance(device, tpu_client_extension.TpuDevice), "unexpected JAX device type"

# + [markdown] id="z0FUY9yUC4k1" colab_type="text"
# ## Matrix Multiplication

# + colab_type="code" id="eXn8GUl6CG5N" outputId="9954a064-ef8b-4db3-aad7-85d07b50f678" colab={"base_uri": "https://localhost:8080/", "height": 34}
import jax
import numpy as np

# matrix multiplication on GPU
# NOTE(review): the comment above says GPU, but this notebook targets the
# TPU runtime configured earlier — presumably a copy-paste leftover.
key = jax.random.PRNGKey(0)
x = jax.random.normal(key, (3000, 3000))
result = jax.numpy.dot(x, x.T).mean()
print(result)

# + [markdown] id="jCyKUn4-DCXn" colab_type="text"
# ## XLA Compilation

# + colab_type="code" id="2GOn_HhDPuEn" outputId="a4384c55-41fb-44be-845d-17b86b152068" colab={"base_uri": "https://localhost:8080/", "height": 51}
@jax.jit
def selu(x, alpha=1.67, lmbda=1.05):
    # Scaled exponential linear unit, compiled end-to-end by XLA via @jax.jit.
    return lmbda * jax.numpy.where(x > 0, x, alpha * jax.numpy.exp(x) - alpha)

x = jax.random.normal(key, (5000,))
result = selu(x).block_until_ready()
print(result)
tests/notebooks/colab_tpu.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pickle
import glob
import matplotlib.pyplot as plt
from scipy import optimize
import numpy as np
import pandas as pd

# Parse every pickled "purchase" experiment run into one row of
# (hyper-parameters, mean/std of accuracy and membership-inference advantage).
idx_tups = []
for file in glob.glob("../../data/purchase/purchase_20m*"):
    f = pickle.load(open(file, 'rb'))
    accs = []
    advs = []
    merlin_advs = []
    print(len(f), file)
    for fd in f:
        # For each run, keep the epoch that maximizes (acc, yeom adv, merlin adv)
        # lexicographically — max over tuples compares accuracy first.
        a, y, m = max([(a, yt - yf, mt - mf) for a, yt, yf, mt, mf in zip(fd['acc'], fd['yeom_tpr'], fd['yeom_fpr'], fd['merlin_tpr'], fd['merlin_fpr'])]) #[-1]
        fd['acc'] = a
        fd['adv'] = y
        fd['merlin_adv'] = m
        accs.append(a)
        advs.append(y)
        merlin_advs.append(m)
    # Decode hyper-parameters from the last five "_"-separated filename fields:
    # method ('dp' vs immediate-sensitivity 'is'), width, epsilon, throw-out, batch size.
    var = file.split("_")[-5:]
    if var[-5] == 'mb' or var[-5] == '20mb':
        var[-5] = 'dp'
    else:
        var[-5] = 'is'
    var[-4] = int(var[-4])
    var[-1] = int(var[-1].split(".")[0])
    var[-3] = int(var[-3])
    var[-2] = float(var[-2]) if var[-2] != 'False' else False
    var.append(np.mean(accs))
    var.append(np.mean(advs))
    var.append(np.mean(merlin_advs))
    var.append(np.std(accs))
    var.append(np.std(advs))
    var.append(np.std(merlin_advs))
    # IDX tups follow the format (epsilon, throw out threshold, batch size)
    idx_tups.append(var)

ds = pd.DataFrame(idx_tups)
ds.columns = ['method','width', 'epsilon', 'throw out', 'batch_size', 'acc', 'yeom', 'merlin', 'acc_std', 'yeom_std', 'merlin_std']

# +
# Accuracy-vs-advantage trade-off curve for DP vs immediate sensitivity.
dps = ds[(ds['method'] == 'dp') & (ds['acc'] > .1)].sort_values('acc')
i0 = ds[(ds['method'] == 'is') & (ds['epsilon'] != 0) & (ds['throw out'] == 0)].sort_values('acc')
i5 = ds[(ds['method'] == 'is') & (ds['epsilon'] != 0) & (ds['throw out'] == 5)].sort_values('acc')
i10 = ds[(ds['method'] == 'is') & (ds['epsilon'] != 0) & (ds['throw out'] == 10)].sort_values('acc')

#plt.errorbar(dps['acc'], dps['merlin'], dps['merlin_std'])
#plt.errorbar(i0['acc'], i0['merlin'], i0['merlin_std'])
# NOTE(review): x is the Yeom advantage and y is accuracy here, but the axis
# labels below say the opposite — labels appear swapped; confirm intent.
plt.errorbar(dps['yeom'], dps['acc'], dps['acc_std'])
plt.errorbar(i0['yeom'], i0['acc'], i0['acc_std'])
#plt.scatter(i5['acc'], i5['yeom'])
#plt.scatter(i10['acc'], i10['yeom'])
#plt.scatter(base['acc'], base['yeom'])
plt.title('Purchase')
plt.xlabel('Accuracy')
plt.ylabel('Yeom Advantage')
fig = plt.gcf()
#fig.set_size_inches(4, 4)
plt.savefig('/home/ubuntu/6058f04dd79997b3e3ffcbad/figures/purchase_acc_adv.png', dpi=400)

# +
def plot_noise_results(df, base=None, title="", ax1 = None):
    """Plot accuracy (left axis, blue) and Yeom membership-inference advantage
    (right twin axis, red) against epsilon for one method.

    df: frame with 'epsilon', 'acc', 'acc_std', 'yeom', 'yeom_std' columns.
    base: optional single-row frame plotted as dashed no-privacy baselines.
    ax1: axes to draw on; a fresh one is used if None.
    Returns (ax1, ax2), the primary and twin axes.
    """
    epsilons = df['epsilon']
    acc = df['acc']
    acc_std = df['acc_std']
    adv = df['yeom']
    adv_std = df['yeom_std']
    if base is not None:
        # Baseline is a constant; repeat it so errorbar gets one y per x.
        base_acc = [float(base['acc']) for i in epsilons]
        base_acc_std = [float(base['acc_std']) for i in epsilons]
        base_adv = [float(base['yeom']) for i in epsilons]
        base_adv_std = [float(base['yeom_std']) for i in epsilons]
    # NOTE(review): plt.figure() opens a new (possibly empty) figure even when
    # an ax1 is passed in — likely unintended but harmless to the output axes.
    plt.figure()
    if ax1 is None:
        ax1 = plt.gca()
    # Fade the error bars/caps so the mean curve stays readable.
    markers, caps, bars = ax1.errorbar(epsilons, acc, acc_std, color='b', alpha=.8)
    [bar.set_alpha(0.25) for bar in bars]
    [cap.set_alpha(0.25) for cap in caps]
    ax1.set_ylabel('Accuracy', color='b')
    ax1.tick_params('x', colors='b')
    if base is not None:
        markers, caps, bars = ax1.errorbar(epsilons, base_acc, base_acc_std, fmt='--', color='b', alpha=.8)
        [bar.set_alpha(0.25) for bar in bars]
        [cap.set_alpha(0.25) for cap in caps]
    ax2 = ax1.twinx()
    markers, caps, bars = ax2.errorbar(epsilons, adv, adv_std, color='r')
    [bar.set_alpha(0.25) for bar in bars]
    [cap.set_alpha(0.25) for cap in caps]
    ax2.set_ylabel('Membership Inference Adv', color='r')
    ax2.tick_params('x', colors='r')
    if base is not None:
        markers, caps, bars = ax2.errorbar(epsilons, base_adv, base_adv_std, fmt='--', color='r', alpha=.8)
        [bar.set_alpha(0.25) for bar in bars]
        [cap.set_alpha(0.25) for cap in caps]
    ax1.set_xlabel('Epsilon (delta = 1e-5)')
    ax1.set_title(title)
    #fig.tight_layout()
    plt.xscale('log')
    return ax1, ax2
    #plt.show()

def plot_against_eps(isd, dpd, arg, base=None, title="", ax1 = None):
    """Overlay one metric (`arg`, e.g. 'acc' or 'yeom') for immediate
    sensitivity (red) and DP (blue) against epsilon normalized to [0, 1]
    (each method divided by its own max epsilon), with an optional dashed
    black baseline. Draws on ax1 (required) and returns it.
    """
    is_eps = isd['epsilon']
    dp_eps = dpd['epsilon']
    is_eps = [i/max(is_eps) for i in is_eps]
    dp_eps = [i/max(dp_eps) for i in dp_eps]
    is_arg = isd[arg]
    dp_arg = dpd[arg]
    is_std = isd[f'{arg}_std']
    dp_std = dpd[f'{arg}_std']
    if base is not None:
        base_arg = [float(base[arg]) for i in is_eps]
        base_std = [float(base[f'{arg}_std']) for i in is_eps]
    markers, caps, bars = ax1.errorbar(is_eps, is_arg, is_std, color='r')
    [bar.set_alpha(0.25) for bar in bars]
    [cap.set_alpha(0.25) for cap in caps]
    markers, caps, bars = ax1.errorbar(dp_eps, dp_arg, dp_std, color='blue')
    [bar.set_alpha(0.25) for bar in bars]
    [cap.set_alpha(0.25) for cap in caps]
    if base is not None:
        markers, caps, bars = ax1.errorbar(is_eps, base_arg, base_std, fmt='--', color='black', alpha=.8)
        [bar.set_alpha(0.25) for bar in bars]
        [cap.set_alpha(0.25) for cap in caps]
    return ax1
# -

# Side-by-side accuracy and advantage curves (normalized epsilon).
dps = ds[(ds['method'] == 'dp') & (ds['throw out'] == .1)].sort_values('epsilon')
i0 = ds[(ds['method'] == 'is') & (ds['epsilon'] != 0) & (ds['throw out'] == 0)].sort_values('epsilon')
# NOTE(review): i5/i10 filter on 'throw out' == 0, same as i0 — presumably
# meant to be 5 and 10; confirm against the earlier cell.
i5 = ds[(ds['method'] == 'is') & (ds['epsilon'] != 0) & (ds['throw out'] == 0)].sort_values('epsilon')
i10 = ds[(ds['method'] == 'is') & (ds['epsilon'] != 0) & (ds['throw out'] == 0)].sort_values('epsilon')
base = ds[(ds['epsilon'] == 0) & (ds['throw out'] == 0)]
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
fig.set_size_inches(12, 5)
x1= plot_against_eps(i0, dps,'acc', base=base, title='Accuracy', ax1=ax1)
x2 = plot_against_eps(i0, dps,'yeom', base=base, title='Advantage', ax1=ax2)

# +
# Per-method twin-axis plots with matched advantage y-limits.
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
fig.set_size_inches(12, 5)
i0 = ds[(ds['method'] == 'is') & (ds['epsilon'] != 0) & (ds['throw out'] == 0)].sort_values('epsilon')
dps = ds[(ds['method'] == 'dp') & (ds['throw out'] == .1)].sort_values('epsilon')
x1, x2 = plot_noise_results(i0, None, 'Immediate Sensitivity', ax1)
x3, x4 = plot_noise_results(dps, None, 'Gradient Clipping', ax2)
#x1.set_ylim(.35, .62)
#x3.set_ylim(.35, .62)
x2.set_ylim(.06, .45)
x4.set_ylim(.06, .45)
# -
experiments/immediate_sensitivity/purchase_mult_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.9.5 64-bit (''venv'': venv)'
#     name: python3
# ---

import pandas as pd
import nibabel as nib
from pathlib import Path
from utils.features import FEATURES,df_to_series
from utils.parcellation import parcellation_labels
from itertools import chain

# +
# Build an empty subjects x (Hemisphere, Name, Modality, Feature) table:
# one MultiIndex column per parcellation region per modality/feature pair.
mother_dir = Path("/media/groot/Yalla/media/MRI/derivatives")
subjects = sorted(set([subj.name for subj in mother_dir.glob("*/sub-*") if subj.is_dir()]))
modalities = list(chain(FEATURES.keys()))
parameters = list(chain(*FEATURES.values()))
n_indices = parcellation_labels.shape[0] * len(parameters)
index_df = pd.DataFrame(columns=["Hemisphere","Name","Modality","Feature"])
# NOTE(review): this loop rebinds both `modality` and `parameters` defined
# above; harmless here, but shadowing makes the cell order-sensitive.
for modality,parameters in FEATURES.items():
    for param in parameters:
        tmp = pd.DataFrame(columns=index_df.columns,index=range(parcellation_labels.shape[0]))
        tmp["Hemisphere"] = parcellation_labels.Hemi.values
        tmp["Name"] = parcellation_labels.ROIname.values
        tmp["Modality"] = modality
        tmp["Feature"] = param
        index_df = pd.concat([index_df,tmp],ignore_index=True)
mindex = pd.MultiIndex.from_frame(index_df,names=["Hemisphere","Name","Modality","Feature"])
df = pd.DataFrame(columns=mindex,index=subjects)
df.columns = df.columns.sort_values(ascending=False)
# -

# Fill the table per subject and modality from each pipeline's
# Brainnetome_parcels.csv; subjects/modalities with no file are skipped.
for subj in df.index:
    for modality in modalities:
        if modality == "DWI":
            regions_fname = mother_dir / "dwiprep" / subj / "ses-1" / "tensors_parameters" / "coreg_FS" / "Brainnetome_parcels.csv"
        elif modality == "SMRI":
            subj_dir = mother_dir / "fmriprep" / subj
            sessions = [ses.name for ses in subj_dir.glob("ses-*")]
            if len(sessions) > 1:
                # Multi-session subjects get a session-agnostic anat folder.
                anat_dir = subj_dir / "anat"
            elif len(sessions) == 1:
                anat_dir = subj_dir / sessions[0] / "anat"
            else:
                continue
            regions_fname = anat_dir / "Brainnetome_parcels.csv"
        # NOTE(review): if a modality other than DWI/SMRI ever appears in
        # FEATURES, `regions_fname` falls through from the previous iteration
        # (or is undefined on the first one) — confirm FEATURES only has these two.
        if not regions_fname.exists():
            continue
        try:
            tmp_df = pd.read_csv(regions_fname,index_col=0)
            df.loc[subj,(slice(None),slice(None),modality,slice(None))] = df_to_series(tmp_df,mindex,modality,FEATURES.copy())
        except KeyError:
            continue
    # break

# Drop subjects for which no modality produced any data, then display.
df = df.dropna(how="all")
df
features_generation/gather_features.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="M7B4FEfEnJw-"
# # "Visualization using seaborn"
# > "Data visualization for initial data analysis"
#
# - toc:true
# - branch: master
# - badges: true
# - comments: true
# - author: Vadim
# - categories: [visuals, jupyter]
#

# + [markdown] id="O-2bn_rrWYgD"
# ## Numeric not ordered
#
#
# Examples:
# - number of observations (different kind of)
# - number of voters per state
# - phone codes per state
#
#

# + [markdown] id="uf-GZLar9xz1"
# Sns high level overview
# https://seaborn.pydata.org/tutorial/function_overview.html
# -

# --- link to dataviz schema -

# + [markdown] id="Ib7SxyQh-HvB"
# ### Relational data

# + [markdown] id="WUJIKA6Y-KH2"
# ### Distribution
#
# Show comparison of Violin, box and hist

# + id="BmeM2TlP-KU8"
import datetime, warnings, scipy
import pandas as pd
# import pandas_profiling # optional
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings; warnings.simplefilter('ignore')

def plot_comparison(x, title):
    """Show the same 1-D sample four ways (hist+KDE, box, violin, ECDF)
    stacked on a shared x axis, so the views can be compared directly.
    """
    fig, ax = plt.subplots(4, 1, sharex=True, figsize=(12,8))
    # NOTE(review): sns.distplot is deprecated in modern seaborn; histplot/
    # displot is the replacement — left as-is to preserve behavior.
    sns.distplot(x, ax=ax[0]) # histogram
    ax[0].set_title('Histogram + KDE; x value, y count')
    sns.boxplot(x, ax=ax[1]) # Boxplot
    ax[1].set_title('Boxplot')
    sns.violinplot(x, ax=ax[2]) # Violin
    ax[2].set_title('Violin plot')
    fig.suptitle(title, fontsize=16)
    sns.ecdfplot(data=x,ax=ax[3], stat='proportion')
    ax[3].set_title('ECDF plot')
    # NOTE(review): duplicate suptitle call — redundant but harmless.
    fig.suptitle(title, fontsize=16)
    plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 669} id="gamwXNVh_tZr" outputId="c28ec22d-6d3a-4d7b-bad5-2537531acc11"
# Unimodal example: standard normal sample.
N = 10 ** 4
np.random.seed(42)
sample_gaussian = np.random.normal(size=N)
plot_comparison(sample_gaussian, 'Standard Normal Distribution')

# + colab={"base_uri": "https://localhost:8080/", "height": 669} id="-DhzPLvQ__bx" outputId="796867cf-5bef-47dc-9ea9-4cc7a523a7d2"
# Bimodal example: mixture of two Gaussians — note the boxplot hides the modes.
sample_bimodal = np.concatenate([np.random.normal(loc=-2, scale=2, size=int(N/2)),
                                 np.random.normal(loc=3, scale=1, size=int(N/2))])
plot_comparison(sample_bimodal, 'Mixture of Gaussians - bimodal')

# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="DEQAB4XwAGlF" outputId="81cd82cc-9fa8-447e-82e8-69c3d640cdff"
# not ordered simple Distribution
sns.displot(data=sample_bimodal, kind='hist')

# + id="VXDzCoIPGXBj"
# shows the distribution of quantitative data across several levels of one
# (or more) categorical variables such that those distributions can be compared

# + id="jEMo2XSBBQFS"
penguins= sns.load_dataset('penguins')

# + colab={"base_uri": "https://localhost:8080/", "height": 315} id="TsmZBtJJBma6" outputId="a7e6771d-27fd-44be-b4f4-1e9906f74143"
# Scatter of two numeric columns next to a per-species count histogram.
f, axs = plt.subplots(1, 2, figsize=(8, 4), gridspec_kw=dict(width_ratios=[4, 3]))
sns.scatterplot(data=penguins, x="flipper_length_mm", y="bill_length_mm", hue="species", ax=axs[0])
sns.histplot(data=penguins, x="species", hue="species", shrink=.8, alpha=.8, legend=False, ax=axs[1])
f.tight_layout()

# + colab={"base_uri": "https://localhost:8080/", "height": 280} id="GALAYFs6CyKY" outputId="0b4bbb53-463d-4015-cca6-d8faa0ef66ec"
# library and dataset
import seaborn as sns
import matplotlib.pyplot as plt
df = sns.load_dataset('iris')

# plot of 2 variables
p1=sns.kdeplot(df['sepal_width'], shade=True, color="r")
p1=sns.kdeplot(df['sepal_length'], shade=True, color="b")
plt.show()

# + [markdown] id="ZW83uMx2Ctvg"
# ## Numeric Ordered

# + [markdown] id="wbV8ohhFDF0y"
# ### Line

# + colab={"base_uri": "https://localhost:8080/", "height": 348} id="B-QJXouMCt9a" outputId="ab29dcc1-6e07-442f-b081-0eed80a24ab9"
# Random-walk time series drawn as a line.
df = pd.DataFrame(dict(time=np.arange(500), value=np.random.randn(500).cumsum()))
g = sns.relplot(x="time", y="value", kind="line", data=df)
g.fig.autofmt_xdate()

# + colab={"base_uri": "https://localhost:8080/", "height": 138} id="0SIi43aFEqcW" outputId="30635a03-f117-4987-bbd4-1ebd4d60d097"
fmri = sns.load_dataset("fmri")
fmri.head(3)

# + colab={"base_uri": "https://localhost:8080/", "height": 383} id="hcnGC6XTDoJm" outputId="886d1905-b633-4357-8dad-1d27f93eb632"
# Line plot aggregates repeated x values into mean + confidence band.
sns.relplot(x="timepoint", y="signal", kind="line", data=fmri);

# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="WG67KG7dYtpB" outputId="8513bbc4-2096-4cfb-faf8-ba3866d24cf6"
# lineplot (matplotlib, red circle markers)
plt.plot([1, 2, 3, 4], [1, 4, 9, 16], 'ro')
plt.axis([0, 6, 0, 20])
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 429} id="mPGW-YY6Y1vN" outputId="28d16463-c822-4321-c53e-2664da8b9eb8"
# lineplot (same data via seaborn relplot, positional x/y)
sns.relplot([1, 2, 3, 4], [1, 4, 9, 16])
plt.axis([0, 6, 0, 20])
plt.show()

# + [markdown] id="HP_BTS1wDMTq"
# ### Scatter

# + colab={"base_uri": "https://localhost:8080/", "height": 383} id="Xo0DIcnzDMfz" outputId="bc03fb82-8916-4425-a3e5-9aa6ac6e1e18"
sns.relplot(x="timepoint", y="signal", kind="scatter", data=fmri);

# + [markdown] id="73NYSjGMTH88"
# ### Area

# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="ckTBaK5gTIJH" outputId="d80c6b9f-d0c4-4ae1-d0d1-f48a44ff1290"
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Data
x=range(1,6)
y=[ [1,4,6,8,9], [2,2,7,10,12], [2,8,5,10,6] ]

# Plot Area
plt.stackplot(x,y, labels=['A','B','C'])
plt.legend(loc='upper left')
plt.show()

# + id="4ALajW0jELkO"
tips = sns.load_dataset("tips")
sns.relplot(x="total_bill", y="tip", data=tips);

# + [markdown] id="vgOiDv49-nM-"
# ### Categorical
#
# Here we will show several categories and values for them
#

# + [markdown] id="KGuasqpPWMel"
# #### Violin and Boxplot for category (x)
#
# Together with simple histogram good first impression

# + colab={"base_uri": "https://localhost:8080/", "height": 290} id="E_rB4hMK39hS" outputId="2eb6f7bf-172f-45c7-8ae9-015fae25a5cb"
#Box and violin
import matplotlib.pyplot as plt
import numpy as np

# Fixing random state for reproducibility
np.random.seed(19680801)

# generate some random test data: four samples with growing spread
all_data = [np.random.normal(0, std, 100) for std in range(6, 10)]
for std in range(6, 10):
    print(std)
ddf=pd.DataFrame.from_dict(all_data)
ddf.head(5)

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="UplROdbRVM-2" outputId="5cf45ab4-977d-41a4-c974-85464a18a05b"
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(9, 4))

# plot violin plot
axes[0].violinplot(ddf, showmeans=False, showmedians=True)
axes[0].set_title('Violin plot')

# plot box plot
axes[1].boxplot(ddf)
axes[1].set_title('Box plot')

# adding horizontal grid lines
for ax in axes:
    ax.yaxis.grid(True)
    ax.set_xticks([y + 1 for y in range(len(all_data))])
    ax.set_xlabel('Four separate samples')
    ax.set_ylabel('Observed values')

# add x-tick labels
plt.setp(axes, xticks=[y + 1 for y in range(len(all_data))],
         xticklabels=['x1', 'x2', 'x3', 'x4'])
plt.show()

# + id="08ODlFiAVDEj"


# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="Ht4G33LECYYj" outputId="b664ff89-f451-43cf-c011-88b4f69307aa"
ax = sns.violinplot(x="day", y="total_bill", hue="sex", data=tips)
ax.set_title('Distribution of total bill amount per day', fontsize=16);

# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="ydDnEVyBCpww" outputId="94315908-4489-4fe7-8fb9-16dc9a95ca66"
# split=True draws both hue levels on one violin for direct comparison.
ax = sns.violinplot(x="day", y="total_bill", hue="sex", split=True, data=tips)
ax.set_title('Distribution of total bill amount per day', fontsize=16);

# + [markdown] id="8D-rqIxJXYMb"
# #### ECDF
# other view of distribution
#

# + colab={"base_uri": "https://localhost:8080/", "height": 495} id="CtPYn44nqYkR" outputId="50cfb732-d509-4a6b-8f37-962fa6143904"


# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="uwqYKrgGZRIz" outputId="1a49ff1c-807e-4804-d1bd-dcc0e341df38"
# lineplot
plt.plot([1, 2, 3, 4], [1, 4, 9, 16], 'ro')
plt.axis([0, 6, 0, 20])
plt.show()

# + id="mUvnxy04ZIzU"

# -
_notebooks/2020-11-01-sns_visuals.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yukinaga/image_generation/blob/main/section_3/02_vae.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="K74mqS_L8nq4"
# # Implementing a VAE
# We implement a variational autoencoder (VAE).
# The Encoder compresses handwritten-digit images into latent variables, and the Decoder reconstructs the original images from them.
# We then visualize the latent space and vary the latent variables to see how the generated images change.

# + [markdown] id="W7h50pYSRaZ0"
# ## Handwritten digit images
# The VAE compresses and reconstructs handwritten digit images.
# Load and display the 8x8 digit images bundled with scikit-learn.

# + id="Xc9WCis8TGwx"
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

digits_data = datasets.load_digits()

n_img = 10  # number of images to display
plt.figure(figsize=(10, 4))
for i in range(n_img):
    # input image
    ax = plt.subplot(2, 5, i+1)
    plt.imshow(digits_data.data[i].reshape(8, 8), cmap="Greys_r")
    ax.get_xaxis().set_visible(False)  # hide the axes
    ax.get_yaxis().set_visible(False)
plt.show()

print("データの形状:", digits_data.data.shape)
print("ラベル:", digits_data.target[:n_img])

# + [markdown] id="FtujqB1gSh4I"
# ## Settings
# The images are 8x8 pixels, so the input layer needs 8x8=64 neurons.
# The output learns to reproduce the input, so the output layer has the same number of neurons as the input layer.
# To visualize the relation between latent variables and labels in 2-D, the number of latent variables is set to 2.

# + id="mbSwlXZIUN7C"
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
import torch
from torch.utils.data import DataLoader

# -- Hyper-parameters --
img_size = 8  # image height and width
n_in_out = img_size * img_size  # neurons in the input/output layers
n_mid = 16  # neurons in the hidden layers
n_z = 2  # number of latent variables
eta = 0.01  # learning rate
epochs = 100
batch_size = 16
interval = 10  # progress-report interval (epochs)

# -- Training data --
digits_data = datasets.load_digits()
x_train = np.asarray(digits_data.data)
x_train /= 16  # scale pixel values to the 0-1 range
x_train = torch.tensor(x_train, dtype=torch.float)
train_dataset = torch.utils.data.TensorDataset(x_train)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# + [markdown] id="LYb9yszSURKH"
# ## Building the model
# We build the VAE with PyTorch.
# The Encoder outputs the latent mean $\mu$ and the log of the squared standard deviation $\sigma$ (i.e. the log variance).
#
# The reparametrization trick makes backpropagation possible:
# sample noise $\epsilon$ with mean 0 and standard deviation 1, multiply by $\sigma$ and add $\mu$ to obtain the latent variable $z$.
# $$\epsilon \sim N(0,\, I)$$
# $$z=\mu + \epsilon\odot\sigma$$
#
# The loss function is
# $$E=E_{rec} + E_{reg}$$
#
# where the reconstruction error $E_{rec}$ measures the mismatch between output and input:
# $$E_{rec} = \frac{1}{h}\sum_{i=1}^{h}\sum_{j=1}^{m}\left(-x_{ij}\log y_{ij}-(1-x_{ij})log(1-y_{ij})\right)$$
# $h$: batch size, $m$: number of input/output neurons, $x_{ij}$: VAE input, $y_{ij}$: VAE output.
# The regularization term $E_{reg}$ pushes the mean toward 0 and the standard deviation toward 1:
# $$E_{reg}=\frac{1}{h}\sum_{i=1}^{h}\sum_{k=1}^{n}-\frac{1}{2}(1+\phi_{ik} - \mu_{ik}^2 - exp(\phi_{ik}))$$
# $h$: batch size, $n$: number of latent variables, $\mu_{ik}$: mean, $\phi_{ik}$: log variance.

# + id="0qfDQ8QJolyo"
import torch.nn as nn
import torch.nn.functional as F

class VAE(nn.Module):
    """Variational autoencoder: 64 -> 16 -> 2 latent -> 16 -> 64."""
    def __init__(self):
        super().__init__()
        self.enc_mid = nn.Linear(n_in_out, n_mid)  # Encoder hidden layer
        self.enc_mu = nn.Linear(n_mid, n_z)  # layer producing the mean
        self.enc_logvar = nn.Linear(n_mid, n_z)  # layer producing log variance
        self.dec_mid = nn.Linear(n_z, n_mid)  # Decoder hidden layer
        self.dec_out = nn.Linear(n_mid, n_in_out)  # Decoder output layer

    def forward(self, x):
        z = self.encode(x)
        y = self.decode(z)
        return y

    def encode(self, x):
        # Encode a batch of flat images into sampled latent variables.
        # Side effect: stores self.mu / self.logvar for the loss.
        x = x.view(-1, n_in_out)  # batch size x number of inputs
        x = F.relu(self.enc_mid(x))
        self.mu = self.enc_mu(x)
        self.logvar = self.enc_logvar(x)
        std = torch.exp(0.5*self.logvar)  # standard deviation
        eps = torch.randn_like(std)  # standard-normal noise
        return self.mu + std*eps  # latent variable (reparametrization trick)

    def decode(self, z):
        # Map latent variables back to a 0-1 image via a sigmoid output.
        # NOTE(review): F.sigmoid is deprecated in newer torch; torch.sigmoid
        # is the drop-in replacement — left as-is to preserve behavior.
        x = F.relu(self.dec_mid(z))
        x = F.sigmoid(self.dec_out(x))
        return x

    def loss(self, y, x):
        # Return (reconstruction error, KL regularization) as separate terms.
        x = x.view(-1, n_in_out)  # batch size x number of inputs
        rec_loss = F.binary_cross_entropy(y, x, reduction="sum")  # reconstruction error
        reg_loss = 0.5 * torch.sum(self.mu**2 + torch.exp(self.logvar)- self.logvar - 1)  # regularization term
        return (rec_loss, reg_loss)

vae = VAE()
vae.cuda()  # move to GPU
print(vae)

# + [markdown] id="XC4f8NH6U86F"
# ## Training
# Train the VAE model built above.
# Since it learns to reproduce its input, no labels are needed.

# + id="KoOAs3rh2PJJ"
from torch import optim

# Adam
optimizer = optim.Adam(vae.parameters())

# loss logs
rec_error_record = []
reg_error_record = []
total_error_record = []

# training loop
for i in range(epochs):
    vae.train()  # training mode
    loss_rec = 0
    loss_reg = 0
    loss_total = 0
    for j, (x,) in enumerate(train_loader):  # unpack mini-batch (x,)
        x = x.cuda()  # move batch to GPU
        y = vae(x)
        lrec, lreg = vae.loss(y, x)
        loss = lrec + lreg
        loss_rec += lrec.item()
        loss_reg += lreg.item()
        loss_total += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # average the accumulated losses over the number of batches
    loss_rec /= j+1
    loss_reg /= j+1
    loss_total /= j+1
    rec_error_record.append(loss_rec)
    reg_error_record.append(loss_reg)
    total_error_record.append(loss_total)
    if i%interval == 0:
        print("Epoch:", i, "Loss_Rec:", loss_rec, "Loss_Reg:", loss_reg, "Loss_Total:", loss_total)

# + [markdown] id="N2evU86Zb4Pd"
# ## Loss curves
# Plot the recorded losses over epochs.

# + id="DzuFBDy37tRg"
import matplotlib.pyplot as plt

plt.plot(range(1, len(rec_error_record)+1), rec_error_record, label="Rec_error")
plt.plot(range(1, len(reg_error_record)+1), reg_error_record, label="Reg_error")
plt.plot(range(1, len(total_error_record)+1), total_error_record, label="Total_error")
plt.legend()

plt.xlabel("Epochs")
plt.ylabel("Error")
plt.show()

# + [markdown] id="dn-zFGCFWb3I"
# The reconstruction error (Rec_error) and the regularization term (Reg_error) balance each other, and the total error (Total_error) stops moving.
# The latent variables would spread out to match input and output exactly, but the regularization term keeps that in check.

# + [markdown] id="dypLOxuaWhoE"
# ## Visualizing the latent space
# Plot the two latent variables on a plane to visualize the latent space.
# Each input image is paired with its digit label, so we plot that label as the marker.

# + id="DVhJFHgTBffI"
# compute latent variables for the whole training set
vae.eval()  # evaluation mode
x = x_train.cuda()
z = vae.encode(x)  # latent variables
z = z.cpu().detach().numpy()
t = np.asarray(digits_data.target)  # labels

# scatter the latent variables, one digit-shaped marker per class
plt.figure(figsize=(8, 8))
for i in range(10):
    zt = z[t==i]
    z_1 = zt[:, 0]  # y axis
    z_2 = zt[:, 1]  # x axis
    marker = "$"+str(i)+"$"  # use the digit itself as the marker
    plt.scatter(z_2.tolist(), z_1.tolist(), marker=marker, s=75)
plt.xlabel("z_2")
plt.ylabel("z_1")
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.grid()
plt.show()

# + [markdown] id="8qBV-ARLgwZi"
# Each label occupies a distinct region of the latent space.
# The VAE has learned to map inputs into this latent space.
#

# + [markdown] id="P703G0wbz-Uo"
# ### Generating images
# Vary the latent variables continuously and observe how the generated images change.
# Using the trained Decoder, generate a 16x16 grid of images.

# + id="8Ua9MU4Rz-U1"
# grid settings
n_img = 16  # lay out 16x16 images
img_size_spaced = img_size + 2
matrix_image = np.zeros((img_size_spaced*n_img, img_size_spaced*n_img))  # composite image

# latent-variable sweep
z_1 = np.linspace(3, -3, n_img)  # rows
z_2 = np.linspace(-3, 3, n_img)  # columns

# decode each (z1, z2) pair into a tile of the composite image
for i, z1 in enumerate(z_1):
    for j, z2 in enumerate(z_2):
        z = torch.tensor([[z1, z2]], dtype=torch.float)
        z = z.cuda()
        y = vae.decode(z)  # Decoder
        y = y.cpu().detach().numpy()
        image = y.reshape(img_size, img_size)
        top = i*img_size_spaced
        left = j*img_size_spaced
        matrix_image[top : top+img_size, left : left+img_size] = image

plt.figure(figsize=(8, 8))
plt.imshow(matrix_image.tolist(), cmap="Greys_r")
plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False)  # hide tick labels and marks
plt.show()

# + [markdown] id="4gmquBnv5ezZ"
# The latent variables change along the horizontal and vertical axes, and the generated images change smoothly with them.
# The 8x8 images have been compressed into just two latent variables.
# This is the hallmark of a VAE: data is compressed into a few latent variables whose effect on the generated output is easy to see.

# + [markdown] id="cr0qZ81oX3Bk"
# ## Exercise
# Deliberately break the balance between the reconstruction error (Rec_error) and the regularization term (Reg_error):
# $$E=E_{rec} + \alpha E_{reg}$$
#
# where $\alpha$ is a constant.
#
# Find this line in the code:
# ```
# loss = lrec + lreg
# ```
# and change it, for example, to
# ```
# alpha = 3.0
# loss = lrec + alpha*lreg
# ```
# to alter the balance between the two terms, and observe how the result changes.
#
section_3/02_vae.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Correlation / Covariance
# See if data columns are correlated / have a linear relation.

# Imports for numeric sampling and tabular handling
import numpy as np
import pandas as pd

# Load the classification dataset and append one synthetic, independent
# column ("extra") drawn from N(5, 16) so the matrices have an
# uncorrelated reference column to compare against.
df = pd.read_csv("../res/classification.csv")
df["extra"] = np.random.normal(5, 16, len(df))
df.head()

# Pairwise covariance matrix of all numeric columns
df.cov()

# Pairwise (Pearson) correlation matrix — scale-free version of the above
df.corr()
common/tutorials/src/ML_38_correlation_covariance.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Simple data representations
#
# Before we delve into learnable data representations, feature crosses, etc., let’s look at simpler data representations. We can think of these simple data representations as common idioms in machine learning -- not quite patterns, but commonly employed solutions nevertheless.

# ## Scaling helps
#
# Models trained with scaled data converge faster and are therefore faster/cheaper to train.

from sklearn import datasets, linear_model
diabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)

# +
# Min-max scale one feature column into [-1, 1].
raw = diabetes_X[:, None, 2]
max_raw = max(raw)
min_raw = min(raw)
scaled = (2*raw - max_raw - min_raw)/(max_raw - min_raw)

# +
# Time 1000 linear-regression fits on raw vs scaled input to show the
# convergence-speed benefit of scaling.
def train_raw():
    linear_model.LinearRegression().fit(raw, diabetes_y)

def train_scaled():
    linear_model.LinearRegression().fit(scaled, diabetes_y)

import timeit
raw_time = timeit.timeit(train_raw, number=1000)
scaled_time = timeit.timeit(train_scaled, number=1000)
print('Raw: {:.4f}s, Scaled: {:.4f}s, Improvement: {:2f}%'
      .format(raw_time, scaled_time, 100*(raw_time-scaled_time)/raw_time))
# -

# ## Numerical inputs
#
# One key predictor of the weight of a baby is the mother's age. We can verify this by looking at the average weight of a baby born to mothers with different ages. Since the dataset is large enough, we will do the computation in BigQuery:

# %%bigquery df
SELECT
  mother_age,
  COUNT(1) AS num_babies,
  AVG(weight_pounds) AS avg_wt
FROM
  publicdata.samples.natality
WHERE
  year > 2000
GROUP BY
  mother_age
ORDER BY
  mother_age

df.plot(x='mother_age', y='avg_wt');

# Looking at the distribution (histogram) of the raw mother's age makes the weird behavior at the edges clear. We don't have enough data for mothers in their low-teens and in their fifties. In statistical terms, these are outliers.

df.plot(x='mother_age', y='num_babies');

# Let's look at the data after applying different forms of scaling.

# +
# SQL prologue defining clipping helpers plus a `scaling` CTE that computes
# min-max, clipped, z-score, and winsorized variants of mother_age.
base_sql = """
CREATE TEMPORARY FUNCTION CLIP_LESS(x FLOAT64, a FLOAT64) AS (
  IF (x < a, a, x)
);
CREATE TEMPORARY FUNCTION CLIP_GT(x FLOAT64, b FLOAT64) AS (
  IF (x > b, b, x)
);
CREATE TEMPORARY FUNCTION CLIP(x FLOAT64, a FLOAT64, b FLOAT64) AS (
  CLIP_GT(CLIP_LESS(x, a), b)
);
WITH stats AS (
  SELECT
    MIN(mother_age) AS min_age,
    MAX(mother_age) AS max_age,
    AVG(mother_age) AS avg_age,
    STDDEV(mother_age) AS stddev_age,
    APPROX_QUANTILES(mother_age, 100)[OFFSET(1)] AS percentile_1,
    APPROX_QUANTILES(mother_age, 100)[OFFSET(99)] AS percentile_99
  FROM
    publicdata.samples.natality
  WHERE
    year > 2000
),
scaling AS (
  SELECT
    mother_age,
    weight_pounds,
    SAFE_DIVIDE(2*mother_age - max_age - min_age, max_age - min_age) AS minmax_scaled,
    CLIP( (mother_age - 30)/15, -1, 1 ) AS clipped,
    SAFE_DIVIDE(mother_age - avg_age, stddev_age) AS zscore,
    CLIP(mother_age, percentile_1, percentile_99) AS winsorized_1_99,
    SAFE_DIVIDE(2*CLIP(mother_age, percentile_1, percentile_99) - percentile_1 - percentile_99,
                percentile_99 - percentile_1) AS winsorized_scaled
  FROM
    publicdata.samples.natality, stats
)
"""

def scaled_stats(age_col):
    """Run the scaling query grouped by one scaled-age column and return the
    per-bucket baby counts and average weights as a DataFrame.
    """
    sql = base_sql + """
    SELECT
      {0},
      AVG(weight_pounds) AS avg_wt,
      COUNT(1) AS num_babies
    FROM
      scaling
    GROUP BY
      {0}
    ORDER BY
      {0}
    """.format(age_col)
    from google.cloud import bigquery
    return bigquery.Client().query(sql).to_dataframe()
# -

# Compare the age distribution under each scaling scheme in a 3x2 grid.
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [15, 15]
plt.rcParams.update({'font.size': 15})
fig, axs = plt.subplots(3, 2);
scaled_stats('mother_age').plot(x='mother_age', y='num_babies', ax=axs[0, 0]);
scaled_stats('minmax_scaled').plot(x='minmax_scaled', y='num_babies', ax=axs[0, 1]);
scaled_stats('clipped').plot(x='clipped', y='num_babies', ax=axs[1, 0]);
scaled_stats('zscore').plot(x='zscore', y='num_babies', ax=axs[1, 1], xlim=[-2, 2]);
scaled_stats('winsorized_1_99').plot(x='winsorized_1_99', y='num_babies', ax=axs[2, 0]);
scaled_stats('winsorized_scaled').plot(x='winsorized_scaled', y='num_babies', ax=axs[2, 1]);
fig.savefig('scaling.png')
plt.close(fig)

# ### Skewed data
#
# For an example of highly skewed data, assume that we are building a model to predict the likely sales of a non-fiction book. One of the inputs to the model is the popularity of the Wikipedia page corresponding to the topic. The number of views of pages in Wikipedia is highly skewed.
#

# +
# %%bigquery df
WITH bypage AS (
  SELECT
    title,
    SUM(views) AS num_views
  FROM `bigquery-samples.wikipedia_benchmark.Wiki1M`
  WHERE language = 'en'
  GROUP BY title
  HAVING num_views > 10 # non-niche
  ORDER by num_views desc
),
percentile AS (
  SELECT APPROX_QUANTILES(num_views, 100) AS bins FROM bypage
)
SELECT
  title,
  num_views,
  (ROUND(POW(LOG(num_views), 0.25), 1) - 1.3) AS fourthroot_log_views,
  CAST(REPLACE(ML.BUCKETIZE(num_views, bins), 'bin_', '') AS int64) AS bin,
FROM
  percentile, bypage
# -

# Box-Cox transform as a third way to un-skew the view counts; the fitted
# lambda is returned alongside the transformed data.
from scipy import stats
data, est_lambda = stats.boxcox(df['num_views'])
df['boxcox'] = data
df

# +
# Histograms of the raw counts vs the three transformed versions.
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [15, 10]
plt.rcParams.update({'font.size': 15})
fig, axs = plt.subplots(1, 4);
for axno, name in enumerate('num_views,fourthroot_log_views,bin,boxcox'.split(',')):
    df.hist(histtype='bar', bins=20, column=name, ax=axs[axno]);
fig.savefig('skew_log.png')
plt.close(fig)
# -

# Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
02_data_representation/simple_data_representation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Stata on Jupyter Notebook
# ### https://youtu.be/VGRDR3J346M

# Python side: set the working directory and hook up pystata so the
# # %%stata cell magic below talks to a local Stata 17 SE installation.
import os
os.chdir("C:/Users/2joon/OneDrive/바탕 화면/응용계량경제학/실습/")

import stata_setup
stata_setup.config("C:/Program Files/Stata17", "se")

# Load the fatality panel with pandas and keep a CSV copy for reference.
import pandas as pd
df=pd.read_stata("C:/Users/2joon/OneDrive/바탕 화면/응용계량경제학/실습/data/fatality.dta")
df.to_csv('C:/Users/2joon/OneDrive/바탕 화면/응용계량경제학/실습/data/fatality.csv')
df

# %%stata
* "C:/Users/2joon/OneDrive/바탕 화면/응용계량경제학/실습/"
dir

# %%stata
set more off
version 13
clear all // removes data and value labels from memory

# %%stata
* Read in Data
use data/fatality.dta, clear
desc

# %%stata
***** Summary Statistics *****
sum year state
tab state
tab state, nolabel

# %%stata
gen vfrall=10000*mrall // fatality rate per 10K in the population

# %%stata
sum vfrall beertax

# %%stata
gen dvfrall=vfrall-vfrall[_n-6] // change over 6 yrs (1982 vs 1988 rows)
gen dbtax=beertax-beertax[_n-6]

# ### Figure 10.1

# %%stata
twoway (scatter vfrall beertax if year==1982) (lfit vfrall beertax if year==1982)

# %%stata
graph export results/figure10_1.png, as(png) replace

# ### Figure 10.2

# %%stata
twoway (scatter dvfrall dbtax if year==1988) (lfit dvfrall dbtax if year==1988)
graph export results/figure10_2.png, as(png) replace

# ### Equation Table 10.1

# %%stata
// Data transformations, create dummy variables, etc.
xtset state year

# %%stata
// minimum legal drinking age
tab mlda
gen da18=(mlda<19)
gen da19=(mlda>=19)*(mlda<20)
gen da20=(mlda>=20)*(mlda<21)
gen da21=(mlda>=21)

# +
# %%stata
// real income per capita in $1000
gen incperc=perinc/1000
// log real income per capita in $1000
gen lincperc = ln(incperc)
// average vehicle miles per driver
sum vmiles
gen vmilespd=vmiles/1000
# -

# %%stata
// gen vmilespd=vmiles/1000
replace vmilespd=vmiles/10000
drop vmilespd
gen vmilespd=vmiles/1000

# %%stata
// mandatory jail or community service
tab jaild
tab comserd
gen jailcom = (jaild==1) | (comserd==1)

# %%stata
help tab

# %%stata
// year dummies
gen y82=(year==1982)
gen y83=(year==1983)
gen y84=(year==1984)
gen y85=(year==1985)
gen y86=(year==1986)
gen y87=(year==1987)
gen y88=(year==1988)

# %%stata
// macros: variable groups reused across the regression columns below
global agedum "da18 da19 da20"
global controls "jailcom vmilespd unrate lincperc"
global yeardum "y83 y84 y85 y86 y87 y88"

# %%stata
***** col(1) *****
reg vfrall beertax, r
dis "Adjusted Rsquared = " e(r2_a) //dis for display
// normal r2 and adjusted r2_a

# %%stata
* ssc install outreg2 // install outreg2 only once
outreg2 using results/table10_1.xls, replace se label dec(3) excel

# %%stata
***** col(2) *****
xtreg vfrall beertax, fe vce(cluster state)
dis "Rsquared = "e(r2)
dis "Adjusted Rsquared = "e(r2_a)
outreg2 using results/table10_1.xls, append se label dec(3) excel

# %%stata
* Get overall Adjusted Rsquared
qui areg vfrall beertax , absorb(state)
dis "Adjusted Rsquared = "e(r2_a)

# +
# %%stata
***** col(3) *****
xtreg vfrall beertax $yeardum, fe vce(cluster state)
test $yeardum
outreg2 using results/table10_1.xls, append se label dec(3) excel
* Get (only) overall Adjusted Rsquared
qui areg vfrall beertax $yeardum, absorb(state)
dis "Adjusted Rsquared = "e(r2_a)

# +
# %%stata
***** col(4) *****
* Professor has said this is main analysis
xtreg vfrall beertax $agedum $controls $yeardum, fe vce(cluster state)
test $yeardum
test $agedum
test unrate lincperc
outreg2 using results/table10_1.xls, append se label dec(3) excel
* Get (only) overall Adjusted Rsquared
qui areg vfrall beertax $agedum $controls $yeardum, absorb(state)
dis "Adjusted Rsquared = "e(r2_a)

# +
# %%stata
***** col(5) *****
xtreg vfrall beertax $agedum jailcom vmilespd $yeardum, fe vce(cluster state)
test $yeardum
test $agedum
outreg2 using results/table10_1.xls, append se label dec(3) excel
* Get (only) overall Adjusted Rsquared
qui areg vfrall beertax $agedum jailcom vmilespd $yeardum, absorb(state)
dis "Adjusted Rsquared = "e(r2_a)

# +
# %%stata
***** col(6) *****
xtreg vfrall beertax mlda $controls $yeardum, fe vce(cluster state)
test $yeardum
test unrate lincperc
outreg2 using results/table10_1.xls, append se label dec(3) excel
* Get (only) overall Adjusted Rsquared
qui areg vfrall beertax mlda $controls $yeardum, absorb(state)
dis "Adjusted Rsquared = "e(r2_a)

# +
# %%stata
***** col(7) *****
// 1982 & 1988 only
keep if (year==1982) | (year==1988)
xtreg vfrall beertax $agedum $controls y82, fe vce(cluster state)
test y82
test unrate lincperc
outreg2 using results/table10_1.xls, append se label dec(3) excel
* Get (only) overall Adjusted Rsquared
qui areg vfrall beertax $agedum $controls y82, absorb(state)
dis "Adjusted Rsquared = "e(r2_a)

# +
# # %stata
help reghdfe
python-stata-on-notebook/.ipynb_checkpoints/panel1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.0.3 # language: julia # name: julia-1.0 # --- # # K-shell fluorescence & Auger yields for neon-like ions using JAC # The fluorescence yield $\omega_r$ characterizes the probability of an inner-shell (core) hole to be filled under photon emission, in contrast and competition with other nonradiative processes. It fulfills together with the Auger yield $\omega_a$ the simple relations: $\omega_r + \omega_a = 1$ but generally requires to consider all possible (radiative and non-radiative) decay channels. # # In Jac, the computation of all fluorescence and Auger yields is always traced back to single-step **cascade computations**. Here, we wish to compute the fluorescence and Auger yields for neon-like ions and to see how the *fluorescence* increases with the nuclear charge. Similar as for other (level) properties, we can estimate the yields by performing an `Atomic.Computation(..., properties=[Yields, ..], configs=[..], yieldSettings=DecayYield.Settings(..), ..)`. # # To understand, how we can control the computation of yields, let us have a look at the internal definition and the default settings of `DecayYield.Settings(..)` # ? DecayYield.Settings DecayYield.Settings() # Here, the approach refers to one *cascade approaches* (cf. User Guide, chapter 8) that are available in JAC; because of the internal structure, however, this approach need to be provided in `String` notation as indicated above. 
# # To compute the fluorescence yields for neon-like Mg, we can specify the settings and computation as well as run the computation by: settings=DecayYield.Settings("SCA", true, false, Int64[]) wa = Atomic.Computation("Yield computations for neon-like Mg", Nuclear.Model(12.); properties=[JAC.Yields], grid=JAC.Radial.Grid("grid: by given parameters"; rnt = 2.0e-5, h = 5.0e-2, hp = 2.0e-2, NoPoints = 600), configs=[Configuration("1s 2s^2 2p^6")], yieldSettings=settings ) perform(wa) # # Obviously, this results in a rather detailed output due to printed data of the underlying cascade computation. In this single-step cascade, all the neon-like configurations with total energies below of the given $1s 2s^2 2p^6$ configuration above are generated automatically, together with all energetically allowed fluorine-like configuration due to the (Auger) emission of an electon. Here, we shall not discuss this output in detail but just go over to the final table: It report an fluorescence yield $\omega_r \approx 0.08$ in Babushkin gauge and a corresponding Auger yield of $\omega_a \approx 0.92$. The same yields in Coulomb gauge are (0., 1.), but this occurs since # by default all radiative transitions are only considered in Babushkin (length) gauge. To modify this settings, the user would have to change the *call* of the cascade computations within the code. # # To see how the coupling of the radiation field and, hence, the fluorescence increases with an increasing nuclear charge, let us perform the same computations but for Z=18 and Z=26, respectively. 
wa = Atomic.Computation("Yield computations for neon-like Ar", Nuclear.Model(18.); properties=[JAC.Yields], grid=JAC.Radial.Grid("grid: by given parameters"; rnt = 2.0e-5, h = 5.0e-2, hp = 2.0e-2, NoPoints = 600), configs=[Configuration("1s 2s^2 2p^6")], yieldSettings=settings ) perform(wa) wa = Atomic.Computation("Yield computations for neon-like Fe", Nuclear.Model(26.); properties=[JAC.Yields], grid=JAC.Radial.Grid("grid: by given parameters"; rnt = 2.0e-5, h = 5.0e-2, hp = 2.0e-2, NoPoints = 600), configs=[Configuration("1s 2s^2 2p^6")], yieldSettings=settings ) perform(wa) # # From the output of these three computations, we see that the fluorescence yield $\omega_r$ increase from 8 % (Z=12) to 28 % (Z=18) and up to 40 % (Z=26). # # # We could consider also the yields for the $2s$ hole state of argon which is known to decay by fast Coster-Kronig transitions. Because of the (very) low energies of the emitted Coster-Kronig electrons, these computations require further care in order to obtain reliable estimate. Since all these yield computations are always based on (elaborate) *cascade computations*, further improvements are possible but will require advice and support by the community. #
tutorials/14-estimate-decay-yields.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (py36) # language: python # name: myenv # --- # # Neural Cats Embeddings import yaml import numpy as np import pandas as pd from pathlib import Path import sys sys.path.append('..') from df4cats.frames import CodedDF from df4cats.embedding.generators import SiameseGeneratorDF from df4cats.embedding.models import Siamese, Embedder from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard # # %load_ext autoreload # # %autoreload 2 # ## Load the data # ### Load CSV cols = ['age','workclass','fnlwgt','education','education-num','marital-status','occupation','relationship','race','sex','capital-gain','capital-loss','hours-per-week','native-country','income-class'] sets = ['train', 'test'] data = {} data_folder = Path('../sample_data/adult_income') for kind in sets: data[kind] = pd.read_csv(data_folder / f'adult.{kind}', names=cols, sep=', ') data[kind].replace('?', np.nan, inplace=True) # ### Create CodedDF # - Encode and normalize train and test (validation) data. # - Hardcode variables. 
hard_cats = yaml.load(open(data_folder / 'adult.yaml', 'r'), Loader=yaml.FullLoader) cdf = {} cdf_hard = {} for kind in sets: cdf[kind] = CodedDF(data[kind], categorical_columns=list(hard_cats), label_columns=['income-class'], normalize=True) cdf_hard[kind] = cdf[kind].hardcode_categories(hard_cats, add_other=True, add_nan=True) # ## Train embedder # ### Get generators dfgen = {} for kind in sets: dfgen[kind] = SiameseGeneratorDF(X=cdf_hard[kind].data, y=cdf_hard[kind].data['income-class'], batch_size=20, columns=cdf_hard[kind].categorical_columns + cdf_hard[kind].continuous_columns) # ### Create Models embedder = Embedder(categorical_features= cdf_hard['train'].category_dimensions(), continuous_features= cdf_hard['train'].continuous_columns, n_dense=2, nodes_per_dense=1000, output_dim = 1000, max_embedding_dim=600, dropout_rate=0.2, residual=False,) siamese = Siamese(embedder.model) siamese.joint.compile(optimizer='Adam', loss='binary_crossentropy') # ### Train models stopping = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='min', restore_best_weights=True) lr_schedule = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=0, mode='min', verbose=1) # tboard = TensorBoard(log_dir=str(data_folder)) # checkpoint_val_loss = ModelCheckpoint(str(data_folder / 'siamese_weights_file_e{epoch:03d}-l{val_loss:.2f}.hdf5'), # save_weights_only=True, # verbose=True, # save_best_only=True, # monitor='val_loss', # mode='min', # ) siamese.joint.fit_generator(generator=dfgen['train'], epochs=35, validation_data=dfgen['test'], callbacks=[stopping, lr_schedule]) siamese.get_twin().save_weights(data_folder / 'embedder_weights_file.hdf5') ## CHECK IF LOADING IS NEEDED embed = embedder.get_embedding_model() embed.load_weights(data_folder / 'embedder_weights_file.hdf5', by_name=True) in_dict = embedder.get_input_dictionary(cdf['train'].data) pred = embed.predict(in_dict) embedder.predictions_to_df(pred).head(5) # ## Extra: get embeddings for each category, plot 3D 
embeddings from keras.models import Model category_embeddings = {} for cat in embedder.categorical_features: inp = embed.get_layer(cat) out = embed.get_layer(f'{cat}_flat_emb') category_embeddings[cat] = Model(inputs=inp.input, outputs=out.output) maps = {} for cat in embedder.categorical_features: maps[cat] = {} for v in range(embedder.categorical_features[cat]): value = cdf['train'].categorical_mapping[cat].inverse_mapping[v] maps[cat][value] = category_embeddings[cat].predict([v])[0] # + from mpl_toolkits.mplot3d import Axes3D # %matplotlib qt import matplotlib.pyplot as plt for cat in embedder.categorical_features: fig = plt.figure() ax = fig.add_subplot(111, projection='3d') for el in maps[cat]: point = maps[cat][el] ax.scatter(point[0], point[1], point[2], label=el) ax.text(point[0], point[1], point[2], '%s' % (el), size=8, zorder=1, color='k') plt.show()
notebooks/Neural Cats Embedding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# import data

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()

print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)

# +
# cast from float64 to float32
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)

# add channels dimension
X_train = X_train[..., None]
X_test = X_test[..., None]
# -

print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)

# symbolically generate a batch of images and labels

# +
# Fixed: the placeholder was named `X_tarin_or_test` (typo) while every
# feed_dict below referenced `X_train_or_test`, which raised NameError.
X_train_or_test = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='input_image')
y_train_or_test = tf.placeholder(tf.int32, shape=[None], name='image_label')

batch_size = 32
dataset = tf.data.Dataset.from_tensor_slices((X_train_or_test, y_train_or_test))
# -

dataset = dataset.batch(batch_size)
batch_generator = dataset.make_initializable_iterator()
X_batch, y_batch = batch_generator.get_next()

y_batch

# given a symbolic batch, symbolically process it through a network and output the loss

# +
net = X_batch

# add conv layers
layer1 = tf.layers.conv2d(net, filters=32, kernel_size=3, padding='SAME', activation=tf.nn.relu)
layer2 = tf.layers.conv2d(layer1, filters=32, kernel_size=3, padding='SAME', activation=tf.nn.relu)
layer3 = tf.layers.max_pooling2d(layer2, pool_size=2, strides=2)
layer4 = tf.layers.conv2d(layer3, filters=32, kernel_size=3, padding='SAME', activation=tf.nn.relu)
layer5 = tf.layers.conv2d(layer4, filters=32, kernel_size=3, padding='SAME', activation=tf.nn.relu)
layer6 = tf.layers.max_pooling2d(layer5, pool_size=2, strides=2)

# fully connected layer:
fc = tf.layers.flatten(layer6)
fc = tf.layers.dense(fc, units=512, activation=tf.nn.relu)
logits = tf.layers.dense(fc, units=10)
# -

loss = tf.losses.softmax_cross_entropy(onehot_labels=tf.one_hot(y_batch, depth=10), logits=logits)

# given a loss, create op that, when run, descends the gradient by one step

train_op = tf.train.GradientDescentOptimizer(learning_rate=.001).minimize(loss)

# start a tensorflow session and use it to initialize all variables and ops

config = tf.ConfigProto(device_count={'GPU': 0})
# Fixed: `tf.InterativeSession()` is a typo (AttributeError); also actually
# pass `config` — it was built to force CPU execution but never used.
sess = tf.InteractiveSession(config=config)
sess.run(tf.global_variables_initializer())

# do gradient descent: run a train loop over multiple iterations

# Fixed: `feed_dict{...}` was a SyntaxError — the keyword argument needs `=`.
sess.run(batch_generator.initializer,
         feed_dict={X_train_or_test: X_train, y_train_or_test: y_train})
for i in range(1000):
    _, loss_i = sess.run([train_op, loss])
    if i % 100 == 0:
        print(loss_i)

# pass through the validation set

sess.run(batch_generator.initializer,
         feed_dict={X_train_or_test: X_test, y_train_or_test: y_test})

# +
correct = 0
total = 0
for i in range(100):
    prediction, truth = sess.run([logits, y_batch])
    correct += np.sum(prediction.argmax(1) == truth)
    total += len(truth)
# -

print(correct / total)
session/TA session Feb18.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # ``pandas`` Integration # # Ref: # # - https://arrow.apache.org/docs/python/pandas.html import numpy as np import pandas as pd import pyarrow as pa import pyarrow.parquet as pq # ## DataFrames # # [EN] # # The equivalent to a pandas DataFrame in Arrow is a Table. Both consist of a set of named columns of equal length. While pandas only supports flat columns, the Table also provides nested columns, thus it can represent more data than a DataFrame, so a full conversion is not always possible. # # Conversion from a Table to a DataFrame is done by calling ``pyarrow.Table.to_pandas()``. The inverse is then achieved by using ``pyarrow.Table.from_pandas()``. # # By default ``pyarrow`` tries to preserve and restore the ``.index`` data as accurately as possible. See the section below for more about this, and how to disable this logic. # # [CN] # + df = pd.DataFrame( { "id": [1, 2, 3], "name": ["Alice", "Bob", "Cathy"], } ) # Convert from pandas to Arrow table = pa.Table.from_pandas(df) # Convert back to pandas df_new = table.to_pandas() # - table df_new # Infer Arrow schema from pandas schema = pa.Schema.from_pandas(df) schema # ## Handling pandas Indexes # # [EN] # # Methods like ``pyarrow.Table.from_pandas()`` have a ``preserve_index`` option which defines how to preserve (store) or not to preserve (to not store) the data in the ``index`` member of the corresponding pandas object. This data is tracked using schema-level metadata in the internal ``arrow::Schema`` object. # # The default of ``preserve_index`` is ``None``, which behaves as follows: # # - ``RangeIndex`` is stored as metadata-only, not requiring any extra storage. 
# - Other index types are stored as one or more physical data columns in the resulting ``Table`` # # To not store the index at all pass ``preserve_index=False``. Since storing a ``RangeIndex`` can cause issues in some limited scenarios (such as storing multiple DataFrame objects in a Parquet file), to force all index data to be serialized in the resulting table, pass ``preserve_index=True``. # # [CN] # pandas.DataFrame 是有行 index 的. 但是 pyarrow 主要是列式存储, 大部分情况下不需要 index. # # 在默认情况下从 ``Table`` 转化为 ``DataFrame`` 的时候是 ``preserve_index=None``, 具体效果如下: # # - 如果 ``DataFrame`` 用的是 ``RangeIndex`` (就是自动生成的从 0 到 n-1, n 为总行数的 index). ``Table`` 就会在 metadata 中标注使用 ``RangeIndex`` 即可, 由于只需要知道总行数即可, 所以无需在内存中真正维护 index 数据. # - 而其他的 pandas index 类型, 例如时间序列索引, 那么就会保存为真正的 ``pyarrow.Column`` 同时拷贝数据. # # 什么时候用 ``preserve_index=True``, 什么时候用 ``preserve_index=False``? # # - 当你的 ``DataFrame`` 都用的是自动生成的 ``RangeIndex``, 而你在数据处理时又不在乎 index 的时后, 建议用 ``False``. 有些极端情况下例如你想将多个 ``pandas.DataFrame`` 写入 parquet 的文件的时候会出错误. 
# - 如果你希望将 ``DataFrame`` 序列化为 parquet 并保存, 并且希望读取回来的时候依旧能恢复 index, 建议用 ``True`` df = pd.DataFrame( {"value": np.arange(3)}, index=pd.date_range("2000-01-01", periods=3, freq="1d") ) df table = pa.Table.from_pandas(df, preserve_index=False) pq.write_table(table, "df_preserve_index_false.parquet") df_loaded = pq.read_table("df_preserve_index_false.parquet").to_pandas() df_loaded table = pa.Table.from_pandas(df, preserve_index=True) pq.write_table(table, "df_preserve_index_true.parquet") df_loaded = pq.read_table("df_preserve_index_true.parquet").to_pandas() df_loaded table = pa.Table.from_pandas(df, preserve_index=None) pq.write_table(table, "df_preserve_index_none.parquet") df_loaded = pq.read_table("df_preserve_index_none.parquet").to_pandas() df_loaded # **Zero Copy Series Conversions** # # [EN] # # Zero copy conversions from Array or ChunkedArray to NumPy arrays or pandas Series are possible in certain narrow cases: # # - The Arrow data is stored in an integer (signed or unsigned ``int8`` through ``int64``) or floating point type (``float16`` through (``float64``). This includes many numeric types as well as timestamps. # - The Arrow data has no null values (since these are represented using bitmaps which are not supported by pandas). # - For ``ChunkedArray``, the data consists of a single chunk, i.e. ``arr.num_chunks == 1``. Multiple chunks will always require a copy because of pandas’s contiguousness requirement. # # In these scenarios, to_pandas or to_numpy will be zero copy. In all other scenarios, a copy will be required. # # [CN] # # 以下的几种情况下, 从 ``.to_pandas()`` 方法是 zero copy. # # - Arrow data 是整数, 有符号 / 无符号, int8 / 16 / 32 / 64, float 16 / 32 / 64. 这包含了大部分的数字类型, 包括 timestamps (因为 timestamps 本质上也是用整数储存的) # - Arrow data 没有 null value, 因为这些 null value 在 Arrow 中使用的是 bit map 数据结构存储的, 而 pandas 并不支持这种存储方式. # - 对于 ``ChunkedArray``, 在只有 1 个 chunk 的情况下 (也就是 Table 只有一个 RecordBatch) 是 Zero Copy. 否则一定需要 Copy. # # 如果有许多 column, 有的可以 zero copy, 有的不能. 
那么转化为 Table 之后只有那些不能 zero copy 的 column 的数据被复制了. 例如 string. # # **一句话总结: 想要避免 Copy Data, 请确保没有 null value, 可以 fillin 一些 default value** # # 小知识: # # - 2 ** 32 = 4,294,967,296 # - 2 ** 64 = 18,446,744,073,709,551,616 # - 从 1970-01-01 00:00:00 开始, 如果精度是 1 秒, 32 位整数可以表示到 2106-02-07 06:28:16 # - 从 1970-01-01 00:00:00 开始, 如果精度是 1 纳秒 (10e9 ns = 1 s), 64 位整数可以表示到 2554 年 12 月, 足够我们用了. table = pa.table( { "id": pa.array([1, 2, 3], type=pa.int16()), "name": pa.array(["alice", "bob", "cathy"]), } ) table.to_pandas()
01-pyarrow-examples/05-pandas-Integration/pandas-Integration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Question 1
#
# Print the first ArmStrong number in the range of 1042000 to 702648265 and exit the loop as soon
# as you encounter the first armstrong number.
# Use while loop
#

def first_armstrong(start, stop):
    """Return the first Armstrong number in [start, stop), or None if there is none.

    An Armstrong (narcissistic) number equals the sum of its own digits, each
    raised to the power of the digit count (e.g. 153 = 1**3 + 5**3 + 3**3).

    Parameters:
        start (int): inclusive lower bound of the search range.
        stop (int): exclusive upper bound of the search range.
    """
    i = start
    # The exercise asks for a while loop, so the outer scan is a while loop
    # too (the original used `for i in range(...)`).
    while i < stop:
        digits = len(str(i))
        digit_sum = 0  # renamed from `sum`, which shadowed the builtin
        j = i
        while j > 0:
            digit_sum += (j % 10) ** digits
            j //= 10
        if digit_sum == i:
            return i  # stop at the first hit, as required
        i += 1
    return None


# Guarded so importing this module does not trigger the long search; in a
# notebook __name__ is "__main__", so the cell still prints as before.
if __name__ == "__main__":
    result = first_armstrong(1042000, 702648265)
    if result is not None:
        # (removed the dead statement `i=i+0` that preceded this print)
        print("The First ArmStrong Number is :-", result)
Day 4 Assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Heading
# ##subheading

2+2

1+1

4+0

# Single implementation. An earlier draft of `groupSort` (which printed the
# whole pair list once per most-frequent value and never returned anything)
# was immediately shadowed by this definition, so it has been removed.

def groupSort(arr):
    """Return [value, frequency] pairs sorted by decreasing frequency,
    ties broken by increasing value.

    Ex: [3, 3, 1, 2, 1]
        decreasing freq -> [3, 3, 1, 1, 2]
        increasing value on ties -> [1, 1, 3, 3, 2]
        result -> [[1, 2], [3, 2], [2, 1]]

    Note: sorts `arr` in place as a side effect (kept from the original).
    """
    from collections import Counter
    count = Counter(arr)
    # Sort first by decreasing frequency, then by increasing value.
    arr.sort(key=lambda x: (-count[x], x))
    # Re-count after sorting: Counter preserves insertion order, so the
    # pairs come out in the sorted order.
    count = Counter(arr)
    return [[key, value] for key, value in count.items()]


groupSort([3,3,1,2,1])

groupSort([2,1,2,2])

from collections import Counter
arr = [3,3,1,2,1]
count = Counter(arr)
print(arr)
print(count)

# +
# Ex: [3,3,1,2,1]
# decreasing freq   -> [3,3,1,1,2]
# increasing value  -> [1,1,3,3,2]
# -
Test2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: mbrl # language: python # name: mbrl # --- # + import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import omegaconf import torch import torch.optim as optim import mbrl.models as models import mbrl.util.common as common_utils import mbrl.util.replay_buffer as replay_buffer device = torch.device("cuda:0") # %load_ext autoreload # %autoreload 2 # %matplotlib inline mpl.rcParams['figure.facecolor'] = 'white' # + x_data = np.linspace(-12, 12, 10000) y_data = np.sin(x_data) train_size = 2000 val_size = 200 x_train = np.zeros(2 * train_size) y_train = np.zeros(2 * train_size) x_val = np.zeros(2 * val_size) y_val = np.zeros(2 * val_size) # Half with lower noise train_val_idx_1 = np.random.choice(list(range(1200, 3500)), size=train_size + val_size, replace=False) mag = 0.05 x_train[:train_size] = x_data[train_val_idx_1[:train_size]] y_train[:train_size] = y_data[train_val_idx_1[:train_size]] + mag * np.random.randn(train_size) x_val[:val_size] = x_data[train_val_idx_1[train_size:]] y_val[:val_size] = y_data[train_val_idx_1[train_size:]] + mag * np.random.randn(val_size) # Half with higher noise train_val_idx_2 = np.random.choice(list(range(6500, 8800)), size=train_size + val_size, replace=False) mag = 0.20 x_train[train_size:] = x_data[train_val_idx_2[:train_size]] y_train[train_size:] = y_data[train_val_idx_2[:train_size]] + mag * np.random.randn(train_size) x_val[val_size:] = x_data[train_val_idx_2[train_size:]] y_val[val_size:] = y_data[train_val_idx_2[train_size:]] + mag * np.random.randn(val_size) plt.figure(figsize=(16, 8)) plt.plot(x_data, y_data, x_train, y_train, '.', x_val, y_val, 'o', markersize=4) plt.show() train_size *=2 val_size *= 2 # + # ReplayBuffer generates its own training/validation split, but in this example we want to # keep the split generated above, so 
instead we use two replay buffers. num_members = 5 train_buffer = replay_buffer.ReplayBuffer(train_size, (1,), (0,)) val_buffer = replay_buffer.ReplayBuffer(val_size, (1,), (0,)) for i in range(train_size): train_buffer.add(x_train[i], 0, y_train[i], 0, False) for i in range(val_size): val_buffer.add(x_val[i], 0, y_val[i], 0, False) train_dataset, _ = common_utils.get_basic_buffer_iterators( train_buffer, 2048, 0, ensemble_size=num_members, shuffle_each_epoch=True) val_dataset, _ = common_utils.get_basic_buffer_iterators( val_buffer, 2048, 0, ensemble_size=1) # - ensemble = models.GaussianMLP( 1, # input size 1, # output size device, num_layers=3, hid_size=64, activation_fn_cfg={"_target_": "torch.nn.SiLU"}, ensemble_size=num_members ) wrapper = models.OneDTransitionRewardModel( ensemble, target_is_delta=False, normalize=True, learned_rewards=False) wrapper.update_normalizer(train_buffer.get_all()) trainer = models.ModelTrainer(wrapper, optim_lr=0.001, weight_decay=5e-5) train_losses, val_losses = trainer.train( train_dataset, val_dataset, num_epochs=5000, patience=500) fig, ax = plt.subplots(2, 1, figsize=(16, 8)) ax[0].plot(train_losses) ax[0].set_xlabel("epoch") ax[0].set_ylabel("train loss (gaussian nll)") ax[1].plot(val_losses) ax[1].set_xlabel("epoch") ax[1].set_ylabel("val loss (mse)") plt.show() # + x_tensor = torch.from_numpy(x_data).unsqueeze(1).float().to(device) x_tensor = wrapper.input_normalizer.normalize(x_tensor) with torch.no_grad(): y_pred, y_pred_logvar = ensemble(x_tensor) y_pred = y_pred[..., 0] y_pred_logvar = y_pred_logvar[..., 0] y_var_epi = y_pred.var(dim=0).cpu().numpy() y_var = y_pred_logvar.exp() y_pred = y_pred.mean(dim=0).cpu().numpy() y_var_ale = y_var.mean(dim=0).cpu().numpy() y_std = np.sqrt(y_var_epi + y_var_ale) plt.figure(figsize=(16, 8)) plt.plot(x_data, y_data, 'r') plt.plot(x_train, y_train, '.', markersize=0.9) plt.plot(x_data, y_pred, 'b-', markersize=4) plt.fill_between(x_data, y_pred, y_pred + 2 * y_std, color='b', 
alpha=0.2) plt.fill_between(x_data, y_pred - 2 * y_std, y_pred, color='b', alpha=0.2) plt.axis([-12, 12, -2.5, 2.5]) plt.show() # -
notebooks/fit_gaussian_mlp_ensemble_1d.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Бифуркации $I_{Na,p}+I_K$ модели и интерактивные графики на фазовой плоскости # + # Графики будут сохраняться в сам документ: # %pylab inline # - from __future__ import division # + # Для интерактивных графиков, легко установить, если используете Anaconda import bokeh import bokeh.plotting as bp from bokeh.models import ColumnDataSource, Range1d from ipywidgets import interact # - bp.output_notebook() # Устанавливается командой `pip install PyDSTool` import PyDSTool as dst from PyDSTool.Toolbox import phaseplane as pp # ## Модель $I_{Na,p}+I_K$ # # \begin{equation} # C\dot{V} = I - (\bar{g}_Kn(V-E_K) + \bar{g}_{Na}m_\infty(V)(V-E_{Na}) + gl(V-El)) # \end{equation} # # \begin{equation} # \tau_n n = (n_\infty(V) - n) # \end{equation} # # \begin{equation} # x_\infty = \frac{1}{1 + \exp(\frac{V_{1/2}-V}{k})} # \end{equation} napk_pset = dict( I = 0.0, El = -80.0, Ena = 60.0, Ek = -90.0, gl = 8.0, gna = 20.0, gk = 10.0, ntau = 5.0, minf_vhalf = -20., minfk = 15., ninf_vhalf = -25., ninfk = 5.0 ) pset_str = ';\n'.join(['{k}=dst.Par({v},"{k}")'.format(k=k,v=v) for k,v in napk_pset.items()]) print pset_str exec(pset_str) # Переменные состояния V = dst.Var('V') n = dst.Var('n') # Параметризация зависимости x_inf от потенциала boltzman = dst.Fun(1./(1. 
+ dst.Exp(('Vhalf'- V)/'bk')), ['Vhalf','bk'], 'boltzman') minf = boltzman(minf_vhalf,minfk) ninf = boltzman(ninf_vhalf,ninfk) print minf print 'At -10 mV:', minf.eval(V=-10) vtest = linspace(-89, 45, 250) plot(vtest, eval(str(minf.eval(V='vtest', Exp='np.exp', **napk_pset)))) plot(vtest, eval(str(ninf.eval(V='vtest', Exp='np.exp',**napk_pset)))) xlabel(u'V, mV'); legend(('$m_\infty$', '$n_\infty$'),loc='best') # Уравнение для активации К-тока dn = (ninf-n)/ntau print dn # + # Символьное описание токов и уравнение для dV/dt iK = gk*(V-Ek)*n iNa = gna*(V-Ena)*minf ileak = gl*(V-El) dV = I - (ileak + iNa + iK) print dV # - print dV Iinf = iNa + gk*(V-Ek)*ninf + ileak vnull = (I - (ileak + iNa))/(gk*(V-Ek)) print 'Iinf = ', Iinf print 'V-nullcline:', vnull vnull_expr = str(vnull.eval(V='vtest',Exp='np.exp', **napk_pset)) # + # Собственно, создаем модель и передаем ее PyDSTool NapKmodel = dst.args( name = 'napk', pars = napk_pset, varspecs = {'V':dV, 'n':dn}, tdomain=[0,250], xdomain=dict(V=[-150, 60], n=[0,1]), ics = {'V':-70,'n':0}) odeset = dst.Generator.Vode_ODEsystem(NapKmodel) # - # Посчитаем траекторию (динамику) для примера traj = odeset.compute('test') pts = traj.sample(dt=0.1) # + vnull_0 = eval(vnull_expr) nnull_0 =eval(str(ninf.eval(V='vtest',Exp='exp',**napk_pset))) Iinf_0 = eval(str(Iinf.eval(V='vtest',Exp='exp',**napk_pset))) source_nulls = ColumnDataSource(data=dict(x=vtest, vnull=vnull_0, nnull=nnull_0, Iinf=Iinf_0)) source_traj = ColumnDataSource(data=dict(t=pts['t'],v=pts['V'],n=pts['n'])) def warp_eval(x,args): return eval(str(x.eval(args))) def update(I=0, vhalf_n=-25, ntaux=0.2): I_before = napk_pset['I'] vhalf_n_before = napk_pset['ninf_vhalf'] napk_pset['I'] = I napk_pset['ninf_vhalf'] = vhalf_n napk_pset['ntau'] = ntaux args = dict(V='vtest',Exp='exp',**napk_pset) source_nulls.data['vnull'] = warp_eval(vnull,args) source_nulls.data['nnull'] = warp_eval(ninf,args) source_nulls.data['Iinf'] = warp_eval(Iinf,args) odeset.set(pars=napk_pset) traj = 
odeset.compute('test') pts = traj.sample(dt=0.025) source_traj.data['t'] = pts['t'] source_traj.data['v'] = pts['V'] source_traj.data['n'] = pts['n'] bokeh.io.push_notebook() # + p1 = bp.figure(title=u"Фазовый портрет", plot_height=500, plot_width=500, x_axis_label='V', y_axis_label='n') p1.line(vtest, y='vnull', source=source_nulls, line_width=1.5, color='magenta', legend="dV/dt=0") p1.line(vtest, 'nnull', source=source_nulls, color='teal', line_width=1.5,legend='dn/dt=0') p1.line(x='v',y='n', source=source_traj, line_width=1) #p1.legend.orientation = "top_right" p1.y_range = Range1d(-0.2,2) p2 = bp.figure(title=u"Равновесный ток", plot_height=500, plot_width=500, x_axis_label='V', y_axis_label='Iinf') p2.line(vtest, y='Iinf', source=source_nulls, line_width=1, color='black') p2.y_range = Range1d(-100,100) p3 = bp.figure(title=u"Динамика (V)", plot_width=900, plot_height=200) p3.line(x='t',y='v', source=source_traj) bp.show(bp.vplot(bp.hplot(p1,p2),p3)) # - interact(update, I=[-10, 40, 0.1], vhalf_n=[-40,-20], ntaux=[0.1,2,0.01]) # todo: demonstrate bistability for SN p1.
INa,p+IK model with interactive plots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import os import librosa import sys import pandas as pd import numpy as np from matplotlib import pyplot as plt import librosa.display from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.model_selection import cross_val_score from get_mel_spectrogram import get_mel_spectrogram # - # get the parent directory parent_dir = os.path.abspath('..') test_dir = os.path.join(parent_dir, 'Testing') data_dir = os.path.join(parent_dir, 'Data') meta_dir = os.path.join(data_dir, 'metadata') sour_dir = os.path.join(data_dir, 'source') data_dir = os.path.join(parent_dir, 'EDA') fan_data = pd.read_csv(os.path.join(test_dir, 'all_fan_subset.csv')) pump_data = pd.read_csv(os.path.join(test_dir, 'all_pump_subset.csv')) slider_data = pd.read_csv(os.path.join(test_dir, 'all_slider_subset.csv')) valve_data = pd.read_csv(os.path.join(test_dir, 'all_valve_subset.csv')) # + #best number of mels 64, best window size, 1.0, best channel = 3, best number of neighbours = 5 # - def modeler(machine): """ Function that does the actual training of the models, using the parameters that have come up as optimal from the testing process. :machine: string that represents which machine type we are currently training on :return: both the model and a dict containing all params used and results are returned. 
""" # select the correct dataset if machine == 'fan': data = fan_data elif machine == 'pump': data = pump_data elif machine == 'slider': data = slider_data elif machine == 'valve': data = valve_data # setting a dict for storing all parameters and results results_knn = { 'window' : [], 'window_length' : [], 'overlap' : [], 'overlap_length' : [], 'hop_length' : [], 'n_fft' : [], 'n_mels' : [], 'n_neighbor' : [], 'channel' : [], 'accuracy' : [], 'recall' : [], 'precision' : [], 'f1' : [], 'cross_val_scores' : [], 'cross_val_mean' : [], 'cross_val_stdev' : [] } # storing the results of the data preprocessing for all the files all_mel_spect = [] all_mel_spect_db = [] all_mfcc = [] all_params = [] # setting the classifiers parameters here window_p = 1 overlap_p = None n_fft_p = None if machine == 'fan': n_mels_p = 32 n_neighbors_p = 5 channel_p = 3 elif machine == 'pump': n_mels_p = 512 n_neighbors_p = 3 channel_p = 1 elif machine == 'slider': n_mels_p = 512 n_neighbors_p = 3 channel_p = 6 elif machine == 'valve': # not yet optimized n_mels_p = 512 n_neighbors_p = 3 channel_p = 7 # loop through all files in the data set to apply preprocessing for idx, row in data.iterrows(): mel_spect, mel_spect_db, mfcc, params = get_mel_spectrogram( os.path.join(sour_dir, row['file_rel_path']), no_channel = channel_p, window = window_p, overlap = overlap_p, n_fft = n_fft_p, n_mels = n_mels_p ) # storing the results all_mel_spect.append(mel_spect) all_mel_spect_db.append(mel_spect_db) all_mfcc.append(mfcc) # store the parameters params['n_neighbor'] = n_neighbors_p params['channel'] = channel_p all_params.append(params) # write the results to the dataframe data['mel_spect'] = all_mel_spect data['mel_spect_db'] = all_mel_spect_db data['all_params'] = all_params # define target y = data['anomaly'] # define features X = [] for idx, row in data.iterrows(): #X.append(row['mel_spect_db'].reshape(1, total_size)) X.append(np.array(row['mel_spect_db']).flatten()) # split the data in 2 groups, 1 
for training, 1 for testing X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 2) # initialize the knn classifier model = KNeighborsClassifier(n_neighbors=n_neighbors_p) # train the classifier model.fit(X_train,y_train) # get predictions predicted = model.predict(X_test) # calculate results accuracy = metrics.accuracy_score(y_test, predicted) recall = metrics.recall_score(predicted, y_test, average='macro') precision = metrics.precision_score(predicted, y_test, average='macro') f1 = metrics.f1_score(predicted, y_test, average='macro') # cross-validate cross_val_scores = cross_val_score(model, X_test, y_test, cv=5) # cv is the number of folds (k) cross_val_mean = cross_val_scores.mean() * 100 cross_val_stdev = cross_val_scores.std() * 100 # store results for key, val in params.items(): results_knn[key].append(val) results_knn['accuracy'].append(accuracy) results_knn['recall'].append(recall) results_knn['precision'].append(precision) results_knn['f1'].append(f1) results_knn['cross_val_scores'].append(cross_val_scores) results_knn['cross_val_mean'].append(cross_val_mean) results_knn['cross_val_stdev'].append(cross_val_stdev) print(f"params: {params}") print(f"Accuracy: {accuracy}") print(f"recall: {recall}") print(f"precision: {precision}") print(f"f1: {f1}") print("cross_val mean and acc: {:.2f}% (+/- {:.2f})".format(cross_val_mean, cross_val_stdev)) return model, results_knn fan_model, fan_results = modeler('fan') pump_model, pump_results = modeler('pump') slider_model, slider_results = modeler('slider') valve_model, valve_results = modeler('valve') results_knn # storing the results import joblib joblib.dump(fan_model, 'Models/fan_model.pkl') joblib.dump(pump_model, 'Models/pump_model.pkl') joblib.dump(slider_model, 'Models/slider_model.pkl') joblib.dump(valve_model, 'Models/valve_model.pkl') pd.DataFrame(fan_results).to_csv('Models/fan_results.csv') pd.DataFrame(pump_results).to_csv('Models/pump_results.csv') 
pd.DataFrame(slider_results).to_csv('Models/slider_results.csv') pd.DataFrame(valve_results).to_csv('Models/valve_results.csv') fan_model_cv_test, fan_results_cv_test = modeler('fan') fan_model_cv_test, fan_results_cv_test = modeler('fan')
Code/Modeling/KNN/trainingFinalKNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="1K0_sJIHCjL6" # # Colabで全文検索(その2:PostgreSQL編) # # 各種全文検索ツールをColabで動かしてみるシリーズです。全7回の予定です。今回はPostgreSQLです。 # # 処理時間の計測はストレージのキャッシュとの兼ね合いがあるので、2回測ります。2回目は全てがメモリに載った状態での性能評価になります。ただ1回目もデータを投入した直後なので、メモリに載ってしまっている可能性があります。 # + [markdown] id="rr8_1pqmMHal" # ## 準備 # # まずは検索対象のテキストを日本語wikiから取得して、Google Driveに保存します。(※ Google Driveに約1GBの空き容量が必要です。以前のデータが残っている場合は取得せず再利用します。) # + [markdown] id="SfwapuEEMHal" # Google Driveのマウント # + colab={"base_uri": "https://localhost:8080/"} id="uB4D_a2JMHal" outputId="0aef0966-b79d-46df-f29a-7dffe5d66c53" from google.colab import drive drive.mount('/content/drive') # + [markdown] id="noPtYp0ZMHam" # jawikiの取得とjson形式に変換。90分ほど時間がかかります。他の全文検索シリーズでも同じデータを使うので、他の記事も試す場合は wiki.json.bz2 を捨てずに残しておくことをおすすめします。 # + colab={"base_uri": "https://localhost:8080/"} id="5AbKu4okMHam" outputId="0ec1b5c0-5ae5-4da5-9163-87d6c13a8b4f" # %%time # %cd /content/ import os if not os.path.exists('/content/drive/MyDrive/wiki.json.bz2'): # !wget https://dumps.wikimedia.org/jawiki/latest/jawiki-latest-pages-articles.xml.bz2 # !pip install wikiextractor # !python -m wikiextractor.WikiExtractor --no-templates --processes 4 --json -b 10G -o - jawiki-latest-pages-articles.xml.bz2 | bzip2 -c > /content/drive/MyDrive/wiki.json.bz2 # + [markdown] id="n2mhXmY6MHam" # json形式に変換されたデータを確認 # + colab={"base_uri": "https://localhost:8080/"} id="do5SBCPJMHam" outputId="abd61db5-0253-43ad-8572-8399770c5722" import json import bz2 with bz2.open('/content/drive/MyDrive/wiki.json.bz2', 'rt', encoding='utf-8') as fin: for n, line in enumerate(fin): data = json.loads(line) print(data['title'].strip(), data['text'].replace('\n', '')[:40], sep='\t') if n == 5: break # + [markdown] id="GQ53mxqQFWNP" # ## PostgreSQLのインストール # # 
pg_bigmをビルドするのにPostgreSQLのソースコードが必要なようなので、ソースコードからインストールします。 # + id="28b04YaMhbY2" # %cd /content # !wget https://ftp.postgresql.org/pub/source/v14.1/postgresql-14.1.tar.gz # !tar xzf postgresql-14.1.tar.gz # %cd /content/postgresql-14.1/ # !./configure # !make install # + [markdown] id="_EGfXFUbXXom" # 全文検索用のモジュールpg_bigmをインストールします。 # + colab={"base_uri": "https://localhost:8080/"} id="dJPGI5UfhjGF" outputId="a74f8ce3-915d-4306-b0b9-9a38542c1f93" # %cd /content # !wget https://ja.osdn.net/projects/pgbigm/downloads/72448/pg_bigm-1.2-20200228.tar.gz # !tar xzf pg_bigm-1.2-20200228.tar.gz # %cd /content/pg_bigm-1.2-20200228 # !make USE_PGXS=1 PG_CONFIG=/usr/local/pgsql/bin/pg_config install # + [markdown] id="qVY52zRaGNtO" # ## PostgreSQLの立ち上げ # + [markdown] id="p9lRvtNKXtPv" # PostgreSQLを実行するユーザーを作成します。 # + colab={"base_uri": "https://localhost:8080/"} id="t6ZCHMHWRswj" outputId="10205d32-8cc7-49d2-e4d6-c99c717514bf" # !yes | adduser --disabled-password postgres # + [markdown] id="QJjhbVpuXz1l" # DBを構築する場所を初期化します。 # + colab={"base_uri": "https://localhost:8080/"} id="YmCb7m1UO-cj" outputId="a9a1b8f2-9662-40f3-9838-efd5c931d435" # !sudo -u postgres /usr/local/pgsql/bin/initdb -D /tmp/postgres --encoding=UTF8 # + [markdown] id="XBCINkYTX_J7" # pg_bigmをロードするための設定を書き込みます。 # + id="hgtS1QznLfuM" # !echo shared_preload_libraries = 'pg_bigm' >> /tmp/postgres/postgresql.conf # + [markdown] id="TvnJHyvqYIGx" # PostgreSQLをバックグラウンドで走らせます。 # + colab={"base_uri": "https://localhost:8080/"} id="sRVO007ujHxG" outputId="9a6f1501-b8a1-4e69-f729-5eff58e4b29a" magic_args="--bg" language="bash" # sudo -u postgres /usr/local/pgsql/bin/pg_ctl -D /tmp/postgres start # + [markdown] id="ZCX_s5s4YSds" # 5秒間、起動を待ちます。 # + id="KaqVqBG7SHRM" import time time.sleep(5) # + [markdown] id="Qm_Y3QaTYZX1" # ユーサーを確認します。 # + colab={"base_uri": "https://localhost:8080/"} id="fCFpy9GxSJyI" outputId="9a454c71-5184-4830-95f1-c7875f0ba5a1" # !echo "\\du" | sudo -u postgres /usr/local/pgsql/bin/psql # + 
[markdown] id="BqgpKSMAYjRl" # プロセスを確認します。 # + colab={"base_uri": "https://localhost:8080/"} id="wOhLZcnSSVff" outputId="557269bc-3f07-49e2-9aac-0873848f26bf" # !ps aux | grep postgres | grep -v grep # + [markdown] id="e3U353jCG9z-" # ## DB作成 # + colab={"base_uri": "https://localhost:8080/"} id="24FiGCQpbrZ7" outputId="f74d4761-4ae1-4b38-91bd-a04ea28dc0c1" # !echo "create database db" | sudo -u postgres /usr/local/pgsql/bin/psql # + colab={"base_uri": "https://localhost:8080/"} id="GtnVNXjBSzFf" outputId="2ae14d6f-a7b1-4674-f513-5c97494bd040" # !echo "CREATE EXTENSION pg_bigm" | sudo -u postgres /usr/local/pgsql/bin/psql db # + colab={"base_uri": "https://localhost:8080/"} id="QyESW1GXS2ze" outputId="48aa3d62-a8ce-4c30-da96-bc8db3e50c19" # !echo "\\dx" | sudo -u postgres /usr/local/pgsql/bin/psql db # + [markdown] id="55AM6XT7HDr9" # ## Pythonクライアントのインストール # + colab={"base_uri": "https://localhost:8080/"} id="wWBpoxVmjRTC" outputId="4af4ff84-561a-4309-b8a5-3943c2d8097c" # !pip install psycopg2 # + [markdown] id="oZzcOXj4HIOj" # ## データのインポート # # テーブルを作成して、データを50万件登録します。10分ほど時間がかかります。 # + colab={"base_uri": "https://localhost:8080/", "height": 106, "referenced_widgets": ["903eecdd177f4e7fa85375f12a39ecf4", "e0d595b2ed884fcc879d66a32ed3a32f", "<KEY>", "7482b442f0fe4d669ed7517f8c74978c", "5c76c15c34634d2faca9cee9410a1a72", "4786e3e3e7214a9fbf7c96c70a9c9e5a", "498fe644d3fb4bce9f27f283ea6bfa85", "<KEY>", "09c7f34e1d124c438705ee1be84e2f65", "1823fa8acb6b4184829f23c91a566eb7", "bf2bd33fd13e4935a88f5e62f63566cb"]} id="irdCqoI0cD95" outputId="e0172b88-5a50-4b30-9fb8-f530c70ae4eb" import psycopg2 import json import bz2 from tqdm.notebook import tqdm db = psycopg2.connect(database="db", user="postgres", host="/tmp/") cursor = db.cursor() cursor.execute('drop table if exists wiki_jp') cursor.execute('create table wiki_jp(title text, body text)') limit = 500000 insert_wiki = 'insert into wiki_jp (title, body) values (%s, %s);' with 
bz2.open('/content/drive/MyDrive/wiki.json.bz2', 'rt', encoding='utf-8') as fin: n = 0 for line in tqdm(fin, total=limit*1.5): data = json.loads(line) title = data['title'].strip() body = data['text'].replace('\n', '') if len(title) > 0 and len(body) > 0: cursor.execute(insert_wiki, (title, body)) n += 1 if n == limit: break db.commit() db.close() # + [markdown] id="Ycuf9PjwJskZ" # 登録件数を確認します。 # + colab={"base_uri": "https://localhost:8080/"} id="2czdK07q1tex" outputId="6a354452-bc2b-454d-af70-46b666d1df6a" # !echo "select count(*) from wiki_jp;" | sudo -u postgres /usr/local/pgsql/bin/psql db # + [markdown] id="A_BknkylHgCU" # ## インデックスを使わない検索 # # like検索でシーケンシャルに検索した場合を測定します。 # + [markdown] id="QDLJFdDl5BFj" # 検索プランを表示します。インデックスを使っていないことが確認できます。 # + colab={"base_uri": "https://localhost:8080/"} id="IoI0DqzXGFBp" outputId="38a03544-f216-4bf4-ab25-e6e02044a440" # !echo "set enable_bitmapscan=off; explain select * from wiki_jp where body like '%日本語%';" | sudo -u postgres /usr/local/pgsql/bin/psql db # + [markdown] id="hopmskgx4CBL" # \timingを設定すると、出力の最後に処理時間が出力されます。 # + colab={"base_uri": "https://localhost:8080/"} id="MNrr-2hQvYf-" outputId="19cc5879-b894-4fe1-f5b8-e6a79329d086" # %%writefile command.txt set enable_bitmapscan=off; \timing select * from wiki_jp where body like '%日本語%'; # + [markdown] id="oj7LYVFR4Or_" # 出力される件数と処理時間の部分だけをtailコマンドで切り出しています。 # + colab={"base_uri": "https://localhost:8080/"} id="JhQYLl-pT5cI" outputId="09d450c8-ad31-4e9a-d7be-97c8bcc87dc9" # %%time # !sudo -u postgres /usr/local/pgsql/bin/psql db < command.txt | tail -3 # + [markdown] id="YNrdh6zK4z3V" # 2回目 # + colab={"base_uri": "https://localhost:8080/"} id="pUvXLfSfczDZ" outputId="312c5ef9-6474-4b6b-a993-95ba1bb41fb7" # %%time # !sudo -u postgres /usr/local/pgsql/bin/psql db < command.txt | tail -3 # + [markdown] id="1KwME6GBUkhr" # 内部での処理時間と%%timeによるセルの実行時間に乖離があります。内部の処理時間は検索のみの時間かもしれません。 # + [markdown] id="k6l-0-pGcUTk" # 参考にtitleのみを抽出するクエリを測定します。 # + colab={"base_uri": 
"https://localhost:8080/"} id="AaPgO3GzLdHA" outputId="7dc6631e-4ece-40c4-ac24-d5452fccebe2" # %%writefile command2.txt set enable_bitmapscan=off; \timing select title from wiki_jp where body like '%日本語%'; # + colab={"base_uri": "https://localhost:8080/"} id="stmnyC8WLgch" outputId="078e2433-3041-410b-8872-14276b89101b" # %%time # !sudo -u postgres /usr/local/pgsql/bin/psql db < command2.txt | tail -3 # + [markdown] id="Q9UPBzy_f2eY" # 内部の検索時間は変わりません。全体の処理時間が短くなったのは、転送する結果の量の違いによるものでしょう。 # + [markdown] id="BJbvluZHHl5R" # ## 全文検索用インデックスの作成 # + [markdown] id="K0pm-oefLPES" # インデックスの作成には15分ほどかかります。 # + colab={"base_uri": "https://localhost:8080/"} id="peb32ohOCHoE" outputId="a4ed7698-8871-4578-85e9-77f2dae185a9" # %%time # !echo "CREATE INDEX wiki_jp_idx ON wiki_jp USING gin (body gin_bigm_ops);" | sudo -u postgres /usr/local/pgsql/bin/psql db # + [markdown] id="pTaLmCjpHtcB" # ## インデックスを使った検索 # + [markdown] id="Dn117B-g5TmT" # 検索プランを表示します。インデックスを使っていることが確認できます。 # + colab={"base_uri": "https://localhost:8080/"} id="DtNENB5GG0k7" outputId="52f8ed24-5912-4172-e50e-cd2053bc7d50" # !echo "set enable_bitmapscan=on; explain select * from wiki_jp where body like '%日本語%';" | sudo -u postgres /usr/local/pgsql/bin/psql db # + [markdown] id="uTrTPxVL5hmg" # インデックスを有効にした検索コマンドを作成します。 # + colab={"base_uri": "https://localhost:8080/"} id="_zBzLNgo56lO" outputId="91e69819-2392-47a9-fcca-2c0da4606738" # %%writefile command.txt set enable_bitmapscan=on; \timing select * from wiki_jp where body like '%日本語%'; # + [markdown] id="PNTjR6wE5cuD" # 1回目 # + colab={"base_uri": "https://localhost:8080/"} id="AImnpER2VUMr" outputId="573504a3-b448-445e-8273-a0a096bc56df" # %%time # !sudo -u postgres /usr/local/pgsql/bin/psql db < command.txt | tail -3 # + [markdown] id="WLM7yBzW5bOO" # 2回目 # + colab={"base_uri": "https://localhost:8080/"} id="WoNEar-Rc3Pl" outputId="7b07dcf9-dd84-48b0-dcb3-5b4c9ad1bb06" # %%time # !sudo -u postgres /usr/local/pgsql/bin/psql db < command.txt | tail -3 # + 
[markdown] id="EfLqfLg636I7" # インデックスを使うことによる効果は表れていますが、それほどでもありません。検索対象の数が増えるともっと顕著な差が現れると思われます。 # + [markdown] id="MzoDOLhVcJqZ" # 参考にtitleのみを抽出するクエリを測定してみます。 # + colab={"base_uri": "https://localhost:8080/"} id="bDf1Id9XK4Fk" outputId="d4038943-72a5-416a-95a4-328dcde82eec" # %%writefile command2.txt set enable_bitmapscan=on; \timing select title from wiki_jp where body like '%日本語%'; # + colab={"base_uri": "https://localhost:8080/"} id="Mz8FuA6LK7iX" outputId="4fa91450-52f5-405c-87ce-8f348b4a57c9" # %%time # !sudo -u postgres /usr/local/pgsql/bin/psql db < command.txt | tail -3 # + [markdown] id="Wy0d7eiKH4H5" # ## DBの停止 # + colab={"base_uri": "https://localhost:8080/"} id="g2ovVFGW29Mt" outputId="9ceb0d9e-597d-4182-f88a-c62aeac0e1bc" # !sudo -u postgres /usr/local/pgsql/bin/pg_ctl -D /tmp/postgres stop
FullTextSearch_Postgres.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: DS 5110 Spark 3.1
#     language: python
#     name: ds5110_spark3.1
# ---

# ## Predicting Sentiment from Tweets: Data Summary
# #### <NAME> (nmf8dm), <NAME> (ddj9rt), <NAME> (kal4af)

# At a minimum, the file should include a summary containing:
# * Number of records
# * Number of columns
# * Statistical summary of response variable
# * Statistical summary of potential predictor variables (if there are a large number of predictors, select the top 10)
#     * Note: Summarize categorical variables with counts and percentages for each level and summarize numerical variables with mean/quantiles/standard deviation.
# * Include up to five helpful graphs

# +
from pyspark.sql import SparkSession

# Reuse an already-running session if one exists (getOrCreate is idempotent).
spark = SparkSession.builder.getOrCreate()

# +
# The CSV has no header row, so Spark names the columns _c0.._c5; rename
# them to something readable.  No schema is given, so every column is a
# string (the label comparisons below rely on Spark's implicit casting).
df = spark.read.csv("training.1600000.processed.noemoticon.csv")

df = df.withColumnRenamed("_c0", "label")\
    .withColumnRenamed("_c1", "id")\
    .withColumnRenamed("_c2", "date_time")\
    .withColumnRenamed("_c3", "Query_Boolean")\
    .withColumnRenamed("_c4", "Username")\
    .withColumnRenamed("_c5", "Content")

df.show(5)
# -

# Label encoding: 4 = positive tweet, 0 = negative tweet.
positive = df.filter(df.label == 4)
negative = df.filter(df.label == 0)

# ### Number of Records

df.count()

# ### Number of Columns

len(df.columns)

# ### Statistical Summary of Response Variable

# Per-class counts of the response variable.
label_count = df.groupBy('label').count()
label_count.show()

# +
# Graph
import matplotlib.pyplot as plt

# Build the bar chart from the actual aggregated counts instead of
# hardcoded values, so the plot stays in sync with the data.  The label
# column holds string codes ('0'/'4'); map them to readable names.
label_names = {'0': 'negative', '4': 'positive'}
data = {label_names.get(str(row['label']), str(row['label'])): row['count']
        for row in label_count.collect()}
sentiment = list(data.keys())
count = list(data.values())

# Plot
plt.xlabel('Sentiment')
plt.ylabel('Number of Tweets')
plt.bar(sentiment, count)
plt.show()
# -

# ### Statistical Summary of Potential Predictor Variables

# #### Content

import pyspark.sql.functions as F

# Total Word Count: split tweet text on single spaces and count the tokens.
total_count = df.withColumn('wordCount', F.size(F.split(F.trim(F.col('Content')), ' ')))
total_count.describe('wordCount').show()

# Plot
# NOTE(review): toPandas() collects the entire DataFrame to the driver —
# assumes the full dataset fits in driver memory.
total_count.toPandas().boxplot(column=["wordCount"])
plt.show()

# Positive Word Count
positive.withColumn('wordCount', F.size(F.split(F.trim(F.col('Content')), ' '))).describe('wordCount').show()

# Negative Word Count
negative.withColumn('wordCount', F.size(F.split(F.trim(F.col('Content')), ' '))).describe('wordCount').show()

# Plot
total_count.toPandas().boxplot(column=["wordCount"],by="label")
plt.show()

# #### Date and Time

# +
# Date and Time
# Split the raw date_time string on spaces; the positional fields below
# assume a format like "Day Month DayNum Time Zone Year" — TODO confirm.
split_col = F.split(df['date_time'], ' ')
#df.withColumn('Date', F.concat(split_col.getItem(0), F.lit(' '), split_col.getItem(1), F.lit(' '), split_col.getItem(2), F.lit(' '), split_col.getItem(5)))
day = df.withColumn('Day', F.concat(split_col.getItem(0)))
month = df.withColumn('Month', F.concat(split_col.getItem(1)))
year = df.withColumn('Year', F.concat(split_col.getItem(5)))
time = df.withColumn('Time', F.concat(split_col.getItem(3)))
# -

# Day
day_counts = day.groupBy('Day').count()
day_counts.show()

pos_day_counts = day.filter(day.label == 4).groupBy('Day').count()
pos_day_counts.show()

neg_day_counts = day.filter(day.label == 0).groupBy('Day').count()
neg_day_counts.show()

# +
# Plot
plt.xlabel('Day')
plt.ylabel('Number of Tweets')
plt.bar(day_counts.toPandas()['Day'], day_counts.toPandas()['count'])
plt.show()

# Try to do stacked bar chart adding the positive and negative
# Also try to reorder the bars
# -

# Month
month_counts = month.groupBy('Month').count()
month_counts.show()

pos_month_counts = month.filter(month.label == 4).groupBy('Month').count()
pos_month_counts.show()

neg_month_counts = month.filter(month.label == 0).groupBy('Month').count()
neg_month_counts.show()

# +
# Plot
plt.xlabel('Month')
plt.ylabel('Number of Tweets')
plt.bar(month_counts.toPandas()['Month'], month_counts.toPandas()['count'])
plt.show()

# Try to do stacked bar chart adding the positive and negative
# Also try to reorder the bars
# -

# Year
year.groupBy('Year').count().show()

# +
# Time
# Hour is the "HH" prefix of the HH:MM:SS time field; zero-padded strings
# sort correctly in lexicographic order (relied on below) — TODO confirm
# the timestamps are in fact zero-padded.
split_col = F.split(time['Time'], ':')
hour = time.withColumn('Hour', F.concat(split_col.getItem(0)))
hour_count = hour.groupBy('Hour').count()
hour_count.show(24)
# -

pos_hour_counts = hour.filter(hour.label == 4).groupBy('Hour').count()
pos_hour_counts.show(24)

neg_hour_counts = hour.filter(hour.label == 0).groupBy('Hour').count()
neg_hour_counts.show(24)

# +
# Plot: tweets per hour of day, total vs. positive vs. negative.
hours = ['12 am', '1 am', '2 am', '3 am', '4 am', '5 am', '6 am', '7 am', '8 am', '9 am', '10 am', '11 am', '12 pm', '1 pm', '2 pm', '3 pm', '4 pm', '5 pm', '6 pm', '7 pm','8 pm', '9 pm', '10 pm', '11 pm']

f = plt.figure()
f.set_figwidth(15)
f.set_figheight(4)
plt.xlabel('Time')
plt.ylabel('Number of Tweets')
plt.plot(hours, hour_count.toPandas().sort_values(by='Hour')['count'], label = "Total")
plt.plot(hours, pos_hour_counts.toPandas().sort_values(by='Hour')['count'], label = "Positive")
plt.plot(hours, neg_hour_counts.toPandas().sort_values(by='Hour')['count'], label = "Negative")
plt.legend()
plt.show()
# -
Data_Summary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=[] # # `EDAhelper.column_stats` # # Obtain summary statistics of column(s) including count, mean, median, mode, Q1, Q3, # variance, standard deviation, correlation, and covariance in table format. # - # import function from EDAhelper.column_stats import column_stats import pandas as pd import numpy as np import statistics # + # load data data = pd.read_csv("https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/iris.data") columns = ['SepalLength', 'SepalWidth'] data.head() # - # ## Generate summary table, correlation matrix, and covariance matrix # # `column_stats` takes two arguments; a data set, and a list of column names as strings column_stats(data, columns)
docs/example_column_stats.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Metoda K Najbliższych sąsiadów (KNN) # # <img src="Grafika/knn.png",width="500"> # # Źródło: https://i.pinimg.com/originals/65/36/b9/6536b9a63fc427e0fc3e1a9687b49aff.png # + from sklearn.neighbors import KNeighborsClassifier # ?KNeighborsClassifier # argument - _weights_ umozliwia bardziej zlozony schemat klasyfikacji niz klasyfikacja wiekszosciowa # - # # Klasyfikator SVM - Support Vector Machines # # *W sklearn: SVC.* # *Po polsku: maszyna wektorów nośnych.* # # <img src="Grafika/svm_intro1.jpg"style="width: 200px;"> # <img src="Grafika/svm_intro2.jpg"style="width: 300px;"> # Źródło: https://www.safaribooksonline.com/library/view/python-deeper-insights/9781787128576/ch03s04.html # # ### Matematyka # # Zajmujemy się klasyfikacją binarną, ale przyjmujemy konwencję $Y\in \{-1,1\}$. # # Rozważmy przypadek, gdy dane są liniowo separowalne. 
# # $ \mathbf{w} \cdot \mathbf{x} = 0$ - równanie opisujące linię (hiperpłaszczyznę) rozdzielającą; # # $ # \begin{equation} # \left.\begin{aligned} # \mathbf{w} \cdot \mathbf{x} - b & = 1\\ # \mathbf{w} \cdot \mathbf{x} - b & = -1 # \end{aligned}\right\} \text{- równania marginesów.} # \end{equation} # $ # # Zatem SVM szuka takiej płaszczyzny (parametrów $\mathbf{w}, b$), dla której: # # $ # \begin{equation} # \begin{aligned} # \mathbf{w} \cdot x_i - b & \geq 1, \ \ \text{ gdy } \ y_i = 1, \\ # \mathbf{w} \cdot x_i - b & \leq -1 \ \ \text{ gdy } \ y_i = -1, # \end{aligned} # \end{equation} # $ # # Co można zapisać w skrócie warunkiem: # # $y_i ( \mathbf{w} \cdot x_i - b) \geq 1.$ # # A ponieważ odległość między marginesami wynosi $\frac{2}{\|\mathbf{w}\|}$, to ostatecznie uczenie klasyfikatora SVM można zdefiniować jako: # # $$ # \begin{equation} # \begin{aligned} # \text{zminimalizuj } & \ \ \ \|\mathbf{w}\|, \\ # \text{przy ograniczeniu: } & \ \ \ y_i ( \mathbf{w} \cdot x_i - b) \geq 1, \ \ \ i = 1, \ldots, n. # \end{aligned} # \end{equation} # $$ # # ## Dane nieseparowalne # # # <img src="Grafika/svm_nonlinear0.png"style="width: 500px;"> # Źródło: http://inspirehep.net/record/1265323/plots # # Definiujemy zawiasową funkcję straty (*hinge loss*): # # $\zeta _{i} = \max{(0, 1 - y_i ( \mathbf{w} \cdot x_i - b))}.$ # # Funkcja ta przyjmuje wartość $0$, gdy obserawcja $x_i$ leży po właściwej stronie hiperpłaszczyzny rozdzielającej, oraz jej wartość jest proporcjonalna do odległości do płaszczyzny w przypadku, gdy punkt leży po złej stronie. # # Uczenie klasyfikatora definiujemy jako minimalizacja funkcji: # # $$\bigg[\frac {1}{n}\sum\limits_{i=1}^{n}\max (0,1-y_{i}({\mathbf {w}}\cdot {\mathbf {x}}_{i}-b) )\bigg] + \lambda \| {\mathbf {w}}\| ^{2}.$$ # # Czyli jednocześnie maksymalizujemy odległość między marginesami (minimalizujemy $\| {\mathbf {w}}\|$) oraz minimalizujemy karę za punkty leżące po złej stronie. 
$\lambda$ - współczynnik ważący składowe opytmalizowanej funkcji. # # Uwaga: tak samo jak w regresji logistycznej w sklearn pojawia się parametr `C` - odpowiada on wartości $\frac{1}{\lambda}$. Zatem: im większe `C`, tym mniejszą wagę przywiązujemy do szerokości marginesu, a większą do położenia punktów po właściwych stronach, czyli margines będzie węższy ale lepiej dopasowany. # # <br> # **Uwaga 1**: Czy SVM da nam prawdopodobieństwo przynależenia obserwacji do klas (jak $\pi(x)$ w regresji logistycznej, czy empiryczny procent klas w liściu w drzewie decyzyjnym? # # <br> # # <br> # # Z definicji nie. Ale w praktyce się to robi. W uproszczeniu, przyjmuje się, że $P(Y=1 \mid x) = \text{sigmoid}(x) = \frac{1}{1+e^{-d(x)}}$, gdzie $d(x)$ - odległość punktu $x$ od hiperpłaszczyzny rozdzielającej. W `sklearn` musimy ustawić w konstruktorze SVC parametr `probability=True`, jeśli chcemy żeby klasyfikator wyliczył te prawdopodobieństwa. # # <br> # # <br> # # <br> # # **Uwaga 2**: Czy SVM można zastosować do klasyfikacji wieloklasowej? # # <br> # # <br> # # Tak jak w regresji logistycznej, klasyfikator jest zdefiniowany dla problemu klasyfikacji binarnej, ale w praktyce oczywiście jest na to sposób i to się robi. # # Można wykorzystać schemat *one-vs-rest*: w przypadku $K$ klas uczymy model $K$ razy do problemów binarnych - $Y = k$ vs $Y \neq k$. Ostateczna predykcja, to klasa $c$, dla której prawdopodobieństwo $Y = c$ było największe. # # Inny schemat to *one-vs-one*. Uczymy rozwiązywać problem klasyfikacji dla wszystkich kombinacji dwóch klas. Predykcja to najcześciej wskazana klasa przez poszczególne klasyfikatory. # # <br> # # <br> # # <br> # # **Uwaga 3**: Czy SVM wymaga jakiegoś przygotowania danych? # # Tak - skalowanie. # # <br> # # <br> # # <br> # # ## Kernel trick # # #### Jak można wykorzystać SVM do klasyfikacji w przypadku danych nieliniowych. # # <img src="Grafika/svm_nonlinear1.png"style="width: 400px;"> # # Rozważmy rysunek B. 
W przedstawionych danych mamy dwie zmienne: $x_1, x_2$. Czy da się zastosować tutaj klasyfikator SVM? # # <br> # # <br> # # Rozszerzmy ręcznie zbiór danych do następujących zmiennych: # # $$x_1, x_2, x_1^2, x_2^2.$$ # # Gdybyśmy do takich zmiennych zastosowali SVM na tym zbiorze, to jak mogłaby wyglądać płaszczyzna rozdzielająca? Na przykład tak: # # $$ 0 \cdot x_1 + 0 \cdot x_2 + a \cdot x_1^2 + b \cdot x_2^2 + c= 0.$$ # # Czyli $\mathbf{w} = [0, 0 , a, b]$. # # Wszystko fajnie, ale skąd mamy wiedzieć jakie przekształcenia zmiennych dodać (np. na rysunku poniżej - pomijając już nawet fakt, że w rzeczywistych danych nawet nie będziemy w stanie spojrzeć na jakikolwiek rysunek...)? Możemy dodać dla każdej zmiennej dużo różnych transformacji, ale jeśli zmiennych pierwotnie będzie dużo, to z transfromacjami będzie ich $\text{dużo}^2$... # # <img src="Grafika/svm_nonlinear2.png"style="width: 300px;"> # # # Wróćmy do uczenia klasyfikatora, czyli rozwiązywania problemu optymalizacyjnego. # # Przyjmując oznaczenie $\zeta _{i}=\max (0,1-y_{i}(\mathbf{w}\cdot x_{i}-b))$, można go alternatywnie zapisać w postaci: # # $$ # \begin{equation} # \begin{aligned} # \text{zminimalizuj } & \ \ \ \frac {1}{n}\sum\limits_{i=1}^{n} \zeta_i + \lambda \|\mathbf{w}\|^2, \\ # \text{przy ograniczeniu: } & \ \ \ y_i ( \mathbf{w} \cdot x_i - b) \geq 1 - \zeta_i \ \text{oraz} \ \zeta_i \geq 0, \ \ \ i = 1, \ldots, n. # \end{aligned} # \end{equation} # $$ # # # Okazuje się (matematyka wyższa...), że problem ten można sformułować równoważnie jako problem maksymalizacji funkcji: # # # $$f(c_1 \ldots c_n) = \sum\limits_{i=1}^n c_i - \frac{1}{2}\sum\limits_{i=1}^n \sum\limits_{j=1}^n y_i c_i (x_i\cdot x_j) y_j c_j, # $$ # # przy pewnych ograniczeniach, dla pewnych $c_1, \ldots, c_n$ (i tak się to w rzeczywistości odbywa). Widzimy zatem, że rozwiązanie zależy od iloczynów skalarnych obserwacji $x_i\cdot x_j$. # # <br> # # <br> # # Rozważmy transformacje zmiennych i iloczyny sklarne. 
Przyjmijmy, że mamy dwie zmienne oryginalne: $x_1, x_2$. Załóżmy, że transformujemy dane do $x_1^2, x_1x_2, x_2x_1, x_2^2$. Czyli mamy przekształcenie $f(x_1,x_2) = (x_1^2, x_1x_2, x_2x_1, x_2^2)$. Weźmy przykładowo dwie obserwacje $a = (a_1, a_2), b = (b_1, b_2)$. Obserwacje te w nowej przestrzeni mają postać $f(a) = (a_1^2, a_1 a_2, a_2 a_1, a_2^2), \ f(b) = (b_1^2,b_1 b_2, b_2 b_1, b_2^2)$. Przeanalizujmy iloczyn skalarny: # # $f(a) \cdot f(b) = a_1^2 \cdot b_1^2 + a_1a_2 \cdot b_1b_2 + a_2a_1 \cdot b_2b_1 + a_2^2 \cdot b_2^2 = (a_1b_1)^2 + 2a_1a_2b_1b_2 + (a_2b_2)^2$. # # Teraz rozważmy funkcję $K(z) = z^2$ i spójrzmy na wynik działania tej funkcji na iloczynie $a \cdot b$: # # $K(a \cdot b) = K(a_1 \cdot b_1 + a_2 \cdot b_2) = (a_1 \cdot b_1 + a_2 \cdot b_2)^2 = (a_1b_1)^2 + 2a_1a_2b_1b_2 + (a_2b_2)^2$. # # Wniosek? # # # <br> # # <br> # # # # $f(a) \cdot f(b) = K(a \cdot b)$ # # # <br> # # <br> # # # # Zatem, zamiast transformować zmienne, wystarczy użyć pewnego przekształcenia $K(x_i \cdot x_j)$ zamiast surowych iloczynów $x_i \cdot x_j$. # # Dzięki temu: # - koszt obliczeniowy jest dużo mniejszy. # - pozbywamy się problemu szukania transformacji - w miejsce tego testujemy kilka przekształceń $K$. # # $K$ jest zwane **jądrem** (funkcją jądrową, ang. *kernel*). # # # Jądra w sklearn: # # http://scikit-learn.org/stable/modules/svm.html#svm-kernels from sklearn.svm import SVC # ?SVC # ## Co SVM ma wspólnego z regresją logistyczną? # # ### Reguła decyzyjna w SVM: # # $ # \begin{equation} # \begin{aligned} # \hat{y_i} = & \ \ \ \ 1, \ \ \text{ gdy }\mathbf{w} \cdot x_i \geq 0, \\ # \hat{y_i} = & - 1, \ \ \text{ gdy }\mathbf{w} \cdot x_i < 0. 
# \end{aligned} # \end{equation} # $ # # # ### Reguła decyzyjna w regresji logistycznej # # $ # \begin{equation} # \begin{aligned} # \hat{y_i} = & \ \ \ \ 1, \ \ \text{ gdy }\frac{1}{1+e^{-\beta x_i}} \geq 0.5, \\ # \hat{y_i} = & - 1, \ \ \text{ gdy } \frac{1}{1+e^{-\beta x_i}} < 0.5, # \end{aligned} # \end{equation} # $ # # Jest ona równoważna regule: # # $ # \begin{equation} # \begin{aligned} # \hat{y_i} = & \ \ \ \ 1, \ \ \text{ gdy } \beta x_i \geq 0, \\ # \hat{y_i} = & - 1, \ \ \text{ gdy } \beta x_i < 0. # \end{aligned} # \end{equation} # $ # # Czyli oba klasyfikatory mają identyczną postać reguły decyzyjnej: __liniową__. # # Klasyfikatory o takiej postaci reguły decyzyjnej nazywają się klasyfikatorami liniowymi. Różnią się one sposobem wyznaczania hiperpłaszczyzny (np. prostej w przypadku 2D) rozdzielającej. # # Uwaga: pomimo tego, że są to klasyfikatory liniowe, to można objąć nimi nieliniowość (transformacje zmiennych, czy kernel trick w svm). # Zadanie: # # Skopiuj kod z : # https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#sphx-glr-auto-examples-classification-plot-classifier-comparison-py # # Zmodyfikuj go tak, aby wyswietli funkcje decyzyjne dla różnych wariantów klasyfikatora SVM z jądrem radialnym: # # a) dla gamma = 2 i wartości C: [0.0001, 0.1, 10, 10000] # # b) dla C = 0.1 i wartości gamma: [0.0001, 0.1, 10, 10000] # # Zinterpretuj wyniki. 
# +
# Decision-boundary comparison across three synthetic datasets and several
# SVM hyper-parameter settings (adapted from the scikit-learn
# "classifier comparison" example referenced in the markdown above).
# Several imports are retained from the original example even though the
# classifiers they provide are not used in this modified version.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

h = .02  # step size in the mesh

# Column titles for the plot grid; entries pair positionally with the
# classifiers list below (exercise variants: gamma fixed at 2 with varying
# C, then C fixed at 0.1 with varying gamma).
names = ["Nearest Neighbors", "Linear SVM", "Radial SVM c 0.0001", "Radial SVM c 0.1",
         "Radial SVM c 10", "Radial SVM c 10000", "Radial SVM gamma 0.0001",
         "Radial SVM gamma 0.1", "Radial SVM gamma 10", "Radial SVM gamma 10000"]

classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(kernel='rbf', gamma=2, C=0.0001),
    SVC(kernel='rbf', gamma=2, C=0.1),
    SVC(kernel='rbf', gamma=2, C=10),
    SVC(kernel='rbf', gamma=2, C=10000),
    SVC(kernel='rbf', C=0.1, gamma = 0.0001),
    SVC(kernel='rbf', C=0.1, gamma = 0.1),
    SVC(kernel='rbf', C=0.1, gamma = 10),
    SVC(kernel='rbf', C=0.1, gamma = 10000),
    ]

# Third dataset: a (nearly) linearly separable problem with added noise.
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)

datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable
            ]

# One row per dataset, one column per classifier plus one for the raw data.
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
    # preprocess dataset, split into training and test part
    X, y = ds
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=.4, random_state=42)

    # Mesh covering the data range (with a .5 margin) on which the decision
    # function is evaluated for the contour plot.
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    if ds_cnt == 0:
        ax.set_title("Input data")
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
               edgecolors='k')
    # Plot the testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
               edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1

    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        # Use the signed decision function when the estimator exposes one;
        # otherwise fall back to the positive-class probability.
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)

        # Plot the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
                   edgecolors='k')
        # Plot the testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   edgecolors='k', alpha=0.6)

        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        if ds_cnt == 0:
            ax.set_title(name)
        # Annotate each panel with the test accuracy ("0" prefix stripped,
        # e.g. ".87") in the bottom-right corner.
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1

plt.tight_layout()
plt.show()
# -

# Porównaj działania klasyfikatora SVM z domyslnymi parametrami na danych digits ze standaryzacją i bez standaryzacji.
# +
from sklearn.datasets import load_digits

digits = load_digits()
X, y = digits.data, digits.target
X.shape
# -

y

# Hold out 1000 samples (over half of digits) as the test set.
X_train, X_test , y_train, y_test = train_test_split(X,y,test_size=1000)

from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score

# Zoptymalizuj i przetestuj klasyfikator SVM na danych digits.

# +
# Two candidate pipelines: SVC with feature standardisation, and SVC alone,
# so the effect of scaling can be compared on the same grid.
pipelines = [Pipeline([
                 ("scaler", StandardScaler(with_mean=False)),
                 ("model", SVC())
             ]),
             Pipeline([
                 ("model", SVC())
             ])
            ]

# A list of dicts keeps the kernels separate: `gamma` is ignored by the
# linear kernel, so crossing "linear" with 5 gamma values (as a single dict
# would) refits the exact same linear model 5 times per C value. This grid
# covers the same set of distinct models without the redundant fits.
svc_grid = [{"model__kernel": ["linear"],
             "model__C": [0.01, 0.1, 1, 10, 100]},
            {"model__kernel": ["rbf"],
             "model__C": [0.01, 0.1, 1, 10, 100],
             "model__gamma": [0.01, 0.1, 1, 10, 100]}]

param_grids = [svc_grid, svc_grid]

# Optimise each pipeline with 10-fold CV and report held-out accuracy.
for model, grid in zip(pipelines, param_grids):
    gs = GridSearchCV(model, grid, cv=10)
    gs.fit(X_train, y_train)
    print(f' {model} accuracy - {accuracy_score(y_test, gs.predict(X_test))}')
# -

# # Lasy losowe
#
# Wyobraźmy sobie, że mamy 100 klasyfikatorów i każdy z nich potrafi przewidywać $Y$ ze skutecznością $70\%$. Pytanie: jaką skuteczność będzie miała procedura klasyfikacyjna polegająca na dokonaniu predykcji każdym klasyfikatorem, a następnie podjęcie ostatecznej decyzji demokratycznie - czyli finalna decyzja to klasa dominująca wśród predykcji tych stu klasyfikatorów?
#
# <br>
#
# <br>
#
# <br>
#
# <br>
#
#
# Jeżeli każdy klasyfikator ma skuteczność $70\%$, to średnio 70 ze 100 klasyfikatorów podejmie prawidłową decyzję. Jakie jest prawdopodobieństwo, że więcej niż 50 się pomyli?
#
# Bardzo małe... Zatem mamy doskonałą metodę klasyfikacji: nauczmy dużo modeli i klasyfikujmy demokratycznie.
#
# Zatem po co ogóle uczyć się uczenia maszynowego, skoro możemy załatwić wszystko w ten prosty sposób?
#
# <br>
#
# <br>
#
#
# Po to, bo jest jeden haczyk: **niezależność**...
#
# Tak pięknie byłoby, gdyby te wszystkie klasyfikatory były niezależne. A nie są...
Rozważmy sytuację, w które nauczyliśmy dwa modele: regresję logistyczną i SVM. Na czym polega zależność między tymi klasyfikatorami? # # 1. Oba klasyfikatory uczone są na tych samych zmiennych - zatem wykryją podobne zależności w danych, co przekłada się na podobieństwo predykcji. # # 2. Oba klasyfikatory są nauczone na tych samych obserwacjach. # # Ponadto w tym konkretnym przypadku oba klasyfikatory są to klasyfikatory liniowe, zatem podejmują decyzję na podstawie płaszczyzny rozdzielającej. Te płaszczyzny mogą być bardzo podobne... A nawet jeśli nie są bardzo podobne, to i tak predykcje obu klasyfikatorów w praktyce będą silnie skorelowane, tzn. oba klasyfikatory będą dawały bardzo podobne predykcje... # # Co możemy na to poradzić? # # - każdy klasyfikator uczyć na innych zmiennych -> w praktyce losowe podzbiory, # - każdy klasyfikator uczyć na innym podzbiorze obserwacji (w praktyce ma to mniejsze znaczenie). # # W obu przypadkach pojedyncze klasyfikatory będą słabsze (bo wykorzystują mniej informacji), ale globalnie to pomoże! # # Czy dwa powyższe zabiegi zapewniają niezależność klasyfikacji między poszczególnymi klasyfikatorami? # # Nie, bo: # - nawet jak różne klasyfikatory uczymy na różnych zmiennych, to te zmienne będą z reguły zależne. Do tego w praktyce i tak nie jesteśmy w stanie nauczyć poszczególnych modeli na rozłącznych zbiorach zmiennych, bo musielibyśmy mieć tych zmiennych bardzo dużo (no chyba, że w każdym modelu wykorzystamy bardzo mało zmiennych, ale wtedy te modele będą słabe), # - nawet jak uczymy na różnych obserwacjach, to i tak w tych obserwacjach będą ukryte te same wzorce, które będą wykrywały modele. # # # ### Lasy losowe - komitet drzew uczonych na różnych podzbiorach zmiennych i (ewentualnie) obserwacji. # # Na marginesie: ciekawostką jest, że w praktyce przyjęło się, że mówiąc "lasy losowe" mamy na myśli **jeden** klasyfikator, czyli tak naprawdę "LAS losowy". 
# # W sklearn las losowy zdefiniowany jest trochę inaczej: from sklearn.ensemble import RandomForestClassifier # ?RandomForestClassifier # W sklearnie drzewa nie są uczone na podzbiorach zmiennych. Randomizacja jest realizowana przez próbkowanie obserwacji (i ewentualnie ograniczanie przeszukiwania podziałów do losowego podzbioru zmiennych o zadanej liczbie). # # Opisaną powyżej wersję lasu losowego można otrzymać wykorzystując `BaggingClassifier` from sklearn.ensemble import BaggingClassifier # ?BaggingClassifier # ### Zadanie # # Zrealizować przy użyciu tej klasy las losowy o "podręcznikowej" definicji: # - każde drzewo uczone jest na losowym podzbiorze zmiennych (np połowie), # - każde drzewo uczone jest na losowym podzbiorze obserwacji (np. połowie), # - obserwacje losowane są bez powtórzeń. model = BaggingClassifier(max_features=0.5, max_samples=0.5, bootstrap=False) model.fit(X_train, y_train) accuracy_score(y_test, model.predict(X_test)) # ### Zadanie # # Narysuje wykres skuteczności lasu od liczby drzew na danych digits. Wyciągnij 3 wnioski. # + N = [3,5,10,15,20,25,30,40,50,75,100,200,300,500,1000] acc = [] for n in N: model = RandomForestClassifier(n) model.fit(X_train, y_train) acc.append(accuracy_score(y_test, model.predict(X_test))) plt.plot(N, acc) plt.show() # - # ### Zadanie # # Narysuj wykres skrzynkowy przedstawiający rozkład skuteczności drzew w lesie losowym (po dopasowaniu lasu losowego wyciągnij z niego każde drzewo i oceń jest na zbiorze testowym). acc = [] for tree in model.estimators_: acc.append(accuracy_score(y_test, tree.predict(X_test))) plt.boxplot(acc) plt.show() # ## Case study: Titanic # # Znaleźć najlepszy klasyfikator, który będzie przewidywał czy dana osoba przetrwa. # # (przyda się funkcja pd.get_dummies()) # # # Cel: porównać wszystkie poznane klasyfikatory. 
# # Tak jak poprzednio na wejściu przyjmujemy listę modeli/pipelinów i siatek parametrów dla każdego modelu i chcemy zaimplementować analizę zupełnie automatycznie # # Elementy zadania: # - przygotować dane # - wszystkie klasyfikatory zoptymalizować na części treningowej # - w optymalizacji svm'a nie wykonywać niepotrzebnych obliczeń (różne jądra korzystają z różnych parameterów) # - w baggingu zoptymalizować parametry drzewa # import pandas as pd data = pd.read_csv("Dane/titanic.csv",decimal=",") data.head() data.shape y = data.survived features = ["pclass", "sex", "sibsp", "parch"] X = pd.get_dummies(data[features]) X_train, X_test , y_train, y_test = train_test_split(X,y,test_size=0.33) # + model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X_train, y_train) predictions = model.predict(X_test) accuracy_score(y_test, predictions) # + pipelines = [Pipeline([ ("scaler", StandardScaler(with_mean=False)), ("model", SVC()) ]), Pipeline([ ("model", SVC()) ]), Pipeline([ ("model", KNeighborsClassifier()) ]), Pipeline([ ("model", RandomForestClassifier()) ]) ] #param_grids = [{"model__gamma": [0.01, 0.1, 1, 10], # "model__C": [0.01, 0.1, 1, 10, 100], # "model__kernel":["linear", "rbf"]}, # {"model__gamma": [0.01, 0.1, 1, 10], # "model__C": [0.01, 0.1, 1, 10], # "model__kernel":["linear", "rbf"]}, # {}, # {} # ] param_grids = [{},{},{},{}] for model, grid in zip(pipelines, param_grids): gs = GridSearchCV(model, grid, cv=10) gs.fit(X_train, y_train) print(f' {model} accuracy - {accuracy_score(y_test, gs.predict(X_test))}') # + ############################# z prowadzacym ################################## # - import pandas as pd data = pd.read_csv("Dane/titanic.csv",decimal=",") data.head() data.fare.fillna(data.fare.mean(), inplace=True) data["cabin"] = data.cabin.str[0].fillna("X").replace({"T":"G"}) data.embarked.fillna(data.embarked.mode()[0], inplace=True) data["sex"] = data.sex.replace({"male":1, "female":0}) X = data.drop(["name", 
"ticket","home.dest", "survived"], axis=1) y = data.survived X X_train, X_test , y_train, y_test = train_test_split(X,y,test_size=0.3) # # Omówienie projektów
Machine Learning/ML2_materialy_do_zajec.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# # 6.6 基尼不纯度
#
# 默认使用基尼不纯度进行测量,不过可以将超参数`criterion`设置为`entropy`来选择使用熵的形式进行测量。
#
# 公式6-3
#
# $$
# H_i = - \sum_{k=1}^n p_{i, k} \log_2(p_{i, k}) \tag{6-3}
# $$
#
# 其中$p_{i,k} \not= 0$

# 图6-1中的深度为2的左侧的节点的熵的计算如下:
#
# ![图6-1](./6-1_files/iris_tree.png)

# $$- \frac{49}{54} \log_2( \frac{49}{54}) - \frac{5}{54} \log_2( \frac{5}{54}) \approx 0.445$$

# 其实,大多数情况下,基尼不纯度和熵并没有什么大的不同,产生的树都很相似。基尼不纯度的计算速度略微快一些(**这是由于基尼不纯度无需计算log2**),所以它是个不错的默认选择。它们的不同在于,基尼不纯度倾向于从树枝中分裂出最常见的类别,而熵则倾向于生成更平衡的树。
#
# 关于二者的比较可以参考: https://sebastianraschka.com/faq/docs/decision-tree-binary.html
docs/source/chapter6/6-6_geni_pure.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''base'': conda)'
#     language: python
#     name: python3
# ---

# +
"""
You will receive a string from the keyboard. You will need to print a tuple that contains all the letters of that string.
example:
You will receive: 'cmi'
You will print: ('c', 'm', 'i')
"""

# Read one line from the keyboard; iterating over a string yields its
# characters one by one, so collecting them into a tuple gives exactly
# the ('c', 'm', 'i')-style output asked for above.
text = input()
letters = tuple(ch for ch in text)
print(letters)
# -
Python Random old, new challenges/Section 1/ex14.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Lecture 10 # # ## Differential Equations II: # # ### From Words to Maths # + slideshow={"slide_type": "skip"} import numpy as np import sympy as sp import scipy.integrate sp.init_printing() ################################################## ##### Matplotlib boilerplate for consistency ##### ################################################## from ipywidgets import interact from ipywidgets import FloatSlider from matplotlib import pyplot as plt # %matplotlib inline from IPython.display import set_matplotlib_formats set_matplotlib_formats('svg') global_fig_width = 10 global_fig_height = global_fig_width / 1.61803399 font_size = 12 plt.rcParams['axes.axisbelow'] = True plt.rcParams['axes.edgecolor'] = '0.8' plt.rcParams['axes.grid'] = True plt.rcParams['axes.labelpad'] = 8 plt.rcParams['axes.linewidth'] = 2 plt.rcParams['axes.titlepad'] = 16.0 plt.rcParams['axes.titlesize'] = font_size * 1.4 plt.rcParams['figure.figsize'] = (global_fig_width, global_fig_height) plt.rcParams['font.sans-serif'] = ['Computer Modern Sans Serif', 'DejaVu Sans', 'sans-serif'] plt.rcParams['font.size'] = font_size plt.rcParams['grid.color'] = '0.8' plt.rcParams['grid.linestyle'] = 'dashed' plt.rcParams['grid.linewidth'] = 2 plt.rcParams['lines.dash_capstyle'] = 'round' plt.rcParams['lines.dashed_pattern'] = [1, 4] plt.rcParams['xtick.labelsize'] = font_size plt.rcParams['xtick.major.pad'] = 4 plt.rcParams['xtick.major.size'] = 0 plt.rcParams['ytick.labelsize'] = font_size plt.rcParams['ytick.major.pad'] = 4 plt.rcParams['ytick.major.size'] = 0 ################################################## # + [markdown] slideshow={"slide_type": "slide"} # ## Example of forming a differential equation from words: # # The length $\;y\;$ 
cm of a leaf during the period of its growth is proportional to the amount of water it contains. # During this period the leaf retains a similar shape: that is, the ratio of its length to its width remains constant. # The leaf absorbs water from its plant at a rate proportional to $\;y\;$ and it loses water by evaporation at a rate proportional to the area # of the leaf at the time when its length is $\;y\;$ cm. # # - Form a differential equation to describe the growth of the leaf. # + [markdown] slideshow={"slide_type": "fragment"} # 1. Assume the length of the leaf is $\;y\;$ cm at time $\;t\;$ days after it was first observed. # + [markdown] slideshow={"slide_type": "fragment"} # 2. Let the rate the leaf receives water be $\;k_1y\;$ where $\;k_1\;$ is a positive constant. # + [markdown] slideshow={"slide_type": "fragment"} # 3. Area of leaf at time $\;t\;$ days is proportional to $\;y^2\;$ since it maintains its shape, so the leaf is losing water at a rate of $\;k_2y^2\;$ where $\;k_2\;$ is another positive constant. # + [markdown] slideshow={"slide_type": "slide"} # ## Example of forming a differential equation from words: # # 1. Assume the length of the leaf is $\;y\;$ cm at time $\;t\;$ days after it was first observed. # 2. Let the rate the leaf receives water be $\;k_1y\;$ where $\;k_1\;$ is a positive constant. # 3. Area of leaf at time $\;t\;$ days is proportional to $\;y^2\;$ since it maintains its shape, so the leaf is losing water at a rate of $\;k_2y^2\;$ where $\;k_2\;$ is another positive constant. # # + [markdown] slideshow={"slide_type": "fragment"} # 4. Rate of growth is given by $\displaystyle{ {\rm d}y\over {\rm d}t}$, which is the rate of change of its length. 
# + [markdown] slideshow={"slide_type": "fragment"} # # $${{\rm d}y\over {\rm d}t}=k_1y-k_2y^2=y(k_1-k_2y)$$ # + slideshow={"slide_type": "slide"} def solve(k1,k2): x = np.linspace(0,10,100) def dydt(y,t): return k1*y - k2*y**2 y = scipy.integrate.odeint(dydt,0.1,x) plt.plot(x,y) plt.xlabel('x') plt.ylabel('y') interact(solve, k1 = FloatSlider(value=1,min=0.01,max=2.0,step=0.01, continuous_update=False), k2 = FloatSlider(value=1,min=0.01,max=2.0,step=0.01, continuous_update=False)) # + [markdown] slideshow={"slide_type": "slide"} # ## Example 2: Solid tumour growth. # # An avascular tumour tends to grow in a spherical shape, depending on environmental restrictions. # # Assuming that the growth rate of the tumour depends only on the availability of oxygen and nutrients, which enter the tumour solely by diffusing through its surface, derive a model for the change in tumour volume over time. # + [markdown] slideshow={"slide_type": "fragment"} # 1. Denote the volume of the tumour at time $\;t\;$ by $\;V(t)\;$, and its radius by $\;R(t)$. # # + [markdown] slideshow={"slide_type": "fragment"} # 2. Since the tumour is a sphere, its volume $\;V\;$ is proportional to $\;R^3\;$ and its surface area to $\;R^2\;$, so the surface area is proportional to $\;V^{2/3}\;$. # # + [markdown] slideshow={"slide_type": "fragment"} # 3. The rate at which the tumour acquires nutrients, and hence the rate at which the volume increases, is thus proportional to $\;V^{2/3}$. # # + [markdown] slideshow={"slide_type": "slide"} # 1. Denote the volume of the tumour at time $\;t\;$ by $\;V(t)\;$, and its radius by $\;R(t)$. # 2. Since the tumour is a sphere, its volume $\;V\;$ is proportional to $\;R^3\;$ and its surface area to $\;R^2\;$, so the surface area is proportional to $\;V^{2/3}\;$. # 3. The rate at which the tumour acquires nutrients, and hence the rate at which the volume increases, is thus proportional to $\;V^{2/3}$. 
# # + [markdown] slideshow={"slide_type": "fragment"} # This gives us the equation: # # $$\frac{{\rm d}V}{{\rm d}t} = kV^{2/3}$$ # + [markdown] slideshow={"slide_type": "slide"} # $$\frac{{\rm d}V}{{\rm d}t} = kV^{2/3}$$ # # Solve by separation of variables: # # $$\int V^{-2/3}~{\rm d}V = \int k~{\rm d}t$$ # # $$V = \left({kt+c\over 3}\right)^3$$ # # where $c$ is a constant of integration, the value of which will depend upon the initial conditions. # + slideshow={"slide_type": "slide"} def solve(k,c): t = np.linspace(0,10,100) v = ((k*t + c)/3)**3 plt.plot(t,v) plt.xlabel('t') plt.ylabel('V') interact(solve, k = FloatSlider(value=1,min=0.01,max=2.0,step=0.01,continuous_update=False), c = FloatSlider(value=10,min=0.01,max=20.0,step=0.01,continuous_update=False)) # + [markdown] slideshow={"slide_type": "slide"} # ## Second Order Differential Equations # # # **Example: Solve** $\quad \displaystyle \frac{{\rm d}^2y}{{\rm d}x^2} = \left(\frac{{\rm d}y}{{\rm d}x}\right)^2$ # # + [markdown] slideshow={"slide_type": "fragment"} # We will use the substitution $\displaystyle z = \frac{{\rm d}y}{{\rm d}x}$. # This implies that $\displaystyle \frac{{\rm d}z}{{\rm d}x} = \frac{{\rm d}^2y}{{\rm d}x^2}$. # # + [markdown] slideshow={"slide_type": "fragment"} # Substituting into the original equation, to eliminate $y$, gives # # $$\frac{{\rm d}z}{{\rm d}x} = z^2$$ # + [markdown] slideshow={"slide_type": "slide"} # This is a separable equation: # # $$\int z^{-2}~{\rm d}z = \int ~{\rm d}x \qquad\Rightarrow\qquad # -{1\over z} = x + A \qquad\Rightarrow\qquad # z = -{1\over x+A}$$ # # where $A$ is an arbitrary constant. 
# # + [markdown] slideshow={"slide_type": "fragment"} # # We can then replace $z$ using the original substitution to obtain a first order differential equation: # $$\frac{{\rm d}y}{{\rm d}x} = -{1\over x+A} ~~~~~~~~~~~~{\rm Equation~(9)}$$ # # + [markdown] slideshow={"slide_type": "fragment"} # This equation can be solved with a standard integral: # # $$y = -\ln(x+A) + B$$ # # (how do we determine $A$ and $B$?) # + [markdown] slideshow={"slide_type": "slide"} # Example # # 1. At $\;x=0,\;$ $\;\displaystyle \frac{{\rm d}y}{{\rm d}x} = -1\;$ and $\;y=0\;$. # # + [markdown] slideshow={"slide_type": "fragment"} # # We substitute the first condition into $\frac{{\rm d}y}{{\rm d}x} = -{1\over x+A}$ to obtain $\;A=1\;$. # + [markdown] slideshow={"slide_type": "fragment"} # Then substitute $A$ and the second condition into the eventual solution # to find $\;B=0$. # + [markdown] slideshow={"slide_type": "fragment"} # 2. $\;y(0)=0\;$ and $\;y(e-1)=-1$. # + [markdown] slideshow={"slide_type": "fragment"} # This time both conditions can be substituted into the solution: # # $$y(0)=0 \Rightarrow 0=B-\ln(A) \Rightarrow B=\ln(A)$$ # # $$y(e-1)=-1 \Rightarrow -1=\ln(A)-\ln{e-1+A} \Rightarrow A=1$$ # + [markdown] slideshow={"slide_type": "slide"} # ## More tricks to do integration # # Let's return to and solve our leaf example: # # $${{\rm d}y\over {\rm d}t}=k_1y-k_2y^2=y(k_1-k_2y)$$ # # + [markdown] slideshow={"slide_type": "fragment"} # This is a separable differential equation: $$\;\displaystyle\int {{\rm d}y\over # y(k_1-k_2y)} =\int {\rm d}t.\;$$ # + [markdown] slideshow={"slide_type": "fragment"} # We can split apart the integral on the RHS using **partial fractions** in SymPy. 
# + [markdown] slideshow={"slide_type": "slide"} # We want $\qquad\displaystyle{1\over y(k_1-k_2y)}={A\over y}+{B\over (k_1-k_2y)}$ # + slideshow={"slide_type": "-"} y, k1, k2 = sp.symbols('y k_1 k_2') sp.apart(1 / (y*(k1 - k2*y)),y) # + [markdown] slideshow={"slide_type": "-"} # So $A={1\over k_1}$ and $B={k_2\over k_1}$. # + [markdown] slideshow={"slide_type": "slide"} # $${\rm~Thus,~~~~}\int {{\rm d}y\over y(k_1-k_2y)}=\int {{\rm d}y\over k_1y}+ # \int {k_2\over k_1(k_1-k_2y)}{\rm d}y =\int dt$$ # # + [markdown] slideshow={"slide_type": "fragment"} # $${\rm~Thus~~~~~~ }{1\over k_1}\ln y-{k_2\over k_1k_2}\ln(k_1-k_2y)=t + c$$ # # + [markdown] slideshow={"slide_type": "fragment"} # We can get $c$ from initial conditions. # Try doing the algebraic manipulation of this to make $y$ the subject of the equation: # # $$y={k_1\exp (k_1t+d)\over 1+ k_2 \exp(k_1t + d)}$$ # # where $d$ is a constant.
lectures/lecture-10-differential-eq2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.6 64-bit # name: python3 # --- # # Data structures in python # --- # ## List # # #### Lists are orderderd, mutable, allow duplicates and indexed # Example list sample_list = [12, "hello", ("four", 7), {"name": "peter", "age": 28}] print(sample_list) # ## Accessing data # By Index print("\nBy index 0:", sample_list[0]) print("By negative index -1:", sample_list[-1]) # By Slice print("By slice:", sample_list[2:3]) print("By slice:", sample_list[:3]) # Slice, but reverse step print("By slice, reversed:\n", sample_list[::-1]) # By Iteration for item in sample_list: print(item) print() # By comprehension - returns a new list # All items print("By list comprehension, all items:", [item for item in sample_list]) # int larger than 10 print("By list comprehension, selective:", [number for number in sample_list if type(number) is int and number > 10]) # ## Inserting Data # # Example list sample_list = [] # Append(object) sample_list.append("List Things") sample_list.append(12) print(sample_list) # Insert(index, object) sample_list.insert(1, "entry") sample_list.insert(len(sample_list), "last!") print(sample_list) # Extend(iterable) sample_tuple = ("sample", "data") sample_list.extend(sample_tuple) sample_list.extend({"a":"this", "b":"too"}.values()) print(sample_list) # ## Altering Data # Example list sample_list = ["String", 12, {"key": 23}, [1, 2, 3]] print(sample_list) # By Index sample_list[1] = 20 print("Altered index 1:",sample_list) sample_list[2]["key"] = 99 print("Altered value of index 2 with dict key 'key':", sample_list) sample_list[3][1] = 4 print("Altered value of index 1 within index 3 ", sample_list) # By range sample_list[1:4] = ["smol", "entries", "added"] print("Altered values from index 2 to 3:", sample_list) # By iteration for index in range(len(sample_list)): 
sample_list[index] = sample_list[index].upper() print(sample_list) # By comprehension - returns a new list print([item.lower() if len(item) > 4 else item for item in sample_list]) print([item.title() for item in sample_list]) # ## Removing Entries sample_list = [x*x for x in range (5)] print(sample_list) # + # remove(object) sample_list.insert(2, 4) print("sample_list with double entry", sample_list) while 4 in sample_list: sample_list.remove(4) print("sample_lint with 4 removed", sample_list) # sample_list.remove(4) # Will give error due to item not in list # + # pop(index) sample_list.pop(2) print("Entry removed at index 3:", sample_list) sample_list.pop() print("Last entry removed:", sample_list) # - # del list[index] del sample_list[0] print("sample_list now:", sample_list) # only 1 entry left after this # + # clear() sample_list.clear() print(sample_list) # - # ### Misc list methods # + sample_list = [(1, 2), (2, 2), (3, 2), (2, 1), (2, 2), (1, 5), (10, 4), (10, 1), (3, 1)] print("sample list:", sample_list) # reverse() sample_list.reverse() print("reversed sample_list", sample_list) # Index print("Index of irst instance of (2,2)", sample_list.index((2,2))) # Count print("Count instances of (2,2)", sample_list.count((2,2))) # Sort sample_list.sort() print("sorted sample_list", sample_list) sample_list.sort(reverse=True) print("inverted, sorted sample_list", sample_list) sample_list.sort(key=lambda x: (x[1], x[0])) print("sorted sample_list", sample_list) # - # --- # ## Tuple # #### Tuples are orderderd, immurable, allow duplicates and indexed # + # Declaration sample_tuple = ("this", 2, "is a", ["tuple"]) print("sample_tuple:", sample_tuple) # Retreive by index print("By index:", sample_tuple[2]) print("By index:", sample_tuple[-1]) # Retreive by range print("By range:", sample_tuple[0:2]) # Retreive by Iteration print("By iteration:") for item in sample_tuple: print(item) # Since its immutable we cant update, or alter, we must make a new tuple temp_list = 
list(sample_tuple) temp_list[0] = "that" sample_tuple = tuple(temp_list) print("New, changed tuple", sample_tuple) # - # ### Unpacking # + a, b, c, d = sample_tuple print("a:", a) print("b:", b) print("c:", c) print("d:", d[0]) # - # ## Set # #### Sets are *un*orderderd, mutable (but unchangeable), do not allow duplicates and *un*indexed # + sample_set = {"this", "that", "this", "the other", 1, 3, 2, 6, 5} # Accessing the values # Print print("sample_set:", sample_set) # random order # Iteration for item in sample_set: print(item) # Checking of contents print("is 'this' in sample_set?:", "this" in sample_set) # + # Altering of contents is not possible # But we can add new items sample_set.add("peter") print("sample_set with 'peter' added:", sample_set) # Update(iterable) sample_set.update(["thin", "thick"]) print("sample_set updated with a list of str:", sample_set) # + # Removal of elements # Remove(element) sample_set.remove("peter") print("sample_set with peter removed", sample_set) # sample_set.remove("peter") will throw an error if executed again # discard(element) sample_set.discard("peter") # will not throw error, but will not change either sample_set.discard("thin") print("Current sample_set:", sample_set) # pop() sample_set.pop() print("sample_set after pop:", sample_set) # clear() sample_set.clear() print("Cleared sample_set:", sample_set) # del del sample_set #print(sample_set) # throws error since sample_set is not defined # - # ### Important Set methods # + sample_set_1 = {"this", "that", "other"} sample_set_2 = {"those", "this", "these"} print("set1 and 2:", sample_set_1, sample_set_2) # Intersection(set) Returns a set, that contains only values present in both sets print("Intersection:", sample_set_1.intersection(sample_set_2)) # setA.difference(setB) Returns a set, that only contains entries not in common from set A print("Difference:", sample_set_1.difference(sample_set_2)) # Symmetric_difference(set) Returns a set, that contains only values not in 
common with either set print("Symmetric_difference:", sample_set_1.symmetric_difference(sample_set_2)) # Union(set) Returns a set with values from both sets, no duplicates print("Union:", sample_set_1.union(sample_set_2)) # - # ## Dictionary # #### Dicts are orderderd (since 3.7), mutable, allow duplicates and *un*indexed # Definition sample_dict = {"name": "Golf", "size": 2, "is_fast": True, "type": "electric"} print(sample_dict) # + # Accessing keys / values and altering values # Key print("key -> name:", sample_dict["name"]) # Get print("key -> size:", sample_dict.get("size")) # Keys print("\nKeys:") for key in sample_dict.keys(): print(sample_dict[key]) # Values print("\nValues:") for value in sample_dict.values(): print(value) # + # Altering values # By key sample_dict["type"] = "gasoline" print("key -> type:", sample_dict["type"]) # By update() sample_dict.update({"type": "flying"}) print("dict:", sample_dict) # + # Adding key / values # By key sample_dict["color"] = "red" print("key -> color:", sample_dict["color"]) # By update() sample_dict.update({"engine": "V-8"}) print("dict:", sample_dict) # + # Removing key / values # pop(key) sample_dict.pop("color") print("dict -color :", sample_dict) # popitem() removes last item sample_dict.popitem() print("dict :", sample_dict) # del dict[key] del sample_dict["is_fast"] print("dict - is_fast :", sample_dict) # del dict del sample_dict try: print(sample_dict) # Will throw exception because it doesnt exit except Exception: print("Exception was thrown.") pass # - # ## List, Tuple, Set and Dict comprehension & examples # ### List: # + # List comprehension list1 = ["this", "list", "is", "too", "lowercase"] # Regular upper_list = [x.upper() for x in list1] print("upper_list :", upper_list) # Conditional kinda_lower = [x.lower() if len(x) > 3 else x.title() for x in upper_list] print("kinda_lower :", kinda_lower) # Filter for short and double short_words = [x*2 for x in kinda_lower if len(x) <= 3] print("short_words :", 
short_words) # From any type new_list = [x for x in {"name": "Golf", "size": 2, "is_fast": True, "type": "electric"}.values()] print("new_list :", new_list) new_list = [x for x in (1, 4, 5, 2)] print("new_list :", new_list) new_list = [x for x in {"this", "that", 1}] print("new_list :", new_list) # - # ### Tuple: # + # Tuple comprehension # Whoops! there is no native tupple comprehension, but we can use a generator expression to mimic it instead! comprehensive_tuple = tuple(x*x for x in range(1,11) if x*x != 9) print("generatade tuple :", comprehensive_tuple) smaller_tuple = tuple(x for x in comprehensive_tuple if x < 50) print("same but smaller values:", smaller_tuple) # Another alternative is: another_tuple = tuple([x*x for x in range(0,11)]) print("alternative generation :", another_tuple) # - # ### Set: # + # Set comprehension cool_set = {x*x for x in range(0, 1000) if (x*x)%1000 == 0} print("cool_set :", cool_set) another_one = {x.lower() for x in ["COol", "sEt", "BRo"]} print("another_one:", another_one) # - # ### Dict: # + # Dict comprehension keys_list = ["name", "age", "sex"] value_list = ["Peter", "12", "Other"] student_dict = {key:value for (key, value) in zip(keys_list, value_list)} print("student dict :", student_dict) cubed = {x: x**3 for x in range(1, 11)} print("cubed numbers 1-10:", cubed) # - # ## Functions # ### How to declare them # + # Declaring a function def plus_ten(number): return number + 10 def numbers_plus_ten(*numbers): number_list = [] for number in numbers: number_list.append(number + 10) return number_list def rent_calculation_kr(**keys): # base * (1 + rent)^years x = keys["base"] y = (1 + keys["rent"]) z = pow(y,keys["years"]) return str(round(x*z)) + ",00 DKK" # - # ### How to call a function # + # Calling functions print(plus_ten(5)) print(numbers_plus_ten(15, 50, 10)) print(rent_calculation_kr(base=1000, rent=0.10, years=10))
exam_things/1data_structures_and_functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Analysis with PcapInspect of .pcap file showing slow sending of BGP updates

# This notebook was developed interactively in [Jupyter](https://jupyter.org/).
#
# Unfortunately, the .pcap file processed in this example can't be shared externally. In this investigation, an observation was made that after software upgrade of an Arista switch, some other peers started taking a very long time to deliver BGP Update messages.
#
# Of course, the rate at which the peer sent routes was mostly the responsibility of the peer's BGP implementation. However, it was possible that the Arista device wasn’t processing fast enough and consequently sending the peer a reduced TCP window size, which _would_ slow it down.
#
# Using PcapInspect, we generate a bunch of statistics and data extracted from the .pcap, but the most interesting view is that provided by the plots shown near the end.

# +
# Grab output from matplotlib and display the graphics in Jupyter. For other options, see
# https://medium.com/@1522933668924/using-matplotlib-in-jupyter-notebooks-comparing-methods-and-some-tips-python-c38e85b40ba1
# %matplotlib inline

import sys
sys.path.append("../..") # Hack to get PcapInspect and other modules
import PcapInspect
# -

# Parse the capture; `stopAnalysisTime=300` presumably limits analysis to the
# first 300 seconds of traffic — TODO confirm against PcapInspect docs.
pcapInspect = PcapInspect.PcapInspect('SlowBgpUpdates.pcap', stopAnalysisTime=300)

# 10.0.0.100 is the BGP peer sending the updates; findEor looks like it
# locates the End-of-RIB marker in the BGP stream — verify in BgpPlugin.
pcapInspect.plugins[ 'BgpPlugin' ].findEor('10.0.0.100', 'Peer')

# Timing analysis for each side of the session: the peer (10.0.0.100)
# and the Arista device under suspicion (10.0.0.101).
pcapInspect.doDeltaAnalysis('10.0.0.100', 'Peer')

pcapInspect.doDeltaAnalysis( '10.0.0.101', 'Arista' )

# TCP window-size evolution for the Arista side, and frame/byte counts for
# the peer side — both consumed by the plots at the end of the notebook.
winSizePlotData = pcapInspect.doWindowSizeAnalysis( '10.0.0.101', 'Arista' )

frameAndBytePlotData = pcapInspect.doFrameAndByteCount( '10.0.0.100', 'Peer' )

# # The issue appears to be with the peer
# As can be seen by comparing the 2 graphics below, after a while, the Arista device settles down to a fairly steady window size of 16k. There are some brief dips associated with out-of-order arrival of TCP packets, but it's generally steady. In spite of this, there is a ~100 second gap where the peer appears to be transmitting very little data. This kind of insight is very difficult to get looking at individual frames (after filtering, the PCAP contained about 1500 frames).
#
#

# Plot the Arista-side TCP window size over time as a line chart.
plotter = PcapInspect.Plotter()
plotter.plot( winSizePlotData, 'lines' )

# Plot the peer's transmitted byte counts as a box chart; the output image
# name is derived from the capture's directory and base file name so it is
# written alongside the other artifacts generated from this .pcap.
imageNameTemplate = pcapInspect.directory + 'Sender' +'_%s_count_' + pcapInspect.baseFilename + '.png'
plotData = frameAndBytePlotData[ 'byte' ]
plotData[ 'imageName' ] = imageNameTemplate % 'byte'
plotter.plot( plotData, 'boxes' )
examples/SlowBgpUpdates/ExampleAnalysisOfSlowBgpUpdatesUsingJupyter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import sys import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import keras.backend as K from keras.callbacks import LambdaCallback, ModelCheckpoint from keras.optimizers import RMSprop from keras.utils import plot_model # Import Custom Module module_path = os.path.abspath(os.path.join('../..')) if module_path not in sys.path: sys.path.append(module_path) # - K.tensorflow_backend._get_available_gpus() # *** # + from src.utils.config import PATH_TO_DATA, GUIS_CODE from src.utils.image import get_preprocessed_img from src.utils.sequence import tokenize_dsl_code from src.utils.sets import (init_sketch_id_code_map, load_vocabulary, init_word2idx, init_idx2word, data_generator) from src.nn import SketchEncoder, SequenceDecoder, NeuralSketchCoding, NSCInference # - # *** demo_sketch_id = '00150311-A7AE-4804-8B7D-9273687B4FC0' # ### Init Sketch to DSL Code map sketch_id_code_map = init_sketch_id_code_map(GUIS_CODE) print(sketch_id_code_map[demo_sketch_id]) # ### Load / Show the Sketch image = get_preprocessed_img(PATH_TO_DATA+demo_sketch_id+'.png') plt.imshow(image); # *** # ## Create a Vocabulary vocabulary = load_vocabulary('../../vocabulary.txt') len(vocabulary) # ### Word2Idx, Idx2Word maps word2idx = init_word2idx(vocabulary) idx2word = init_idx2word(vocabulary) word2idx # *** # ## Max DSL Code Length print('Total number of DSL Codes: {}'.format(len(sketch_id_code_map.values()))) # + longets_code = max( sketch_id_code_map.values(), key=lambda x: len(tokenize_dsl_code(x))) print('Longes code:') print('=' * 30) print() print(longets_code) # + max_code_length = len(tokenize_dsl_code(longets_code)) + 2 print('Max DSL Code Length: {}'.format(max_code_length)) # - # *** # ## Train/Dev/Test split len(sketch_id_code_map) 
# Hold-out fractions for the train/dev/test split.
SPLIT = (.8, .1, .1)

# +
items = list(sketch_id_code_map.items())
total_num = len(items)

# Cumulative boundaries; per-fraction int() truncation may drop a few
# trailing items, hence the exact-coverage assert below.
borders = (
    int(total_num * SPLIT[0]),
    int(total_num * SPLIT[0]) + int(total_num * SPLIT[1]),
    int(total_num * SPLIT[0]) + int(total_num * SPLIT[1]) + int(
        total_num * SPLIT[2]),
)

train_sketch_id_code_map = dict(items[0:borders[0]])
dev_sketch_id_code_map = dict(items[borders[0]:borders[1]])
test_sketch_id_code_map = dict(items[borders[1]:borders[2]])
# -

assert len(train_sketch_id_code_map) + len(dev_sketch_id_code_map) + len(
    test_sketch_id_code_map) == len(sketch_id_code_map)

print('Train Set size: {}'.format(len(train_sketch_id_code_map)))
print('Dev Set size: {}'.format(len(dev_sketch_id_code_map)))
print('Test Set size: {}'.format(len(test_sketch_id_code_map)))

# ***

EMBEDDING_DIM = 128
NUM_HIDDEN_NEURONS = [256, 256]

# # Sketch Encoder (ConvNet)
sketch_encoder = SketchEncoder(EMBEDDING_DIM).build_model()

# +
plot_model(sketch_encoder.model, to_file='./plots/sketch_encoder.png',
           show_shapes=True, show_layer_names=True)

print(sketch_encoder.model.summary())
# -

# ***

# # Sequence Decoder (LSTM)
# <br>
# A LSTM network
#
# ![LSTM_net](../../img/LSTM.png)

sequence_decoder = SequenceDecoder(max_code_length, EMBEDDING_DIM,
                                   len(vocabulary), NUM_HIDDEN_NEURONS).build_model()

# +
plot_model(sequence_decoder.model, to_file='./plots/sequence_decoder.png',
           show_shapes=True, show_layer_names=True)

print(sequence_decoder.model.summary())
# -

# ***

# # Model
#
# ![NSC_model](../../img/NSC.png)

neural_sketch_coding = NeuralSketchCoding(
    EMBEDDING_DIM, max_code_length, len(vocabulary), NUM_HIDDEN_NEURONS).build_model()

# +
plot_model(neural_sketch_coding.model, to_file='./plots/neural_sketch_coding.png',
           show_shapes=True, show_layer_names=True)

print(neural_sketch_coding.model.summary())
# -

# ***

# # Inference
#
# ![LSTM_sampling](../../img/Sampling.png)

inference = NSCInference(neural_sketch_coding, word2idx)

inference.greedy_search(
    get_preprocessed_img(PATH_TO_DATA+demo_sketch_id+'.png')
)

# ***

# ## Run Train

# ### Callbacks

# +
filepath="weights/nsc-weights.hdf5"
# NOTE(review): checkpoints are written to 'weights/...' but the compile cell
# below loads from '../weights/...' — confirm which directory is intended.
checkpoint = ModelCheckpoint(filepath, verbose=True)

def on_epoch_end(epoch, logs):
    """Every 5th epoch, greedy-sample a DSL code for a random dev sketch."""
    if epoch % 5 == 0:
        print()
        print('Sampled:')
        print('='*30)
        # BUGFIX: the original called the undefined name
        # `greedy_search_inference`; the sampler built above is
        # `inference.greedy_search`.
        print(inference.greedy_search(get_preprocessed_img(
            PATH_TO_DATA+np.random.choice(list(dev_sketch_id_code_map.keys()))+'.png'
        )))
        print()
        print()

lambda_cb = LambdaCallback(on_epoch_end=on_epoch_end)

callbacks = [checkpoint, lambda_cb]

# +
neural_sketch_coding.model.load_weights('../weights/nsc-weights.hdf5')

neural_sketch_coding.model.compile(RMSprop(0.0001), loss='categorical_crossentropy')

# +
batch_size = 64
steps = len(train_sketch_id_code_map) // batch_size

neural_sketch_coding.model.fit_generator(
    data_generator(train_sketch_id_code_map, word2idx,
                   batch_size, max_code_length, len(vocabulary)),
    steps_per_epoch=steps,
    epochs=100,
    verbose=True,
    callbacks=callbacks,
)
# -

# ***

# ## Dev

# +
from nltk.translate import bleu_score

inference = NSCInference(neural_sketch_coding, word2idx)
# -

def evaluate(path_to_img=None, reference=None):
    """Greedy-decode a sketch and print hypothesis, reference and BLEU.

    path_to_img: optional sketch path; if None, a random dev sketch is used
        and its ground-truth code becomes the reference.
    reference: single-element list with the reference DSL code; required
        when `path_to_img` is given.
    """
    if path_to_img is None:
        sketch_id = np.random.choice(list(dev_sketch_id_code_map.keys()))
        sketch = get_preprocessed_img(PATH_TO_DATA+sketch_id+'.png')
        reference = [dev_sketch_id_code_map[sketch_id]]
    else:
        sketch = get_preprocessed_img(path_to_img)
    hypothesis = inference.greedy_search(sketch)
    print('Greedy Search Hypothesis:')
    print('='*30)
    print()
    print(hypothesis)
    print(end='\n\n')
    print('Reference:')
    print('='*30)
    print()
    print(reference[0])
    print(end='\n\n')
    print('BLEU score: {}'.format(
        bleu_score.sentence_bleu(reference, hypothesis)
    ))
    plt.imshow(sketch);

evaluate()

# ***

# # Compilation

from src.compilers import web

# +
sketch = get_preprocessed_img('./test.png')
dsl_code = inference.greedy_search(sketch)

with open('./dsl_code.gui', 'w') as f:
    f.write(dsl_code)
# -

plt.imshow(sketch);

web_compiler = web.Compiler(dsl_mapping_file_path='../compilers/web/dsl_mapping.json')

web_compiler.compile('./dsl_code.gui', './out.html')
src/notebooks/Neural Sketch Coding (NSC).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # .............................................MNIST IN KERAS......................................................

# ### Load Data from Keras and split into Training and Testing set.

# +
from keras.models import Sequential
from keras.layers import Dense,Flatten
from keras.datasets import mnist
# BUGFIX: pyplot is used below for the digit previews but was never imported
# in this notebook (NameError on a fresh kernel).
from matplotlib import pyplot as plt

# number of classes is 10 as the output is 0-9 digits
nb_classes = 10

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("X_train original shape", X_train.shape)
print("y_train original shape", y_train.shape)
# -

# Preview the first nine digits with their class labels.
for i in range(9):
    plt.subplot(3,3,i+1)
    plt.imshow(X_train[i], cmap='gray', interpolation='none')
    plt.title("Class {}".format(y_train[i]))
plt.show()

# ### Reshape the Input data (X_Train,X_test) as vectors with shape (60000,784) So input node neurons is 784

X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
# Scale pixel intensities from [0, 255] to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print("Training matrix shape", X_train.shape)
print("Testing matrix shape", X_test.shape)

# ### We have to convert the output (Target) data into one hot vectors
# ### Digit 4 becomes 0000100000

# +
from keras.layers.core import Dense, Dropout, Activation
from keras.utils import np_utils

Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print(Y_train.shape)
print(Y_test.shape)
# -

# ### Our Neural Network Architecture with one input layer with 784 nodes, two hidden layers with 512 nodes each and 1 output layer with 10 nodes each node giving probability of a digit being that number.
# <img src="mnistnn.png">
# #### Both hidden layers have relu activation function.
# #### Model also has dropout units after each hidden layer to prevent the model from overfitting and memorizing the training data. # #### Finally the output layer has a softmax function with gives probabilities based on the scores in output layer so that the model predicts the digit in the input image. # <img src="relu.png"> model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) model.summary() # ### We use rmsprop optimizer and categorical cross entropy i.e -∑ (y*log(y^)) where y is true label and y^ is predicted probability. model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy']) model.fit(X_train,Y_train,epochs=10,batch_size=128) score = model.evaluate(X_test, Y_test, verbose=0) print('Test score:', score[0]) print('Test accuracy:', score[1])
MNIST with keras.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Temporal graph learning on trade data: compare static GCN / Chebyshev nets
# against recurrent graph networks (GConvGRU / GConvLSTM / DCRNN) for
# node-level forecasting over a sequence of graph snapshots.

import pickle
import numpy as np
import torch_geometric as tg
from scipy.sparse import csr_matrix
from sklearn.model_selection import train_test_split
import torch
import matplotlib.pyplot as plt
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, ChebConv, GATConv
from torch.nn import functional as F
from torch_geometric.nn import GCNConv, GAE, VGAE, GATConv, AGNNConv
from torch_geometric.utils import dropout_adj
import torch_geometric.transforms as T
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from collections import Counter
import torch.nn as nn
from torch_geometric_temporal.nn.recurrent import DCRNN, GConvGRU, GConvLSTM
from torch_geometric_temporal.data.splitter import discrete_train_test_split
from tqdm import tqdm
import matplotlib.pyplot as plt

data_path = 'graph_data.pkl'
data_in = pickle.load(open(data_path, 'rb'))

npzfile = np.load('trade_savez_files.npz', allow_pickle = True)

# BUGFIX(review): `trade_edge_attr` was referenced here but never defined —
# a NameError on a fresh run. It presumably comes from a key of `npzfile`;
# confirm the intended key before re-enabling.
# trade_edge_attr.shape

# NOTE(review): both branches of this conditional select 'cpu', so the model
# always runs on CPU even when CUDA is available. If GPU execution is wanted,
# the first literal should be 'cuda'. Kept as-is to preserve behavior.
device = torch.device('cpu' if torch.cuda.is_available() else 'cpu')
print('Device: '.ljust(32), device)
#print('Model Name: '.ljust(32), str(model_name.__name__))
#print('Model params:{:19} lr: {:.4f} weight_decay: {:.4f}'.format('',lr, weight_decay))
#print('Total number of epochs to run: '.ljust(32), epochs)
print('*' * 65)

# One input feature and one target per node.
infeat = 1
outfeat = 1

def make_graph_torch_data(graph_rawdata):
    """Attach a torch_geometric `Data` object under key 'torch' to a raw
    snapshot dict (with 'edges' and 'nodes') and return the dict."""
    list_from = np.array([x[0] for x in graph_rawdata['edges']])
    list_to = np.array([x[1] for x in graph_rawdata['edges']])
    list_features = np.array([x[2] for x in graph_rawdata['edges']])
    edge_index = torch.tensor([list_from, list_to], dtype = torch.long)
    edge_attr = torch.tensor(list_features, dtype = torch.float32)
    # Node feature: NetWeight; target: lagged soybean value (column vectors).
    x = np.array([x['NetWeight'] for x in graph_rawdata['nodes'].values()])[:, np.newaxis]
    y = np.array([x['L1_soybean'] for x in graph_rawdata['nodes'].values()])[:, np.newaxis]
    node_x = torch.tensor(x, dtype = torch.float32)
    node_y = torch.tensor(y, dtype = torch.float32)
    torch_data = Data(x = node_x, y = node_y, edge_index = edge_index, edge_attr = edge_attr)
    graph_rawdata['torch'] = torch_data.clone().to(device)
    return graph_rawdata

temporal_data = [make_graph_torch_data(v) for k, v in data_in.items()]

temporal_data[2]['torch'].edge_index.device

# Hold out the last six snapshots for testing.
train_dataset, test_dataset = temporal_data[:-6], temporal_data[-6:]

# +
# n = len(y)
# test_size = int(n * 0.2)
# train_idx, test_idx = train_test_split(range(n), test_size=test_size, random_state=1651516)
# torch_data.train_idx = torch.tensor(train_idx, dtype = torch.long)
# torch_data.test_idx = torch.tensor(test_idx, dtype = torch.long)
# -

def RMSELoss(yhat,y):
    """Root-mean-squared error between predictions and targets."""
    return torch.sqrt(torch.mean((yhat-y)**2))

class cheb_net(nn.Module):
    """Single Chebyshev-convolution layer followed by a linear head."""
    def __init__(self, in_channels, out_channels):
        super(cheb_net, self).__init__()
        self.conv1 = ChebConv(in_channels, 1, K = 5)#, cached=True)
        self.linear_out = nn.Linear(1, out_channels)
        # self.conv1 = ChebConv(data.num_features, 16, K=2)
        # self.conv2 = ChebConv(16, data.num_features, K=2)

    def forward(self, x, edge_index, edge_weight):
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, p = 0.2, training=self.training)
        x = self.linear_out(x)
        return x

model = cheb_net(infeat, outfeat).to(device)

class GCNet(nn.Module):
    """Three stacked GCN layers (20 -> 5 -> 3) with a linear head.

    NOTE(review): shadowed by a second `GCNet` definition further below,
    which takes (x, edge_index, edge_weight) instead of a `Data` object.
    """
    def __init__(self, in_channels, out_channels):
        super(GCNet, self).__init__()
        self.conv1 = GCNConv(in_channels, 20, add_self_loops = False)#, cached=True)
        self.conv2 = GCNConv(20, 5, add_self_loops = False) #data.num_classes)#, cached=True)
        self.conv3 = GCNConv(5, 3, add_self_loops = False)#data.num_classes)#, cached=True)
        self.linear_out = nn.Linear(3, out_channels)
        # self.conv1 = ChebConv(data.num_features, 16, K=2)
        # self.conv2 = ChebConv(16, data.num_features, K=2)

    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, p = 0.2, training=self.training)
        x = F.relu(self.conv2(x, edge_index, edge_weight))
        x = F.dropout(x, p = 0.2, training=self.training)
        x = self.conv3(x, edge_index, edge_weight)
        x = self.linear_out(x)
        return x

model = GCNet(infeat, outfeat).to(device)

class RecurrentGCN(torch.nn.Module):
    """Three stacked GConvGRU layers (64 -> 32 -> 16) + linear head."""
    def __init__(self, node_features, dropout):
        self.dropout = dropout
        super(RecurrentGCN, self).__init__()
        self.recurrent_1 = GConvGRU(node_features, 64, 10)
        self.recurrent_2 = GConvGRU(64, 32, 5)
        self.recurrent_3 = GConvGRU(32, 16, 5)
        self.linear = torch.nn.Linear(16, 1)

    def forward(self, x, edge_index, edge_weight):
        x = self.recurrent_1(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=0.3, training=self.training)
        x = self.recurrent_2(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=0.3, training=self.training)
        x = self.recurrent_3(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=0.3, training=self.training)
        x = self.linear(x)
        return x

class RecurrentGCN_Small(torch.nn.Module):
    """Single GConvGRU layer + linear head.

    `dropout` defaults to 0.3: the grid-search `models` list below
    instantiates this class without a dropout argument.
    """
    def __init__(self, node_features, K, dropout = 0.3):
        self.dropout = dropout
        super(RecurrentGCN_Small, self).__init__()
        self.recurrent_1 = GConvGRU(node_features, 16,K)
        self.linear = torch.nn.Linear(16, 1)

    def forward(self, x, edge_index, edge_weight):
        x = self.recurrent_1(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.linear(x)
        return x

class RecurrentGCN_Large(torch.nn.Module):
    """Five stacked GConvGRU layers (16-32-64-32-16) + linear head.

    `dropout` defaults to 0.3 (the `models` list omits it).
    """
    def __init__(self, node_features, K, dropout = 0.3):
        self.dropout = dropout
        super(RecurrentGCN_Large, self).__init__()
        self.recurrent_1 = GConvGRU(node_features, 16,K)
        self.recurrent_2 = GConvGRU(16, 32, K)
        self.recurrent_3 = GConvGRU(32, 64, K)
        self.recurrent_4 = GConvGRU(64, 32, K)
        self.recurrent_5 = GConvGRU(32, 16, K)
        self.linear = torch.nn.Linear(16, 1)

    def forward(self, x, edge_index, edge_weight):
        x = self.recurrent_1(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.recurrent_2(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.recurrent_3(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.recurrent_4(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.recurrent_5(x, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.linear(x)
        return x

class LSTM_Mod_Small(torch.nn.Module):
    """Single GConvLSTM layer + linear head (per-snapshot forward).

    `dropout` defaults to 0.3 (the `models` list omits it).
    NOTE(review): redefined several times further below; later definitions
    shadow this one.
    """
    def __init__(self, in_channels, out_channels, K, dropout = 0.3):
        self.dropout = dropout
        super(LSTM_Mod_Small, self).__init__()
        self.recurrent_1 = GConvLSTM(in_channels, 16, K)
        self.linear = torch.nn.Linear(16, out_channels)

    def forward(self, x_in, edge_index, edge_weight):
        h, c = self.recurrent_1(x_in, edge_index, edge_weight)
        h = F.relu(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        # BUGFIX: original applied the head to the undefined name `x`
        # (NameError); the hidden state `h` is the intended input.
        x = self.linear(h)
        return x

class LSTM_Mod_Large(torch.nn.Module):
    """Five stacked GConvLSTM layers (16-32-64-32-16) + linear head.

    BUGFIX: the original only defined `recurrent_1` while `forward` used
    `recurrent_2..5` (AttributeError), and read an undefined `dropout`;
    layers are now defined to mirror RecurrentGCN_Large, and `dropout`
    is a defaulted parameter so existing calls keep working.
    """
    def __init__(self, in_channels, out_channels, K, dropout = 0.3):
        self.dropout = dropout
        super(LSTM_Mod_Large, self).__init__()
        self.recurrent_1 = GConvLSTM(in_channels, 16, K)
        self.recurrent_2 = GConvLSTM(16, 32, K)
        self.recurrent_3 = GConvLSTM(32, 64, K)
        self.recurrent_4 = GConvLSTM(64, 32, K)
        self.recurrent_5 = GConvLSTM(32, 16, K)
        self.linear = torch.nn.Linear(16, out_channels)

    def forward(self, x_in, edge_index, edge_weight):
        h, c = self.recurrent_1(x_in, edge_index, edge_weight)
        h = F.relu(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        h, c = self.recurrent_2(h, edge_index, edge_weight)
        h = F.relu(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        h, c = self.recurrent_3(h, edge_index, edge_weight)
        h = F.relu(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        h, c = self.recurrent_4(h, edge_index, edge_weight)
        h = F.relu(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        x, c = self.recurrent_5(h, edge_index, edge_weight)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.linear(x)
        return x

class DCRNN_Mod(torch.nn.Module):
    """Single DCRNN layer + linear head.

    BUGFIX: the original read an undefined global `dropout`; it is now a
    defaulted constructor parameter.
    """
    def __init__(self, in_channels, out_channels, K, dropout = 0.3):
        super(DCRNN_Mod, self).__init__()
        self.recurrent_1 = DCRNN(in_channels, 16, K)
        self.linear = torch.nn.Linear(16, out_channels)
        self.dropout = dropout

    def forward(self, x_in, edge_index, edge_weight):
        h = self.recurrent_1(x_in, edge_index, edge_weight)
        h = F.relu(h)
        h = F.dropout(h, p=self.dropout, training=self.training)
        y = self.linear(h)
        return y

class GCNet(nn.Module):
    """GCN stack (20 -> 5 -> 3) taking (x, edge_index, edge_weight).

    Shadows the earlier `GCNet`. `K` is accepted (and ignored) so the
    grid-search `models` list, which passes `K=...`, does not raise.
    """
    def __init__(self, in_channels, out_channels, dropout = 0.2, K = None):
        super(GCNet, self).__init__()
        self.dropout = dropout
        self.conv1 = GCNConv(in_channels, 20, add_self_loops = False)#, cached=True)
        self.conv2 = GCNConv(20, 5, add_self_loops = False) #data.num_classes)#, cached=True)
        self.conv3 = GCNConv(5, 3, add_self_loops = False)#data.num_classes)#, cached=True)
        self.linear_out = nn.Linear(3, out_channels)

    def forward(self, x, edge_index, edge_weight):
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, p = self.dropout, training=self.training)
        x = F.relu(self.conv2(x, edge_index, edge_weight))
        x = F.dropout(x, p = self.dropout, training=self.training)
        x = self.conv3(x, edge_index, edge_weight)
        x = self.linear_out(x)
        return x

# Hyper-parameter grids.
K = [5,3,2]
dropout = [0.1,0.3,0.5]

import itertools
model_options = []
for element in itertools.product(dropout, K):
    model_options.append(element)

model_options

lr = [10,1,0.5,0.1]
weight_decay = [0.5,0.1]
epochs = [100]

# Candidate models for the grid search.
# NOTE(review): rows 11-12 repeat RecurrentGCN_Small where RecurrentGCN_Large
# with K=3/K=2 was probably intended (copy-paste); kept as written.
models = [LSTM_Mod_Small(in_channels = 1, out_channels = 1, K = 5).to(device),
          LSTM_Mod_Small(in_channels = 1, out_channels = 1, K = 3).to(device),
          LSTM_Mod_Small(in_channels = 1, out_channels = 1, K = 2).to(device),
          LSTM_Mod_Large(in_channels = 1, out_channels = 1, K = 5).to(device),
          LSTM_Mod_Large(in_channels = 1, out_channels = 1, K = 3).to(device),
          LSTM_Mod_Large(in_channels = 1, out_channels = 1, K = 2).to(device),
          RecurrentGCN_Small(node_features = 1, K = 5).to(device),
          RecurrentGCN_Small(node_features = 1, K = 3).to(device),
          RecurrentGCN_Small(node_features = 1, K = 2).to(device),
          RecurrentGCN_Large(node_features = 1, K = 5).to(device),
          RecurrentGCN_Small(node_features = 1, K = 3).to(device),
          RecurrentGCN_Small(node_features = 1, K = 2).to(device),
          GCNet(in_channels = 1, out_channels = 1, K = 5).to(device),
          GCNet(in_channels = 1, out_channels = 1, K = 3).to(device),
          GCNet(in_channels = 1, out_channels = 1, K = 2).to(device)
         ]

split_location = [-6, -1, -24]

import itertools
# NOTE(review): overwrites the earlier (dropout, K) model_options grid.
model_options = []
for element in itertools.product(models, lr, weight_decay, epochs, split_location):
    model_options.append(element)

# NOTE(review): shuffling then reversing a *temporal* sequence discards the
# time ordering — confirm this is intentional.
import random
random.shuffle(train_dataset)
random.shuffle(test_dataset)
train_dataset.reverse()
test_dataset.reverse()

def model_execution(settings):
    """Train/evaluate one (model, lr, weight_decay, epochs, split) config.

    Returns (train_loss_trace, test_loss_trace, trained_model).
    """
    model, lr, weight_decay, epochs, split_location = settings
    print( lr, weight_decay, epochs, split_location)
    train_dataset, test_dataset = temporal_data[:split_location], temporal_data[split_location:]
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay = weight_decay)
    train_performance = []
    test_performance = []
    for epoch in tqdm(range(epochs)):
        model.train()
        cost = 0
        for time, snapshot in enumerate(train_dataset):
            y_hat = model(snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr)
            cost = cost + torch.mean((y_hat-snapshot['torch'].y)**2)
        cost = cost / (time+1)
        cost.backward()
        optimizer.step()
        optimizer.zero_grad()
        train_performance.append(float(cost))
        model.eval()
        test_cost = 0
        for time, snapshot in enumerate(test_dataset):
            y_hat = model(snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr)
            test_cost = test_cost + torch.mean((y_hat-snapshot['torch'].y)**2)
        test_cost = test_cost / (time+1)
        # BUGFIX: the original appended float(cost) (the TRAIN loss), so the
        # test trace silently duplicated the train trace.
        test_performance.append(float(test_cost))
        if (epoch % int(epochs/5) == 0):
            print('Epoch: {} Train loss: {:.4f} Test RMSE: {:.4f}'.format(epoch, cost, test_cost))
        if (epoch == epochs - 1):
            print('-'*65,'\nFinal epoch: {} Train loss: {:.4f} Test RMSE: {:.4f}'.format(epoch, cost, test_cost))
            print('-'*65)
    return (train_performance, test_performance, model)

model_results = {}
for i, element in enumerate(model_options):
    results = model_execution(element)
    model_results[i] = {
        'model' : element[0],
        'lr' : element[1],
        'weight_decay' : element[2],
        'epochs' : element[3],
        'split_location' : element[4],
        'train_loss_trace' : results[0],
        'test_loss_trace' : results[1],
        'trained_model' : results[2]
    }

import pickle
pickle.dump(model_results,open('model_results.pkl','wb'))

class LSTM_Mod_Small(torch.nn.Module):
    """Two parallel GConvLSTM branches (24 each), concatenated into a
    48 -> 12 -> out MLP head; unrolled over a list of snapshots.
    Redefinition: shadows the per-snapshot LSTM_Mod_Small above."""
    def __init__(self, in_channels, out_channels, K, dropout):
        self.dropout = dropout
        super(LSTM_Mod_Small, self).__init__()
        self.recurrent_1a = GConvLSTM(in_channels, 24, K)
        self.recurrent_1b = GConvLSTM(in_channels, 24, K)
        self.linear1 = torch.nn.Linear(48,12)
        self.linear2 = torch.nn.Linear(12, out_channels)

    def forward(self, snapshot_list):
        # snapshot_list: sequence of [x, edge_index, edge_weight] triples;
        # hidden/cell states are threaded through time.
        for i, snapshot in enumerate(snapshot_list):
            x_in, edge_index, edge_weight = snapshot
            if i == 0:
                ha, ca = self.recurrent_1a(x_in, edge_index, edge_weight, None, None)
                hb, cb = self.recurrent_1b(x_in, edge_index, edge_weight, None, None)
            else:
                ha, ca = self.recurrent_1a(x_in, edge_index, edge_weight, ha, ca)
                hb, cb = self.recurrent_1b(x_in, edge_index, edge_weight, hb, cb)
            xa = F.relu(ha)
            xa = F.dropout(xa, p=self.dropout, training=self.training)
            xb = F.relu(hb)
            xb = F.dropout(xb, p=self.dropout, training=self.training)
            x = self.linear1(torch.cat((xa,xb),1))
            #x = self.linear1(torch.cat((x,x_in),1))
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.linear2(x)
            if i == 0 :
                x_out = x
            else:
                x_out = torch.cat((x_out, x),1)
        return x_out

class LSTM_Mod_Medium(torch.nn.Module):
    """Same dual-branch architecture as the class above."""
    def __init__(self, in_channels, out_channels, K, dropout):
        self.dropout = dropout
        # BUGFIX: original called super(LSTM_Mod_Small, ...) — wrong class.
        super(LSTM_Mod_Medium, self).__init__()
        self.recurrent_1a = GConvLSTM(in_channels, 24, K)
        self.recurrent_1b = GConvLSTM(in_channels, 24, K)
        self.linear1 = torch.nn.Linear(48,12)
        self.linear2 = torch.nn.Linear(12, out_channels)

    def forward(self, snapshot_list):
        for i, snapshot in enumerate(snapshot_list):
            x_in, edge_index, edge_weight = snapshot
            if i == 0:
                ha, ca = self.recurrent_1a(x_in, edge_index, edge_weight, None, None)
                hb, cb = self.recurrent_1b(x_in, edge_index, edge_weight, None, None)
            else:
                ha, ca = self.recurrent_1a(x_in, edge_index, edge_weight, ha, ca)
                hb, cb = self.recurrent_1b(x_in, edge_index, edge_weight, hb, cb)
            xa = F.relu(ha)
            xa = F.dropout(xa, p=self.dropout, training=self.training)
            xb = F.relu(hb)
            xb = F.dropout(xb, p=self.dropout, training=self.training)
            x = self.linear1(torch.cat((xa,xb),1))
            #x = self.linear1(torch.cat((x,x_in),1))
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.linear2(x)
            if i == 0 :
                x_out = x
            else:
                x_out = torch.cat((x_out, x),1)
        return x_out

class LSTM_Mod_Small(torch.nn.Module):
    """Redefinition: single branch of the dual-LSTM variant.

    NOTE(review): with linear1 commented out, `x` stays 24-dim but
    linear2 expects 12 inputs — this variant would fail at runtime; it is
    immediately shadowed by the next definition.
    """
    def __init__(self, in_channels, out_channels, K, dropout):
        self.dropout = dropout
        super(LSTM_Mod_Small, self).__init__()
        self.recurrent_1a = GConvLSTM(in_channels, 24, K)
        self.recurrent_1b = GConvLSTM(in_channels, 24, K)
        self.linear1 = torch.nn.Linear(48,12)
        self.linear2 = torch.nn.Linear(12, out_channels)

    def forward(self, snapshot_list):
        for i, snapshot in enumerate(snapshot_list):
            x_in, edge_index, edge_weight = snapshot
            if i == 0:
                h, c = self.recurrent_1a(x_in, edge_index, edge_weight, None, None)
            else:
                h, c = self.recurrent_1a(x_in, edge_index, edge_weight, h, c)
            x = F.relu(h)
            x = F.dropout(x, p=self.dropout, training=self.training)
            #x = self.linear1(torch.cat((x,x_in),1))
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.linear2(x)
            if i == 0 :
                x_out = x
            else:
                x_out = torch.cat((x_out, x),1)
        return x_out

class LSTM_Mod_Small(torch.nn.Module):
    """Redefinition: one 128-unit GConvLSTM + linear head, unrolled over a
    snapshot list; outputs one column per time step."""
    def __init__(self, in_channels, out_channels, K, dropout):
        self.dropout = dropout
        super(LSTM_Mod_Small, self).__init__()
        self.recurrent_1 = GConvLSTM(in_channels, 128, K)
        self.linear = torch.nn.Linear(128, out_channels)

    def forward(self, snapshot_list):
        for i, snapshot in enumerate(snapshot_list):
            x_in, edge_index, edge_weight = snapshot
            if i == 0:
                h, c = self.recurrent_1(x_in, edge_index, edge_weight, None, None)
            else:
                h, c = self.recurrent_1(x_in, edge_index, edge_weight, h, c)
            x = F.relu(h)
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.linear(x)
            if i == 0 :
                x_out = x
            else:
                x_out = torch.cat((x_out, x),1)
        return x_out

class RecurrentGCN_Small(torch.nn.Module):
    """Redefinition: snapshot-list GConvGRU variant (only recurrent_1 is
    used in forward; recurrent_2/3 are defined but unused)."""
    def __init__(self, in_channels, out_channels, K, dropout):
        self.dropout = dropout
        super(RecurrentGCN_Small, self).__init__()
        self.recurrent_1 = GConvGRU(in_channels, 16,1)
        self.recurrent_2 = GConvGRU(16, 32,2)
        self.recurrent_3 = GConvGRU(32, 16,3)
        self.linear = torch.nn.Linear(16, 1)

    def forward(self, snapshot_list):
        for i, snapshot in enumerate(snapshot_list):
            x_in, edge_index, edge_weight = snapshot
            if i == 0:
                h = self.recurrent_1(x_in, edge_index, edge_weight, None)
            else:
                h = self.recurrent_1(x_in, edge_index, edge_weight, h)
            x = F.relu(h)
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.linear(x)
            if i == 0 :
                x_out = x
            else:
                x_out = torch.cat((x_out, x),1)
        return x_out

# Stack training targets column-wise: one column per snapshot.
for i, snapshot in enumerate(train_dataset):
    if i == 0:
        y_out = snapshot['torch'].y
    else:
        y_out = torch.cat((y_out, snapshot['torch'].y),1)

model

# +
from tqdm import tqdm

model = LSTM_Mod_Small(in_channels = 1,out_channels = 1, K = 3, dropout = 0.15).to(device)

# Stack test targets column-wise, matching the model's x_out layout.
for i, snapshot in enumerate(test_dataset):
    if i == 0:
        y_test = snapshot['torch'].y
    else:
        y_test = torch.cat((y_test, snapshot['torch'].y),1)

# +
optimizer = torch.optim.Adam(model.parameters(), lr=0.08, weight_decay = 0.01)

epochs = 500
train_performance = []
test_performance = []
for epoch in tqdm(range(epochs)):
    model.train()
    cost = 0
    # Full-sequence forward over all training snapshots at once.
    y_hat = model([[snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr] for snapshot in train_dataset])
    cost = torch.sqrt(torch.mean((y_hat - y_out)**2))
    cost.backward()
    optimizer.step()
    optimizer.zero_grad()
    train_performance.append(cost)
    model.eval()
    test_cost = 0
    size_test = len(test_dataset)
    # Evaluate by rolling through train+test and scoring the test tail.
    y_hat = model([[snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr] for snapshot in train_dataset + test_dataset])
    y_hat = y_hat[:,-size_test:]
    test_cost = torch.sqrt(torch.mean((y_hat - y_test)**2))
    test_performance.append(test_cost)
    if (epoch % int(epochs/100) == 0):
        print('Epoch: {} Train loss: {:.4f} Test RMSE: {:.4f}'.format(epoch, cost, test_cost))
    if (epoch == epochs - 1):
        print('-'*65,'\nFinal epoch: {} Train loss: {:.4f} Test RMSE: {:.4f}'.format(epoch, cost, test_cost))
        print('-'*65)
# -

plt.plot(test_performance)

plt.plot(train_performance)

model_save = {
    'model' : model,
    'train_loss_trace' : train_performance,
    'test_loss_trace' : test_performance,
    'trained_model' : model.state_dict(),
}

import pickle
pickle.dump(model_save,open('model_lstm_recurrent.pkl','wb'))

# +
import pandas as pd
df = pd.DataFrame({'test_rmse' : [x.detach().numpy() for x in test_performance],
                   'train_rmse' : [x.detach().numpy() for x in train_performance]}).reset_index().rename(columns = {'index' : 'epoch'})
df.to_csv('model_train_performance-gc-lstm.csv')

# +
# Collect per-country actual vs predicted values for train and test periods.
list_perf = []
model.eval()
y_hat = model([[snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr] for snapshot in train_dataset])
for time, snapshot in enumerate(train_dataset):
    y = snapshot['torch'].y.cpu().detach().numpy()
    last_prediction = y_hat[:,time].cpu().detach().numpy()
    for i, val in enumerate(last_prediction):
        #print(f"Country: {snapshot['country_dict'][i]}, Period: {snapshot['period']}, Actual: {y[i][0]}, Predicted: {last_prediction[i]}")
        list_perf.append({'country' : snapshot['country_dict'][i], 'date' : snapshot['date'], 'val' : y[i][0], 'type' : 'actual'})
        list_perf.append({'country' : snapshot['country_dict'][i], 'date' : snapshot['date'], 'val' : last_prediction[i], 'type' : 'train-predict'})

y_hat = model([[snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr] for snapshot in train_dataset + test_dataset])
for time, snapshot in enumerate(test_dataset):
    y = snapshot['torch'].y.cpu().detach().numpy()
    last_prediction = y_hat[:,time + len(train_dataset)].cpu().detach().numpy()
    for i, val in enumerate(last_prediction):
        #print(f"Country: {snapshot['country_dict'][i]}, Period: {snapshot['period']}, Actual: {y[i][0]}, Predicted: {last_prediction[i]}")
        list_perf.append({'country' : snapshot['country_dict'][i], 'date' : snapshot['date'], 'val' : y[i][0], 'type' : 'actual'})
        list_perf.append({'country' : snapshot['country_dict'][i], 'date' : snapshot['date'], 'val' : last_prediction[i], 'type' : 'test-predict'})
# -

import pandas as pd
df_perf = pd.DataFrame(list_perf)

df_perf[df_perf['country'] == 'USA']

import plotly.express as px
df_perf.to_csv('model_prediction-s-gc-lstm.csv')

fig = px.line(df_perf, x = 'date', y = 'val', color = 'country')
fig.write_html('plot4.html')

# BUGFIX(review): out-of-order cell — `snapshot` and `period_list` are only
# defined by the scenario cell below, so on a fresh run this raised
# NameError. Disabled.
# snapshot['period'] in period_list

# +
# Counterfactual scenario: remove China<->USA trade edges during 2019.
import copy
reporter = 'China'
partner = 'United States of America'
period_list = [201901,201902,201903,201904,201905,201906,201907,201908,201909,201910,201911,201912]
mod_dataset = {}
for time, snapshot in enumerate(data_in.values()):
    if time == 0:
        reporter_num = [k for k,v in snapshot['country_dict'].items() if v == reporter][0]
        partner_num = [k for k,v in snapshot['country_dict'].items() if v == partner][0]
    snapshot_mod = copy.deepcopy(snapshot)
    if snapshot['period'] in period_list:
        snapshot_mod['edges'] = [x for x in snapshot_mod['edges'] if not (x[0] == partner_num and x[1] == reporter_num)]
        snapshot_mod['edges'] = [x for x in snapshot_mod['edges'] if not (x[0] == reporter_num and x[1] == partner_num)]
        if len(snapshot_mod['edges']) != len(snapshot['edges']):
            print('dropped edge',snapshot['period'])
        print('mod period',snapshot['period'])
    mod_dataset[time] = snapshot_mod
# -

alt_data = [make_graph_torch_data(v) for k, v in mod_dataset.items()]

# +
# Compare predictions on the counterfactual graphs vs the baseline graphs.
list_perf = []
model.eval()
y_hat = model([[snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr] for snapshot in alt_data])
for time, snapshot in enumerate(train_dataset):
    if snapshot['period'] in period_list:
        y = snapshot['torch'].y.cpu().detach().numpy()
        last_prediction = y_hat[:,time].cpu().detach().numpy()
        for i, val in enumerate(last_prediction):
            #print(f"Country: {snapshot['country_dict'][i]}, Period: {snapshot['period']}, Actual: {y[i][0]}, Predicted: {last_prediction[i]}")
            list_perf.append({'country' : snapshot['country_dict'][i], 'date' : snapshot['date'], 'val' : y[i][0], 'type' : 'actual'})
            list_perf.append({'country' : snapshot['country_dict'][i], 'date' : snapshot['date'], 'val' : last_prediction[i], 'type' : 'alt-predict'})

y_hat = model([[snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr] for snapshot in temporal_data])
for time, snapshot in enumerate(train_dataset):
    if snapshot['period'] in period_list:
        y = snapshot['torch'].y.cpu().detach().numpy()
        last_prediction = y_hat[:,time].cpu().detach().numpy()
        for i, val in enumerate(last_prediction):
            #print(f"Country: {snapshot['country_dict'][i]}, Period: {snapshot['period']}, Actual: {y[i][0]}, Predicted: {last_prediction[i]}")
            list_perf.append({'country' : snapshot['country_dict'][i], 'date' : snapshot['date'], 'val' : y[i][0], 'type' : 'actual'})
            list_perf.append({'country' : snapshot['country_dict'][i], 'date' : snapshot['date'], 'val' : last_prediction[i], 'type' : 'base-predict'})
# -

df_alt = pd.DataFrame(list_perf)

df_alt.to_csv('model_prediction_scenario.csv')

class LSTM_Mod_Small(torch.nn.Module):
    """Redefinition: 64-unit GConvLSTM with sigmoid activation; returns only
    the final step's prediction (no per-step concatenation)."""
    def __init__(self, in_channels, out_channels, K, dropout):
        self.dropout = dropout
        super(LSTM_Mod_Small, self).__init__()
        self.recurrent_1 = GConvLSTM(in_channels, 64, K)
        self.linear = torch.nn.Linear(64, out_channels)

    def forward(self, snapshot_list):
        for i, snapshot in enumerate(snapshot_list):
            x_in, edge_index, edge_weight = snapshot
            if i == 0:
                h, c = self.recurrent_1(x_in, edge_index, edge_weight, None, None)
            else:
                h, c = self.recurrent_1(x_in, edge_index, edge_weight, h, c)
        x = F.sigmoid(h)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.linear(x)
        return x

# +
from tqdm import tqdm

model = LSTM_Mod_Small(in_channels = 1,out_channels = 1, K = 3, dropout = 0.1).to(device)

for i, snapshot in enumerate(test_dataset):
    if i == 0:
        y_test = snapshot['torch'].y
    else:
        y_test = torch.cat((y_test, snapshot['torch'].y),1)

optimizer = torch.optim.Adam(model.parameters(), lr=0.08, weight_decay = 0.01)

epochs = 100
train_performance = []
test_performance = []
for epoch in tqdm(range(epochs)):
    model.train()
    cost = 0
    # Incremental unrolling: predict each snapshot from the prefix up to it.
    for time, snapshot in enumerate(train_dataset):
        y_hat = model([[snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr] for snapshot in train_dataset[:time+1]])
        cost = cost + torch.sqrt(torch.mean((y_hat - snapshot['torch'].y)**2))
        #print(time)
    cost = cost / (time+1)
    cost.backward()
    optimizer.step()
    optimizer.zero_grad()
    train_performance.append(cost)
    model.eval()
    test_cost = 0
    size_test = len(test_dataset)
    for time, snapshot in enumerate(test_dataset):
        y_hat = model([[snapshot['torch'].x, snapshot['torch'].edge_index, snapshot['torch'].edge_attr] for snapshot in train_dataset + test_dataset[:time+1]])
        test_cost = test_cost + torch.sqrt(torch.mean((y_hat - snapshot['torch'].y)**2))
        #print(time)
    test_cost = test_cost / (time+1)
    test_performance.append(test_cost)
    if (epoch % int(epochs/100) == 0):
        print('Epoch: {} Train loss: {:.4f} Test RMSE: {:.4f}'.format(epoch, cost, test_cost))
    if (epoch == epochs - 1):
        print('-'*65,'\nFinal epoch: {} Train loss: {:.4f} Test RMSE: {:.4f}'.format(epoch, cost, test_cost))
        print('-'*65)
# -

y_hat[:,-6:]

for i, snapshot in enumerate(test_dataset):
    if i == 0:
        y_out = snapshot['torch'].y
    else:
        y_out = torch.cat((y_out, snapshot['torch'].y),1)

import pandas as pd
df_perf = pd.DataFrame(list_perf)

# NOTE(review): leftover static-GCN training cell — `data` and
# `data.train_idx`/`data.test_idx` (the commented-out split above) are not
# defined on a fresh run, and `lr`/`weight_decay`/`epochs` are lists at this
# point; this cell fails as written and is kept only for reference.
optimizer = torch.optim.Adam(model.parameters(), lr = lr, weight_decay = weight_decay)
train_losses = []
accs = []
model.train()
for epoch in range(1, epochs + 1):
    optimizer.zero_grad()
    out = model(data)
    loss = F.mse_loss(out[data.train_idx], data.y[data.train_idx])
    train_losses.append(loss.item())
    loss.backward()
    optimizer.step()
    model.eval()
    pred = model(data)
    RMSE = RMSELoss(pred[data.test_idx], data.y[data.test_idx])
    accs.append(RMSE)
    if (epoch % int(epochs/10) == 0):
        print('Epoch: {} Train loss: {:.4f} Test RMSE: {:.4f}'.format(epoch, loss, RMSE))
    if (epoch == epochs):
        print('-'*65,'\nFinal epoch: {} Train loss: {:.4f} Test RMSE: {:.4f}'.format(epoch, loss, RMSE))
        print('-'*65)
print('\033[1mBest Accuracy\nEpoch: {} Train loss: {:.4f} Test RMSE: {:.4f}\n'
      .format(accs.index(min(accs))+1, train_losses[accs.index(min(accs))], min(accs)))
gnn_modeling/temporal-gc-lstm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Project - Identify Data

# ![Data Science Workflow](img/ds-workflow.png)

# ## Goal of Project
# - World population growth rate
# - Is the growth rate stable? Increasing or Declining?

# ## Step 1: Acquire
# - Explore problem
# - Identify data
# - Import data

# ### Step 1.a: Explore and understand problem
# - What data do we need to answer the problem?

# ### Step 1.b: Identify data with world population
# - There are multiple sources
#   - World bank: https://data.worldbank.org/indicator/SP.POP.TOTL
#   - Wiki: https://en.wikipedia.org/wiki/World_population
#   - ...and probably more
#

# ### Step 1.c: Download and import the data
# - We will download it from World Bank

# ### Step 1.d: Import libraries
# - Execute the cell below (SHIFT + ENTER)

import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# ### Step 1.e: Read the data
# - Use ```pd.read_csv()``` to read the file `files/API_SP/API_SP.POP.TOTL_DS2_en_csv_v2_3158886.csv`
#   - NOTE: Remember to assign the result to a variable (e.g., ```data```)
#   - HELP: You might need to `skiprows=4`
# - Apply ```.head()``` on the data to see all is as expected

# ### Step 1.f: Get world data
# - We are only interested in data for the world.
# - The data can be identified by `Country Name` (World) or `Country Code` (WLD).
# - This can be done as follows:
# ```Python
# data[data['Country Name'] == 'World']
# ```
# - NOTE: Remember to keep (assign) the resulting data in a variable

# ## Step 2: Prepare
# - Explore data
# - Visualize ideas
# - Cleaning data

# ### Step 2.a: Check the data types
# - This step tells you if some numeric column is not represented numeric.
# - Get the data types by ```.dtypes```

# ### Step 2.b: Keep only needed columns
# - We are only interested in the year columns.
# - You can drop the other columns by: ```.drop(list_of_columns_to_delete, axis='columns')``` # - Notice there is a `unnamed` column. # - Remove it with `dataset.dropna(how='all', axis='columns')` # ### Step 2.c: Transpose data # - It makes sense to have years in the rows # - This can be done with `.transpose()` # ### Step 2.d: Rename column # - The name 259 can be renamed as follows # - This can be done as follows # ```.rename(columns={259:'World Population'})``` # ### Step 2.e: Visualize data # - This is done to check data quality # - You can plot the DataFrame with `.plot()` # ## Step 3: Analyze # - Feature selection # - Model selection # - Analyze data # ### Step 3.a: Calculate yearly percentage change # - We want to explore the growth of the world population # - First step is to calculate the yearly growth # - This can be done with `pct_change()` ([docs](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.pct_change.html)) # - Add a new column `Yearly growth` with the growth in percentage # ### Step 3.b: Visualize the Yearly growth # - Plot it with `.plot()` # - This will give you an idea of the trend. # ### Step 3.c: Smooth the result # - A great way is to smooth the result # - This can show local tendencies # - Calculate a 10-years rolling average by using [rolling(10)](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rolling.html).[mean()](https://pandas.pydata.org/docs/reference/api/pandas.core.window.rolling.Rolling.mean.html) # ```Python # dataset['Yearly growth'].rolling(10).mean() # ``` # - Visualize the result # - HINT: Make a plot containing `Yearly growth` and the new calculation. # - What does that tell you? 
# ## Step 4: Report # - Present findings # - Visualize results # - Credibility counts # ### Step 4.a: Transform data # - To make plot more readable transform data to percentage # - HINT: Multiply columns data `Yearly growth` and the smoothed data by 100 # ### Step 4.b: Set title and labels on axis # - In the plot set labels and axis # - `xlabel='Year'` the same for `ylabel` and `title` # ### Step 4.c: Adding ranges on axis # - Add range on y-axis to make the plot more readable. # - Use `ylim=0` # ## Step 5: Actions # - Use insights # - Measure impact # - Main goal # ### Step 5.a # - Any insights to use? # - Do we need to make predictions based on it?
Data Science With Python/05 - Project - Identify Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import requests
from bs4 import BeautifulSoup as bs

def get_department(uid):
    """Look up a Caltech directory entry and return its department name.

    Returns the string 'Missing' when the person has no usable directory
    page (404 or any other non-OK response, or a page without the expected
    organization-unit span); otherwise returns the span's text.
    """
    response = requests.get('https://directory.caltech.edu/personnel/'+uid)
    # FIX: the original only handled 404; any other error status (e.g. 500)
    # fell through and crashed below. Treat all non-200 responses as missing.
    if response.status_code != 200:
        return 'Missing'
    # Name the parser explicitly so results don't depend on which optional
    # parsers happen to be installed on the machine.
    soup = bs(response.text, 'html.parser')
    unit = soup.find('span', class_="organization-unit")
    # FIX: find() returns None when the span is absent (page layout change,
    # error page, etc.); the original raised AttributeError on `.string`.
    if unit is None:
        return 'Missing'
    return unit.string
# +
import csv

# Map every caltech.edu address in caltechdata_emails.csv to a department
# and write the (email, department) pairs to divisions.csv.
# (Renamed the file handles — the original shadowed `csvfile` for both
# the reader and the writer.)
with open('divisions.csv', 'w', newline='') as outfile:
    writer = csv.writer(outfile, delimiter=',')
    with open('caltechdata_emails.csv', newline='') as infile:
        reader = csv.reader(infile, delimiter=',')
        for row in reader:
            email = row[0]
            if 'caltech.edu' in email:
                uid = email.split('@')[0]  # directory UID is the local part
                dept = get_department(uid)
                writer.writerow([email,dept])
# +
# Count what fraction of resolvable addresses belong to bio-related
# departments, and collect those addresses.
with open('divisions.csv', 'r', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    total_count = 0
    bio_count = 0
    bio_emails = []
    for row in reader:
        email = row[0]
        department = row[1]
        if department != 'Missing':
            total_count += 1
            # Substring match: e.g. department 'Biology' also matches
            # 'Biology and Biological Engineering'.
            if any(department in s for s in ['Biology and Biological Engineering','Bioengineering','Biochemistry & Molecular Biophysics','Biology'] ):
                bio_emails.append(email)
                bio_count += 1
            else:
                print(department)
    print(total_count)
    print(bio_count/total_count)

with open('emails.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile, delimiter=',')
    for email in bio_emails:
        writer.writerow([email])
# -
user_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt


def func(x):
    """Integrand: f(x) = e^(-2x) * cos(10x)."""
    a = -2
    b = 10
    return np.e**(a*x) * np.cos(b*x)


def func_integral(x):
    """Analytic antiderivative of func:
    F(x) = -e^(-2x) * (cos(10x) - 5 sin(10x)) / 52,
    from ∫ e^(ax) cos(cx) dx = e^(ax)(a cos(cx) + c sin(cx)) / (a² + c²)
    with a = -2, c = 10.
    """
    a = (-2.)
    b = 5.
    c = 10.
    d = (-1/52.)
    return np.e**(a*x) * ((np.cos(c*x) - b*np.sin(c*x))) * d


# Trapezoid Method
def trap_core(f, x, h):
    """Area of a single trapezoid of width h starting at x."""
    return 0.5 * h * (f(x+h) + f(x))


def trap_method(f, a, b, N):
    """Composite trapezoid rule for f on [a, b] using N sample points
    (N-1 panels). Prints N for reference and returns the estimate."""
    x = np.linspace(a, b, N)
    h = x[1] - x[0]
    Fint = 0.0
    for i in range(0, len(x)-1, 1):
        Fint += trap_core(f, x[i], h)
    print(N, "iterations")  # N is the number of sample points, not loop passes
    return Fint


# Simpson's Method
def simp_core(f, x, h):
    """Simpson's rule on the two panels [x, x+h] and [x+h, x+2h]."""
    return h * (f(x) + 4*f(x+h) + f(x+2*h))/3.


def simp_method(f, a, b, N):
    """Composite Simpson's rule for f on [a, b] using N sample points.
    When N is even (odd number of panels), the last panel is handled with
    a half-step Simpson correction so the full interval is covered."""
    x = np.linspace(a, b, N)
    h = x[1] - x[0]
    Fint = 0.0
    for i in range(0, len(x)-2, 2):
        Fint += simp_core(f, x[i], h)
    if ((N % 2) == 0):
        # Leftover single panel [x[-2], x[-1]]: apply Simpson at half step.
        Fint += simp_core(f, x[-2], 0.5*h)
    print(N, "iterations")
    return Fint


# Romberg's Method
def rom_core(f, a, b, i):
    """Refinement term for Romberg level i: midpoint contributions of the
    2^i new sample points introduced at this level."""
    h = b - a
    dh = h/2.**i
    K = h/2.**(i+1)
    M = 0.0
    for j in range(2**i):
        M += f(a + 0.5*dh + j*dh)
    return K*M


def rom_method(f, a, b, tol):
    """Romberg integration of f on [a, b], iterating until the relative
    change between successive estimates falls below tol.

    Prints each iteration's estimates and raises StopIteration if imax
    refinement levels are exceeded before convergence."""
    i = 0
    imax = 1000
    delta = 100.0*np.fabs(tol)  # start well above tol to enter the loop
    I = np.zeros(imax, dtype=float)
    I[0] = 0.5*(b-a)*(f(a) + f(b))  # level 0: single trapezoid
    i += 1
    while (delta > tol):
        I[i] = 0.5*I[i-1] + rom_core(f, a, b, i)
        delta = np.fabs((I[i]-I[i-1])/I[i])
        print(i, I[i], I[i-1], delta)
        if (delta > tol):
            i += 1
            if (i > imax):
                print("Max iterations reached.")
                raise StopIteration('Stopping iterations after ', i)
    print(i, "iterations")
    return I[i]


# Analytic answer for comparison.
ans = func_integral(np.pi) - func_integral(0)
print("Integral = " + str(ans))

print('Trapezoid method')
print(trap_method(func, 0, np.pi, 10))

print("Simpson's method")
print(simp_method(func, 0, np.pi, 10))

print("Romberg's method")
tolerance = 1e-6
RI = rom_method(func, 0, np.pi, tolerance)
# BUG FIX: this line previously referenced an undefined name `answer`
# (NameError at runtime); the analytic result is stored in `ans`.
print(RI, (RI - ans)/ans, tolerance)
hw-4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # history dumps import to mariadb # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### Summary of notebook # - # Imported the history dumps to mariadb, each dump as a separate table # # - Used sqlalchemy/python as much as possible for easy replicability of process # - failed so-far to do ```LOAD DATA LOCAL INFILE``` in python # - did it in CLI with scripts generated and documented in this notebook # - Tracked and monitored warnings during ```LOAD DATA LOCAL INFILE``` # - adjusted the schema and repeated # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## Generate MySQL queries # + [markdown] tags=[] # #### imports # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### libraries, datapath, metadata # + import os, re, pyperclip, pandas as pd, numpy as np from datetime import datetime as dt datapath = '../data/raw/jawiki/dumps_unzipped/' mtdt = pd.read_csv('../references/metadata/history.tsv', sep='\t') # print(mtdt.col_name.tolist()) # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### list of years for history dumpfiles ```years``` # - years = list(range(2013,2022)) # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ### code to make queries # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### make query list ```drop_table_query_list``` # - drop_table_query_list = ['DROP TABLE IF EXISTS ' + 't' + str(year) + ';' for year in years] # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### make query list ```create_table_query_list``` # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### store the query in variable # + def create_table(year): create_table_1 = '''CREATE TABLE ''' create_table_2 = ''' ( wiki_db VARCHAR(20) NOT NULL, event_entity 
VARCHAR(20) NOT NULL, event_type VARCHAR(20) NOT NULL, event_timestamp VARCHAR(30) NOT NULL, event_comment TEXT, event_user_id BIGINT, event_user_text_historical TEXT, event_user_text TEXT, event_user_blocks_historical VARCHAR(30), event_user_blocks VARCHAR(60), event_user_groups_historical TEXT, event_user_groups TEXT, event_user_is_bot_by_historical VARCHAR(10), event_user_is_bot_by VARCHAR(10), event_user_is_created_by_self VARCHAR(10), event_user_is_created_by_system VARCHAR(10), event_user_is_created_by_peer VARCHAR(10), event_user_is_anonymous VARCHAR(10), event_user_registration_timestamp VARCHAR(30), event_user_creation_timestamp VARCHAR(30), event_user_first_edit_timestamp VARCHAR(30), event_user_revision_count BIGINT, event_user_seconds_since_previous_revision BIGINT, page_id BIGINT, page_title_historical TEXT, page_title TEXT, page_namespace_historical INT, page_namespace_is_content_historical VARCHAR(10), page_namespace INT, page_namespace_is_content VARCHAR(10), page_is_redirect VARCHAR(20), page_is_deleted VARCHAR(20), page_creation_timestamp VARCHAR(30), page_first_edit_timestamp VARCHAR(30), page_revision_count BIGINT, page_seconds_since_previous_revision BIGINT, user_id BIGINT, user_text_historical VARCHAR(100), user_text VARCHAR(100), user_blocks_historical VARCHAR(50), user_blocks VARCHAR(50), user_groups_historical VARCHAR(50), user_groups VARCHAR(50), user_is_bot_by_historical VARCHAR(50), user_is_bot_by VARCHAR(50), user_is_created_by_self VARCHAR(50), user_is_created_by_system VARCHAR(50), user_is_created_by_peer VARCHAR(50), user_is_anonymous VARCHAR(10), user_registration_timestamp VARCHAR(30), user_creation_timestamp VARCHAR(30), user_first_edit_timestamp VARCHAR(30), revision_id BIGINT, revision_parent_id BIGINT, revision_minor_edit VARCHAR(10), revision_deleted_parts VARCHAR(30), revision_deleted_parts_are_suppressed VARCHAR(10), revision_text_bytes BIGINT, revision_text_bytes_diff BIGINT, revision_text_sha1 VARCHAR(50), 
revision_content_model VARCHAR(10), revision_content_format VARCHAR(10), revision_is_deleted_by_page_deletion VARCHAR(10), revision_deleted_by_page_deletion_timestamp VARCHAR(30), revision_is_identity_reverted VARCHAR(10), revision_first_identity_reverting_revision_id BIGINT, revision_seconds_to_identity_revert BIGINT, revision_is_identity_revert VARCHAR(10), revision_is_from_before_page_creation VARCHAR(10), revision_tags TEXT, row_id BIGINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (row_id) );''' return create_table_1 + 't' + str(year) + create_table_2 create_table_query_list = [create_table(year) for year in years] # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### formatted markdown of create table query # - # ```sql # CREATE TABLE ja_hist ( wiki_db VARCHAR(20) NOT NULL, event_entity VARCHAR(20) NOT NULL, event_type VARCHAR(20) NOT NULL, event_timestamp VARCHAR(30) NOT NULL, event_comment TEXT, event_user_id BIGINT, event_user_text_historical TEXT, event_user_text TEXT, event_user_blocks_historical VARCHAR(30), event_user_blocks VARCHAR(60), event_user_groups_historical TEXT, event_user_groups TEXT, event_user_is_bot_by_historical VARCHAR(10), event_user_is_bot_by VARCHAR(10), event_user_is_created_by_self VARCHAR(10), event_user_is_created_by_system VARCHAR(10), event_user_is_created_by_peer VARCHAR(10), event_user_is_anonymous VARCHAR(10), event_user_registration_timestamp VARCHAR(30), event_user_creation_timestamp VARCHAR(30), event_user_first_edit_timestamp VARCHAR(30), event_user_revision_count BIGINT, event_user_seconds_since_previous_revision BIGINT, page_id BIGINT, page_title_historical TEXT, page_title TEXT, page_namespace_historical INT, page_namespace_is_content_historical VARCHAR(10), page_namespace INT, page_namespace_is_content VARCHAR(10), page_is_redirect VARCHAR(20), page_is_deleted VARCHAR(20), page_creation_timestamp VARCHAR(30), page_first_edit_timestamp VARCHAR(30), page_revision_count BIGINT, page_seconds_since_previous_revision 
BIGINT, user_id BIGINT, user_text_historical VARCHAR(100), user_text VARCHAR(100), user_blocks_historical VARCHAR(50), user_blocks VARCHAR(50), user_groups_historical VARCHAR(50), user_groups VARCHAR(50), user_is_bot_by_historical VARCHAR(50), user_is_bot_by VARCHAR(50), user_is_created_by_self VARCHAR(50), user_is_created_by_system VARCHAR(50), user_is_created_by_peer VARCHAR(50), user_is_anonymous VARCHAR(10), user_registration_timestamp VARCHAR(30), user_creation_timestamp VARCHAR(30), user_first_edit_timestamp VARCHAR(30), revision_id BIGINT, revision_parent_id BIGINT, revision_minor_edit VARCHAR(10), revision_deleted_parts VARCHAR(30), revision_deleted_parts_are_suppressed VARCHAR(10), revision_text_bytes BIGINT, revision_text_bytes_diff BIGINT, revision_text_sha1 VARCHAR(50), revision_content_model VARCHAR(10), revision_content_format VARCHAR(10), revision_is_deleted_by_page_deletion VARCHAR(10), revision_deleted_by_page_deletion_timestamp VARCHAR(30), revision_is_identity_reverted VARCHAR(10), revision_first_identity_reverting_revision_id BIGINT, revision_seconds_to_identity_revert BIGINT, revision_is_identity_revert VARCHAR(10), revision_is_from_before_page_creation VARCHAR(10), revision_tags TEXT, row_id BIGINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (row_id) ); # ``` # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### make query list ```load_data_query_list``` # - # *(list of multi-queries to paste into MySQL CLI)* # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### store multi-queries in list/array variable # - colnames = ['wiki_db', 'event_entity', 'event_type', 'event_timestamp', 'event_comment', 'event_user_id', 'event_user_text_historical', 'event_user_text', 'event_user_blocks_historical', 'event_user_blocks', 'event_user_groups_historical', 'event_user_groups', 'event_user_is_bot_by_historical', 'event_user_is_bot_by', 'event_user_is_created_by_self', 'event_user_is_created_by_system', 'event_user_is_created_by_peer', 
'event_user_is_anonymous', 'event_user_registration_timestamp', 'event_user_creation_timestamp', 'event_user_first_edit_timestamp', 'event_user_revision_count', 'event_user_seconds_since_previous_revision', 'page_id', 'page_title_historical', 'page_title', 'page_namespace_historical', 'page_namespace_is_content_historical', 'page_namespace', 'page_namespace_is_content', 'page_is_redirect', 'page_is_deleted', 'page_creation_timestamp', 'page_first_edit_timestamp', 'page_revision_count', 'page_seconds_since_previous_revision', 'user_id', 'user_text_historical', 'user_text', 'user_blocks_historical', 'user_blocks', 'user_groups_historical', 'user_groups', 'user_is_bot_by_historical', 'user_is_bot_by', 'user_is_created_by_self', 'user_is_created_by_system', 'user_is_created_by_peer', 'user_is_anonymous', 'user_registration_timestamp', 'user_creation_timestamp', 'user_first_edit_timestamp', 'revision_id', 'revision_parent_id', 'revision_minor_edit', 'revision_deleted_parts', 'revision_deleted_parts_are_suppressed', 'revision_text_bytes', 'revision_text_bytes_diff', 'revision_text_sha1', 'revision_content_model', 'revision_content_format', 'revision_is_deleted_by_page_deletion', 'revision_deleted_by_page_deletion_timestamp', 'revision_is_identity_reverted', 'revision_first_identity_reverting_revision_id', 'revision_seconds_to_identity_revert', 'revision_is_identity_revert', 'revision_is_from_before_page_creation', 'revision_tags', 'row_id'] def get_tsvpath(year): return f'/home/bhrdwj/git/predwikt/data/raw/jawiki/dumps_unzipped/2021-12.jawiki.{str(year)}.tsv' # + load_data_query_list = ([ r""" LOAD DATA LOCAL INFILE """ + f"'{get_tsvpath(year)}' " + r""" INTO TABLE t""" + str(year) + r""" CHARACTER SET utf8mb4 FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n' """ + '(@' + ',@'.join(colnames) + ')' + # columns as @variables r""" SET """ + ', '.join([f"{i} = NULLIF(@{i},'')" for i in colnames]) + ';' # recieve empty strings to NULL values + f""" tee 
{'load_data_warnings/t' + str(year)}.log; show warnings; notee;""" for year in years ]) # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### previously-working text of load data local infile # - # ```sql # LOAD DATA LOCAL INFILE '/home/bhrdwj/git/predwikt/data/raw/jawiki/dumps_unzipped/2021-12.jawiki.2014.tsv' # INTO TABLE ja_hist # CHARACTER SET utf8mb4 # FIELDS TERMINATED BY '\t' # LINES TERMINATED BY '\n' # (@wiki_db,@event_entity,@event_type,@event_timestamp,@event_comment,@event_user_id,@event_user_text_historical,@event_user_text,@event_user_blocks_historical,@event_user_blocks,@event_user_groups_historical,@event_user_groups,@event_user_is_bot_by_historical,@event_user_is_bot_by,@event_user_is_created_by_self,@event_user_is_created_by_system,@event_user_is_created_by_peer,@event_user_is_anonymous,@event_user_registration_timestamp,@event_user_creation_timestamp,@event_user_first_edit_timestamp,@event_user_revision_count,@event_user_seconds_since_previous_revision,@page_id,@page_title_historical,@page_title,@page_namespace_historical,@page_namespace_is_content_historical,@page_namespace,@page_namespace_is_content,@page_is_redirect,@page_is_deleted,@page_creation_timestamp,@page_first_edit_timestamp,@page_revision_count,@page_seconds_since_previous_revision,@user_id,@user_text_historical,@user_text,@user_blocks_historical,@user_blocks,@user_groups_historical,@user_groups,@user_is_bot_by_historical,@user_is_bot_by,@user_is_created_by_self,@user_is_created_by_system,@user_is_created_by_peer,@user_is_anonymous,@user_registration_timestamp,@user_creation_timestamp,@user_first_edit_timestamp,@revision_id,@revision_parent_id,@revision_minor_edit,@revision_deleted_parts,@revision_deleted_parts_are_suppressed,@revision_text_bytes,@revision_text_bytes_diff,@revision_text_sha1,@revision_content_model,@revision_content_format,@revision_is_deleted_by_page_deletion,@revision_deleted_by_page_deletion_timestamp,@revision_is_identity_reverted,@revision_first_identit
y_reverting_revision_id,@revision_seconds_to_identity_revert,@revision_is_identity_revert,@revision_is_from_before_page_creation,@revision_tags,@row_id) # # SET wiki_db = NULLIF(@wiki_db,''), event_entity = NULLIF(@event_entity,''), event_type = NULLIF(@event_type,''), event_timestamp = NULLIF(@event_timestamp,''), event_comment = NULLIF(@event_comment,''), event_user_id = NULLIF(@event_user_id,''), event_user_text_historical = NULLIF(@event_user_text_historical,''), event_user_text = NULLIF(@event_user_text,''), event_user_blocks_historical = NULLIF(@event_user_blocks_historical,''), event_user_blocks = NULLIF(@event_user_blocks,''), event_user_groups_historical = NULLIF(@event_user_groups_historical,''), event_user_groups = NULLIF(@event_user_groups,''), event_user_is_bot_by_historical = NULLIF(@event_user_is_bot_by_historical,''), event_user_is_bot_by = NULLIF(@event_user_is_bot_by,''), event_user_is_created_by_self = NULLIF(@event_user_is_created_by_self,''), event_user_is_created_by_system = NULLIF(@event_user_is_created_by_system,''), event_user_is_created_by_peer = NULLIF(@event_user_is_created_by_peer,''), event_user_is_anonymous = NULLIF(@event_user_is_anonymous,''), event_user_registration_timestamp = NULLIF(@event_user_registration_timestamp,''), event_user_creation_timestamp = NULLIF(@event_user_creation_timestamp,''), event_user_first_edit_timestamp = NULLIF(@event_user_first_edit_timestamp,''), event_user_revision_count = NULLIF(@event_user_revision_count,''), event_user_seconds_since_previous_revision = NULLIF(@event_user_seconds_since_previous_revision,''), page_id = NULLIF(@page_id,''), page_title_historical = NULLIF(@page_title_historical,''), page_title = NULLIF(@page_title,''), page_namespace_historical = NULLIF(@page_namespace_historical,''), page_namespace_is_content_historical = NULLIF(@page_namespace_is_content_historical,''), page_namespace = NULLIF(@page_namespace,''), page_namespace_is_content = NULLIF(@page_namespace_is_content,''), 
page_is_redirect = NULLIF(@page_is_redirect,''), page_is_deleted = NULLIF(@page_is_deleted,''), page_creation_timestamp = NULLIF(@page_creation_timestamp,''), page_first_edit_timestamp = NULLIF(@page_first_edit_timestamp,''), page_revision_count = NULLIF(@page_revision_count,''), page_seconds_since_previous_revision = NULLIF(@page_seconds_since_previous_revision,''), user_id = NULLIF(@user_id,''), user_text_historical = NULLIF(@user_text_historical,''), user_text = NULLIF(@user_text,''), user_blocks_historical = NULLIF(@user_blocks_historical,''), user_blocks = NULLIF(@user_blocks,''), user_groups_historical = NULLIF(@user_groups_historical,''), user_groups = NULLIF(@user_groups,''), user_is_bot_by_historical = NULLIF(@user_is_bot_by_historical,''), user_is_bot_by = NULLIF(@user_is_bot_by,''), user_is_created_by_self = NULLIF(@user_is_created_by_self,''), user_is_created_by_system = NULLIF(@user_is_created_by_system,''), user_is_created_by_peer = NULLIF(@user_is_created_by_peer,''), user_is_anonymous = NULLIF(@user_is_anonymous,''), user_registration_timestamp = NULLIF(@user_registration_timestamp,''), user_creation_timestamp = NULLIF(@user_creation_timestamp,''), user_first_edit_timestamp = NULLIF(@user_first_edit_timestamp,''), revision_id = NULLIF(@revision_id,''), revision_parent_id = NULLIF(@revision_parent_id,''), revision_minor_edit = NULLIF(@revision_minor_edit,''), revision_deleted_parts = NULLIF(@revision_deleted_parts,''), revision_deleted_parts_are_suppressed = NULLIF(@revision_deleted_parts_are_suppressed,''), revision_text_bytes = NULLIF(@revision_text_bytes,''), revision_text_bytes_diff = NULLIF(@revision_text_bytes_diff,''), revision_text_sha1 = NULLIF(@revision_text_sha1,''), revision_content_model = NULLIF(@revision_content_model,''), revision_content_format = NULLIF(@revision_content_format,''), revision_is_deleted_by_page_deletion = NULLIF(@revision_is_deleted_by_page_deletion,''), revision_deleted_by_page_deletion_timestamp = 
NULLIF(@revision_deleted_by_page_deletion_timestamp,''), revision_is_identity_reverted = NULLIF(@revision_is_identity_reverted,''), revision_first_identity_reverting_revision_id = NULLIF(@revision_first_identity_reverting_revision_id,''), revision_seconds_to_identity_revert = NULLIF(@revision_seconds_to_identity_revert,''), revision_is_identity_revert = NULLIF(@revision_is_identity_revert,''), revision_is_from_before_page_creation = NULLIF(@revision_is_from_before_page_creation,''), revision_tags = NULLIF(@revision_tags,''), row_id = NULLIF(@row_id,''); # ``` # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### get query to clipboard for pasting into mysql CLI # - gen = (i for i in range(len(years))) i = next(gen) i, years[i] pyperclip.copy(load_data_query_list[i]) # multiquery # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## Connect to SQLAlchemy # + [markdown] tags=[] # #### imports # - import mysql.connector as mysql import sqlalchemy # + [markdown] tags=[] # ### login # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### initialize mariadb # - # ```bash # # mysql --user=root --password=<PASSWORD> # mysql -p # ``` # + [markdown] tags=[] # #### get credentials into notebook # - mysql_user = 'bhrdwj' # mysql_pass = input(f'Enter the MySQL password for user {mysql_user}: ') # + [markdown] tags=[] # #### connect to database # + host='localhost'; user=mysql_user; passwd=mysql_pass; dbname='jawiki'; cxn = mysql.connect(host=host,user=user,passwd=passwd, database=dbname) cur = cxn.cursor() connection_str = 'mysql+mysqlconnector://'+user+':'+passwd+'@'+host+'/'+dbname # removed this after host +':'+dbport try: engine = sqlalchemy.create_engine(connection_str) conn = engine.connect() except Exception as e: print('Database connection error - check creds') print(e) # + [markdown] tags=[] # #### get ```tablenames``` # - metadata = sqlalchemy.MetaData(conn) metadata.reflect() current_tables = list(metadata.tables.keys()) current_tables # + 
[markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## Load all tables to mariadb # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ### setup # + [markdown] tags=[] # #### define function ```jpr_sql``` # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### jpr_sql # - def jpr_sql(query, conn=conn): try: a = conn.execute(query) except Exception as e: print('Printing execute error:') print(e) return None try: return a.fetchall() except Exception as e: print('Printing results error:') print(e) print('Printing result') return None # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### check connection # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### reconnect to database # + host='localhost'; user=mysql_user; passwd=<PASSWORD>; dbname='jawiki'; cxn = mysql.connect(host=host,user=user,passwd=<PASSWORD>, database=dbname) cur = cxn.cursor() connection_str = 'mysql+mysqlconnector://'+user+':'+passwd+'@'+host+'/'+dbname # removed this after host +':'+dbport try: engine = sqlalchemy.create_engine(connection_str) conn = engine.connect() except: print('Database connection error - check creds') # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### check it # + current_tables = jpr_sql(f'show tables;') current_tables = [i[0] for i in current_tables] display(current_tables) jpr_sql(f'select * from {current_tables[0]} limit 10;'); jpr_sql(f'desc {current_tables[0]};'); # + [markdown] tags=[] # ### babysit the load data loop # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### before starting # - jpr_sql('set global max_error_count=100000;') jpr_sql(""" SELECT VARIABLE_NAME, SESSION_VALUE, GLOBAL_VALUE FROM INFORMATION_SCHEMA.SYSTEM_VARIABLES WHERE VARIABLE_NAME IN ('max_error_count');""") gen = (i for i in range(len(years))) # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### loop # - i = next(gen) i, years[i] # + tags=[] active="" # drop_table_query_list[i] # - jpr_sql(drop_table_query_list[i]) # + tags=[] 
active="" # create_table_query_list[i] # - jpr_sql(create_table_query_list[i]) jpr_sql('show tables;') # + tags=[] active="" # load_data_query_list[i] # - pyperclip.copy(load_data_query_list[i]) # multiquery # *paste and run into mariadb cli* # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # #### check # - years[i] jpr_sql('show tables;') jpr_sql(f'select page_id from t{years[i]} limit 10;') jpr_sql(f'desc t{years[i]};'); jpr_sql(f'select count(*) from t{years[i]};') # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## Restrict user bhrdwj to read-only access to raw data # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ### revoke # - current_tables = jpr_sql(f'show tables;') current_tables = [i[0] for i in current_tables] restricted_tables = current_tables restricted_user = 'bhrdwj' print('\n'.join([f"REVOKE ALL ON jawiki.{table} FROM {user};" for table in current_tables])) # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ### grant # - 'GRANT SELECT, SHOW VIEW, PROCESS, REPLICATION CLIENT ON *.* TO bhrdwj;' # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## EDA for adjusting ```load data local infile``` settings # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### ```revision_minor_edit``` # - # ```sql # select revision_minor_edit, min(row_id), count(*) # from ja_hist # group by revision_minor_edit order by row_id; # ``` # + active="" # +---------------------+-------------+----------+ # | revision_minor_edit | min(row_id) | count(*) | # +---------------------+-------------+----------+ # | false | 1 | 6196406 | # | true | 5 | 2132443 | # | NULL | 17 | 710853 | # | text | 7655907 | 1 | # | 177 | 8572611 | 1 | # | 360 | 8572647 | 1 | # | 1 | 8572651 | 1 | # | 237 | 8572713 | 1 | # | 629 | 8572741 | 1 | # | 136 | 8572820 | 1 | # | 168 | 8573015 | 1 | # +---------------------+-------------+----------+ # 11 rows in set (13.258 sec) # # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### 
```user_is_anonymous``` # - # ```sql # select user_is_anonymous, min(row_id), count(*) # from ja_hist # group by user_is_anonymous order by row_id; # ``` # + active="" # +-------------------+-------------+----------+ # | user_is_anonymous | min(row_id) | count(*) | # +-------------------+-------------+----------+ # | NULL | 1 | 8802062 | # | false | 17 | 237640 | # | 53505764 | 8572611 | 1 | # | 53505797 | 8572647 | 1 | # | 53505801 | 8572651 | 1 | # | 53505858 | 8572713 | 1 | # | 53505884 | 8572741 | 1 | # | 53505960 | 8572820 | 1 | # | 53506139 | 8573015 | 1 | # | 2014-11-13 | 8573733 | 1 | # +-------------------+-------------+----------+ # 10 rows in set (11.623 sec) # # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### ```user_text_historical``` # - # ```sql # select user_text_historical, min(row_id), count(*) # from ja_hist # where user_text_historical regexp '^[0-9]*$' # group by user_text_historical order by row_id; # ``` # + active="" # +----------------------+-------------+----------+ # | user_text_historical | min(row_id) | count(*) | # +----------------------+-------------+----------+ # | 80686 | 1719492 | 1 | # | 560889223 | 1816793 | 1 | # | 8596 | 2616687 | 1 | # | 67781530 | 3526969 | 1 | # | 930310 | 5571549 | 1 | # | 888344 | 6086476 | 1 | # | 555 | 6933543 | 1 | # | 1989 | 8755251 | 1 | # | 9114619 | 8914891 | 1 | # +----------------------+-------------+----------+ # 9 rows in set (9.313 sec) # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### ```revision_content_model``` # + active="" # MariaDB [ja_hist]> select revision_content_model, min(row_id), count(*) from ja_hist group by revision_content_model order by row_id; # +------------------------+-------------+----------+ # | revision_content_model | min(row_id) | count(*) | # +------------------------+-------------+----------+ # | NULL | 1 | 9039683 | # | false | 44048 | 27 | # +------------------------+-------------+----------+ # 2 rows in set (11.401 sec) # + [markdown] 
jp-MarkdownHeadingCollapsed=true tags=[] # ##### ```user_is_bot_by_historical``` # + active="" # MariaDB [ja_hist]> select user_is_bot_by_historical, min(row_id), count(*) from ja_hist group by user_is_bot_by_historical order by row_id; # +---------------------------+-------------+----------+ # | user_is_bot_by_historical | min(row_id) | count(*) | # +---------------------------+-------------+----------+ # | NULL | 1 | 9039370 | # | false | 43899 | 4 | # | name | 255483 | 314 | # | name,group | 875311 | 18 | # | group | 3511631 | 3 | # | true | 8573733 | 1 | # +---------------------------+-------------+----------+ # 6 rows in set (11.498 sec) # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ##### ```page_revision_count``` # + active="" # MariaDB [ja_hist]> select page_revision_count from ja_hist group by length(page_revision_count); # +---------------------+ # | page_revision_count | # +---------------------+ # | NULL | # | 7 | # | 12 | # | 109 | # | 1835 | # | 10000 | # | 694801 | # | 3464244 | # | 22929404 | # +---------------------+ # 9 rows in set (12.684 sec) # # - # # END
notebooks/1.15-sfb-history-tables-to-mariadb.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import uuid
import json

# Module-wide counters: every ROI and every cell in the final layout gets a
# unique, monotonically increasing string id.
roiIndex = 1
cellIndex = 1


def get_annotation(filename):
    """Load a VoTT export JSON file and return its list of tagged regions.

    Note: the original called f.close() inside the `with` block; the context
    manager already closes the file, so the explicit close is dropped.
    """
    with open(filename) as f:
        data = json.load(f)
    return data['regions']


def get_rois(regions, tagGroup, checkForExtraction):
    """Collect ROI dicts for every region whose first tag belongs to one tag group.

    Parameters:
        regions: VoTT region dicts (must carry 'tags' and 'boundingBox').
        tagGroup: tag prefix selecting this group's regions.
        checkForExtraction: tags holding hand-written digits; their ROIs are
            extracted with NUMERIC_CLASSIFICATION, every other ROI is an OMR
            bubble (CELL_OMR).

    Increments the global roiIndex once per emitted ROI.
    """
    global roiIndex
    rois = []
    index = 0
    for region in regions:
        tag = region['tags'][0]
        if not tag.startswith(tagGroup):
            continue
        if tag in checkForExtraction:
            extractionValue = "NUMERIC_CLASSIFICATION"
        else:
            extractionValue = "CELL_OMR"
        bbox = region['boundingBox']
        top = int(bbox['top'])
        left = int(bbox['left'])
        rois.append({
            "annotationTags": tag,
            "extractionMethod": extractionValue,
            "roiId": str(roiIndex),
            "index": index,
            "rect": {
                "top": top,
                "left": left,
                "bottom": top + int(bbox['height']),
                "right": left + int(bbox['width'])
            }
        })
        index = index + 1
        roiIndex = roiIndex + 1
    return rois


def get_cells(regions, tagGroups, formatLookup, extraction_tags=None):
    """Build one layout cell per tag group.

    Parameters:
        regions: VoTT region dicts.
        tagGroups: ordered tag-group prefixes; the order defines render indices.
        formatLookup: tag group -> display label; missing entries map to "".
        extraction_tags: tags needing numeric classification.  Defaults to the
            module-level `checkForExtraction`, so the original three-argument
            call keeps working (previously this name was read as an undeclared
            global).

    Increments the global cellIndex once per cell.
    """
    global cellIndex
    if extraction_tags is None:
        extraction_tags = checkForExtraction
    cells_data = []
    renderIndex = 1
    for tagGroup in tagGroups:
        # dict.get with a default replaces the original try/except KeyError.
        formatValue = formatLookup.get(str(tagGroup), "")
        cells_data.append({
            "cellId": str(cellIndex),
            "rois": get_rois(regions, tagGroup, extraction_tags),
            "render": {
                "index": renderIndex
            },
            "format": {
                "name": tagGroup.rstrip("_"),
                "value": formatValue
            },
            "validate": {
                "regExp": ""
            }
        })
        renderIndex = renderIndex + 1
        cellIndex = cellIndex + 1
    return cells_data


def get_layout(cells):
    """Wrap the cell list in the top-level layout envelope."""
    return {
        "layout": {
            "version": "1.0",
            "name": "UP PAT Exam Sheet Form",
            "cells": cells
        }
    }


def pp_json(json_thing, sort=True, indents=4):
    """Pretty-print a JSON string or an already-parsed JSON-compatible object."""
    if isinstance(json_thing, str):
        json_thing = json.loads(json_thing)
    print(json.dumps(json_thing, sort_keys=sort, indent=indents))
    return None


# -

# +
def _question_band(q):
    """Band suffix encoded in the tag names: Q1-Q3 -> 1, Q4-Q7 -> 2, Q8-Q13 -> 3."""
    if q <= 3:
        return 1
    if q <= 7:
        return 2
    return 3


# One block per student row: the roll-number id tag followed by the 13
# question tags.  Generated instead of hand-typed; this also fixes the typo in
# the original list where row 1 contained ROLLNUMBER2_Q10_3 (duplicated from
# row 2) instead of ROLLNUMBER1_Q10_3.
tagGroups = []
for i in range(1, 9):
    tagGroups.append("ROLLNUMBERID{}".format(i))
    for q in range(1, 14):
        tagGroups.append("ROLLNUMBER{}_Q{}_{}".format(i, q, _question_band(q)))

# Hindi display labels keyed by tag ('छात्र आईडी' = "student id").  Two entries
# of the original hand-written table carried accidental trailing spaces; the
# generated labels are normalised.
formatLookup = {}
for i in range(1, 9):
    formatLookup["ROLLNUMBERID{}".format(i)] = 'छात्र आईडी'
    for q in range(1, 14):
        formatLookup["ROLLNUMBER{}_Q{}_{}".format(i, q, _question_band(q))] = \
            'छात्र आईडी{} Q{}'.format(i, q)

# The seven hand-written digit boxes of each student's roll-number field.
checkForExtraction = ["ROLLNUMBERID{}_{}".format(i, j)
                      for i in range(1, 9) for j in range(1, 8)]
# -

# Guarded so the module can be imported (e.g. by tests) without requiring the
# VoTT export file; inside a notebook __name__ is "__main__", so behaviour when
# the notebook itself is executed is unchanged.
if __name__ == "__main__":
    regions = get_annotation("pat_UP_vottraw.json")
    cells = get_cells(regions, tagGroups, formatLookup)
    pp_json(get_layout(cells), False)
specs/v1/jupyter-notebook/transform_pat_UP_vott_to_roi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Attribute Inference Attack (AIA) Dataset E

# import libraries
import warnings

# NOTE(review): silencing all warnings globally hides real problems (e.g.
# pandas SettingWithCopyWarning); kept to preserve the notebook's output.
warnings.filterwarnings("ignore")

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os

print('Libraries imported!!')

# +
# define directory of functions and actual directory
HOME_PATH = ''  # home path of the project
FUNCTIONS_DIR = 'EVALUATION FUNCTIONS/PRIVACY'
ACTUAL_DIR = os.getcwd()

# change directory to functions directory so the helper module is importable
os.chdir(HOME_PATH + FUNCTIONS_DIR)

# import functions for membership attack simulation
from attribute_inference import DataPreProcessor
from attribute_inference import RiskAttributesPredictors
from attribute_inference import identified_attributes_percentage

# change directory back to the working directory
os.chdir(ACTUAL_DIR)
print('Functions imported!!')
# -

# ## 1. Read real and synthetic datasets
# In this part real and synthetic datasets are read.

# +
# Define global variables
DATA_TYPES = ['Real', 'GM', 'SDV', 'CTGAN', 'WGANGP']
SYNTHESIZERS = ['GM', 'SDV', 'CTGAN', 'WGANGP']
FILEPATHS = {'Real': HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/E_PimaIndiansDiabetes_Real_Train.csv',
             'GM': HOME_PATH + 'SYNTHETIC DATASETS/GM/E_PimaIndiansDiabetes_Synthetic_GM.csv',
             'SDV': HOME_PATH + 'SYNTHETIC DATASETS/SDV/E_PimaIndiansDiabetes_Synthetic_SDV.csv',
             'CTGAN': HOME_PATH + 'SYNTHETIC DATASETS/CTGAN/E_PimaIndiansDiabetes_Synthetic_CTGAN.csv',
             'WGANGP': HOME_PATH + 'SYNTHETIC DATASETS/WGANGP/E_PimaIndiansDiabetes_Synthetic_WGANGP.csv'}

categorical_columns = ['Outcome']
qid_columns = ['Age', 'BMI', 'Pregnancies']        # quasi-identifier attributes
risk_attributes = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin',
                   'DiabetesPedigreeFunction', 'Outcome']

# per-dataset frames: full data / quasi-identifiers only / risk attributes only
# (the original cell initialised `data` twice; once is enough)
data = dict()
data_qid = dict()
data_risk = dict()
# -

# iterate over all datasets filepaths and read each dataset
for name, path in FILEPATHS.items():
    data[name] = pd.read_csv(path)
    # data[name] = data[name].drop(['id'], axis=1)
    # encode categorical columns as integer codes
    for col in categorical_columns:
        data[name][col] = data[name][col].astype('category').cat.codes
    data_qid[name] = data[name][qid_columns]
    data_risk[name] = data[name][risk_attributes]

data

data_qid

data_risk

# ## 2. Train models to predict attributes values

# +
# initialize classifiers
categorical_columns = None
numerical_columns = ['Age', 'BMI', 'Pregnancies']
categories = None

classifiers_all = dict()
data_preprocessors = dict()
attributes_models_all = dict()

# one preprocessor + one set of attribute predictors per synthesizer,
# each trained on that synthesizer's synthetic data
for name in SYNTHESIZERS:
    print(name)
    data_preprocessors[name] = DataPreProcessor(categorical_columns,
                                                numerical_columns, categories)
    x_train = data_preprocessors[name].preprocess_train_data(data_qid[name])
    attributes_models_all[name] = RiskAttributesPredictors(data_risk[name], qid_columns)
    attributes_models_all[name].train_attributes_prediction_models(x_train)
    print('####################################################')
# -

# ## 3. Read Real Data and Find Combinations

# read real dataset
real_data = pd.read_csv(HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/E_PimaIndiansDiabetes_Real_Train.csv')
real_data['Outcome'] = real_data['Outcome'].astype('category').cat.codes
# NOTE(review): unseeded shuffle — the sampled half differs between runs
real_data = real_data.sample(frac=1)
real_data = real_data[0:int(len(real_data) * 0.5)]
real_data

# Unique quasi-identifier combinations the attacker would query with.
# De-duplicated on a fresh frame instead of the original in-place call on a
# column slice, which relied on SettingWithCopy behaviour.
combinations = real_data[qid_columns].drop_duplicates(keep='first')
combinations

# +
results_data_all = dict()
columns_results = ['Age', 'BMI', 'Pregnancies', 'Glucose_rmse', 'BloodPressure_rmse',
                   'SkinThickness_rmse', 'Insulin_rmse',
                   'DiabetesPedigreeFunction_rmse', 'Outcome_accuracy']

for name in SYNTHESIZERS:
    print(name)
    result_rows = []
    for comb in combinations.values:
        # all real records sharing this quasi-identifier combination
        batch = real_data.loc[(real_data['Age'] == comb[0])
                              & (real_data['BMI'] == comb[1])
                              & (real_data['Pregnancies'] == comb[2])]
        row_data = (batch[qid_columns].values[0]).tolist()
        print(row_data)
        x_test = data_preprocessors[name].preprocess_test_data(batch[qid_columns])
        print(x_test.shape)
        row = attributes_models_all[name].evaluate_attributes_prediction_models(
            x_test, batch, columns_results)
        result_rows.append(row)
    # DataFrame.append is deprecated (removed in pandas 2.x); concatenate once
    # instead of growing the frame row by row.  Assumes `row` is a DataFrame
    # row, as the original append usage implies — TODO confirm.
    results_data_all[name] = pd.concat(
        [pd.DataFrame(columns=columns_results)] + result_rows)
    print('#######################################')
# -

results_data_all

# ## 5. Visualize obtained results

results_columns = ['Glucose_rmse', 'BloodPressure_rmse', 'SkinThickness_rmse',
                   'Insulin_rmse', 'DiabetesPedigreeFunction_rmse',
                   'Outcome_accuracy']
len(results_columns)

for name in SYNTHESIZERS:
    identified_attributes = identified_attributes_percentage(results_data_all[name],
                                                             results_columns)
    print(name, ' : ', identified_attributes)

# +
# stack each metric column across synthesizers for side-by-side boxplots
boxplots_data = dict()
for c in results_columns:
    boxplots_data[c] = results_data_all[SYNTHESIZERS[0]][c]
    for i in range(1, len(SYNTHESIZERS)):
        boxplots_data[c] = np.column_stack((boxplots_data[c],
                                            results_data_all[SYNTHESIZERS[i]][c]))
# +
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(12, 2.5 * 2))
axs_idxs = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]]
idx = dict(zip(results_columns, axs_idxs))
for c in results_columns:
    ax = axs[idx[c][0], idx[c][1]]
    ax.boxplot(boxplots_data[c])
    ax.set_title(c)
    ax.set_xticklabels(SYNTHESIZERS)
for ax in axs.ravel():
    ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha="right")
plt.tight_layout()
fig.savefig('INFERENCE TESTS RESULTS/ATTRIBUTES INFERENCE TESTS RESULTS.svg',
            bbox_inches='tight')
# -
notebooks/Dataset E - Pima Indians Diabetes/Synthetic data evaluation/Privacy/3_Attribute_Inference_Test_Dataset E.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sepal_ui import sepalwidgets as sw #hack of the about_tile to create a licence tile licence_tile = sw.TileAbout('utils/EULA.md') licence_tile._metadata['mount_id'] = "licence_tile" licence_tile
licence_ui.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip install -r ../requirements.txt from em_examples.DCWidgetPlate2_5D import plate_app from IPython.display import display # %matplotlib inline from matplotlib import rcParams rcParams['font.size'] = 16 # # Purpose # # For a direct current resistivity (DCR) survey, currents are injected into the earth, and flow. # Depending upon the subsurface conductivity structures current flow in the earth will be distorted and charges will accumulate on interfaces between regions of different conductivites. These changes can be measurable at the sufurface electrodes. # # Here, we focus on a plate target embedded in a halfspace, and investigate what is happening in the earth when static currents are injected. Different from the sphere case, which is symmetric, "coupling" between the Tx, target (conductor or resistor), and Rx will be significanlty different with various scenarios and geometries. # Using this app we can investigate what effect different targets and survey geometries have on the currents, electric fields, potentials, charges, and sensitivities. # # Set up # # <img src="https://github.com/geoscixyz/em_apps/blob/master/images/DC_PlateApp_Setup.png?raw=true"> # # Questions # # - Is the potential difference measured by a dipole over a conductive (/resisitive) target higher or lower compared to the half-space reference? # - how do the field lines bend in presence of a conductive (/resistive) target? # - Compared to the positive and negative sources (A and B), how are oriented the positive and negative accumulated charges around a conductive (/resistive) target? # - How would you describe the secondary fields pattern? Does it remind you of the response of an object fundamental to electromagnetics? 
# # Plate app # # ## Parameters: # # - **survey**: Type of survey # - **A**: (+) Current electrode location # - **B**: (-) Current electrode location # - **M**: (+) Potential electrode location # - **N**: (-) Potential electrode location # - **dx**: width of plate # - **dz**: height/thickness of plate # - **xc**: x location of plate center # - **zc**: z location of plate center # - **$\theta$**: rotation angle of plate from the horizontal # - **$\rho_1$**: Resistivity of the halfspace # - **$\rho_2$**: Resistivity of the plate # - **Field**: Field to visualize # - **Type**: which part of the field # - **Scale**: Linear or Log Scale visualization # # ### **Do not forget to hit Run Interact to update the figure after you made modifications** app = plate_app();
LabExercise_1/.ipynb_checkpoints/DC_Plate2_5D-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd

# Store-location dataset: candidate revenue drivers plus observed revenue.
df = pd.read_csv('location_rev.csv')
df.head()
# -

# Feature matrix (location characteristics) and regression target.
X = df[['num_competitors',
        'median_income',
        'num_loyalty_members',
        'population_density',
        'location_age']]
y = df['revenue']

# +
from sklearn.model_selection import train_test_split

# Fixed random_state makes the split (and the fitted model) reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=100)

# +
from sklearn.linear_model import LinearRegression

model = LinearRegression()
model.fit(X_train, y_train)
# -

model.coef_

model.intercept_

# +
# Predict revenue for a single hypothetical location.
single_location = pd.DataFrame({
    'num_competitors': [3],
    'median_income': [30000],
    'num_loyalty_members': [1200],
    'population_density': [2000],
    'location_age': [10]
})

model.predict(single_location)
# -

X.mean()

# +
import matplotlib.pyplot as plt
# %matplotlib inline

# Predicted vs. actual revenue; points on the diagonal are perfect predictions.
# Predictions are computed once and reused below for the correlation test.
y_pred = model.predict(X_test)
plt.scatter(y_pred, y_test)
plt.xlabel('Model Predictions')
plt.ylabel('True Value')
# The original passed both the 'k-' format string (black) and color='r',
# which conflict; a single 'r-' format gives the intended red reference line.
plt.plot([0, 100000], [0, 100000], 'r-')
plt.show()

# +
# scipy.stats.stats is a private module path (deprecated); the public import
# location is scipy.stats.
from scipy.stats import pearsonr

pearsonr(y_pred, y_test)
# -
Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/MARKETING_DATA_SCIENCE/01_Data_Science_for_Marketing_Analytics/Part_05/02_Activity_9.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Training a Convolutional Neural Network # # In this exercise, you will have to create a CNN model and then train it on the CIFAR10 dataset. The data loading and model training, testing logic are already included in your code. Infact, they are the same as for the Feed Forward Neural Network you built in the last exercises. # # Here are the steps you need to do to complete this exercise: # # 1. In Starter Code below, finish the `Model()` class. These should contain the code that defines the layers of your model in the `__init__()` function and the model execution in the `forward()` function. # 2. Add a cost function and optimizer. You can use the same cost functions and optimizer from the previous exercise. # 3. Run the cells to make sure that the model is training properly. # # In case you get stuck, you can look at the solution by clicking the jupyter symbol at the top left and navigating to `training_a_cnn_solution.ipynb`. # # ## Try It Out! # - Play around with the number of layers and filters in your model. How does the accuracy change? How long does it take to train the model? # - Try to train your model with some other types of convolutional layers like depthwise separable convolutions # - Can you create the same network in TensorFlow as well? # # ## Package Installations # **NOTE**: Everytime you start the GPU, run this before your code. # !pip install ipywidgets # !pip list from IPython.core.display import HTML HTML("<script>Jupyter.notebook.kernel.restart()</script>") # ## Starter Code # # **Remember** to DISABLE the GPU when you are not training. 
# +
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision
from torchvision import transforms


def train(model, train_loader, cost, optimizer, epoch):
    """Train `model` for `epoch` epochs, printing average loss and accuracy."""
    model.train()
    for e in range(epoch):
        running_loss = 0
        correct = 0
        for data, target in train_loader:
            optimizer.zero_grad()
            # NOTE: Notice how we are not changing the data shape here.
            # This is because CNNs expect the 3-dimensional (C, H, W) input.
            pred = model(data)
            loss = cost(pred, target)
            # .item() detaches the scalar; accumulating the tensor itself
            # (as the original did) keeps every batch's autograd graph alive.
            running_loss += loss.item()
            loss.backward()
            optimizer.step()
            pred = pred.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
        print(f"Epoch {e}: Loss {running_loss/len(train_loader.dataset)}, "
              f"Accuracy {100*(correct/len(train_loader.dataset))}%")


def test(model, test_loader):
    """Report classification accuracy of `model` on the test loader."""
    model.eval()
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            # Same 3-dimensional input as in training — no flattening.
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    print(f'Test set: Accuracy: {correct}/{len(test_loader.dataset)} = '
          f'{100*(correct/len(test_loader.dataset))}%)')


class Model(nn.Module):
    """LeNet-style CNN for CIFAR-10 (3x32x32 inputs, 10 logits out).

    Shape trace: 3x32x32 -conv1(5)-> 6x28x28 -pool-> 6x14x14
                 -conv2(5)-> 16x10x10 -pool-> 16x5x5 -> flatten 400.

    Fixes vs. the original: a third 5x5 conv + pool was applied to the 5x5
    feature map, collapsing it to 1x1 and then 0x0 (a runtime crash), and the
    first linear layer expected 20*5*5 features although no layer produced 20
    channels.  The debug shape prints in forward() are removed as well.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)      # (in channels, out channels, kernel size)
        self.pool = nn.MaxPool2d(2, 2)       # (kernel size, stride)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fcn1 = nn.Linear(16 * 5 * 5, 256)   # 400 features after flatten
        self.fcn2 = nn.Linear(256, 128)
        self.fcn3 = nn.Linear(128, 84)
        self.fcn4 = nn.Linear(84, 64)
        self.fcn5 = nn.Linear(64, 10)        # 10 CIFAR-10 classes (raw logits)

    def forward(self, x):
        # Convolutional feature extractor: conv -> ReLU -> 2x2 max-pool, twice.
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten all but the batch dimension for the fully connected head.
        x = torch.flatten(x, 1)
        x = F.relu(self.fcn1(x))
        x = F.relu(self.fcn2(x))
        x = F.relu(self.fcn3(x))
        x = F.relu(self.fcn4(x))
        # No softmax: CrossEntropyLoss consumes raw logits.
        return self.fcn5(x)


batch_size = 32
epoch = 1

# Light augmentation for training; normalization maps pixels to [-1, 1].
training_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

testing_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=training_transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=True)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=testing_transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=False)

model = Model()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

train(model, trainloader, criterion, optimizer, epoch)
test(model, testloader)
# -
Training_CNN/training_a_cnn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + id="J1dNz-eQmvpl"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn as sk

# + colab={"base_uri": "https://localhost:8080/"} id="OvHbiXAEq2ck" outputId="23db62f2-b609-46f2-ce61-48fa9791f553"
# Dictionary keys may be any hashable type: here an int, a string,
# another int mapped to a list, and a tuple.
d = {
    1: 10,
    'abc': 'def',
    0: [0, 1, 2],
    (0, 1): 'a',
}
print(d)

# + id="IOaRjLCFuTfH" outputId="8d482232-784e-4104-eb18-0db029b14c38" colab={"base_uri": "https://localhost:8080/"}
# Update every entry: bump the int, double the string, append to the list
# and drop its first element, then remove the tuple-keyed item entirely.
d[1] += 5
d['abc'] *= 2
d[0].append(3)
del d[0][0]
d.pop((0, 1))
print(d)
2021 Осенний семестр/Практическое задание 3_5/Бевз_Задание_3.5.4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import zipfile import requests import os import io from io import BytesIO from collections import Counter import pandas as pd import matplotlib.pyplot as plt import matplotlib.style as style from matplotlib.cm import ScalarMappable import seaborn as sns import skimage from skimage import io from spectral import * import rasterio from rasterio.plot import show from rasterio.warp import calculate_default_transform, reproject from rasterio.enums import Resampling from rasterio.coords import BoundingBox from rasterio import windows from rasterio import warp import geopandas as gpd from shapely.geometry import box import folium import branca import random import pickle from tqdm import tqdm import time import datetime import shutil from sklearn.preprocessing import OneHotEncoder, MinMaxScaler from sklearn.metrics import confusion_matrix import scipy as sp import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.preprocessing import image_dataset_from_directory import tensorflow_addons as tfa from tensorflow_addons.metrics import MultiLabelConfusionMatrix import cv2 from keras_applications.imagenet_utils import _obtain_input_shape from keras import backend as K from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Activation, concatenate, Dropout, Flatten, Dense, GlobalAveragePooling2D, GlobalMaxPooling2D from tensorflow.keras.callbacks import ModelCheckpoint, Callback, ReduceLROnPlateau from tensorflow.keras.applications.resnet import ResNet50 from tensorflow.keras.models import Model from tensorflow.keras.utils import get_source_inputs from tensorflow.keras.utils import get_file import tempfile class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, 
self).__init__(*args, **kwargs) self.__dict__ = self def select_channels(string): '''Channels selector: Input: 'all' or list among following: 'Blue', 'Green', 'Red', 'Red edge 1', 'Red edge 2', 'Red edge 3', 'NIR', 'Red edge 4', 'SWIR 1', 'SWIR 2''' channels = [] if string == 'all': channels = list(np.arange(10)) else: _dict = { 'Blue': 0, 'Green': 1, 'Red': 2, 'Red edge 1': 3, 'Red edge 2': 4, 'Red edge 3': 5, 'NIR': 6, 'Red edge 4': 7, 'SWIR 1': 8, 'SWIR 2': 9 } channels = list(map(_dict.get, string)) return channels def params( extension, epochs, pcg_dataset=1, batch_size=128, size=64, parse_bands_verbose=False, inspect_raster=False, channels='all', preprocess=False, horizontal_flip=False, vertical_flip=False, rotation_range=0, shear_range=0, seed=random.seed(123), columns=[ 'AnnualCrop', 'Forest', 'HerbaceousVegetation', 'Highway', 'Industrial', 'Pasture', 'PermanentCrop', 'Residential', 'River', 'SeaLake' ], loss_type='categorical_crossentropy', opt_type='Adam', learning_rate=1e-4, momentum=0.9, regularization=False, rlronplateau=False, checkpoint=True, no_imbalanced=True, trainable='Full', pcg_unfreeze=0, data_path='./data', reports_path='./reports', tif_path='./data/raw/eurosat/ds/images/remote_sensing/otherDatasets/sentinel_2/tif', jpg_path='./data/raw/eurosat/2750'): ''' extension: jpg or tif channels: 'all' means all channels in reference table; as alternative, select channels by name, i.e.: channels = ['Blue','Green', 'Red', 'NIR', 'SWIR2'] B02 - Blue 10 490 B03 - Green 10 560 B04 - Red 10 665 B05 - Red edge 1 20 705 B06 - Red edge 2 20 740 B07 - Red edge 3 20 783 B08 - NIR 10 842 B08A - Red edge 4 20 865 B11 - SWIR 1 20 1610 B12 - SWIR 2 20 2190 ''' raw_data_path = os.path.join(data_path, 'raw') data_path_jpg = os.path.join(data_path, 'jpg') data_path_tif = os.path.join(data_path, 'tif') processed_path = os.path.join(data_path, 'processed') eurosat_path = os.path.join(raw_data_path, 'eurosat') assets_path = os.path.join(reports_path, 'assets') 
pickled_tif_path = os.path.join(processed_path, 'tif') reprojected_path = os.path.join(processed_path, 'reprojected') reprojected_path_tif = os.path.join(reprojected_path, 'tif') reports_maps_path = os.path.join(reports_path, 'maps') reports_map_eda_path = os.path.join(start_params.reports_maps_path, 'eda') reports_map_classifier_path = os.path.join(start_params.reports_maps_path, 'classifier') train_data_dir_jpg = os.path.join(data_path_jpg, 'train') val_data_dir_jpg = os.path.join(data_path_jpg, 'val') test_data_dir_jpg = os.path.join(data_path_jpg, 'test') train_data_dir_tif = os.path.join(data_path_tif, 'train') val_data_dir_tif = os.path.join(data_path_tif, 'val') test_data_dir_tif = os.path.join(data_path_tif, 'test') log_folder = os.path.join(reports_path, 'logs') log_gradient_tape_path = os.path.join(log_folder, 'gradient_tape') log_cm_path = os.path.join(log_folder, 'cm') weights_path = os.path.join(data_path, 'weights') num_classes = len(columns) channels = select_channels(channels) if extension == 'jpg': num_channels = 3 elif extension == 'tif': num_channels = len(channels) else: print( 'Error extension format: specify correct exentsion, either \'jpg\' or \'tif\'' ) subdirs_raw = os.listdir(jpg_path) filenames_raw = [] for subdir in subdirs_raw: imgs_raw = os.listdir(os.path.join(jpg_path, subdir)) random_sampled = random.sample(imgs_raw, 2000) if no_imbalanced: sub_path_imgs = [ os.path.join(subdir, img) for img in random_sampled ] else: sub_path_imgs = [os.path.join(subdir, img) for img in imgs_raw] filenames_raw.append(sub_path_imgs) filenames = [ os.path.join(data_path_jpg, f) for sublist in filenames_raw for f in sublist if f.endswith('.jpg') ] pcg_total_files = int(pcg_dataset * len(filenames)) filenames = filenames[:pcg_total_files] train_val_files_length = int( 0.9 * len(filenames)) # 10% for testing, 90% for val and train test_files_length = len(filenames) - train_val_files_length train_files_length = int( 0.7 * train_val_files_length) # 70% for 
train, 30% for val val_files_length = train_val_files_length - train_files_length params = AttrDict({ 'num_channels': num_channels, 'extension': extension, 'num_images_train': train_files_length, 'num_images_val': val_files_length, 'num_images_test': test_files_length, 'num_classes': num_classes, 'parse_bands_verbose': parse_bands_verbose, 'inspect_raster': inspect_raster, 'channels': channels, 'num_epochs': epochs, 'learning_rate': learning_rate, 'momentum': momentum, 'checkpoint': checkpoint, 'trainable': trainable, 'pcg_dataset': pcg_dataset, 'pcg_unfreeze': pcg_unfreeze, 'preprocess': preprocess, 'horizontal_flip': horizontal_flip, 'vertical_flip': vertical_flip, 'rotation_range': rotation_range, 'shear_range': shear_range, 'no_imbalanced': no_imbalanced, 'batch_size': batch_size, 'size': size, 'seed': seed, 'columns': columns, 'regularization': regularization, 'rlronplateau': rlronplateau, 'num_classes': num_classes, 'loss_type': loss_type, 'opt_type': opt_type, 'loss_obj': loss_obj(loss_type), 'optimizer_obj': optimizer(learning_rate, momentum, opt_type), 'raw_jpg_path': jpg_path, 'raw_tif_path': tif_path, 'raw_data_path': raw_data_path, 'data_path_jpg': data_path_jpg, 'data_path_tif': data_path_tif, 'weights_path': weights_path, 'processed_path': processed_path, 'pickled_tif_path': pickled_tif_path, 'eurosat_path': eurosat_path, 'assets_path': assets_path, 'reprojected_path': reprojected_path, 'reprojected_path_tif': reprojected_path_tif, 'reports_maps_path': reports_maps_path, 'reports_map_eda_path': reports_map_eda_path, 'reports_map_classifier_path': reports_map_classifier_path, 'train_data_dir_jpg': train_data_dir_jpg, 'val_data_dir_jpg': val_data_dir_jpg, 'test_data_dir_jpg': test_data_dir_jpg, 'train_data_dir_tif': train_data_dir_tif, 'val_data_dir_tif': val_data_dir_tif, 'test_data_dir_tif': test_data_dir_tif, 'log_folder': log_folder, 'log_gradient_tape_path': log_gradient_tape_path, 'log_cm_path': log_cm_path, 'num_classes': len(columns) }) return 
def resample(path):
    """Read a Sentinel-2 .tif and return all 13 bands resampled to 2x resolution.

    FIX: the original computed the resampled bands and then `return`ed nothing,
    discarding the work despite the docstring promising the upsampled bands.

    Args:
        path: path to a 13-band Sentinel-2 raster.

    Returns:
        Tuple of 13 numpy arrays (b01..b12), bilinearly upsampled
        from 20m to 10m ground resolution (2x in each axis).
    """
    upscale_factor = 2  # upsample channels to 2x
    image = rasterio.open(path)
    b01, b02, b03, b04, b05, b06, b07, b08, b08A, b09, b10, b11, b12 = image.read(
        out_shape=(image.count, int(image.height * upscale_factor),
                   int(image.width * upscale_factor)),
        resampling=Resampling.bilinear)
    # Return the resampled bands instead of dropping them.
    return b01, b02, b03, b04, b05, b06, b07, b08, b08A, b09, b10, b11, b12


def parse_bands(img, params):
    '''Parse tif Sentinel-2A images into 13 bands.

    Returns:
        coord_bb: coordinate bounding box [left, bottom, right, top]
                  (geo coordinates [left-long, bottom-lat, right-long, top-lat])
        channels: for .tif, [b02, b03, b04, b05, b06, b07, b08, b08A, b11, b12]
                  (b01, b09, b10 dropped — intended for atmosphere study);
                  for .jpg, the three [b, g, r] planes.

    Raises:
        ValueError: if the file is neither .tif nor .jpg (the original fell
        through and hit a NameError on the unbound `channels`).
    '''
    satdat = rasterio.open(img)
    if img.split('/')[-1].endswith('.tif'):
        b01, b02, b03, b04, b05, b06, b07, b08, b08A, b09, b10, b11, b12 = satdat.read(
        )
        channels = [b02, b03, b04, b05, b06, b07, b08, b08A, b11, b12
                    ]  # filter out b01, b09, b10 intended for atmosphere study
    elif img.split('/')[-1].endswith('.jpg'):
        b, g, r = satdat.read()
        channels = [b, g, r]
    else:
        # FIX: fail loudly instead of NameError on `channels` below.
        raise ValueError(
            'Unsupported image extension for {}; expected .tif or .jpg'.format(img))
    # Get resolution, in map units (meters)
    xres = (satdat.bounds.right - satdat.bounds.left) / satdat.width
    yres = (satdat.bounds.top - satdat.bounds.bottom) / satdat.height
    coord_bb = [
        satdat.bounds.left, satdat.bounds.bottom, satdat.bounds.right,
        satdat.bounds.top
    ]  # coordinate bounding box [left, bottom, right, top]
    if params.parse_bands_verbose:
        print('W resolution (m): {}; H resolution: {}'.format(xres, yres))
        print("Are the pixels square: {}".format(xres == yres))
        print(satdat.profile)
    return coord_bb, channels
def transform_reproj(img, params):
    '''Reproject a raster to EPSG:4326 and save it under params.reprojected_path_tif.

    Output file is named 'reproj_<original name>'. Returns the destination path.
    '''
    target_crs = 'epsg:4326'
    satdat = rasterio.open(img)
    # calculate a transform and new dimensions using our dataset's current CRS and dimensions
    transform, width, height = calculate_default_transform(
        satdat.crs, target_crs, satdat.width, satdat.height, *satdat.bounds)
    # Copy the metadata and adjust CRS, transform and dimensions for the output
    metadata = satdat.meta.copy()
    metadata.update({
        'crs': target_crs,
        'transform': transform,
        'width': width,
        'height': height
    })
    # apply the transform & metadata to perform the reprojection, band by band
    dst = os.path.join(params.reprojected_path_tif,
                       'reproj_' + img.split('/')[-1])
    with rasterio.open(dst, 'w', **metadata) as reprojected:
        for band in range(1, satdat.count + 1):
            reproject(source=rasterio.band(satdat, band),
                      destination=rasterio.band(reprojected, band),
                      src_transform=satdat.transform,
                      src_crs=satdat.crs,
                      dst_transform=transform,
                      dst_crs=target_crs)
    return dst


def inspect_raster(satdat, img):
    '''Plot an RGB preview (bands 4,3,2 rescaled from uint16) of an open raster.'''
    fig, ax = plt.subplots(1, 1, dpi=100)
    show(satdat.read([4, 3, 2]) * 255 / 65535, ax=ax)
    plt.title(img.split('/')[-1])


def mkdir(path):
    """Create `path` (including missing parents) if it does not already exist.

    FIX: dropped the dead local `new_dir`; os.makedirs(exist_ok=True) is also
    race-free compared to the check-then-create of the original.
    """
    os.makedirs(path, exist_ok=True)


def percentage(count_tags):
    """Return each dict value as a percentage of the sum of all values."""
    _sum = sum(count_tags.values())
    return [(el / _sum) * 100 for el in count_tags.values()]


def cmap_rescale(elements):
    """Min-max rescale the values of a dict or the elements of a list to [0, 1].

    Returns [] for unsupported input types (matching the original behavior).

    FIX: the list branch called `elements.values()` on a list, which raised
    AttributeError; also guards against division by zero when all values are
    equal (returns all zeros instead of crashing).
    """
    if isinstance(elements, dict):
        values = list(elements.values())
    elif isinstance(elements, list):
        values = list(elements)
    else:
        return []
    _max = max(values)
    _min = min(values)
    span = _max - _min
    if span == 0:
        return [0.0 for _ in values]
    return [(el - _min) / span for el in values]


def convert_to_hex(rgba_color):
    """Convert an RGB(A) color with float components in [0, 1] to '#RRGGBB'.

    FIX: the original only special-cased exact-zero components; values mapping
    to 1..15 produced a single hex digit and a malformed 4/5-character color
    string. Zero-padded formatting handles every component uniformly.
    """
    red, green, blue = (int(c * 255) for c in rgba_color[:3])
    return '#{:02X}{:02X}{:02X}'.format(red, green, blue)
def dirs2df(img_path):
    '''Build a DataFrame of image names/labels from a label-per-subdirectory tree.

    Expected layout::

        img_path/
            <label>/
                <label>_*.jpg | *.tif

    Args:
        img_path: root folder containing one subfolder per label.

    Returns:
        (df, img_paths): df has columns 'image_name' and 'label' (label is the
        part of the filename before the first '_'); img_paths is the parallel
        list of full paths to the images.
    '''
    dirs = os.listdir(img_path)
    dirs_path = [os.path.join(img_path, _dir) for _dir in dirs]
    img_names = []
    img_paths = []
    for _dir in dirs_path:
        # skip macOS Finder metadata entries
        if _dir.split('/')[-1] != '.DS_Store':
            for el in os.listdir(_dir):
                if el.endswith('.jpg') or el.endswith('.tif'):
                    img_names.append(el)
                    img_paths.append(os.path.join(_dir, el))
    df = pd.DataFrame.from_dict({'image_name': img_names})
    df['label'] = df['image_name'].apply(lambda x: x.split('_')[0])
    return df, img_paths


def create_filenames(df, params):
    """Split raw images into train/val/test folders and return their filenames.

    Empties the destination folders, optionally rebalances classes by sampling
    2000 images per class folder, keeps params.pcg_dataset of the shuffled
    files, splits 90/10 into (train+val)/test and the first part 70/30 into
    train/val, then copies the files into the destination directories.

    Args:
        df: DataFrame with an 'image_name' column (used to detect the format
            and to size the dataset fraction).
        params: AttrDict with the path/percentage settings built by `params()`.

    Returns:
        (train_val_test, data_filenames_img): per-set lists of destination
        paths and per-set lists of bare image names.

    Raises:
        ValueError: if the first image is neither .jpg nor .tif (the original
        hit an UnboundLocalError on `train_data_dir` instead).
    """
    # pcg_dataset = percentage of total files to use: i.e. 30% of 40479 samples = 12143 samples
    if df['image_name'].iloc[0].endswith('.jpg'):
        print('Format: jpg')
        train_data_dir = params.train_data_dir_jpg
        val_data_dir = params.val_data_dir_jpg
        test_data_dir = params.test_data_dir_jpg
        raw_data_dir = params.raw_jpg_path
        endswith = '.jpg'
    elif df['image_name'].iloc[0].endswith('.tif'):
        print('Format: tif')
        train_data_dir = params.train_data_dir_tif
        val_data_dir = params.val_data_dir_tif
        test_data_dir = params.test_data_dir_tif
        raw_data_dir = params.raw_tif_path
        endswith = '.tif'
    else:
        raise ValueError('Unsupported image format; expected .jpg or .tif')
    # empty data dirs
    data_dirs = [train_data_dir, val_data_dir, test_data_dir]
    for data_dir in data_dirs:
        for file in os.listdir(data_dir):
            os.remove(os.path.join(data_dir, file))
    # create lists of filenames for train, val, test sets
    pcg_total_files = int(params.pcg_dataset * len(df))
    subdirs_raw = os.listdir(raw_data_dir)
    filenames_raw = []
    for subdir in subdirs_raw:
        imgs_raw = os.listdir(os.path.join(raw_data_dir, subdir))
        if params.no_imbalanced:
            # FIX: only sample when rebalancing is requested — the original
            # sampled unconditionally and raised ValueError whenever a class
            # folder held fewer than 2000 images, even with balancing off.
            random_sampled = random.sample(imgs_raw, 2000)
            sub_path_imgs = [
                os.path.join(subdir, img) for img in random_sampled
            ]
        else:
            sub_path_imgs = [os.path.join(subdir, img) for img in imgs_raw]
        filenames_raw.append(sub_path_imgs)
    filenames = [
        os.path.join(raw_data_dir, f) for sublist in filenames_raw
        for f in sublist if f.endswith(endswith)
    ]
    random.seed(123)  # deterministic shuffle (random.seed returns None)
    filenames.sort()
    random.shuffle(filenames)
    filenames = filenames[:pcg_total_files]
    split_train_test = int(
        0.9 * len(filenames))  # 10% for testing, 90% for val and train
    train_filenames_raw = filenames[:split_train_test]
    test_filenames_raw = filenames[split_train_test:]
    split_train_val = int(
        0.7 * len(train_filenames_raw))  # 70% for train, 30% for val
    val_filenames_raw = train_filenames_raw[split_train_val:]
    train_filenames_raw = train_filenames_raw[:split_train_val]
    train_val_test = [
        train_filenames_raw, val_filenames_raw, test_filenames_raw
    ]
    dest_dirs = [train_data_dir, val_data_dir, test_data_dir]
    # copy images from the raw folder into train/val/test folders
    for filename_dir, dest_dir in tqdm(zip(train_val_test, dest_dirs)):
        if len(os.listdir(dest_dir)) != len(
                filename_dir):  # check if directory is empty
            for filename in filename_dir:
                shutil.copy(filename, dest_dir)
    # get lists of filenames with new path (i.e. '.data/jpg/train/img_name.jpg')
    train_filenames = []
    val_filenames = []
    test_filenames = []
    for filename_dir, dest_dir in tqdm(zip(train_val_test, dest_dirs)):
        for filename in filename_dir:
            if dest_dir == train_data_dir:
                train_filenames.append(
                    os.path.join(dest_dir, filename.split('/')[-1]))
            elif dest_dir == val_data_dir:
                val_filenames.append(
                    os.path.join(dest_dir, filename.split('/')[-1]))
            elif dest_dir == test_data_dir:
                test_filenames.append(
                    os.path.join(dest_dir, filename.split('/')[-1]))
    train_val_test = [train_filenames, val_filenames, test_filenames]
    # get bare image names for each set
    train_filenames_img = [el.split('/')[-1] for el in train_filenames_raw]
    val_filenames_img = [el.split('/')[-1] for el in val_filenames_raw]
    test_filenames_img = [el.split('/')[-1] for el in test_filenames_raw]
    data_filenames_img = [
        train_filenames_img, val_filenames_img, test_filenames_img
    ]
    print(
        'Total number of samples (train + val + test) (%d %% of original dataset) : %d'
        % (params.pcg_dataset * 100, len(filenames)))
    print('Training set - number of samples: %d' % len(train_filenames_raw))
    print('Validation set - number of samples: %d' % len(val_filenames_raw))
    print('Test set - number of samples: %d' % len(test_filenames_raw))
    print('Training set - number of samples in .data/train: %d' %
          len(os.listdir(train_data_dir)))
    print('Validation set - number of samples .data/val: %d' %
          len(os.listdir(val_data_dir)))
    print('Test set - number of samples .data/test: %d' %
          len(os.listdir(test_data_dir)))
    return train_val_test, data_filenames_img


def loss_obj(loss_type):
    """Map a loss-type name to a Keras loss object.

    Raises:
        ValueError: for unsupported names (the original silently returned the
        unbound local / None).
    """
    if loss_type == 'categorical_crossentropy':
        return tf.keras.losses.CategoricalCrossentropy()
    raise ValueError('Unsupported loss_type: {!r}'.format(loss_type))


def optimizer(learning_rate, momentum, opt_type):
    """Map an optimizer-type name to a configured Keras optimizer.

    Raises:
        ValueError: for unsupported names (the original raised
        UnboundLocalError on `opt` instead).
    """
    if opt_type == 'SGD_momentum':
        return tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                       momentum=momentum)
    if opt_type == 'Adam':
        return tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                        decay=0.0001)
    raise ValueError('Unsupported opt_type: {!r}'.format(opt_type))
def load_data_using_keras(folders, df, data_filenames_img, params):
    """Build Keras ImageDataGenerators for the jpg train/val/test folders.

    Augmentation (flips/rotation/shear) is applied to the training generator
    only; val/test generators are plain. When params.preprocess is False the
    pixels are rescaled to [0, 1] here; when it is True rescaling is left to a
    model-side preprocess_input.

    Args:
        folders: iterable of directories whose basename is 'train'/'val'/'test'.
        df: DataFrame with 'image_name' plus the one-hot label columns
            listed in params.columns.
        data_filenames_img: per-folder lists of image names (parallel to
            `folders`) used to select the rows of `df` for each split.
        params: AttrDict of settings built by `params()`.

    Returns:
        dict with keys 'train'/'val'/'test' mapping to flow_from_dataframe
        iterators; the test iterator uses a single full-size batch.

    NOTE: this collapses the original's two nearly identical branches — the
    only behavioral difference between them was the `rescale` argument.
    """
    image_generator = {}
    data_generator = {}
    # rescale=None is ImageDataGenerator's default, so passing it explicitly
    # is identical to the original preprocess branch that omitted it.
    rescale = None if params.preprocess else 1. / 255
    for _dir, _filenames in zip(folders, data_filenames_img):
        end = _dir.split('/')[-1]
        subset = df[df['image_name'].isin(_filenames)]
        if end == 'train':
            image_generator['train'] = ImageDataGenerator(
                horizontal_flip=params.horizontal_flip,
                vertical_flip=params.vertical_flip,
                rotation_range=params.rotation_range,
                shear_range=params.shear_range,
                rescale=rescale)
            batch_size = params.batch_size
            shuffle = True
        elif end in ('val', 'test'):
            image_generator[end] = ImageDataGenerator(rescale=rescale)
            # the test generator yields the whole split as one batch
            batch_size = params.batch_size if end == 'val' else len(subset)
            shuffle = False
        else:
            continue  # not a train/val/test folder
        data_generator[end] = image_generator[end].flow_from_dataframe(
            dataframe=subset,
            x_col='image_name',
            y_col=params.columns,
            batch_size=batch_size,
            directory=_dir,
            seed=params.seed,
            shuffle=shuffle,
            target_size=(64, 64),
            class_mode='raw',
            color_mode='rgb')
    return data_generator


def normalize_band(band):
    """Min-max normalize a single 2-D band with sklearn's MinMaxScaler."""
    MinMax = MinMaxScaler()
    band_norm = MinMax.fit_transform(band)
    return band_norm
def tif2sets(train_val_test_tif, dataset_tif, params):
    '''Assemble train/val/test arrays from pre-parsed tif data.

    Args:
        train_val_test_tif: three lists (train/val/test) of image paths; only
            the basename is used to look up `dataset_tif`.
        dataset_tif: dict mapping image basename -> {'X_array': (64, 64, C)
            array, 'y_array': label vector}.
        params: AttrDict; params.channels == 'all' means 10 channels.

    Returns:
        [(X_train, y_train), (X_val, y_val), (X_test, y_test)] as numpy arrays.

    NOTE(review): when channels != 'all', the label arrays are sized
    len(params.channels) rather than the number of classes — confirm this is
    intended (labels and channels coincide at 10 only in the 'all' case).
    '''
    # initialize output arrays
    if params.channels == 'all':
        X_train = np.zeros([len(train_val_test_tif[0]), 64, 64, 10],
                           dtype="float32")
        X_val = np.zeros([len(train_val_test_tif[1]), 64, 64, 10],
                         dtype="float32")
        X_test = np.zeros([len(train_val_test_tif[2]), 64, 64, 10],
                          dtype="float32")
        y_train = np.zeros([len(train_val_test_tif[0]), 10])
        y_val = np.zeros([len(train_val_test_tif[1]), 10])
        y_test = np.zeros([len(train_val_test_tif[2]), 10])
    else:
        X_train = np.zeros(
            [len(train_val_test_tif[0]), 64, 64, len(params.channels)],
            dtype="float32")
        X_val = np.zeros(
            [len(train_val_test_tif[1]), 64, 64, len(params.channels)],
            dtype="float32")
        X_test = np.zeros(
            [len(train_val_test_tif[2]), 64, 64, len(params.channels)],
            dtype="float32")
        y_train = np.zeros([len(train_val_test_tif[0]), len(params.channels)])
        y_val = np.zeros([len(train_val_test_tif[1]), len(params.channels)])
        y_test = np.zeros([len(train_val_test_tif[2]), len(params.channels)])
    sets = [(X_train, y_train), (X_val, y_val), (X_test, y_test)]
    # fill each split by basename lookup
    for folder, _set in zip(train_val_test_tif, sets):
        X_set, y_set = _set
        for i in range(len(_set[0])):
            X_set[i, :, :, :] = dataset_tif[folder[i].split('/')
                                            [-1]]['X_array']
            y_set[i, :] = dataset_tif[folder[i].split('/')[-1]]['y_array']
    print('Number of samples in train set: {}'.format(len(sets[0][0])))
    print('Number of labels in train set: {}'.format(len(sets[0][-1])))
    print('\nNumber of samples in val set: {}'.format(len(sets[1][0])))
    print('Number of labels in val set: {}'.format(len(sets[1][-1])))
    print('\nNumber of samples in test set: {}'.format(len(sets[-1][0])))
    print('Number of labels in test set: {}'.format(len(sets[-1][-1])))
    print('\nTotal number of samples: {}'.format(
        len(sets[0][0]) + len(sets[1][0]) + len(sets[2][0])))
    return sets


def load_data_using_keras_tif(train_val_test_tif, dataset_tif, params):
    """Build in-memory Keras generators for the tif splits.

    FIX: the original used the SAME augmenting ImageDataGenerator (flips,
    rotation, shear) for train, val AND test, which distorts evaluation data.
    Augmentation now applies to the training flow only; val/test use a plain
    generator.
    """
    data_generators = {}
    train_val_test_sets = tif2sets(train_val_test_tif, dataset_tif, params)
    X_train, y_train = train_val_test_sets[0]
    X_val, y_val = train_val_test_sets[1]
    X_test, y_test = train_val_test_sets[2]
    train_generator = ImageDataGenerator(
        horizontal_flip=params.horizontal_flip,
        vertical_flip=params.vertical_flip,
        rotation_range=params.rotation_range,
        shear_range=params.shear_range)
    eval_generator = ImageDataGenerator()  # no augmentation for val/test
    data_generators['train'] = train_generator.flow(
        X_train, y_train, batch_size=params.batch_size, seed=params.seed)
    data_generators['val'] = eval_generator.flow(X_val,
                                                 y_val,
                                                 batch_size=params.batch_size,
                                                 seed=params.seed)
    data_generators['test'] = eval_generator.flow(
        X_test, y_test, batch_size=params.batch_size, seed=params.seed)
    return data_generators
def spectral_module(x, spectral_id, squeeze=16, expand_1x1=96, expand_3x3=32):
    """SqueezeNet-style 'fire' block: a 1x1 squeeze conv followed by parallel
    1x1 and 3x3 expand convs whose outputs are concatenated on the channel axis.

    Args:
        x: input Keras tensor.
        spectral_id: integer used to build unique layer names
            ('spectral<id>/...') — names must stay stable for by-name
            weight loading.
        squeeze: filters of the 1x1 squeeze conv.
        expand_1x1: filters of the 1x1 expand branch.
        expand_3x3: filters of the 3x3 expand branch.

    Returns:
        Keras tensor with expand_1x1 + expand_3x3 channels.
    """
    # layer-name suffixes, shared with the SqueezeNet reference implementation
    sq1x1 = "squeeze1x1"
    exp1x1 = "expand1x1"
    exp3x3 = "expand3x3"
    relu = "relu_"
    s_id = 'spectral' + str(spectral_id) + '/'
    # channel axis depends on the backend's image data format
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3
    # squeeze: reduce channel count before the (more expensive) expand convs
    x = Conv2D(squeeze, (1, 1),
               padding='same',
               name=s_id + sq1x1,
               kernel_initializer='glorot_uniform',
               activation='relu')(x)
    # expand branch 1: pointwise
    left = Conv2D(expand_1x1, (1, 1),
                  padding='same',
                  name=s_id + exp1x1,
                  kernel_initializer='glorot_uniform')(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)
    # expand branch 2: 3x3 spatial
    right = Conv2D(expand_3x3, (3, 3),
                   padding='same',
                   name=s_id + exp3x3,
                   kernel_initializer='glorot_uniform')(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)
    x = concatenate([left, right], axis=channel_axis, name=s_id + 'concat')
    return x


def SpectralNet(params, input_shape=(64, 64, 10), classes=10):
    """Implementation of the SpectralNet architecture.

    Gianforte School of Computing and Dept. Elec & Computer Engineering,
    Montana State University, Bozeman, USA.
    Paper: https://www.cs.montana.edu/sheppard/pubs/ijcnn-2019c.pdf
    Adapted from the SqueezeNet implementation in Keras:
    https://github.com/rcmalli/keras-squeezenet

    Args:
        params: AttrDict; params.extension selects the input depth
            (jpg -> 3 channels, tif -> 10 channels, overriding `input_shape`).
        input_shape: default input shape, overridden by params.extension.
        classes: number of output classes.

    Returns:
        A compiled tf.keras.Model ending in global-average-pooled softmax.
    """
    # input depth is driven by the image format in use
    if params.extension == 'jpg':
        input_shape = (64, 64, 3)
    elif params.extension == 'tif':
        input_shape = (64, 64, 10)
    inputs = tf.keras.Input(shape=input_shape)
    x = Conv2D(96, (2, 2),
               strides=(2, 2),
               padding='same',
               name='conv1',
               activation='relu',
               kernel_initializer='glorot_uniform')(inputs)
    # stack of spectral (fire) modules with widening channel counts
    x = spectral_module(x, spectral_id=2, squeeze=16, expand_1x1=96,
                        expand_3x3=32)
    x = spectral_module(x, spectral_id=3, squeeze=16, expand_1x1=96,
                        expand_3x3=32)
    x = spectral_module(x, spectral_id=4, squeeze=32, expand_1x1=192,
                        expand_3x3=64)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool4')(x)
    x = spectral_module(x, spectral_id=5, squeeze=32, expand_1x1=192,
                        expand_3x3=64)
    x = spectral_module(x, spectral_id=6, squeeze=48, expand_1x1=288,
                        expand_3x3=96)
    x = spectral_module(x, spectral_id=7, squeeze=48, expand_1x1=288,
                        expand_3x3=96)
    # NOTE(review): expand_1x1=385 looks like a typo for 384 (SqueezeNet uses
    # powers-of-two-ish widths) — confirm against the paper before changing.
    x = spectral_module(x, spectral_id=8, squeeze=64, expand_1x1=385,
                        expand_3x3=128)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool8')(x)
    x = spectral_module(x, spectral_id=9, squeeze=64, expand_1x1=385,
                        expand_3x3=128)
    # classifier head: 1x1 conv to `classes` maps, GAP, softmax
    x = Conv2D(classes, (1, 1),
               padding='same',
               name='conv10',
               activation='relu',
               kernel_initializer='glorot_uniform')(x)
    x = GlobalAveragePooling2D()(x)
    softmax = Activation("softmax", name='softmax')(x)
    model = tf.keras.Model(inputs, softmax)
    model.compile(loss=params.loss_obj,
                  optimizer=params.optimizer_obj,
                  metrics=[tf.keras.metrics.CategoricalAccuracy()])
    return model
def create_model(version, params):
    """Build and compile one of the baseline CNN variants.

    Args:
        version: one of 'v1.0' .. 'v1.4' selecting the architecture below.
        params: AttrDict supplying input size/channels, class count, and the
            pre-built loss/optimizer objects.

    Returns:
        A compiled tf.keras.Model. NOTE(review): if `version` matches none of
        the known strings, `model` is unbound and the final return raises
        UnboundLocalError — confirm callers only pass known versions.
    """
    if version == 'v1.0':  # Baseline: 2 conv blocks (32, 64) + Dense(64)
        inputs = Input(shape=(params.size, params.size, params.num_channels))
        x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
        x = Conv2D(32, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Flatten()(x)
        x = Dense(64, activation='relu')(x)
        outputs = Dense(params.num_classes, activation='softmax')(x)
        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        model.compile(loss=params.loss_obj,
                      optimizer=params.optimizer_obj,
                      metrics=[tf.keras.metrics.CategoricalAccuracy()])
    if version == 'v1.1':  # v1.0 with 128 units in FC layer w.r.t 64
        # (also uses he_uniform init on the 64-filter convs only)
        inputs = Input(shape=(params.size, params.size, params.num_channels))
        x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
        x = Conv2D(32, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Conv2D(64,
                   3,
                   activation='relu',
                   kernel_initializer='he_uniform',
                   padding='same')(x)
        x = Conv2D(64,
                   3,
                   activation='relu',
                   kernel_initializer='he_uniform',
                   padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        outputs = Dense(params.num_classes, activation='softmax')(x)
        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        model.compile(loss=params.loss_obj,
                      optimizer=params.optimizer_obj,
                      metrics=[tf.keras.metrics.CategoricalAccuracy()])
    if version == 'v1.2':
        # NOTE(review): original comment said "v1.3 with dropout layers after
        # each block" but no Dropout layer is present — this is v1.1 plus a
        # third (128-filter) conv block. Confirm the intended description.
        inputs = Input(shape=(params.size, params.size, params.num_channels))
        x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
        x = Conv2D(32, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Conv2D(128, 3, activation='relu', padding='same')(x)
        x = Conv2D(128, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        outputs = Dense(params.num_classes, activation='softmax')(x)
        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        model.compile(loss=params.loss_obj,
                      optimizer=params.optimizer_obj,
                      metrics=[tf.keras.metrics.CategoricalAccuracy()])
    if version == 'v1.3':  # three convs per block (32/64/128) + Dense(128)
        inputs = Input(shape=(params.size, params.size, params.num_channels))
        x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
        x = Conv2D(32, 3, activation='relu', padding='same')(x)
        x = Conv2D(32, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Conv2D(128, 3, activation='relu', padding='same')(x)
        x = Conv2D(128, 3, activation='relu', padding='same')(x)
        x = Conv2D(128, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        outputs = Dense(params.num_classes, activation='softmax')(x)
        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        model.compile(loss=params.loss_obj,
                      optimizer=params.optimizer_obj,
                      metrics=[tf.keras.metrics.CategoricalAccuracy()])
    if version == 'v1.4':  # v1.3 with a wider Dense(1024) head
        inputs = Input(shape=(params.size, params.size, params.num_channels))
        x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
        x = Conv2D(32, 3, activation='relu', padding='same')(x)
        x = Conv2D(32, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = Conv2D(64, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Conv2D(128, 3, activation='relu', padding='same')(x)
        x = Conv2D(128, 3, activation='relu', padding='same')(x)
        x = Conv2D(128, 3, activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2))(x)
        x = Flatten()(x)
        x = Dense(1024, activation='relu')(x)
        outputs = Dense(params.num_classes, activation='softmax')(x)
        model = tf.keras.Model(inputs=inputs, outputs=outputs)
        model.compile(loss=params.loss_obj,
                      optimizer=params.optimizer_obj,
                      metrics=[tf.keras.metrics.CategoricalAccuracy()])
    return model
def create_resnet(params):
    """Build a ResNet50-based classifier with configurable fine-tuning depth.

    params.trainable selects the mode:
      * True   — unfreeze the top params.pcg_unfreeze fraction of the 175
                 ResNet50 layers, freeze the rest;
      * 'Full' — train the whole backbone;
      * else   — use the backbone as a frozen feature extractor.

    params.preprocess toggles a model-side resnet preprocess_input layer;
    params.regularization adds L2 to every conv kernel via add_regularization.

    FIXES vs. the original (which repeated the same head-building code six
    times across the branches):
      * the feature-extractor path used
        tf.keras.applications.mobilenet.preprocess_input — wrong model family;
        resnet.preprocess_input is used everywhere now;
      * the 'Full' path applied preprocess_input even when params.preprocess
        was False, contradicting the flag's meaning in the other branches.

    Returns:
        A compiled tf.keras.Model.
    """
    base_model = tf.keras.applications.ResNet50(
        input_shape=(params.size, params.size, params.num_channels),
        include_top=False,
        weights='imagenet')
    # --- configure which backbone layers train -----------------------------
    if params.trainable == True:
        print('\n Unfreezing ResNet {}% top layers'.format(
            params.pcg_unfreeze * 100))
        # ResNet50 has 175 layers; freeze everything below the unfreeze cut
        layers_to_freeze = 175 - int(175 * params.pcg_unfreeze)
        for layer in base_model.layers[:layers_to_freeze]:
            layer.trainable = False
        for layer in base_model.layers[layers_to_freeze:]:
            layer.trainable = True
    elif params.trainable == 'Full':
        print('\n Using Resnet - Full training')
        if params.preprocess:
            print('\n Using Keras preprocess_input')
        base_model.trainable = True
    else:
        print('\n Using Resnet as feature extractor')
        base_model.trainable = False
    # --- optional L2 regularization on the backbone ------------------------
    if params.regularization:
        base_model = add_regularization(
            base_model, regularizer=tf.keras.regularizers.l2(0.0001))
        print('L2 regularization added')
    # --- classification head (identical in every original branch) ----------
    inputs = tf.keras.Input(shape=(params.size, params.size,
                                   params.num_channels))
    if params.preprocess:
        x = tf.keras.applications.resnet.preprocess_input(inputs)
        x = base_model(x, training=False)
    else:
        x = base_model(inputs, training=False)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = Dropout(0.2)(x)
    outputs = Dense(params.num_classes, activation='softmax')(x)
    model = tf.keras.Model(inputs, outputs)
    model.compile(loss=params.loss_obj,
                  optimizer=params.optimizer_obj,
                  metrics=[tf.keras.metrics.CategoricalAccuracy()])
    return model
# credits to https://gist.github.com/sthalles
def add_regularization(model, regularizer=tf.keras.regularizers.l2(0.0001)):
    """Attach `regularizer` to every layer's kernel and rebuild the model.

    Setting `kernel_regularizer` on an existing layer only changes the config,
    so the model is round-tripped through JSON (weights stashed in a temp file)
    for the change to take effect.

    Args:
        model: a tf.keras model.
        regularizer: a tf.keras.regularizers.Regularizer instance
            (note: the default is evaluated once at import time).

    Returns:
        The rebuilt model, or the unmodified model if `regularizer` is not a
        Regularizer (a message is printed in that case).
    """
    if not isinstance(regularizer, tf.keras.regularizers.Regularizer):
        print(
            "Regularizer must be a subclass of tf.keras.regularizers.Regularizer"
        )
        return model
    for layer in model.layers:
        for attr in ['kernel_regularizer']:
            if hasattr(layer, attr):
                setattr(layer, attr, regularizer)
    # Save the weights before reloading the model: the regularizer only lands
    # in the serialized config, so rebuild from JSON and restore weights.
    config_json = model.to_json()
    tmp_weights_path = os.path.join(tempfile.gettempdir(),
                                    'tmp_weights_resnet.h5')
    model.save_weights(tmp_weights_path)
    model = tf.keras.models.model_from_json(config_json)
    # Reload the model weights
    model.load_weights(tmp_weights_path, by_name=True)
    return model


def run_models_generator(versions,
                         data_generator,
                         test_dataset,
                         test_labels,
                         train_params,
                         experiment=''):
    """Train every model version and collect model/history/loss/grid per version.

    For each version: trains via run_baseline_model_generator, moves the
    confusion-matrix logs into the version's log folder, and records a metadata
    snapshot of the training configuration.

    Args:
        versions: iterable of version names ('ResNet...', 'SpectralNet...',
            or 'v1.x').
        data_generator: dict of train/val generators.
        test_dataset, test_labels: held-out evaluation data.
        train_params: AttrDict of training settings.
        experiment: suffix appended to the version's log folder name.

    Returns:
        dict: version -> {'meta', 'model', 'history', 'loss', 'grid'}.
    """
    v_outputs = {}
    log_folder = train_params.log_folder
    log_cm_path = train_params.log_cm_path
    for version in versions:
        v_dict = {}
        version_folder = os.path.join(log_folder, version + experiment)
        mkdir(log_cm_path)
        v, v_history, v_loss, v_grid = run_baseline_model_generator(
            version, data_generator, test_dataset, test_labels, train_params,
            version_folder)
        # archive this run's confusion-matrix logs under the version folder
        shutil.copytree(log_cm_path, os.path.join(version_folder, 'cm'))
        shutil.rmtree(log_cm_path)
        v_meta = {
            'channels': train_params.channels,
            'image_size': train_params.size,
            'num_images_train': train_params.num_images_train,
            'num_images_val': train_params.num_images_val,
            'num_images_test': train_params.num_images_test,
            # FIX: was a second 'channels' key that silently overwrote the
            # channel list above; renamed to 'num_channels'.
            'num_channels': train_params.num_channels,
            'epochs': train_params.num_epochs,
            'batch_size': train_params.batch_size,
            'loss_type': train_params.loss_type,
            'opt_type': train_params.opt_type,
            'learning_rate': train_params.learning_rate,
            'momentum': train_params.momentum,
            'regularization': train_params.regularization,
            'horizontal_flip': train_params.horizontal_flip,
            'vertical_flip': train_params.vertical_flip,
            'rotation_range': train_params.rotation_range,
            'shear_range': train_params.shear_range
        }
        v_dict['meta'] = v_meta
        v_dict['model'] = v
        v_dict['history'] = v_history
        v_dict['loss'] = v_loss
        v_dict['grid'] = v_grid
        v_outputs[version] = v_dict
    return v_outputs
def run_baseline_model_generator(version, data_generator, test_dataset,
                                 test_labels, train_params, version_folder):
    """Build the model for `version`, train it with TensorBoard + confusion
    matrix callbacks, and return (model, history, loss, perf grid).

    Args:
        version: 'ResNet*' / 'SpectralNet*' select those builders; anything
            else is passed to create_model as a baseline version string.
        data_generator: dict with 'train' and 'val' generators.
        test_dataset, test_labels: evaluation data handed to the confusion
            matrix callback and perf_grid.
        train_params: AttrDict of training settings.
        version_folder: log directory for this run's TensorBoard events.

    Returns:
        (model, history, loss, grid) — `loss` is the per-epoch training loss
        list from learning_curves; `grid` is the perf_grid DataFrame.
    """
    # --- model selection by version prefix ---------------------------------
    if version.startswith('ResNet'):
        model = create_resnet(train_params)
        print('Version: Resnet model - {}'.format(
            version_folder.split('/')[-1]))
    elif version.startswith('SpectralNet'):
        model = SpectralNet(train_params)
        print('Version: SpectralNet model - {}'.format(
            version_folder.split('/')[-1]))
    else:
        model = create_model(version, train_params)
        print('Version: {}'.format(version_folder.split('/')[-1]))
    # History
    if train_params.rlronplateau:
        print('RLRonPlateau: active\n')
        cm_callback = ConfusionMatrixCallback(test_dataset, test_labels,
                                              train_params)
        # NOTE(review): monitors training 'loss', not 'val_loss' — confirm
        # that is intentional before changing.
        ReduceLRonPLateau_callback = ReduceLROnPlateau(monitor='loss',
                                                       factor=0.1,
                                                       patience=3,
                                                       mode='min',
                                                       min_lr=0.000001)
        # timestamped subfolder so successive runs don't overwrite each other
        tensorboard_callback = tf.keras.callbacks.TensorBoard(
            log_dir=os.path.join(
                version_folder,
                datetime.datetime.now().strftime("%Y%m%d-%H%M%S")),
            histogram_freq=1)
        # NOTE(review): fit_generator is deprecated in TF2 in favor of fit()
        history = model.fit_generator(
            data_generator['train'],
            steps_per_epoch=train_params.num_images_train //
            train_params.batch_size,
            epochs=train_params.num_epochs,
            validation_data=data_generator['val'],
            validation_steps=train_params.num_images_val //
            train_params.batch_size,
            callbacks=[
                tensorboard_callback, cm_callback, ReduceLRonPLateau_callback
            ])
    else:
        tensorboard_callback = tf.keras.callbacks.TensorBoard(
            log_dir=os.path.join(
                version_folder,
                datetime.datetime.now().strftime("%Y%m%d-%H%M%S")),
            histogram_freq=1)
        # Confusion matrix
        cm_callback = ConfusionMatrixCallback(test_dataset, test_labels,
                                              train_params)
        history = model.fit_generator(
            data_generator['train'],
            steps_per_epoch=train_params.num_images_train //
            train_params.batch_size,
            epochs=train_params.num_epochs,
            validation_data=data_generator['val'],
            validation_steps=train_params.num_images_val //
            train_params.batch_size,
            callbacks=[tensorboard_callback, cm_callback])
    # plot learning curves and compute the per-label threshold grid
    loss, val_loss, categorical_accuracy, val_categorical_accuracy = learning_curves(
        history, version)
    grid = perf_grid(test_dataset, test_labels, train_params.columns, model,
                     n_thresh=100)
    return model, history, loss, grid
saved_models_dir = './reports/saved_models' save_path = os.path.join(assets_path, list(versions.keys())[0] + experiment) save_meta_csv_path = os.path.join( save_path, list(versions.keys())[0] + experiment + '_meta_.csv') save_grid_csv_path = os.path.join( save_path, list(versions.keys())[0] + experiment + '_grid_.csv') mkdir(save_path) df_meta = pd.DataFrame(versions['ResNet']['meta']).iloc[0] df_grid = pd.DataFrame(versions['ResNet']['grid']) # save meta and grid to csv pd.DataFrame.to_csv(df_meta, save_meta_csv_path, index=False) pd.DataFrame.to_csv(df_grid, save_grid_csv_path, index=False) # save model versions['ResNet']['model'].save( os.path.join(saved_models_dir, list(versions.keys())[0] + experiment)) class ConfusionMatrixCallback(tf.keras.callbacks.Callback): def __init__(self, X_test, y_test, params): self.X_test = X_test self.y_test = y_test self.params = params def on_epoch_end(self, epoch, logs=None): train_params = params('jpg', 1) log_folder = './reports/logs' log_cm_path = os.path.join(log_folder, 'cm') cm_writer = tf.summary.create_file_writer(log_cm_path) test_pred = self.model.predict(self.X_test) # Calculate the confusion matrix using sklearn.metrics cm = tfa.metrics.MultiLabelConfusionMatrix( num_classes=(train_params.num_classes))(self.y_test, np.where( test_pred > 0.5, 1, 0)) figure = plot_confusion_matrix(cm, train_params.columns) cm_image = plot_to_image(figure) # Log the confusion matrix as an image summary. with cm_writer.as_default(): tf.summary.image("Confusion Matrix", cm_image, step=epoch) def perf_grid(dataset, labels, columns, model, n_thresh=100): """Computes the performance table containing target, label names, label frequencies, thresholds between 0 and 1, number of tp, fp, fn, precision, recall and f-score metrics for each label. 
Args: dataset (tf.data.Datatset): contains the features array labels (numpy array): target matrix of shape (BATCH_SIZE, N_LABELS) tags (list of strings): column names in target matrix model (tensorflow keras model): model to use for prediction n_thresh (int) : number of thresholds to try Returns: grid (Pandas dataframe): performance table """ # Get predictions y_hat_val = model.predict(dataset) # Define target matrix y_val = np.array(labels) # Find label frequencies in the validation set label_freq = np.array(labels).sum(axis=0) # Get label indexes label_index = [i for i in range(len(columns))] # Define thresholds thresholds = np.linspace(0, 1, n_thresh + 1).astype(np.float32) # Compute all metrics for all labels ids, labels, freqs, tps, fps, fns, precisions, recalls, f1s = [], [], [], [], [], [], [], [], [] for l in label_index: for thresh in thresholds: ids.append(l) labels.append(columns[l]) freqs.append(round(label_freq[l] / len(y_val), 2)) y_hat = y_hat_val[:, l] y = y_val[:, l] y_pred = y_hat > thresh tp = np.count_nonzero(y_pred * y) fp = np.count_nonzero(y_pred * (1 - y)) fn = np.count_nonzero((1 - y_pred) * y) precision = tp / (tp + fp + 1e-16) recall = tp / (tp + fn + 1e-16) f1 = tp / (tp + (fn + fp) * 0.5 + 1e-16) tps.append(tp) fps.append(fp) fns.append(fn) precisions.append(precision) recalls.append(recall) f1s.append(f1) # Create the performance dataframe grid = pd.DataFrame({ 'id': ids, 'label': np.array(labels), 'freq': freqs, 'threshold': list(thresholds) * len(label_index), 'tp': tps, 'fp': fps, 'fn': fns, 'precision': precisions, 'recall': recalls, 'f1': f1s }) grid = grid[[ 'id', 'label', 'freq', 'threshold', 'tp', 'fn', 'fp', 'precision', 'recall', 'f1' ]] return grid # Modified versions of functions implemented by <NAME> def learning_curves(history, version): """Plot the learning curves of loss and macro f1 score for the training and validation datasets. 
Args: history: history callback of fitting a tensorflow keras model """ path_assets = './reports/assets/{}'.format(version) mkdir(path_assets) title_loss = 'Training and Validation Loss - Model {}'.format(version) title_f1_score = 'Training and Validation Categorical Accuracy - Model {}'.format( version) loss = history.history['loss'] val_loss = history.history['val_loss'] categorical_accuracy = history.history['categorical_accuracy'] val_categorical_accuracy = history.history['val_categorical_accuracy'] epochs = len(loss) style.use("bmh") plt.figure(figsize=(8, 8)) plt.subplot(2, 1, 1) plt.plot(range(1, epochs + 1), loss, label='Training Loss') plt.plot(range(1, epochs + 1), val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.ylabel('Loss') plt.title(title_loss) plt.tight_layout() plt.savefig('./reports/assets/{}/{}.png'.format(version, title_loss)) plt.subplot(2, 1, 2) plt.plot(range(1, epochs + 1), categorical_accuracy, label='Training categorical accuracy') plt.plot(range(1, epochs + 1), val_categorical_accuracy, label='Validation categorical accuracy') plt.legend(loc='lower right') plt.ylabel('Categorical accuracy') plt.title(title_f1_score) plt.xlabel('epoch') plt.tight_layout() plt.savefig('./reports/assets/{}/{}.png'.format(version, title_f1_score)) plt.show() return loss, val_loss, categorical_accuracy, val_categorical_accuracy def plot_confusion_matrix(cm, columns): fig = plt.figure(figsize=(10, 20)) for i, (label, matrix) in enumerate(zip(columns, cm)): ax = plt.subplot(6, 3, i + 1) labels = [f'not_{label}', label] sns.heatmap(matrix, ax=ax, annot=True, square=True, fmt='.0f', cbar=False, cmap='Blues', xticklabels=labels, yticklabels=labels, linecolor='black', linewidth=1) plt.title(labels[1], size=8) plt.subplots_adjust(wspace=5, hspace=5) ax.set_yticklabels(labels, va='center', position=(0, 0.28), size=8) ax.set_xticklabels(labels, ha='center', position=(0.28, 0), size=8) plt.xlabel('PREDICTED CLASS', labelpad=10) plt.ylabel('TRUE 
CLASS', labelpad=10) plt.tight_layout() return fig def plot_to_image(figure): """ Converts the matplotlib plot specified by 'figure' to a PNG image and returns it. The supplied figure is closed and inaccessible after this call. """ buf = BytesIO() # Use plt.savefig to save the plot to a PNG in memory. plt.savefig(buf, format='png') # Closing the figure prevents it from being displayed directly inside # the notebook. plt.close(figure) buf.seek(0) # Use tf.image.decode_png to convert the PNG buffer # to a TF image. Make sure you use 4 channels. image = tf.image.decode_png(buf.getvalue(), channels=4) # Use tf.expand_dims to add the batch dimension image = tf.expand_dims(image, 0) return image
utils.py.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] _cell_guid="0f111cb4-8639-4418-a23c-4a9100c923ae" _uuid="421f8ddd9538e9295cd5465e0f828c83d445f93e" deletable=true editable=true
# # Introduction
# This case study is about predicting which passengers survived the [sinking of the famous Titanic](https://en.wikipedia.org/wiki/Sinking_of_the_RMS_Titanic).
# In our work, we would like to establish a model that predicts the survival of each passenger. In order to do this, we will use a dataset that describes each passenger (multiple features) and indicates whether or not they survived (one column).

# + [markdown] _cell_guid="3a985469-79f8-4b85-86e0-f4329ad1d64d" _uuid="85b53fff57969fba32fffb43fe3da95e1972181b" deletable=true editable=true
# # Data description
# In this section, we load and explore the dataset. First, we import the libraries needed throughout the project.
# + _cell_guid="c0c94c5a-07a6-4f8f-8276-188d4502e448" _uuid="434c433d8a0bf9956513c3744926966e7f8296ca" code_folding=[] deletable=true editable=true # Import numerical and data processing libraries import numpy as np import pandas as pd # Import helpers that make it easy to do cross-validation from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score # Import machine learning models from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import GaussianNB # Import visualisation libraries import matplotlib.pyplot as plt # %matplotlib inline # Import a method in order to make deep copies from copy import deepcopy # Import an other usefull libraries import itertools # Set the paths for inputs and outputs local = 1 if(local == 0): inputPath = "../input/" outputPath = "../output/" else: inputPath = "data/" outputPath = "data/" # + [markdown] deletable=true editable=true # Here, we load the datasets. We actually have 2 datasets: # - "train.csv": contains informations about some passengers (multiple columns) and the fact that they survived or not (one column). You may download this dataset <a href="https://www.kaggle.com/c/titanic/data">here</a> in CSV format. # - "test.csv": contains informations about some other passengers (multiple columns) but without the survival information. You may download this dataset <a href="https://www.kaggle.com/c/titanic/data">here</a> in CSV format. # # Note that the only difference in the structure of the 2 datasets is that the "test" dataset does not contain "Survived" column (the "label" or the "class" to which the passenger belongs). # # We describe in what follows the columns of the "train" dataset. 
# + _cell_guid="b709ed10-e333-40ea-8d15-26027d4e117d" _uuid="f51a6a1b9bc3ad281b3abd7cb38874a9477348ff" code_folding=[] deletable=true editable=true # This creates a pandas dataframe and assigns it to the titanic variable titanicOrigTrainDS = pd.read_csv(inputPath + "train.csv") titanicTrainDS = deepcopy(titanicOrigTrainDS) titanicOrigTestDS = pd.read_csv(inputPath + "test.csv") titanicTestDS = deepcopy(titanicOrigTestDS) # Print the first five rows of the dataframe titanicTrainDS.head(5) # + [markdown] deletable=true editable=true # Here is a short description of the different columns: # - <b>PassengerId</b>: Id of the passenger # - <b>Pclass</b>: Ticket class (1 = 1st, 2 = 2nd, 3 = 3rd) # - <b>Name</b>: Name # - <b>Sex</b>: Sex # - <b>Age</b>: Age in years # - <b>Sibsp</b>: Number of siblings / spouses aboard the Titanic # - <b>Parch</b>: Number of parents / children aboard the Titanic # - <b>Ticket</b>: Ticket number # - <b>Fare</b>: Passenger fare # - <b>Cabin</b>: Cabin number # - <b>Embarked</b>: Port of Embarkation (C = Cherbourg, Q = Queenstown, S = Southampton) # - <b>Survival</b>: Survival (0 = No, 1 = Yes) # # + [markdown] deletable=true editable=true # # Hypothesis # # Let's consider which variables might affect the outcome of survival (feature selection). # In this section, we test the variability of the survival percentage according to each feature. # It is to be noted that a variability induce that the feature has some influence. # But the opposite is not automatically true. # # We consider the following features: # - "Pclass": knowing that first class cabins were closer to the deck of the ship, are passengers from the first class more likely to survive? If yes, passenger class "Pclass" might affect the outcome. # - "Fare": the passenger fare is probably tied to passenger class and could have a correlation too. # - "Sex": are women more likely to survive? If yes, "Sex" would be a good predictor. # - "Age": are children more likely to survive? 
If yes, "Age" would be a good predictor. # - "Embarked": people who boarded at certain ports may have had cabins closer or farther away exits. # - "Sibsp": does being alone give more chance of surviving or less because no one is thinking about you. # - "Parch": same remark. # # However, we do not consider these features: # - PassengerId # - Name # - Ticket # - Cabin # # Let us explore the pre-selected features and their correlations with the variable "Survived". # # # # # + deletable=true editable=true # What is the percentage of survival by class (1st, 2nd, 3rd)? titanicTrainDS[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean() # We find a big variability. The first class passengers had definetely more chances to survive. # This means that "Pclass" is an important feature. # + deletable=true editable=true # What is the percentage of survival by sex? titanicTrainDS[["Sex", "Survived"]].groupby(['Sex'], as_index=False).mean() # We find a huge variability. Woman had more chances to survive. # This is definitely an important feature. # + deletable=true editable=true # What is the percentage of survival according to the port of embarkation titanicTrainDS[["Embarked", "Survived"]].groupby(['Embarked'], as_index=False).mean() # + deletable=true editable=true # What is the percentage of survival according to the number of siblings? titanicTrainDS[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False).mean() # + deletable=true editable=true # What is the percentage of survival according to the number of parents? titanicTrainDS[["Parch", "Survived"]].groupby(['Parch'], as_index=False).mean() # - # What is the percentage of survival according to the age (grouped)? 
interval = 10 TempV = round(titanicTrainDS["Age"]//interval)*interval titanicTrainDS["AgeIntervalMin"] = TempV titanicTrainDS["AgeIntervalMax"] = TempV + interval titanicTrainDS[["AgeIntervalMin", "AgeIntervalMax", "Survived"]].groupby(["AgeIntervalMin"], as_index=False).mean() # What is the percentage of survival according to the fare (grouped)? interval = 25 TempV = round(titanicTrainDS["Fare"]//interval)*interval titanicTrainDS["FareIntervalMin"] = TempV titanicTrainDS["FareIntervalMax"] = TempV + interval titanicTrainDS[["FareIntervalMin", "FareIntervalMax", "Survived"]].groupby(["FareIntervalMin"], as_index=False).mean() # We decide to keep all pre-selected features. # However, some of them need to be "cleaned" before running models on our datasets. # + [markdown] deletable=true editable=true # # Data cleaning # Let us have a look to the datasets ("train" and "test"). # Note that we need to do the cleaning part in parallel for both datasets. # + deletable=true editable=true titanicDSs = [titanicTrainDS, titanicTestDS] # + deletable=true editable=true # lenght of the dataframe len(titanicTrainDS) # + deletable=true editable=true # Summary on the dataframe titanicTrainDS.describe() # + deletable=true editable=true # lenght of the dataframe len(titanicTestDS) # + deletable=true editable=true # Summary on the dataframe titanicTestDS.describe() # + [markdown] deletable=true editable=true # If we have a look to the first dataset (the "train" one), we see that all the numerical columns have indeed a count of 891 except the "Age" column that has a count of 714. # This indicates that there are missing values (null, NA, or not a number). # # As we don't want to remove the rows with missing values, we choose to clean the data by filling in all of the missing values. # It would be a good idea to test if the missing value for "Age" is correlated with other variable. # For example, we see that it is there are way more missing values for the "Q" port of embarkation. 
# + deletable=true editable=true titanicTrainDS["AgeEmptyOrNot"] = titanicTrainDS["Age"].apply(lambda x: 1 if x>=0 else 0) titanicTrainDS[['Embarked', 'AgeEmptyOrNot']].groupby(['Embarked'], as_index=False).mean() # - # However, the mean age does not seem to differ strongly according to the port of embarkation. # + deletable=true editable=true titanicTrainDS[['Embarked', 'Age']].groupby(['Embarked'], as_index=False).mean() # - # Finally, we decide to clean the data by filling in all of the missing values with simply the median of all the values in the column # + deletable=true editable=true # Fill missing values with the median value for dataset in titanicDSs: dataset["Age"] = dataset["Age"].fillna(dataset["Age"].median()) # + [markdown] deletable=true editable=true # The "Sex" column is non-numeric, we need to convert it. # But first, we confirm that this column does not have empty values. then we make the conversion. # + deletable=true editable=true # What are the values for this column? for dataset in titanicDSs: print(dataset["Sex"].unique()) # + deletable=true editable=true # Convert to numerical values for dataset in titanicDSs: dataset.loc[dataset["Sex"] == "male", "Sex"] = 0 dataset.loc[dataset["Sex"] == "female", "Sex"] = 1 # + [markdown] deletable=true editable=true # We do the same with the "Embarked" column. # We First analyse if there are missing values. # We will see that yes and choose to fill the missing values with the most frequent value. # + deletable=true editable=true # What are the values for this column? 
for dataset in titanicDSs: print(dataset["Embarked"].unique()) # + deletable=true editable=true # Fill missing values with most frequent value mostFrequentOccurrence = titanicTrainDS["Embarked"].dropna().mode()[0] titanicTrainDS["Embarked"] = titanicTrainDS["Embarked"].fillna(mostFrequentOccurrence) # Convert to numerical values for dataset in titanicDSs: dataset.loc[dataset["Embarked"] == "S", "Embarked"] = 0 dataset.loc[dataset["Embarked"] == "C", "Embarked"] = 1 dataset.loc[dataset["Embarked"] == "Q", "Embarked"] = 2 # + [markdown] deletable=true editable=true # Finally, we clean the "Fare" variable of the "test" dataset. # + deletable=true editable=true titanicTestDS["Fare"] = titanicTestDS["Fare"].fillna(titanicTestDS["Fare"].median()) # + [markdown] deletable=true editable=true # # Model application # Now, we can turn to the core of the analysis. # We will introduce a couple of functions. # The first function is the one that will enable evaluating the accuracy of one classification method type. # However, we introduce a second function that enables to run the first function on each combination of predictors (ex: ["Sex", "Age", "Embarked"] or ["Age", "SibSp", "Parch", "Fare"] etc.). # # In what follows, we build the list of combinations and then introduce the these functions. 
# + deletable=true editable=true
# The columns that can be used in the prediction
predictorsAll = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]

# Create all combinations of predictors
predictorCombinations = []  # all combinations of predictors
for index in range(1, len(predictorsAll)+1):
    for subset in itertools.combinations(predictorsAll, index):
        predictorCombinations.append(list(subset))

# + deletable=true editable=true
# Function: Evaluate one algorithm type (and return n fitted algorithms)
# -input
#    predictorsDs: the dataset projected to the predictors of interest
#    targetDs: the target or label vector of interest (the column "Survived" in our work)
#    algModel: the "template" or model of the algorithm to apply
#    nbKF: the number of cross validation folders
# -output
#    algs: nbKF fitted algorithms
#    accuracy: the evaluation of the accuracy
def binClassifModel_kf(predictorsDs, targetDs, algModel, nbKF):
    """K-fold cross-validate one model template and score its accuracy.

    Returns [algs, accuracy] where algs holds one fitted clone of algModel
    per fold and accuracy is computed on the concatenated out-of-fold
    predictions.
    """
    # List of algorithms
    algs = []
    # Generate cross-validation folds for the titanic data set.
    # BUGFIX: the original passed random_state=1, which raises a ValueError on
    # scikit-learn >= 0.24 because shuffle defaults to False (the seed has no
    # effect). Unshuffled KFold is deterministic by construction, and shuffling
    # must stay OFF: the contiguous, in-order folds are what keep the
    # concatenated predictions below aligned row-for-row with targetDs.
    kf = KFold(nbKF)

    # List of predictions
    predictions = []
    for trainIndexes, testIndexes in kf.split(predictorsDs):
        # The predictors we're using to train the algorithm
        # Note how we only take the rows in the train folds
        predictorsTrainDs = (predictorsDs.iloc[trainIndexes,:])
        # The target we're using to train the algorithm
        train_target = targetDs.iloc[trainIndexes]
        # Initialize our algorithm class (deep copy so folds stay independent)
        alg = deepcopy(algModel)
        # Training the algorithm using the predictors and target
        alg.fit(predictorsTrainDs, train_target)
        algs.append(alg)
        # We can now make predictions on the test fold
        thisSplitPredictions = alg.predict(predictorsDs.iloc[testIndexes,:])
        predictions.append(thisSplitPredictions)

    # The predictions are in separate NumPy arrays (one per fold).
    # Concatenate them into a single array, along axis 0 (the only axis).
    predictions = np.concatenate(predictions, axis=0)

    # Map predictions to outcomes (the only possible outcomes are 1 and 0);
    # regressors emit continuous scores, classifiers already emit 0/1.
    predictions[predictions > .5] = 1
    predictions[predictions <=.5] = 0
    accuracy = len(predictions[predictions == targetDs]) / len(predictions)

    # return the multiple algorithms and the accuracy
    return [algs, accuracy]

# + code_folding=[] deletable=true editable=true
# Helper that returns the indexes of the sorted list
def sort_list(myList):
    """Argsort: indices of myList ordered by ascending value."""
    return sorted(range(len(myList)), key=lambda i:myList[i])

# Function: Run multiple evaluations for one algorithm type (one for each combination of predictors)
# -input
#    algModel: the "template" or model of the algorithm to apply
#    nbKF: the number of cross validation folders
# -output
#    {}
def getAccuracy_forEachPredictor(algModel, nbKF):
    """Evaluate algModel on every predictor combination and print the top 5."""
    accuracyList = []

    # For each combination of predictors
    for combination in predictorCombinations:
        result = binClassifModel_kf(titanicTrainDS[combination], titanicTrainDS["Survived"], algModel, nbKF)
        accuracy = result[1]
        accuracyList.append(accuracy)

    # Sort the accuracies
    accuracySortedList = sort_list(accuracyList)

    # Display the best combinations
    for i in range(-5, 0):
        print(predictorCombinations[accuracySortedList[i]], ": ", accuracyList[accuracySortedList[i]])

    print("--------------------------------------------------")

    # Display the accuracy corresponding to the combination that uses all the predictors
    lastIndex = len(predictorCombinations)-1
    print(predictorCombinations[lastIndex], ":", accuracyList[lastIndex])
# -

# Now that we have introduced the above functions, we evaluate a set of classification methods on each combination of predictors.
# Here are the evaluated classification methods: # - LinearRegression # - LogisticRegression # - GaussianNB # - KNeighborsClassifier # - DecisionTreeClassifier # - RandomForestClassifier # + deletable=true editable=true algModel = LinearRegression(fit_intercept=True, normalize=True) getAccuracy_forEachPredictor(algModel, 5) # + deletable=true editable=true algModel = LogisticRegression() getAccuracy_forEachPredictor(algModel, 5) # + deletable=true editable=true algModel = GaussianNB() getAccuracy_forEachPredictor(algModel, 5) # + deletable=true editable=true algModel = KNeighborsClassifier(n_neighbors=5) getAccuracy_forEachPredictor(algModel, 5) # + deletable=true editable=true algModel = DecisionTreeClassifier(min_samples_split=4, min_samples_leaf=2) getAccuracy_forEachPredictor(algModel, 5) # + deletable=true editable=true algModel = RandomForestClassifier(n_estimators=100, min_samples_split=4, min_samples_leaf=2) getAccuracy_forEachPredictor(algModel, 5) # + [markdown] deletable=true editable=true # # Prediction application # After having run all the models, we decide to choose the model that gave the best performance. # This model is the "RandomForestClassifier" with the specific parameters above. # Furthermore, we will use it with the best combination of predictors which is ['Pclass', 'Sex', 'Age', 'Parch', 'Fare'] that gave approximately 83% of accuracy. 
# + deletable=true editable=true # Run again the model with the tuned parameters on the dataset using the best combination of predictors algModel = RandomForestClassifier(n_estimators=100, min_samples_split=4, min_samples_leaf=2) predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'Fare'] result = binClassifModel_kf(titanicTrainDS[predictors], titanicTrainDS["Survived"], algModel, 5) algList = result[0] # the set of algorithms predictionsList = [] for alg in algList: predictions = alg.predict(titanicTestDS[predictors]) predictionsList.append(predictions) # There are different preditions, we take the mean (a voting-like system) predictionsFinal = np.mean(predictionsList, axis=0) # Map predictions to outcomes (the only possible outcomes are 1 and 0) predictionsFinal[predictionsFinal > .5] = 1 predictionsFinal[predictionsFinal <=.5] = 0 # Cast as int predictionsFinal = predictionsFinal.astype(int) # - # Finally, we can generate the submission file with the prediction of the survival for each passenger of the test dataset. # + deletable=true editable=true # Create a new dataset with only the id and the target column submission = pd.DataFrame({ "PassengerId": titanicTestDS["PassengerId"], "Survived": predictionsFinal }) # submission.to_csv(outputPath + 'submission.csv', index=False) # + [markdown] deletable=true editable=true # # Conclusion # # Throughout this work, we tried to establish a good model for predicting the survival of passengers in the Titanic disaster. # As outlooks, we could investigate the influence of some features cleaning and scaling (such as the "Fare" scaling) on the overall performance. # + [markdown] deletable=true editable=true # # References # Many ideas in this work are inspired by the great [tutorials](https://www.kaggle.com/c/titanic#tutorials) of the Titanic competition and other sources. # + deletable=true editable=true
dev/titanic/.ipynb_checkpoints/titanic-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Privacy and utility tradeoff (experiments 6 and 7, issue 120)
#
# * Synthetic methods: synthpop and CTGAN
# * Dataset: datasets/polish_data_2011/polish_data_2011
#
# For more information, refer to Experiments 6 and 7 in this issue: https://github.com/alan-turing-institute/QUIPP-collab/issues/120

import glob
import json
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import os

# ## Inputs

# +
# path to synthpop results (QUIPP pipeline)
synthpop_paths = "./outputs/synth-output_polish_6_cols_random_*/polish-synthpop-*"

# How to plot the results, options: scatter (default) or line
# "scatter" should be used when several results are going to be plotted in one figure
plot_type = "scatter"

# CTGAN path/colors/labels
ctgan_paths = ["./outputs/ctgan_synth-output_polish_6_cols/ctgan-polish-1",
               "./outputs/ctgan_synth-output_polish_6_cols/ctgan-polish-2",
               "./outputs/ctgan_synth-output_polish_6_cols/ctgan-polish-3"]
ctgan_colors = ["cyan", "blue", "red"]
ctgan_labels = ["CTGAN, 100", "CTGAN, 500", "CTGAN, 2000"]

# Privacy metric to be plotted
# options:"EMRi_norm" and "TMRi_norm"
privacy_metric = "EMRi_norm"
# -

# ## Plot

# +
def load_privacy_utility(result_dirs):
    """Read privacy/utility scores from each synthesis output directory.

    For every directory, reads disclosure_risk.json and
    utility_overall_diff.json and converts both to "higher is better"
    percentages (100 - metric*100). Uses the global `privacy_metric`.
    Returns (collect_privacy, collect_utility, counter).
    """
    # BUGFIX: the original opened both JSON files without ever closing them
    # (leaked file handles); `with` blocks close them deterministically. The
    # same loop was also duplicated for synthpop and CTGAN below.
    collect_privacy = []
    collect_utility = []
    counter = []
    for i, one_syn in enumerate(result_dirs):
        # - disclosure risk
        with open(os.path.join(one_syn, "disclosure_risk.json")) as disclosure_fio:
            disclosure = json.load(disclosure_fio)
        # normalize
        collect_privacy.append(100. - disclosure[privacy_metric]*100)

        # - utility
        with open(os.path.join(one_syn, "utility_overall_diff.json")) as utility_fio:
            utility = json.load(utility_fio)
        collect_utility.append(100. - utility["overall"]["f1"]["weighted"]*100.)

        counter.append(str(i+1))
    return collect_privacy, collect_utility, counter


# --- Synthpop
print("Plot synthpop outputs")
syn_dirs = glob.glob(synthpop_paths)
syn_dirs.sort()
collect_privacy, collect_utility, counter = load_privacy_utility(syn_dirs)

plt.figure(figsize=(10, 5))
if plot_type == "scatter":
    plt.scatter(collect_privacy, collect_utility,
                s=100, c="k", edgecolor="k", alpha=0.5, label="Synthpop")
else:
    plt.plot(collect_privacy, collect_utility,
             markersize=10, ls='-', marker="o", c="k", alpha=1.0, label="Synthpop")

# --- CTGAN
print("Plot CTGAN outputs")
for igan, ctgan_path in enumerate(ctgan_paths):
    syn_dirs = glob.glob(ctgan_path)
    syn_dirs.sort()
    collect_privacy, collect_utility, counter = load_privacy_utility(syn_dirs)
    plt.scatter(collect_privacy, collect_utility,
                s=100, c=ctgan_colors[igan], edgecolor=ctgan_colors[igan],
                alpha=1.0, label=ctgan_labels[igan])

plt.xlabel(f"Privacy ({privacy_metric})", size=24, color="black")
plt.ylabel("Utility", size=24, color="black")
plt.xlim(xmax=105)
plt.xticks(size=18, color="black")
plt.yticks(size=18, color="black")
plt.grid()
plt.legend(prop={'size': 16})
plt.show()
# -
examples/privacy_utility_tradeoff/exp_synthpop_ctgan_polish_data/plot_privacy_utility.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## What's this TensorFlow business?
#
# You've written a lot of code in this assignment to provide a whole host of neural network functionality. Dropout, Batch Norm, and 2D convolutions are some of the workhorses of deep learning in computer vision. You've also worked hard to make your code efficient and vectorized.
#
# For the last part of this assignment, though, we're going to leave behind your beautiful codebase and instead migrate to one of two popular deep learning frameworks: in this instance, TensorFlow (or PyTorch, if you switch over to that notebook)
#
# #### What is it?
# TensorFlow is a system for executing computational graphs over Tensor objects, with native support for performing backpropagation for its Variables. In it, we work with Tensors which are n-dimensional arrays analogous to the numpy ndarray.
#
# #### Why?
#
# * Our code will now run on GPUs! Much faster training. Writing your own modules to run on GPUs is beyond the scope of this class, unfortunately.
# * We want you to be ready to use one of these frameworks for your project so you can experiment more efficiently than if you were writing every feature you want to use by hand.
# * We want you to stand on the shoulders of giants! TensorFlow and PyTorch are both excellent frameworks that will make your lives a lot easier, and now that you understand their guts, you are free to use them :)
# * We want you to be exposed to the sort of deep learning code you might run into in academia or industry.

# ## How will I learn TensorFlow?
#
# TensorFlow has many excellent tutorials available, including those from [Google themselves](https://www.tensorflow.org/get_started/get_started).
# # Otherwise, this notebook will walk you through much of what you need to do to train models in TensorFlow. See the end of the notebook for some links to helpful tutorials if you want to learn more or need further clarification on topics that aren't fully explained here. # ## Load Datasets # import tensorflow as tf import numpy as np import math import timeit import matplotlib.pyplot as plt # %matplotlib inline # + from cs231n.data_utils import load_CIFAR10 def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000): """ Load the CIFAR-10 dataset from disk and perform preprocessing to prepare it for the two-layer neural net classifier. These are the same steps as we used for the SVM, but condensed to a single function. """ # Load the raw CIFAR-10 data cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # Subsample the data mask = range(num_training, num_training + num_validation) X_val = X_train[mask] y_val = y_train[mask] mask = range(num_training) X_train = X_train[mask] y_train = y_train[mask] mask = range(num_test) X_test = X_test[mask] y_test = y_test[mask] # Normalize the data: subtract the mean image mean_image = np.mean(X_train, axis=0) X_train -= mean_image X_val -= mean_image X_test -= mean_image return X_train, y_train, X_val, y_val, X_test, y_test # Invoke the above function to get our data. X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data() print('Train data shape: ', X_train.shape) print('Train labels shape: ', y_train.shape) print('Validation data shape: ', X_val.shape) print('Validation labels shape: ', y_val.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) # - # ## Example Model # # ### Some useful utilities # # . 
Remember that our image data is initially N x H x W x C, where: # * N is the number of datapoints # * H is the height of each image in pixels # * W is the height of each image in pixels # * C is the number of channels (usually 3: R, G, B) # # This is the right way to represent the data when we are doing something like a 2D convolution, which needs spatial understanding of where the pixels are relative to each other. When we input image data into fully connected affine layers, however, we want each data example to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. # ### The example model itself # # The first step to training your own model is defining its architecture. # # Here's an example of a convolutional neural network defined in TensorFlow -- try to understand what each line is doing, remembering that each layer is composed upon the previous layer. We haven't trained anything yet - that'll come next - for now, we want you to understand how everything gets set up. # # In that example, you see 2D convolutional layers (Conv2d), ReLU activations, and fully-connected layers (Linear). You also see the Hinge loss function, and the Adam optimizer being used. # # Make sure you understand why the parameters of the Linear layer are 5408 and 10. # # ### TensorFlow Details # In TensorFlow, much like in our previous notebooks, we'll first specifically initialize our variables, and then our network model. # + # clear old variables tf.reset_default_graph() # setup input (e.g. the data that changes every batch) # The first dim is None, and gets sets automatically based on batch size fed in X = tf.placeholder(tf.float32, [None, 32, 32, 3]) y = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder(tf.bool) def simple_model(X,y): # define our weights (e.g. 
init_two_layer_convnet) # setup variables Wconv1 = tf.get_variable("Wconv1", shape=[7, 7, 3, 32]) bconv1 = tf.get_variable("bconv1", shape=[32]) W1 = tf.get_variable("W1", shape=[5408, 10]) b1 = tf.get_variable("b1", shape=[10]) # define our graph (e.g. two_layer_convnet) a1 = tf.nn.conv2d(X, Wconv1, strides=[1,2,2,1], padding='VALID') + bconv1 h1 = tf.nn.relu(a1) h1_flat = tf.reshape(h1,[-1,5408]) y_out = tf.matmul(h1_flat,W1) + b1 return y_out y_out = simple_model(X,y) # define our loss total_loss = tf.losses.hinge_loss(tf.one_hot(y,10),logits=y_out) mean_loss = tf.reduce_mean(total_loss) # define our optimizer optimizer = tf.train.AdamOptimizer(5e-4) # select optimizer and set learning rate train_step = optimizer.minimize(mean_loss) # - # TensorFlow supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful). # # * Layers, Activations, Loss functions : https://www.tensorflow.org/api_guides/python/nn # * Optimizers: https://www.tensorflow.org/api_guides/python/train#Optimizers # * BatchNorm: https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization # ### Training the model on one epoch # While we have defined a graph of operations above, in order to execute TensorFlow Graphs, by feeding them input data and computing the results, we first need to create a `tf.Session` object. A session encapsulates the control and state of the TensorFlow runtime. For more information, see the TensorFlow [Getting started](https://www.tensorflow.org/get_started/get_started) guide. # # Optionally we can also specify a device context such as `/cpu:0` or `/gpu:0`. 
For documentation on this behavior see [this TensorFlow guide](https://www.tensorflow.org/tutorials/using_gpu) # # You should see a validation loss of around 0.4 to 0.6 and an accuracy of 0.30 to 0.35 below # + def run_model(session, predict, loss_val, Xd, yd, epochs=1, batch_size=64, print_every=100, training=None, plot_losses=False): # have tensorflow compute accuracy correct_prediction = tf.equal(tf.argmax(predict,1), y) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # shuffle indicies train_indicies = np.arange(Xd.shape[0]) np.random.shuffle(train_indicies) training_now = training is not None # setting up variables we want to compute (and optimizing) # if we have a training function, add that to things we compute variables = [mean_loss,correct_prediction,accuracy] if training_now: variables[-1] = training # counter iter_cnt = 0 for e in range(epochs): # keep track of losses and accuracy correct = 0 losses = [] # make sure we iterate over the dataset once for i in range(int(math.ceil(Xd.shape[0]/batch_size))): # generate indicies for the batch start_idx = (i*batch_size)%Xd.shape[0] idx = train_indicies[start_idx:start_idx+batch_size] # create a feed dictionary for this batch feed_dict = {X: Xd[idx,:], y: yd[idx], is_training: training_now } # get batch size actual_batch_size = yd[idx].shape[0] # have tensorflow compute loss and correct predictions # and (if given) perform a training step loss, corr, _ = session.run(variables,feed_dict=feed_dict) # aggregate performance stats losses.append(loss*actual_batch_size) correct += np.sum(corr) # print every now and then if training_now and (iter_cnt % print_every) == 0: print("Iteration {0}: with minibatch training loss = {1:.3g} and accuracy of {2:.2g}"\ .format(iter_cnt,loss,np.sum(corr)/actual_batch_size)) iter_cnt += 1 total_correct = correct/Xd.shape[0] total_loss = np.sum(losses)/Xd.shape[0] print("Epoch {2}, Overall loss = {0:.3g} and accuracy of {1:.3g}"\ 
.format(total_loss,total_correct,e+1)) if plot_losses: plt.plot(losses) plt.grid(True) plt.title('Epoch {} Loss'.format(e+1)) plt.xlabel('minibatch number') plt.ylabel('minibatch loss') plt.show() return total_loss,total_correct with tf.Session() as sess: with tf.device("/cpu:0"): #"/cpu:0" or "/gpu:0" sess.run(tf.global_variables_initializer()) print('Training') run_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step,True) print('Validation') run_model(sess,y_out,mean_loss,X_val,y_val,1,64) # - # ## Training a specific model # # In this section, we're going to specify a model for you to construct. The goal here isn't to get good performance (that'll be next), but instead to get comfortable with understanding the TensorFlow documentation and configuring your own model. # # Using the code provided above as guidance, and using the following TensorFlow documentation, specify a model with the following architecture: # # * 7x7 Convolutional Layer with 32 filters and stride of 1 # * ReLU Activation Layer # * Spatial Batch Normalization Layer (trainable parameters, with scale and centering) # * 2x2 Max Pooling layer with a stride of 2 # * Affine layer with 1024 output units # * ReLU Activation Layer # * Affine layer from 1024 input units to 10 outputs # # # + # clear old variables tf.reset_default_graph() # define our input (e.g. 
the data that changes every batch) # The first dim is None, and gets sets automatically based on batch size fed in X = tf.placeholder(tf.float32, [None, 32, 32, 3]) y = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder(tf.bool) # define model def complex_model(X,y,is_training): # setup variables # First conv layer Wconv1 = tf.get_variable("Wconv1", shape=[7, 7, 3, 32]) bconv1 = tf.get_variable("bconv1", shape=[32]) # BN params gamma = tf.get_variable("gamma", shape=[32]) beta = tf.get_variable("beta", shape=[32]) # First Affine layer, 1024 output units W1 = tf.get_variable("W1", shape=[5408, 1024]) b1 = tf.get_variable("b1", shape=[1024]) # Second Affine layer, 10 output units W2 = tf.get_variable("W2", shape=[1024, 10]) b2 = tf.get_variable("b2", shape=[10]) # Build the layers # Padding is valid cuz we can drop the unused pixels conv1_out = tf.nn.conv2d(X, filter=Wconv1, strides=[1, 1, 1, 1], padding='VALID') + bconv1 relu1_out = tf.nn.relu(conv1_out) mean, var = tf.nn.moments(relu1_out, [0, 1, 2]) # See https://www.tensorflow.org/api_docs/python/tf/nn/moments bn1_out = tf.nn.batch_normalization(relu1_out, mean, var, beta, gamma, 1e-3) pool1_out = tf.nn.max_pool(bn1_out, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') # ksize = [1, height, width, 1] | strides = [1, stride, stride, 1] flat_pool1_out = tf.reshape(pool1_out, [-1,5408]) affine1_out = tf.matmul(flat_pool1_out, W1) + b1 relu2_out = tf.nn.relu(affine1_out) affine2_out = tf.matmul(relu2_out, W2) + b2 return affine2_out y_out = complex_model(X,y,is_training) # - # To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 64 x 10, since our batches have size 64 and the output of the final affine layer should be 10, corresponding to our 10 classes): # Now we're going to feed a random batch into the model # and make sure the output is the right size x = np.random.randn(64, 32, 32,3) with tf.Session() as sess: with 
tf.device("/cpu:0"): #"/cpu:0" or "/gpu:0" tf.global_variables_initializer().run() ans = sess.run(y_out,feed_dict={X:x,is_training:True}) # %timeit sess.run(y_out,feed_dict={X:x,is_training:True}) print(ans.shape) print(np.array_equal(ans.shape, np.array([64, 10]))) # You should see the following from the run above # # `(64, 10)` # # `True` # ### GPU! # # Now, we're going to try and start the model under the GPU device, the rest of the code stays unchanged and all our variables and operations will be computed using accelerated code paths. However, if there is no GPU, we get a Python exception and have to rebuild our graph. On a dual-core CPU, you might see around 50-80ms/batch running the above, while the Google Cloud GPUs (run below) should be around 2-5ms/batch. try: with tf.Session() as sess: with tf.device("/gpu:0") as dev: #"/cpu:0" or "/gpu:0" tf.global_variables_initializer().run() ans = sess.run(y_out,feed_dict={X:x,is_training:True}) # %timeit sess.run(y_out,feed_dict={X:x,is_training:True}) except tf.errors.InvalidArgumentError: print("no gpu found, please use Google Cloud if you want GPU acceleration") # rebuild the graph # trying to start a GPU throws an exception # and also trashes the original graph tf.reset_default_graph() X = tf.placeholder(tf.float32, [None, 32, 32, 3]) y = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder(tf.bool) y_out = complex_model(X,y,is_training) # You should observe that even a simple forward pass like this is significantly faster on the GPU. So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use GPU devices. However, with TensorFlow, the default device is a GPU if one is available, and a CPU otherwise, so we can skip the device specification from now on. # ### Train the model. 
# # Now that you've seen how to define a model and do a single forward pass of some data through it, let's walk through how you'd actually train one whole epoch over your training data (using the complex_model you created provided above). # # Make sure you understand how each TensorFlow function used below corresponds to what you implemented in your custom neural network implementation. # # First, set up an **RMSprop optimizer** (using a 1e-3 learning rate) and a **cross-entropy loss** function. See the TensorFlow documentation for more information # * Layers, Activations, Loss functions : https://www.tensorflow.org/api_guides/python/nn # * Optimizers: https://www.tensorflow.org/api_guides/python/train#Optimizers # + # Inputs # y_out: is what your model computes # y: is your TensorFlow variable with label information # Outputs # mean_loss: a TensorFlow variable (scalar) with numerical loss # optimizer: a TensorFlow optimizer # This should be ~3 lines of code! mean_loss = None optimizer = None one_hot_labels = tf.one_hot(y, 10) loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=one_hot_labels, logits=y_out) mean_loss = tf.reduce_mean(loss) optimizer = tf.train.RMSPropOptimizer(1e-3) # - # batch normalization in tensorflow requires this extra dependency extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(extra_update_ops): train_step = optimizer.minimize(mean_loss) # ### Train the model # Below we'll create a session and train the model over one epoch. You should see a loss of 1.4 to 2.0 and an accuracy of 0.4 to 0.5. There will be some variation due to random seeds and differences in initialization # + sess = tf.Session() sess.run(tf.global_variables_initializer()) print('Training') run_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step) # - # ### Check the accuracy of the model. # # Let's see the train and test code in action -- feel free to use these methods when evaluating the models you develop below. 
You should see a loss of 1.3 to 2.0 with an accuracy of 0.45 to 0.55. print('Validation') run_model(sess,y_out,mean_loss,X_val,y_val,1,64) # ## Train a _great_ model on CIFAR-10! # # Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves ** >= 70% accuracy on the validation set** of CIFAR-10. You can use the `run_model` function from above. # ### Things you should try: # - **Filter size**: Above we used 7x7; this makes pretty pictures but smaller filters may be more efficient # - **Number of filters**: Above we used 32 filters. Do more or fewer do better? # - **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions? # - **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster? # - **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? Good architectures to try include: # - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] # - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] # - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM] # - **Use TensorFlow Scope**: Use TensorFlow scope and/or [tf.layers](https://www.tensorflow.org/api_docs/python/tf/layers) to make it easier to write deeper networks. See [this tutorial](https://www.tensorflow.org/tutorials/layers) for how to use `tf.layers`. # - **Use Learning Rate Decay**: [As the notes point out](http://cs231n.github.io/neural-networks-3/#anneal), decaying the learning rate might help the model converge. Feel free to decay every epoch, when loss doesn't change over an entire epoch, or any other heuristic you find appropriate. See the [Tensorflow documentation](https://www.tensorflow.org/versions/master/api_guides/python/train#Decaying_the_learning_rate) for learning rate decay. 
# - **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture). # - **Regularization**: Add l2 weight regularization, or perhaps use [Dropout as in the TensorFlow MNIST tutorial](https://www.tensorflow.org/get_started/mnist/pros) # # ### Tips for training # For each network architecture that you try, you should tune the learning rate and regularization strength. When doing this there are a couple important things to keep in mind: # # - If the parameters are working well, you should see improvement within a few hundred iterations # - Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all. # - Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs. # - You should use the validation set for hyperparameter search, and we'll save the test set for evaluating your architecture on the best parameters as selected by the validation set. # # ### Going above and beyond # If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these; however they would be good things to try for extra credit. # # - Alternative update steps: For the assignment we implemented SGD+momentum, RMSprop, and Adam; you could try alternatives like AdaGrad or AdaDelta. # - Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut. 
# - Model ensembles # - Data augmentation # - New Architectures # - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output. # - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together. # - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32) # # If you do decide to implement something extra, clearly describe it in the "Extra Credit Description" cell below. # # ### What we expect # At the very least, you should be able to train a ConvNet that gets at **>= 70% accuracy on the validation set**. This is just a lower bound - if you are careful it should be possible to get accuracies much higher than that! Extra credit points will be awarded for particularly high-scoring models or unique approaches. # # You should use the space below to experiment and train your network. The final cell in this notebook should contain the training and validation set accuracies for your final trained network. # # Have fun and happy training! 
# + # Feel free to play with this cell def my_model(X,y,is_training): conv1 = tf.layers.conv2d( inputs=X, filters=32, kernel_size=(7, 7), padding="same", activation=tf.nn.relu ) conv2 = tf.layers.conv2d( inputs=conv1, filters=32, kernel_size=(7, 7), padding="same", activation=tf.nn.relu ) bn1 = tf.layers.batch_normalization(inputs=conv2, training=is_training) pool1 = tf.layers.max_pooling2d(inputs=bn1, pool_size=(2, 2), strides=2) conv3 = tf.layers.conv2d( inputs=pool1, filters=64, kernel_size=(5, 5), padding="same", activation=tf.nn.relu ) conv4 = tf.layers.conv2d( inputs=conv3, filters=64, kernel_size=(5, 5), padding="same", activation=tf.nn.relu ) bn2 = tf.layers.batch_normalization(inputs=conv4, training=is_training) pool2 = tf.layers.max_pooling2d(inputs=bn2, pool_size=(2, 2), strides=2) conv5 = tf.layers.conv2d( inputs=pool2, filters=128, kernel_size=(3, 3), padding="same", activation=tf.nn.relu ) conv6 = tf.layers.conv2d( inputs=conv5, filters=128, kernel_size=(3, 3), padding="same", activation=tf.nn.relu ) bn3 = tf.layers.batch_normalization(inputs=conv6, training=is_training) pool3 = tf.layers.max_pooling2d(inputs=bn3, pool_size=(2, 2), strides=2) flatten = tf.layers.flatten(inputs=pool3) dense1 = tf.layers.dense(inputs=flatten, units=512, activation=tf.nn.relu) drop1 = tf.layers.dropout(inputs=dense1, training=is_training) dense2 = tf.layers.dense(inputs=drop1, units=512, activation=tf.nn.relu) drop2 = tf.layers.dropout(inputs=dense2, training=is_training) logits = tf.layers.dense(inputs=drop2, units=10) return logits tf.reset_default_graph() X = tf.placeholder(tf.float32, [None, 32, 32, 3]) y = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder(tf.bool) y_out = my_model(X,y,is_training) one_hot_labels = tf.one_hot(y, 10) loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=one_hot_labels, logits=y_out) mean_loss = tf.reduce_mean(loss) optimizer = tf.train.AdamOptimizer(1e-3) # batch normalization in tensorflow requires this extra 
dependency extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(extra_update_ops): train_step = optimizer.minimize(mean_loss) # + # Feel free to play with this cell # This default code creates a session # and trains your model for 10 epochs # then prints the validation set accuracy sess = tf.Session() sess.run(tf.global_variables_initializer()) print('Training') run_model(sess,y_out,mean_loss,X_train,y_train,15,64,200,train_step,True) print('Validation') run_model(sess,y_out,mean_loss,X_val,y_val,1,64) # - # Test your model here, and make sure # the output of this cell is the accuracy # of your best model on the training and val sets # We're looking for >= 70% accuracy on Validation print('Training') run_model(sess,y_out,mean_loss,X_train,y_train,1,64) print('Validation') run_model(sess,y_out,mean_loss,X_val,y_val,1,64) # ### Describe what you did here # In this cell you should also write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network # I model my network based on some famous networks, including VGG, GoogLeNet and ResNet, but in a much smaller scale. # # The idea is to use fewer filters with a larger convolution dimention at the front of the model, and increase the number of filters and decrease the convolution dimention along the model. This can effectively splitting image features into more weight-aware neurons. # # I also stacked multiple convolution layers together, as this is a trick to reduce number of parameters while representing the same filter, as described here: https://www.jeremyjordan.me/convnet-architectures/ # # Finally I use the good old fully connected layers and dropout layers to tied everything together. # ### Test Set - Do this only once # Now that we've gotten a result that we're happy with, we test our final model on the test set. 
This would be the score we would achieve on a competition. Think about how this compares to your validation set accuracy. print('Test') run_model(sess,y_out,mean_loss,X_test,y_test,1,64) # ## Going further with TensorFlow # # The next assignment will make heavy use of TensorFlow. You might also find it useful for your projects. # # # Extra Credit Description # If you implement any additional features for extra credit, clearly describe them here with pointers to any code in this or other files if applicable.
Assignment2/TensorFlow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # convert the multinews dataset to aylien's MDS jsonl format # - from pathlib import Path import json DATADIR = Path('/home/chris/projects/aylien/dynamic-ensembles/data/multi-news/') # + source_targets = { 'test': (DATADIR / 'test.src.cleaned', DATADIR / 'test.tgt') } # + from collections import Counter import numpy as np # + multinews_article_delimiter = ' ||||| ' cluster_rows = [] article_cnts = Counter() summary_lens = [] for prefix, (srcs_f, tgt_f) in source_targets.items(): with open(srcs_f) as c_srcs, open(tgt_f) as c_tgt, open(DATADIR / (prefix + '.jsonl'), 'w') as out: for srcs, tgt in zip(c_srcs, c_tgt): articles = [{'title': '', 'text': t} for t in srcs.split(multinews_article_delimiter)] out.write(f'{json.dumps({"articles": articles, "summary": tgt})}\n') article_cnts.update([len(articles)]) summary_lens.append(len(tgt.split())) print(article_cnts.most_common()) print((np.mean(summary_lens), np.var(summary_lens), np.std(summary_lens))) # -
research/multinews_to_mds_jsonl.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Random Forest
# https://scikit-learn.org/stable/modules/ensemble.html#forest
#
# (Fixed: this notebook previously claimed to demonstrate a Support Vector
# Machine and linked to the SVM docs, but the model below is a
# RandomForestClassifier.)

# ## Dummy Data Generation

import numpy as np

# Six 2-D points: three in the negative quadrant (class 1),
# three in the positive quadrant (class 2).
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
Y = np.array([1, 1, 1, 2, 2, 2])

# ## Training

from sklearn.ensemble import RandomForestClassifier

# random_state is fixed so results are reproducible across kernel restarts.
clf = RandomForestClassifier(n_estimators=20, random_state=0)
clf.fit(X, Y)

# ## Prediction

# predict() accepts one or many samples; each row is a 2-D point.
print(clf.predict([[-0.8, -1]]))
print(clf.predict([[-0.8, -1], [1, 2]]))

# ## Accuracy

from sklearn.metrics import accuracy_score

# Accuracy is measured on the training points themselves (no held-out set),
# so it only shows the model memorised this toy data.
predictions = clf.predict(X)
accuracy = accuracy_score(Y, predictions)
print(accuracy)
Classification/Random Forest/Example Usage - Random Forest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="a8bkqME5m9d_" # # COVID-19 # + [markdown] id="z5i82BdIm9d_" # ---- # + [markdown] id="_JFpricum9d_" # The world is still struggling with one the most rapidly spreading pandemics. There are a lot of people who say that data is the best weapon we can use in this "Corona Fight". # # Imagine that you are one of the best data scientists in your country. The president of your country asked you to analyze the COVID-19 patient-level data of South Korea and prepare your homeland for the next wave of the pandemic. You, as the lead data scientist of your country **have to create and prove a plan of fighting the pandemics in your country** by analyzing the provided data. You must get most important insights using learned data science techniques and present them to the lead of your country. # + [markdown] id="Uvga-O0Bm9d_" # https://www.kaggle.com/kimjihoo/coronavirusdataset/ # + [markdown] id="4iUabJgym9d_" # ### Requirements # + [markdown] id="aJ64ciUzQL8o" # #### Exploratory Data Analysis # * [x] Describe the data with basic statistical parameters - mean, median, quantiles, etc. Use parameters that give you the most important statistical insights of the data. # * [x] Grouping the data and analyzing the groups - using Pandas aggregate methods. # * [x] Work with features - handle missing data if needed, use pandas date APIs. # * [x] Manipulate datasets - use joins if needed. # * [x] Visualize the data - you can use line, scatter, histogram plots, density plots, regplots, etc. # # #### Statistical hypothesis testing # * [x] Use at least one statistical significance test. # * [x] Report p-values. # * [x] Use visualizations. # # #### Modeling # * [x] Visualize data with dimensionality reduction algorithms. # - [x] Perform cluster analysis. 
# * [x] Use a linear model to explain relationships by correlation and predict new values. # # #### Presentation # * Present the project - the data, methods and results. # # #### Evaluation Criteria # - Code quality # - Fulfillment of the idea # - Adherence to the requirements # - Delivery of the presentation # # #### Statistical hypothesis testing # - Correct statistical test method is used, based on the situation. # - Reasoning on chosen statistical significance level. # # #### Modeling # - [x] Both PCA and T-SNE algorithms are used. # # # <div><img width="400px" height="auto" src="https://images.unsplash.com/photo-1574515944794-d6dedc7150de?ixlib=rb-1.2.1&ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&auto=format&fit=crop&w=1532&q=80" /></div> # + [markdown] id="1JEdfBPX9BzT" # ### Getting started # + id="FYNfJwgy3cKu" import pandas as pd import numpy as np import scipy.stats as stats import statsmodels.api as sm import math import seaborn as sns import matplotlib.pyplot as plt from matplotlib import pyplot from sklearn.decomposition import PCA from sklearn.manifold import TSNE from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.cluster import KMeans # + id="iAJDLW-ibiqc" patient = pd.read_csv("https://raw.githubusercontent.com/TuringCollegeSubmissions/lcramw-DS.1.4/master/PatientInfo.csv?token=AFU2SI3MJJRN7U6ASU47T4C72HE2G") region = pd.read_csv("https://raw.githubusercontent.com/TuringCollegeSubmissions/lcramw-DS.1.4/master/Region.csv?token=<KEY>") search_trend = pd.read_csv("https://raw.githubusercontent.com/TuringCollegeSubmissions/lcramw-DS.1.4/master/SearchTrend.csv?token=<KEY>") weather = pd.read_csv("https://raw.githubusercontent.com/TuringCollegeSubmissions/lcramw-DS.1.4/master/Weather.csv?token=<KEY>") # + [markdown] id="AxqzrPas9HPY" # 
### EDA # + [markdown] id="e3Xx3MeIT8pt" # #### Exploring # + id="s1zQlF5Q-UfF" colab={"base_uri": "https://localhost:8080/"} outputId="adfbfc42-78fc-45b3-9aed-ebe4340e4b28" patient.shape patient.head(2) # + colab={"base_uri": "https://localhost:8080/"} id="m5CqE1g0ZGDS" outputId="232b286a-e332-4f2b-e89c-eae2c9d426dc" patient.info() # + colab={"base_uri": "https://localhost:8080/"} id="sVM6a33UeC7r" outputId="96de464e-8ecc-429f-cbe3-e751e8b7944b" print("first Infected date in korea: ", patient['confirmed_date'][0]) # + colab={"base_uri": "https://localhost:8080/"} id="yVATw2MccfKN" outputId="28909142-fda3-4dac-d406-9d5b2def4aae" region.shape region.head(2) # + colab={"base_uri": "https://localhost:8080/"} id="VR119QXoUHXT" outputId="982729fc-2907-468f-f9fa-189b9fc8113d" region.info() # + colab={"base_uri": "https://localhost:8080/"} id="c6eE8y4hUbUZ" outputId="350810c1-a6da-4f98-b791-3b7fa7ec59af" weather.shape weather.head(2) # + colab={"base_uri": "https://localhost:8080/"} id="7y98aUPUUbUa" outputId="a04b8f06-4171-44a7-a56f-1647bbe82220" weather.info() # + colab={"base_uri": "https://localhost:8080/"} id="pXATBM0uYyz2" outputId="bd3813d7-d179-4a85-e8fc-13d9ec94f8d6" weather.province.value_counts() # + colab={"base_uri": "https://localhost:8080/"} id="jrDv8y7JU2Jk" outputId="0dcf68d7-77e9-4c30-b7ca-4150076fbebc" weather['province'] = weather['province'].str.replace('Chunghceongbuk', 'Chungcheongbuk') weather_grouped = weather.groupby(['province']).mean().round(1) weather_grouped # + id="FXKh9sCTxE_T" colab={"base_uri": "https://localhost:8080/"} outputId="0004a45c-311a-45a8-d96d-5dd6e90ad9c0" search_trend.shape search_trend.head(2) # + id="I2rF8y4Dx07Q" colab={"base_uri": "https://localhost:8080/"} outputId="1a1a994f-4ab7-4706-8b4d-9de573a1b835" search_trend.info() # + [markdown] id="ISz_efBFULum" # #### Visualizing # + [markdown] id="XN7YgbuFgX25" # ##### Search trends # + id="r6jiCDlIyIu3" # Set date as index search_trend.set_index('date', inplace=True) # + 
id="5QPhY1bV6swU" # Set as day for the plot search_trend = search_trend.asfreq('D', method='pad') # + id="kLAGI6BLxZfy" colab={"base_uri": "https://localhost:8080/"} outputId="46b795a7-66e4-4ae5-ddf5-ab778602a7bd" # Plot search_trend plt.figure(figsize=(22,10)) plt.plot(search_trend) # Set axis settings and labels plt.ylabel('Relative search volume'); plt.xlabel('Date'); plt.title('Relative keyword searches in NAVER South-Korea') plt.xlim((pd.to_datetime("2020-01-01"),pd.to_datetime("2020-07-01"))) # show a legend on the plot plt.legend(labels=['cold', 'flu', 'pneumonia', 'coronavirus']) # + [markdown] id="c9nq6_SSgbO4" # ##### Provinces # + id="HVkCEDTRhBBy" # Select columns from region df region_province = region[['province','city','elementary_school_count','kindergarten_count', 'university_count', 'academy_ratio', 'elderly_population_ratio', 'elderly_alone_ratio', 'nursing_home_count']] # + id="hVzaLtjDg6x4" # Merge with weather province = region_province.merge(weather_grouped, how='left', on=['province']) # + colab={"base_uri": "https://localhost:8080/"} id="rrtueL2qhbux" outputId="25cfe538-9041-44b5-fe81-e3720ddbe2da" # Check province df province.head() # + id="Baj-ziEkjIuA" # Drop korea because it's the total province = province[province.province != 'Korea'] # + colab={"base_uri": "https://localhost:8080/"} id="aeKixJF2v88r" outputId="8dbaf5cb-b987-4d13-c436-09c79158c222" # Plot the elderly population ratio per province plt.figure(figsize=(10,10)) province_sorted = province[['province', 'elderly_population_ratio']].sort_values(by='elderly_population_ratio', ascending=False) sns.set(style="whitegrid") sns.boxplot(data=province_sorted, x='province', y='elderly_population_ratio') plt.xticks(rotation=90) plt.title('Elderly population ratio per province in South-Korea') plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="Ru2CStL5im2y" outputId="35ed78bd-7ac1-4ed1-f4fe-fe162f2dc162" # Plot the avg temp per province plt.figure(figsize=(10,10)) 
province_sorted_temp = province[['province', 'avg_temp']].sort_values(by='avg_temp', ascending=False) sns.set(style="whitegrid") sns.pointplot(data=province_sorted_temp, x='province', y='avg_temp') plt.xticks(rotation=90) plt.title('Avg temperature per province in South-Korea') plt.show() # + id="CVZao6xvuKvL" # Groupby province province_df = province.groupby(['province']).mean().round(1) # + colab={"base_uri": "https://localhost:8080/"} id="gP8MNBSvm8RA" outputId="f3fa3fd3-df9b-4a52-eff3-f3a3ce7b32a4" # Get the mean, median and standard deviation for the variables province_df.agg(['mean', 'std', 'median']).T # + colab={"base_uri": "https://localhost:8080/"} id="Qrlk_fHVox4H" outputId="38ce0468-6bae-40ec-8ce2-f692465d7fb3" # Plot provinces with most/least elementary schools fig, ax = plt.subplots(1, 2, sharey=True) province_df.elementary_school_count.nlargest(3).plot(kind='bar', ax=ax[0]) province_df.elementary_school_count.nsmallest(3).plot(kind='bar', ax=ax[1]) ax[0].set_title('Most elementary schools') ax[1].set_title('Least elementary schools') plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="rIiOpGXdtd6F" outputId="0dfdd507-592f-454a-d613-6af0c1bfa7f6" # Plot provinces with most/least nursing homes fig, ax = plt.subplots(1, 2, sharey=True) province_df.nursing_home_count.nlargest(3).plot(kind='bar', ax=ax[0]) province_df.nursing_home_count.nsmallest(3).plot(kind='bar', ax=ax[1]) ax[0].set_title('Most nursing homes') ax[1].set_title('Least nursing homes') plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="G6RSGQxKtsI4" outputId="60a707cd-7fd4-4ecb-df2a-cdff6671a256" # Plot provinces with most/least universities fig, ax = plt.subplots(1, 2, sharey=True) province_df.university_count.nlargest(3).plot(kind='bar', ax=ax[0]) province_df.university_count.nsmallest(3).plot(kind='bar', ax=ax[1]) ax[0].set_title('Most universities') ax[1].set_title('Least universities') plt.show() # + [markdown] id="pynfKya8gwVz" # ##### Patients # + 
id="5ku7BbOCdRMp"
# Get aggregated states: patient counts per state, sorted ascending
agg = patient[['state','patient_id']].groupby('state').count().reset_index().sort_values(by='patient_id')

# + colab={"base_uri": "https://localhost:8080/"} id="zEYYykPCc1qH" outputId="317f52a6-ef62-4c8a-9eb0-06cc4ed15f95"
# Plot the aggregated states
plt.bar(x=list(agg['state']), height=list(agg['patient_id']))
plt.show()

# + id="ivhkZo8dvDy9"
# Drop rows with a missing infection_case (NaN)
patient.dropna(subset=['infection_case'], inplace=True)

# + id="kqV_Lrrau2v6"
# Aggregate/group some infection cases: any case description containing one of
# these keywords is collapsed to the keyword itself. A single loop replaces the
# seven copy-pasted .loc assignments; keyword order matches the original
# sequential assignments, so behavior is unchanged.
for keyword in ['church', 'nursing home', 'hospital', 'gym', 'town', 'center', 'club']:
    patient.loc[patient['infection_case'].str.contains(keyword, case=False), 'infection_case'] = keyword

# + colab={"base_uri": "https://localhost:8080/"} id="FR6Ls_HMwvYE" outputId="c45822be-0d85-4d62-a0de-3fe7ae9185b4"
# Plot infection cases (top 5)
patient.infection_case.value_counts().nlargest(5).plot(kind='bar')
plt.title('Most infection cases')
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="UAZN0cdnghl-" outputId="ca74ccaf-1eff-44b7-cef4-561a15ce8c94"
# Get % of gender.
# Fix: derive the labels from the value_counts() index instead of hardcoding
# ('Female', 'Male'), so the labels can never get out of sync with the order
# of `sizes` if the gender frequencies change.
gender_shares = patient.sex.value_counts(normalize=True)
sizes = gender_shares.tolist()
labels = [sex.capitalize() for sex in gender_shares.index]

# Pie chart
plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90,
        colors=['lightcoral', 'lightskyblue'])
plt.axis('equal')
plt.title('% of men/women in the patient data', loc='center')
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="AXE5ex08g-cs"
outputId="abfafaa7-ad0f-410c-e4ec-f1cfd9ae3ad8" # Get gender counts per state gender = patient.groupby('sex') gender.state.value_counts() # + colab={"base_uri": "https://localhost:8080/"} id="dfT_H1yzl_Y3" outputId="ec5e600d-1d23-433b-944d-0ceb82ed18d1" # Set counts per state in list states_list = gender.state.value_counts(normalize=True).tolist() # Pie chart labels = 'released', 'isolated', 'deceased' colors = ['lightcoral', 'moccasin', 'plum'] colors_male = ['lightskyblue', 'yellowgreen', 'seagreen'] fig, ax = plt.subplots(1, 2) ax[0].pie(states_list[0:3], labels=labels, autopct='%1.1f%%', startangle=90, colors=colors) ax[0].axis('equal') ax[0].set_title('Female state %') ax[1].pie(states_list[3:6], labels=labels, autopct='%1.1f%%', startangle=90, colors=colors_male) ax[1].axis('equal') ax[1].set_title('Male state %') plt.show() # + [markdown] id="aIW8TW-dWZDT" # #### Merging # + id="vt_1wskRcrGY" # Create full features DF full_features_df = patient.merge(region, how='left', on=['province', 'city']) # + id="xS6u1PPsfLyl" # Drop unneccesary columns full_features_df.drop(columns=['patient_id', 'confirmed_date', 'contact_number', 'infected_by', 'latitude', 'longitude', 'code', 'deceased_date', 'symptom_onset_date', 'released_date'], inplace=True) # + colab={"base_uri": "https://localhost:8080/"} id="nUjl9N3pfLMD" outputId="3e5a139e-3ed5-4064-b661-1cf21ca4605f" # Reset index and view the head of full_features_df full_features_df = full_features_df.reset_index(drop=True) full_features_df.head() # + id="vO8mE7YJOVXQ" # Remove s' from age full_features_df['age'] = full_features_df['age'].str.replace('s', '') # Set as float to handle NaN full_features_df['age'] = full_features_df['age'].astype(float) # + colab={"base_uri": "https://localhost:8080/"} id="Z4n-r0JdSeRC" outputId="158eaffc-2ec0-4cbd-a08d-b261538ccb91" # Check the info full_features_df.info() # + colab={"base_uri": "https://localhost:8080/"} id="iyXak1T5o8MU" outputId="87e6410a-4fa2-4801-8967-29862628110b" # 
Get the mean age per state grouped_age = full_features_df.groupby('state').mean() grouped_age.age # + colab={"base_uri": "https://localhost:8080/"} id="WY6g2DIB52rD" outputId="7f488aca-ac41-455d-a7ee-b55b37cb2bb3" # Get the mean age per state grouped_age # + colab={"base_uri": "https://localhost:8080/"} id="XvwxbTdgpnQY" outputId="8e2be276-c252-4890-a865-720fe8e92ab2" # Create dataframes for each state deceased = full_features_df.query('state == "deceased"') isolated = full_features_df.query('state == "isolated"') released = full_features_df.query('state == "released"') print(f'{deceased.shape}, {isolated.shape}, {released.shape}') # + colab={"base_uri": "https://localhost:8080/"} id="1rTAcoAfuPTX" outputId="522fbf12-7920-436d-8e4c-10e763fb6595" # Impute age based on age state mean deceased.age = deceased.loc[:, 'age'].transform(lambda x: x.fillna(x.mean())) isolated.age = isolated.loc[:, 'age'].transform(lambda x: x.fillna(x.mean())) released.age = released.loc[:, 'age'].transform(lambda x: x.fillna(x.mean())) # + colab={"base_uri": "https://localhost:8080/"} id="9N4cehAkxeEU" outputId="1266cf05-450e-460e-b214-c8c63bab4ce2" # Create full_df dataframe again with imputed values full_df = pd.concat([deceased, isolated, released]) full_df.info() # + id="U-M8DpDpgxxT" colab={"base_uri": "https://localhost:8080/"} outputId="9d9f4443-abf5-4294-afd6-f383aceda436" # Drop NaN values full_df.dropna(subset=['sex', 'age', 'country', 'province', 'city', 'infection_case', 'elementary_school_count', 'kindergarten_count', 'university_count', 'academy_ratio', 'elderly_population_ratio', 'elderly_alone_ratio', 'nursing_home_count'], inplace=True) full_df = full_df.reset_index(drop=True) # Check info full_df.info() # + colab={"base_uri": "https://localhost:8080/"} id="h9D4Mj0t6sSp" outputId="4fa9c72a-521b-428f-bf5e-40e6b094b60a" # Check distribution states full_df.state.value_counts() # + colab={"base_uri": "https://localhost:8080/"} id="1raAPkm59Eyn" 
outputId="491bce1e-778e-47d0-daed-c46825af1af8"
# Check distribution gender
full_df.sex.value_counts()

# + colab={"base_uri": "https://localhost:8080/"} id="M_i2BAnT9KEP" outputId="6104075f-76df-4efc-b707-6f095d931ba6"
# Check distribution provinces
full_df.province.value_counts()

# + colab={"base_uri": "https://localhost:8080/"} id="KF2wpSWKzs_R" outputId="2a4d21c1-d16c-47a0-d084-b9f9a31560de"
# Function for rounding to nearest 10th
def roundup(num):
    """Round ``num`` up to the nearest multiple of 10.

    Parameters
    ----------
    num : float
        Value to round (NaN would raise ValueError inside math.ceil,
        but ages were already imputed/dropped above).

    Returns
    -------
    int
        Smallest multiple of 10 greater than or equal to ``num``.
    """
    return int(math.ceil(num / 10.0)) * 10

# Round all ages in full_df.age — a list comprehension replaces the
# original append loop (same order, same values, clearer name).
rounded_ages = [roundup(number) for number in full_df.age]

# df with rounded age
df_rounded = pd.DataFrame(data=rounded_ages, columns=['age'])

# Plot age distribution
df_rounded.age.value_counts().sort_index().plot(kind='bar')
plt.title('Age distribution')
plt.show()

# + [markdown] id="AA5Fs5HKi7yD"
# ### Statistical testing

# + [markdown] id="QUMcZhRHYBu_"
# I am interested to know if there is a difference in the number of deceased men vs. the number of deceased women. In order to do this I will be using an **independent samples t-test**, which basically compares the means of two independent groups in order to determine whether there is statistical evidence that the associated population means are significantly different.
#
# I could also use a **z-test** because the sample size satisfies `n>30`, but the t-test is in my opinion the more correct one to use, since you can never really know the population standard deviation.

# + [markdown] id="1E2IncItJD93"
# Before we test the significance, let's specify our hypotheses:
#
#
# * H0: There is no difference between the female mean deceased sample and the male mean deceased sample.
# * HA: There is a difference between the female mean deceased sample and the male mean deceased sample.
#
# Let's set `α=95%` confidence level, this suggests that we can reject the H0 hypothesis when our P-value is less than or equal to `0.05`.
I chose 95% instead of 99% because even though there is a greater chance of being wrong, it is precise enough to draw inferences from, and it also ensures stability in the results when one would repeat this experiment.
"https://localhost:8080/"} id="16PQ6OBGKyYF" outputId="1cc196ba-1a83-4ff1-ff48-489fc2b36268" # Boxplot sns.barplot(data=gender_df, x='sex', y='deceased') # + [markdown] id="mshVUhp1XFyd" # We can verify the `mean_deceased` with the researchpy library. In order to do that we first have to change the value of state to numeric. # + id="G9FvkdHHRlUN" # Change state deceased to 1, rest to 0 gender_patient.loc[gender_patient['state'].str.contains('deceased', case=False), 'state'] = '1' gender_patient.loc[gender_patient['state'].str.contains('isolated', case=False), 'state'] = '0' gender_patient.loc[gender_patient['state'].str.contains('released', case=False), 'state'] = '0' # + id="NL5QdOfgTp7O" # Set as int so it is now numeric gender_patient.state = gender_patient.state.astype(int) # + colab={"base_uri": "https://localhost:8080/"} id="cWjx1YI-KpoD" outputId="9d5c1194-e8b8-4aad-b378-370b0165b564" import researchpy as rp # Check mean, sd, n and ci rp.summary_cont(gender_patient['state'].groupby(gender_patient['sex'])) # + [markdown] id="rHRDfqe3XY0v" # Here we have the same outcome as when we calculate it ourselves! Now we can run the independent samples t-test for the result: # + colab={"base_uri": "https://localhost:8080/"} id="pkVWiZZjVRZE" outputId="beb2faaa-5fc3-4ab8-9a49-ccac1cef7288" # Run t-test and prints test statistic, p-value and df ttest = sm.stats.ttest_ind(gender_patient[gender_patient['sex'] == 'female'].state, gender_patient[gender_patient['sex'] == 'male'].state) print(ttest) # + [markdown] id="7gUOpOPOafuv" # As we can see, with a p-value of `0.002` we can reject `H0` since `0.002 < 0.05`. This means that there is evidence at the 95% confidence level to assume that the proportion of female/male deceased patients is statistically different. 
# + colab={"base_uri": "https://localhost:8080/"} id="wylJ04XHdiMp" outputId="e7b8c93c-0bf8-4ce5-845f-76a4c561eca7" # As a means of comparison, let's run the ztest ztest = sm.stats.ztest(gender_patient[gender_patient['sex'] == 'female'].state, gender_patient[gender_patient['sex'] == 'male'].state) print(ztest) # + colab={"base_uri": "https://localhost:8080/"} id="KqdyDOZtdyrr" outputId="f9143f7a-c3ec-4ec3-b19e-23b19d39d400" # Create dataframe data_tests = {'test': ['t-test', 'z-test'], 'p-value': [ttest[1], ztest[1]], 'test-statistic': [ttest[0], ztest[0]], 'population': ['sd not known', 'sd known']} tests_df = pd.DataFrame(data=data_tests) # View tests_df # + [markdown] id="1MZjMAIQiuHh" # ### Modeling # + [markdown] id="uQtIiyWgiwed" # #### PCA # + id="p5PpDU1Q4PA-" # Leave out state features = ['elementary_school_count', 'kindergarten_count', 'university_count', 'academy_ratio', 'elderly_population_ratio', 'elderly_alone_ratio', 'nursing_home_count'] # Separating out the features x = full_df.loc[:, features].values # Separating out the target y = full_df.loc[:,['state']].values # Standardizing the features x = StandardScaler().fit_transform(x) # + colab={"base_uri": "https://localhost:8080/"} id="6nGZuS1h4Prj" outputId="6f995f57-277f-48d3-f437-adf5375c5146" # Visualise the standardised data (Just for simplicity) see_x = pd.DataFrame(data=x, columns=features) see_x.head() # + colab={"base_uri": "https://localhost:8080/"} id="c7AHLCraiz9b" outputId="722a8af7-1ba5-4e01-e12f-f8804853faa7" # While applying StandardScaler, each feature of your data should be normally # distributed such that it will scale the distribution to a mean of zero # and a standard deviation of one. 
np.mean(x),np.std(x) # + colab={"base_uri": "https://localhost:8080/"} id="iP1Qkcop4Rl7" outputId="dee62b13-564c-48d6-9bad-ea459d95d545" # Plot the explained variance per component added pca = PCA().fit(x) plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('Number of components') plt.ylabel('Cumulative explained variance'); # + id="-CrVeF3y4Tqp" # 2 PCA components pca = PCA(n_components=2) principal_components = pca.fit_transform(x) principal_df = pd.DataFrame(data = principal_components , columns = ['principal component 1', 'principal component 2']) # + colab={"base_uri": "https://localhost:8080/"} id="WM5jN1m04XLG" outputId="fbf677cc-9a33-4f80-dc06-8a33028ddb61" # Check the head() principal_df.head() # + colab={"base_uri": "https://localhost:8080/"} id="-geZzMCU4bAM" outputId="ed8362d4-05d8-46d1-c427-2c98d989dd60" # Concatenate and view head() final_df_pca = pd.concat([principal_df[['principal component 1', 'principal component 2']], full_df[['state']]], axis = 1) final_df_pca.head() # + colab={"base_uri": "https://localhost:8080/"} id="MKCdim9I4dlo" outputId="919b2740-094c-41f0-c9b3-2be84566378d" # Scatterplot fig = plt.figure(figsize = (10,10)) fig, ax = plt.subplots() ax.set_xlabel('Principal Component 1', fontsize = 15) ax.set_ylabel('Principal Component 2', fontsize = 15) ax.set_title('2 component PCA', fontsize = 20) sns.scatterplot(x=final_df_pca['principal component 1'], y=final_df_pca['principal component 2'], hue=final_df_pca.state) ax.grid() # + colab={"base_uri": "https://localhost:8080/"} id="qYvyRoZv4f6i" outputId="2771f725-c6ca-4d47-a165-62efc5ff3a0b" # Explained variance of the components pca.explained_variance_ratio_ # + [markdown] id="0ys-K73nix7h" # #### T-SNE # + id="_wo7qEGPMdpO" # Initialise T-SNE and set 2 components tsne = TSNE(n_components=2, random_state=0) # + id="SXRqsk0jMjGX" # Feed the features dataset tsne_obj= tsne.fit_transform(x) # + colab={"base_uri": "https://localhost:8080/"} id="HbLIZyICMjXv" 
outputId="99d6b04e-c7c4-46fc-fd92-fd8af72b1269" # View the head() tsne_df = pd.DataFrame(data = tsne_obj , columns = ['X', 'Y']) tsne_df.head() # + colab={"base_uri": "https://localhost:8080/"} id="zOdKoP-SMrYe" outputId="114402ea-1eba-4455-d763-4a8f9dcde859" # Concatenate and view again final_tsne_df = pd.concat([tsne_df, full_df[['state']]], axis = 1) final_tsne_df.head() # + colab={"base_uri": "https://localhost:8080/"} id="Ws3R5YVbM317" outputId="a0786180-e0b8-4365-92f1-5ab6643c6482" # T-SNE scatterplot fig = plt.figure(figsize = (8,8)) fig, ax = plt.subplots() ax.set_xlabel('X', fontsize = 15) ax.set_ylabel('Y', fontsize = 15) ax.set_title('T-SNE scatterplot', fontsize = 20) sns.scatterplot(x="X", y="Y", hue="state", data=final_tsne_df); ax.grid() # + [markdown] id="oCzO6v40NAe_" # #### Kmeans clustering # + id="-DUADsegNysH" # Calculate the within cluster SSE (distortion) distortions = [] K = range(1,10) for k in K: kmeanModel = KMeans(n_clusters=k) kmeanModel.fit(principal_df) distortions.append(kmeanModel.inertia_) # + colab={"base_uri": "https://localhost:8080/"} id="28PuCzPKN00r" outputId="c9d75824-eb3d-4e34-f242-a6f202bb990a" # Plot the elbow figure plt.figure(figsize=(16,8)) plt.plot(K, distortions, 'bx-') plt.xlabel('Number of clusters') plt.ylabel('Within cluster SSE (Distortion)') plt.title('The Elbow Method showing the optimal k') plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="O_OXzG7GLPA-" outputId="2914f067-4a76-4457-af2e-765b4d4e1e4c" pip install kneed # + id="hhCA1ero8Rca" # Installing kneed from kneed import DataGenerator, KneeLocator # + colab={"base_uri": "https://localhost:8080/"} id="w0_BIxVI8Ajb" outputId="25da2327-7652-4fc2-8c8d-a3275ac999ea" # Using kneed to programmatically check elbow point kl = KneeLocator(range(1, 10), distortions, curve="convex", direction="decreasing") kl.elbow # + id="wiMEYOpGN3Gt" # Fit and predict the Kmeans model kmeanModel = KMeans(n_clusters=3) kmeanModel.fit(principal_df) y_kmeans = 
kmeanModel.predict(principal_df) # + colab={"base_uri": "https://localhost:8080/"} id="H6B6YDCKN5Yr" outputId="47c10824-802a-437f-cc8f-fd6099ed9be7" # plot points plt.figure(figsize=(16,8)) plt.title('K-means clusters') sns.scatterplot(data=principal_df, x='principal component 1', y='principal component 2', hue=y_kmeans) # plot cluster centers centers = kmeanModel.cluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c='black', s=500, alpha=0.5); # + id="P5mD6IUgCawJ" # Add column for cluster labels full_df['cluster'] = kmeanModel.labels_ # + id="GNQQyimBDGlZ" # change clusters to 1 - 3 instead of 0 - 2 full_df['cluster'] = full_df['cluster'].map({0:1, 1:2, 2:3}) # + id="bKGgbjBtC1nt" # Group by cluster grouped_by_cluster = full_df.groupby('cluster') # + colab={"base_uri": "https://localhost:8080/"} id="7jp4bCxtDqyT" outputId="03232ca7-1236-4484-c847-7f4554290aa0" # Get the percentages of states in each cluster df = pd.concat([grouped_by_cluster.state.value_counts(), grouped_by_cluster.state.value_counts(normalize=True).mul(100)],axis=1, keys=('counts','percentage')) df.round(2) # + [markdown] id="Z6kggFNJDDow" # Seems like the highest percentage of deceased patients are in cluster 1, but the highest absolute number of deceased patients is in cluster 2. Let's have a closer look at the data # + colab={"base_uri": "https://localhost:8080/"} id="VW4hPAvqCkvw" outputId="d6364f70-18fc-40e2-b547-cdd4963218ab" # See means grouped_by_cluster.mean() # + [markdown] id="YxLZvQQ2Ie-r" # It seems that cluster 1 has a higher percentage of `elderly_population_ratio` and `elderly_alone_ratio` compared to the other 2 clusters. The cluster also has the lowest number of nursing homes compared to the other 2 clusters and the highest average age. 
# + [markdown] id="aNtsizcQjJMw" # #### linear regression # + id="ftAoE-EzjtJn" # Add categorised values full_df['state_cat'] = full_df['state'].astype('category').cat.codes # + id="Zda0Ahx0CHnY" # Get all interesting features for linear regression features_lin = ['age','sex', 'infection_case', 'country', 'province', 'city', 'elementary_school_count', 'kindergarten_count', 'university_count', 'academy_ratio', 'elderly_population_ratio', 'elderly_alone_ratio', 'nursing_home_count'] # + id="Y2zE7ssCAdhu" # Set X as df X = full_df[features_lin] # + colab={"base_uri": "https://localhost:8080/"} id="exPwiTz2AarZ" outputId="771352a3-f2d9-4e29-d3f3-dd27fa42d223" # Set dummy variables X = pd.get_dummies(data=X, drop_first=True) X.head() # + id="chPd9gEj2Lf6" # Set all columns incl dummy variables as features_x features_x = X.columns # + id="YvbNHPS28K5O" # X = StandardScaler().fit_transform(X) y = full_df[['state_cat']] # + id="PJLMu-Gp77PW" # Set train and test data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1) # Fit linear regression model_lin = LinearRegression().fit(X_train,y_train) # + id="-te9PW5J8gff" # Predictions predictions = model_lin.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="QRJXTTQz85bD" outputId="a77e0d57-b3a4-49ff-9d34-a3afa7821d07" # Get summary X_train_Sm= sm.add_constant(X_train) X_test_Sm= sm.add_constant(X_test) ls=sm.OLS(y_test,X_test_Sm).fit() print(ls.summary()) # + id="BP2q33f4-p50" # Map the states mapped_states = dict( enumerate(full_df['state'].astype('category').cat.categories)) # + id="mJjryeL_-p50" # Set prediction as dataframe predicted_df = pd.DataFrame(predictions.astype('int'), columns=['predicted']) # + colab={"base_uri": "https://localhost:8080/"} id="Cd_xxZWD-p50" outputId="4408fe92-83e4-4926-90be-24297dcb8de9" # Add the predicted state predicted_df['predicted_state'] = predicted_df['predicted'].map(mapped_states) predicted_df.head() # + colab={"base_uri": 
"https://localhost:8080/"} id="J5QR6fvDDtzA" outputId="d3b9a89d-5d02-4f2d-ced3-434a3bb332bc" # Reset the index for y_test y_test = y_test.reset_index(drop=True) y_test.head() # + colab={"base_uri": "https://localhost:8080/"} id="ysQExK07-p50" outputId="54f4bf12-cc6a-4d9b-8774-901a3a36ead0" # Check with actual state predicted_final_df = pd.concat([predicted_df, y_test[['state_cat']]], axis=1) predicted_final_df.tail() # + id="1u9ycwF9Eh6O" # Add the mapped states predicted_final_df['state'] = predicted_final_df['state_cat'].map(mapped_states) # + colab={"base_uri": "https://localhost:8080/"} id="gG0G_KmgEu0A" outputId="f4b3c930-ced8-42c0-ce71-4146f4d7d186" # Check predicted_final_df predicted_final_df.tail() # + colab={"base_uri": "https://localhost:8080/"} id="dI0XfKsL-p51" outputId="38bdb76a-db07-49a4-f362-ff6ae5f085a4" # Count values predicted_counts = pd.DataFrame(predicted_final_df.state.value_counts()) predicted_counts['predicted'] = predicted_final_df.predicted_state.value_counts() predicted_counts # + colab={"base_uri": "https://localhost:8080/"} id="lIcX3zQT-jFp" outputId="3a7a2ac8-7359-4fd4-ee73-4da0391612c2" # Plot the actual situation states = ['released', 'isolated', 'deceased'] actual = [374, 235, 7] predicted = [108, 443, 65] plt.bar(states, actual) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="wodLXr1JH9ow" outputId="36b8f708-3f6e-4887-e3ef-4553e8619471" # Plot the difference between actual and predicted plt.figure(figsize=[15, 10]) X = np.arange(len(actual)) plt.bar(X, actual, color = 'pink', width = 0.25) plt.bar(X + 0.25, predicted, color = 'orange', width = 0.25) plt.legend(['Total actual', 'Total predicted']) plt.xticks([i + 0.25 for i in range(3)], states) plt.title("Bar plot representing the total predicted and total actual cases state wise") plt.xlabel('States') plt.ylabel('Cases') plt.show()
141.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.4.2 # language: julia # name: julia-1.4 # --- # # Getting started with Julia & JAC # # The following tutorials guide you towards Julia and the JAC toolbox. # ## 11-getting-started-with-Julia.ipynb # ### **Getting started with Julia** # Here, we explain the syntax and meaning of some simple features of Julia as they occur frequently in working with JAC. For new users, this will (hopefully) help lowering the initial 'threshold' for looking into and using Julia & JAC. However, no attempt is made to provide any thorough guide into Julia which, indeed, is a very rich and powerful approach with many features far beyond of what is used in JAC (so far). # ## 12-getting-started-with-JAC.ipynb # ### **Getting started with JAC** # This tutorial tells you what JAC is, some of its goals and how you can define and manipulate simple entities, such as shells, configurations, etc. # ## 13-compute-hydrogenic-orbitals.ipynb # ### **Hydrogenic computations** # Here, we show how hydrogenic energies and orbital functions can be obtained by simple calls. We also explain how these orbitals are represented and applied within the JAC toolbox. # ## 14-compute-hydrogenic-expectation-values.ipynb # ### **Expectation values of hydrogenic ions** # This tutorial shows how easily one can calculate simple expectation values of hydrogenic ions within both, the non-relativistic and relativistic theory. This might help understand the Z-scaling and relativistic effects on atomic properties and spectra. # ## 15-get-data-from-periodic-table.ipynb # ### **Obtain data from the periodic table** # JAC also known basic information from the periodic table which can be utilized to compute and compile atomic information and data. 
# ## 16-define-nuclear-model-parameters.ipynb # ### **Define the nuclear (model) parameters & potential** # In JAC, the nuclear parameters such as spin, nuclear moments or the model (and the shape) of the nuclei can be re-defined very easily. Here, we show how the nuclear potential and parameters are kept together and utilized in JAC.
tutorials/10-summarize-getting-started.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ## Interdisciplinary Communication # ### Part 4 of 4 # # Articles # # In this section, we’ll explore some short articles about jargon. # + [markdown] slideshow={"slide_type": "slide"} # # ## Reminder # <a href="#/slide-2-0" class="navigate-right" style="background-color:blue;color:white;padding:8px;margin:2px;font-weight:bold;">Continue with the lesson</a> # # <font size="+1"> # # By continuing with this lesson you are granting your permission to take part in this research study for the Hour of Cyberinfrastructure: Developing Cyber Literacy for GIScience project. In this study, you will be learning about cyberinfrastructure and related concepts using a web-based platform that will take approximately one hour per lesson. Participation in this study is voluntary. # # Participants in this research must be 18 years or older. If you are under the age of 18 then please exit this webpage or navigate to another website such as the Hour of Code at https://hourofcode.com, which is designed for K-12 students. # # If you are not interested in participating please exit the browser or navigate to this website: http://www.umn.edu. Your participation is voluntary and you are free to stop the lesson at any time. # # For the full description please navigate to this website: <a href="gateway-1.ipynb">Gateway Lesson Research Study Permission</a>. # # </font> # + hide_input=true init_cell=true slideshow={"slide_type": "skip"} tags=["Hide"] # This code cell starts the necessary setup for Hour of CI lesson notebooks. # First, it enables users to hide and unhide code by producing a 'Toggle raw code' button below. 
# Second, it imports the hourofci package, which is necessary for lessons and interactive Jupyter Widgets. # Third, it helps hide/control other aspects of Jupyter Notebooks to improve the user experience # This is an initialization cell # It is not displayed because the Slide Type is 'Skip' from IPython.display import HTML, IFrame, Javascript, display from ipywidgets import interactive import ipywidgets as widgets from ipywidgets import Layout import getpass # This library allows us to get the username (User agent string) # import package for hourofci project import sys sys.path.append('../../supplementary') # relative path (may change depending on the location of the lesson notebook) import hourofci # load javascript to initialize/hide cells, get user agent string, and hide output indicator # hide code by introducing a toggle button "Toggle raw code" HTML(''' <script type="text/javascript" src=\"../../supplementary/js/custom.js\"></script> <style> .output_prompt{opacity:0;} </style> <input id="toggle_code" type="button" value="Toggle raw code"> ''') # + [markdown] slideshow={"slide_type": "slide"} # Now that you’ve explored and interacted with some examples of interdisciplinary communication, let’s read a few examples from other sources. # + [markdown] slideshow={"slide_type": "fragment"} # In this first article, you’ll read about potential dangers in using jargon when communicating science: # # https://www.npr.org/sections/13.7/2017/06/12/532554252/the-dangers-of-hidden-jargon-in-communicating-science # + hide_input=true slideshow={"slide_type": "fragment"} tags=["Hide", "Init", "5A"] # Output function def out1(): print("Well done! 
Please move to the next slide.") # Submit button # hourofci.SubmitBtn(user_agent, lesson, lesson_level, "5A", "read", out1,"Click here when done reading") hourofci.SubmitBtn2('read', out1,"Click here when done reading", display_wgt = False) # + [markdown] slideshow={"slide_type": "slide"} # Considering the article you just read and the activities in this lesson, do you think that using jargon is ‘dangerous’? Respond in a few sentences. # + hide_input=true slideshow={"slide_type": "fragment"} tags=["Hide", "Init", "5B"] text1 = widgets.Textarea(placeholder='Type your response here...') # Display widget display(text1) # Output function def out2(): # Execute next cell for comparison display(Javascript("Jupyter.notebook.execute_cells([Jupyter.notebook.get_selected_index()+1])")) print("What an interesting response! Take a look at what we thought:\n") print("\"Jargon can be dangerous when it covers up or obscures meaning. If it makes science outcomes or details more confusing, it can be a major problem. Care and attention is necessary to make sure that jargon is used appropriately.\"") # Submit button # hourofci.SubmitBtn(user_agent, lesson, lesson_level, "5B", text1, out2, 'Click here to submit your response') hourofci.SubmitBtn2(text1, out2, 'Click here to submit your response') # + [markdown] hide_input=false slideshow={"slide_type": "slide"} variables={"text1.value": ""} # On a scale of 1 to 5, with 1 being "the responses didn't match at all" and 5 being "the responses matched really closely", how well did your response match ours? # # __Your response:__ {{text1.value}}<br> # __Our response:__ "Jargon can be dangerous when it covers up or obscures meaning. If it makes science outcomes or details more confusing, it can be a major problem. Care and attention is necessary to make sure that jargon is used appropriately." 
# + hide_input=true slideshow={"slide_type": "fragment"} tags=["Hide", "Init", "5C"] slider1 = widgets.IntSlider( value=3, min=1, max=5, step=1 ) display(slider1) def out(): return print('Successfully submitted!') # Submit button # hourofci.SubmitBtn(user_agent, lesson, lesson_level, "5C", slider1) hourofci.SubmitBtn2(slider1, out) # + [markdown] slideshow={"slide_type": "slide"} # The second article takes a different stance on jargon: # # https://medium.com/@alysonescalante/in-defense-of-jargon-eced58ff316d # + hide_input=true slideshow={"slide_type": "fragment"} tags=["Hide", "Init", "5D"] # Output function def out4(): print("Well done! Please move to the next slide.") # Submit button # hourofci.SubmitBtn(user_agent, lesson, lesson_level, "5D", "read", out4,"Click here when done reading") hourofci.SubmitBtn2("read", out4,"Click here when done reading", display_wgt = False) # + [markdown] slideshow={"slide_type": "slide"} # Considering the article you just read and the activities in this lesson, do you think that using jargon is part of becoming an expert? Respond in a few sentences. # + hide_input=true slideshow={"slide_type": "fragment"} tags=["Hide", "Init", "5E"] text2 = widgets.Textarea(placeholder='Type your response here...') # Display widget display(text2) # Output function def out5(): # Execute next cell for comparison display(Javascript("Jupyter.notebook.execute_cells([Jupyter.notebook.get_selected_index()+1])")) print("What an interesting response! Take a look at what we thought:\n") print("\"Appropriate, context relevant, and intentional use of jargon can be a major benefit in demonstrating expertise. In addition, it allows for different types of conversations among other experts. 
It can be a major advantage in enabling advanced communication.\"") # Submit button # hourofci.SubmitBtn(user_agent, lesson, lesson_level, "5E", text2, out5, 'Click here to submit your response') hourofci.SubmitBtn2(text2, out5, 'Click here to submit your response') # + [markdown] slideshow={"slide_type": "slide"} variables={"text2.value": ""} # On a scale of 1 to 5, with 1 being "the responses didn't match at all" and 5 being "the responses matched really closely", how well did your response match ours? # # __Your response:__ {{text2.value}}<br> # __Our response:__ "Appropriate, context relevant, and intentional use of jargon can be a major benefit in demonstrating expertise. In addition, it allows for different types of conversations among other experts. It can be a major advantage in enabling advanced communication." # + hide_input=true slideshow={"slide_type": "fragment"} tags=["Hide", "Init", "5F"] slider2 = widgets.IntSlider( value=3, min=1, max=5, step=1 ) display(slider2) def out(): return print('Successfully submitted!') # Submit button # hourofci.SubmitBtn(user_agent, lesson, lesson_level, "5F", slider2) hourofci.SubmitBtn2(slider2, out) # + [markdown] slideshow={"slide_type": "slide"} # The final article reflects on these ideas in the broader scope of interdisciplinary communication: # # https://ian.umces.edu/blog/2018/05/31/how-to-improve-interdisciplinary-collaborations-lessons-learned-from-scientists-studying-team-science/ # + hide_input=true slideshow={"slide_type": "fragment"} tags=["Hide", "Init", "5G"] # Output function def out7(): print("Well done! 
Please move to the next slide.") # Submit button # hourofci.SubmitBtn(user_agent, lesson, lesson_level, "5G", "read", out7,"Click here when done reading") hourofci.SubmitBtn2('read', out7,"Click here when done reading", display_wgt = False) # + [markdown] slideshow={"slide_type": "slide"} # Considering the article you just read and the activities in this lesson, do you think that interdisciplinary communication is important for cyberinfrastructure? Respond in a few sentences. # + hide_input=true slideshow={"slide_type": "fragment"} tags=["Hide", "Init", "5H"] text3 = widgets.Textarea(placeholder='Type your response here...') # Display widget display(text3) # Output function def out8(): # Execute next cell for comparison display(Javascript("Jupyter.notebook.execute_cells([Jupyter.notebook.get_selected_index()+1])")) print("What an interesting response! Take a look at what we thought\n") print("\"Interdisciplinary communication is vital in cyberinfrastructure. When working with people from different backgrounds and experiences, connecting their expertise and knowledge can create very interesting connections and collaborations.\"") # Submit button # hourofci.SubmitBtn(user_agent, lesson, lesson_level, "5H", text3, out8, 'Click here to submit your response') hourofci.SubmitBtn2(text3, out8, 'Click here to submit your response') # + [markdown] hide_input=false slideshow={"slide_type": "slide"} variables={"text3.value": ""} # On a scale of 1 to 5, with 1 being "the responses didn't match at all" and 5 being "the responses matched really closely", how well did your response match ours? # # __Your response:__ {{text3.value}}<br> # __Our response:__ "Interdisciplinary communication is vital in cyberinfrastructure. When working with people from different backgrounds and experiences, connecting their expertise and knowledge can create very interesting connections and collaborations." 
# + hide_input=true slideshow={"slide_type": "fragment"} tags=["Hide", "Init", "5I"] slider3 = widgets.IntSlider( value=3, min=1, max=5, step=1 ) display(slider3) def out(): return print('Successfully submitted!') # Submit button # hourofci.SubmitBtn(user_agent, lesson, lesson_level, "5I", slider3) hourofci.SubmitBtn2(slider3, out) # + [markdown] slideshow={"slide_type": "slide"} # Congratulations, you have almost completed the Hour of Cyberinfrastructure Interdisciplinary Communication lesson! # # The last segment allows you to explore interdisciplinary communication in a new way. Click the link below to start the final segment. # # **Continue to exploration: [Next Section](ic-exploration.ipynb)**
beginner-lessons/interdisciplinary-communication/ic-5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import dask import fsspec import netrc from urllib.parse import urlparse from urllib.error import HTTPError, URLError import rioxarray import xarray as xr URS_URL = "https://urs.earthdata.nasa.gov" import base64 from urllib.request import ( urlopen, Request, build_opener, HTTPCookieProcessor, ) from tqdm import tqdm # + with open("/home/jovyan/aster_granules.txt") as f: granules = f.read().split("\n") granules = [g for g in granules if "dem" in g] # - granules[:10] # + def get_username(): username = "" # For Python 2/3 compatibility: try: do_input = raw_input # noqa except NameError: do_input = input while not username: try: username = do_input("Earthdata username: ") except KeyboardInterrupt: quit() return username def get_credentials(url): """Get user credentials from .netrc or prompt for input.""" credentials = None errprefix = "" try: info = netrc.netrc() username, account, password = info.authenticators(urlparse(URS_URL).hostname) errprefix = "netrc error: " except Exception as e: if not ("No such file" in str(e)): print("netrc error: {0}".format(str(e))) username = None password = None while not credentials: if not username: username = get_username() password = get_password() credentials = "{0}:{1}".format(username, password) credentials = base64.b64encode(credentials.encode("ascii")).decode("ascii") if url: try: req = Request(url) req.add_header("Authorization", "Basic {0}".format(credentials)) opener = build_opener(HTTPCookieProcessor()) opener.open(req) except HTTPError: print(errprefix + "Incorrect username or password") errprefix = "" credentials = None username = None password = <PASSWORD> return credentials # - def cmr_download(urls, cache_location, credentials=None): """Download files from list of urls.""" out = [] if 
not urls: return out for index, url in enumerate(urls, start=1): if not credentials and urlparse(url).scheme == "https": credentials = get_credentials(url) filename = url.split("/")[-1] target_url = os.path.join(cache_location, filename) # skip existing try: fsspec.open(target_url).open() out.append(target_url) continue except FileNotFoundError: pass try: req = Request(url) if credentials: req.add_header("Authorization", "Basic {0}".format(credentials)) opener = build_opener(HTTPCookieProcessor()) with fsspec.open(target_url, mode="wb") as target: target.write(opener.open(req).read()) out.append(target_url) except HTTPError as e: print("HTTPError {0}, {1}".format(e.code, e.reason), filename) except URLError as e: print("URLError: {0}".format(e.reason), filename) except IOError: print("IOError: {0}".format(e.reason), filename) except KeyboardInterrupt: quit() except: print("unknown error", filename) return out @dask.delayed def process_granule(url, temp_dir="/home/jovyan/temp/"): target = aster_path.split("_")[-2] target_url = f"gs://carbonplan-climatetrace/inputs/aster/{target}.tif" try: fsspec.open(target_url).open() return target_url except FileNotFoundError: pass # download the granule try: local_path = cmr_download([url], f"{temp_dir}")[0] except Exception as e: print(url, e) return ("failed to download", target_url) # convert to geotiff try: da = xr.open_rasterio(local_path) except Exception as e: print(url, e) os.remove(local_path) return ("failed to convert", target_url) cog = f"{temp_dir}/{target}" da.rio.to_raster(cog, windowed=True) # write to object store with fsspec.open(target_url, mode="wb") as fdst: with open(cog, mode="rb") as fsrc: fdst.write(fsrc.read()) # cleanup os.remove(local_path) os.remove(cog) return ("passed", target_url) results = [] for url in tqdm(granules): results.append(process_granule(url)) results_c = dask.compute(results, retries=3) print("here")
notebooks/processing/download_srtm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # CS 5408 - Game Theory for Computing # # ## Topic 0: Introduction -- Prerequisites # \ # COPYRIGHTS: © <NAME>, 2021 # ### Things to Remember # # * All programming assignments should be written in **Python 3.8+** and submitted as **Jupyter notebooks** in assigned **Gitlab** repositories before the submission deadline. # * All other submissions will be rejected -- in which case, zero points will be automatically awarded. # * Any exceptions can only be granted at the discretion of the instructor only under justifiable circumstances. # # * To work on a Unix terminal from any Windows machine, you may use **PuTTY**, a free and open-source SSH/Telnet client, and follow the instructions in this [link](https://it.mst.edu/services/remote-desktop-connection/install-putty/) to gain access to our campus' standard Unix installation. # * If you are off-campus, you need to first connect to our campus VPN by following the instructions provided in this [link](https://it.mst.edu/services/vpn/). # # * Note that the code provided by the instructor cannot be changed. You can only write your code in the blanks, as instructed by comments within respective code cells. 
# ### Resources # # * Official Python Documentation: [https://www.python.org/doc/](https://www.python.org/doc/) # # * IBM's Markdown Cheatsheet for Jupyter Notebook: [https://www.ibm.com/docs/en/watson-studio-local/1.2.3?topic=notebooks-markdown-jupyter-cheatsheet](https://www.ibm.com/docs/en/watson-studio-local/1.2.3?topic=notebooks-markdown-jupyter-cheatsheet) \ # # * Git: [https://git-scm.com/doc](https://git-scm.com/doc) # ### One-Time Setup Instructions # # In Unix terminal, perform the following steps: # # **STEP 1:** Make sure you have git, Python and any other necessary prerequisites installed in your workstation: \ # `git --version` \ # `python3 --version` # # Then, check what the default Python version is, on your workstation: \ # `python --version` \ # If the default Python version is python2, then change it to Python 3, if it is already installed in your system: \ # `alias python=python3` # # **STEP 2:** Create a virtual environment: \ # `python -m venv fs2021-cs5408-venv` # # **STEP 3:** Activate the virtual environment: \ # `source fs2021-cs5408-venv/bin/activate` # # **STEP 4:** Install all the necessary dependencies within the virtual environment in the following order: \ # `pip install --upgrade pip` \ # `pip install git` # `pip install jupyter` \ # `pip install gym` # ### Instructions for every submission # # In order to work on your HWs and/or projects, perform the following steps: # # STEP 1: Activate the virtual environment: \ # `source fs2021-c5408-venv\bin\activate` # # STEP 2: Clone the appropriate git repository: \ # `git clone <git-repository-address>` # # STEP 3: Open Jupyter Notebook: \ # `jupyter notebook` # # STEP 4: Once the notebook opens in your browser, update the code in existing Jupyter notebooks. 
# # STEP 5: Upon closing the Jupyter Notebook, run the following command in your terminal: # `pip freeze > requirements.txt` # # STEP 6: Commit your code back to your respective git repositories after every new update: # `git commit ` # ### Example Code in Python # # **Note:** Every notebook is independent, and therefore the code needs to be self-contained. # + # Install any necessary package other than the one listed in the one-time setup instructions # !pip install numpy # This package allows you to compute Nash equilibrium using Lemke-Howson algorithm. # + # Import all necessary global packages import numpy as np # -
.ipynb_checkpoints/T0_Prerequisites-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # **Author**: <NAME> # # **Email**: <EMAIL> # # **Date**: 3/4/2020 #Dependencies import numpy as np import cPickle as pkl import os import codeDesigner as cd # # Choose genomic locations with equal spacing on each chromosome # + min_chr=30 #minimal number of loci to be chosen on a chromosome target_res = 3*10**6 #default target distance between chosen loci chosen_by_chr = [],[] #chosen loci genome-wide, by chromosome home_ = '/n/home02/seonkinrot/Genomes/human/hg38' #folder containing fasta files for the entire human genome shift = 5*10**4 #size of region in each direction required to be mappable for chr_ in range(23): chr_text = 'chr'+str(chr_+1) if chr_<22 else 'chrX' _,seq = cd.fastaread(home_+os.sep+chr_text+'.fa') seq = seq[0] chr_len = len(seq) target_dist = np.min([float(chr_len)/min_chr,target_res]) chr_chosen_ = [] while len(chr_chosen_)<min_chr: chr_chosen__ = np.arange(target_dist,chr_len,target_dist,dtype=int) #uniform probes chr_chosen__ = [c_ for c_ in chr_chosen__ if ((c_+shift<chr_len) and (c_>shift))] chr_chosen_ = [chosen_ for chosen_ in chr_chosen__ if seq[chosen_]!='N' and seq[chosen_]!='n'] chr_chosen_ = [chosen_ for chosen_ in chr_chosen_ if seq[chosen_+shift]!='N' and seq[chosen_+shift]!='n'] chr_chosen_ = [chosen_ for chosen_ in chr_chosen_ if seq[chosen_-shift]!='N' and seq[chosen_-shift]!='n'] target_dist = target_dist*0.9 chosen_by_chr.append(chr_chosen_) n_per_chr = [len(ch_) for ch_ in chosen_by_chr] print n_per_chr # - save_fld = '/n/home02/seonkinrot/TAD_sim/SI14' fid = open(save_fld+os.sep+'hg38_chosen_regs.txt','w') for ichr_,chr_ in enumerate(chosen_by_chr): chr_str = 'chr'+str(ichr_+1) if ichr_<22 else 'chrX' for reg_ in chr_: ln = chr_str+':'+str(int(reg_-shift))+'-'+str(int(reg_+shift))+'\n' fid.write(ln) 
fid.close() # Load list of loci used in this study chrs_regs = [ln for ln in open(r'hg38_chosen_regs.txt','r')] #loci chosen for each chromosome in actual study chr_strs = ['chr'+str(ichr_+1) if ichr_<22 else 'chrX' for ichr_ in range(23)] chosen_by_chr = [] for chr_ in chr_strs: c_regs = [reg for reg in chrs_regs if reg.split(':')[0]==chr_] #loci for given chromosome chr_chosen_ = map(np.mean,[map(int,reg.split(':')[1].split('-')) for reg in c_regs]) #mid-point of each locus chosen_by_chr.append(chr_chosen_) n_per_chr = [len(ch_) for ch_ in chosen_by_chr] print n_per_chr # # Generate barcodes per locus # + nhb = 50 #number of rounds of imaging (including separate colors) to design buf_ = 100 split_ = True #if true, the code design is for double the number of rounds in nhb, but can be pooled to halve imaging time #this is what was used in this study, but code generation can be done directly for 100 bits by setting: # nhb=100, split_=False hybes,tads_assign = cd.code_encoder_v2(nTADs=n_per_chr,refine_chr=500000,refine_TAD=250000,nchr=23,nhybes=nhb,buffer_=buf_) new_code, new_hybes, new_tads_assign = cd.split_code(code,hybes) #new_code corresponds to the sparser code sv_params = [n_per_chr] sv_params.append([code,hybes,tads_assign]) sv_params.append([new_code,new_hybes,new_tads_assign]) # - # Barcodes for intron probes were generated analogously, but without the code splitting step # FISH probes for each DNA locus were desgined as described in Bintu et al., Science (2018). Code for probe design can be found at https://github.com/BogdanBintu/ChromatinImaging, under LibraryDesign
combinatorial_tracing/BarcodeGeneration/Code_Design_Genome-scale.ipynb
# --- # title: "Expand Cells Containing Lists Into Their Own Variables In Pandas" # author: "<NAME>" # date: 2017-12-20T11:53:49-07:00 # description: "Expand cells containing lists into their own variables in pandas." # type: technical_note # draft: false # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # import pandas import pandas as pd # + # create a dataset raw_data = {'score': [1,2,3], 'tags': [['apple','pear','guava'],['truck','car','plane'],['cat','dog','mouse']]} df = pd.DataFrame(raw_data, columns = ['score', 'tags']) # view the dataset df # + # expand df.tags into its own dataframe tags = df['tags'].apply(pd.Series) # rename each variable is tags tags = tags.rename(columns = lambda x : 'tag_' + str(x)) # view the tags dataframe tags # - # join the tags dataframe back to the original dataframe pd.concat([df[:], tags[:]], axis=1)
docs/python/data_wrangling/pandas_expand_cells_containing_lists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.7 64-bit (''venv-jsmltools'': venv)' # language: python # name: python37764bitvenvjsmltoolsvenvb053447eee1f4eacb166bf9acb179df8 # --- # + # default_exp design_doc # - # # Design Document # # > outlines the choices, condiderations and tradeoffs made # ``` # STATUS: Active development # AUTHOR(S): <NAME> (my_first_name.my_last_name[at]gmail.com) # CREATED: 2020-03-19 # UPDATED: 2020-05-25 # ``` # ## Background # # ### Objective # # New ideas, concepts and tools are constantly being created. Doing literature reviews and one off experiments for every new problem is painful. Can we create tools that can help the learning process but then be used later to solve related problem? # # # ### Motivation # # > How do you figure out what do you spend your time learning? # # When learning new concepts and idea, it is easy to glaze over the detials and get a false sense of confidence - but the devil is always in the detials. A lot of new concepts might seem intuitive, however, actually understanding tradeoffs, decision and limits is hard from initial reading. # # # > How to improve the decision making process? # # When trying to solve a new problem, a lot of ideas come to mind. However there is a lot of friction in prototyping all these alternative ideas. Hence, a lot of shortcuts are taken leading to sub-optimal outcomes in the long run. # # > Can one auto-catalyze the learning, decision and implimentation process? # # Having a good aresenal of ideas, improves the quality of decisions. Making better decisions helps create better solutions. Creating better solutions should give hints/ideas what to learn next. # ## Design # # Below are the design choices that I made: # # * Using [nbdev](https://github.com/fastai/nbdev) as a base structure # * ✅ Will use this to get something off the ground. 
However, will not tie too deeply into this strcuture (if I need to swap this out). # * ✅ Nice literate environment # * ✅ Get PyPI functionality out of the box # * ✅ Get docs functionality out of the box # * ✅ Get unit test functionality out of the box # * ❌ Might need to contribute to nbdev to add functionality # * 🛡 clone an internal verison of nbdev # * 🛡 write all non-supported use case directly in the `lib` folder # # * Monolithical git repo # * ✅ Will allow for incrementability # * ✅ Will become canonical place with experimentation/learning will happen. This will help with compounding learning/experimentation speed. # * ❌ Will enentually get hard to maintain? # * 🛡 things don't have a lot of deps will eventually fall out/depricate # * ❌ >0 startup time # # * Lack of structure # * ✅ [Hyrum's Law](https://www.hyrumslaw.com/) # * - hard for others to understand/follow # * 🛡 build tools to show dependency # * ✅ allow for incrementability # # * [Progressive disclosure of complexity](https://twitter.com/fchollet/status/1231285340335267840) # * ✅ easy to start # * ✅ allow for rough initial ideas to stable APIs to co-exist # * ❌ A lot of extra work needed to trace dependencies # * ️️🛡 build better tools to view dependencies # # * Multi-language/multi-framework/multi-domain support. # * ✅ This becomes the canonical repo for learning/experimentation. # * ❌ Only python will be used in notebooks (for now and because of nbdev limitation). # * 🛡 write all non-supported use case directly in the `lib` folder. # # ``` # Legend: # ✅ suppotive/positive point # ❌ fault/negative point # 🛡️ mitagation plan # ``` # # ### Alternatives Considered # * Use a collections of notebooks to store ideas # # # ### Performance Implications # * Single package bloat # * Inefficient when trying to productionize # * Cost to maintain loose ends and brokes paths # # # ### Dependencies # A list of all package dependencies are avaiable in the `settings` variable in the `settings.ini` file. 
# ## Open Ended Questions # * Is there a way to track how effective this tool is for # * the learning process # * speed of learning new ideas? # * complexity of ideas being learnt? # * the decision making process # * quality of ideas considered during the decision making process? # * the implimentation proces # * time from concept to final product? # * reliability of solution? # ## References # * [Andy M. Notes](https://notes.andymatuschak.org/) # * Metacognitive supports as cognitive scaffolding # * by contraint and blog by narritive # * [Incremental writing](https://supermemo.guru/wiki/Incremental_writing) # * Notes to APIs # * Exponential growth to learn # * Cross-decipline encouragement # * [nbdev](http://nbdev.fast.ai/) # * [Deisgn doc template](https://github.com/tensorflow/community/blob/master/rfcs/yyyymmdd-rfc-template.md)
notebooks/design_document.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 # Read in the image and convert to grayscale image = mpimg.imread('exit-ramp.jpg') gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY) plt.imshow(gray,cmap='gray') # + # Define a kernel size for Gaussian smoothing / blurring kernel_size = 3 # Must be an odd number (3, 5, 7...) blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0) # Define our parameters for Canny and run it low_threshold = 100 high_threshold = 200 edges = cv2.Canny(blur_gray, low_threshold, high_threshold) # Display the image plt.imshow(edges, cmap='Greys_r')
Computer Vision Fundamentals/Canny Edge Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.1 64-bit (''.venv'': venv)' # name: python3 # --- # + #import course;course.header() # + [markdown] slideshow={"slide_type": "slide"} # # Excersise No. 1 # # ## Count Amino acid propensity in human proteome # # Got to Uniprot and download the latests [here](https://www.uniprot.org/uniprot/?query=*&fil=organism%3A%22Homo+sapiens+%28Human%29+%5B9606%5D%22+AND+reviewed%3Ayes#) # # The file containes protein sequences in FASTA format, i.e. # ```txt # > (( Identifier line )) # (( Amino acid Sequence, can stretch over multiple lines )) # > (( next identifier line )) # ``` # # Write a function that takes two arguments, # * a path to the a fasta database and # * an output file name. # # The function should do the following: # * parses the input fasta file # * counts the amino acid frequency and # * write a csv output file that looks like # # ``` # aa, count # A, 123123123 # C, 3213 # D, ... 
# ``` # + import csv from collections import Counter import collections aminoacid_counter = Counter() with open ("uniprot-filtered-organism-human.fasta", "r") as input_file: for line in input_file: if line[0] != ">": line = line.strip() aminoacid_counter += Counter(line) # - with open("output_exercise1.csv", "w", newline = "") as output_file: writer = csv.writer(output_file) writer.writerow(["aa", "count"]) for key, value in sorted(aminoacid_counter.items()): writer.writerow([key, str(value)]) # ## Plot a histogram for human # Use the dictionary created in a) to plot a histogram using your favorit lib # # Create a standalone script that will plot the histogram # ```bash # $ ./plot_aa_histogram.py "uniprot-filtered-proteome%3AUP000005640+AND+reviewed%3Ayes+AND+organism%3A%22Hom--.fasta" # ``` # + col_names = ["Aminoacid", "Count"] aminoacid_names = [] for key in sorted(aminoacid_counter): aminoacid_names.append(str(key)) print(aminoacid_names) counter_values = [] for key in sorted(aminoacid_counter): counter_values.append((aminoacid_counter[key])) print(counter_values) # + import pandas import seaborn df = pandas.DataFrame(list(zip(aminoacid_names, counter_values)), columns=col_names) ax = seaborn.barplot(x="Aminoacid", y="Count", data=df) ax.set_title("Aminoacid Distribution Human") # - # ## Rinse and repeat # Repeat counting and plotting for at leat one organism of the following kingdoms of life: # * Bacteria # * Archea # * Plantae # * Animalia # + aminoacid_counter_bacteria = Counter() with open ("uniprot-Escherichia_coli.fasta", "r") as input_file: for line in input_file: if line[0] != ">": line = line.strip() aminoacid_counter_bacteria += Counter(line) print(aminoacid_counter_bacteria) # + col_names_bacteria = ["Aminoacid", "Count"] aminoacid_names_bacteria = [] for key in sorted(aminoacid_counter_bacteria): aminoacid_names_bacteria.append(str(key)) print(aminoacid_names_bacteria) counter_values_bacteria = [] for key in sorted(aminoacid_counter_bacteria): 
counter_values_bacteria.append((aminoacid_counter_bacteria[key])) print(counter_values_bacteria) # + df_bacteria = pandas.DataFrame(list(zip(aminoacid_names_bacteria, counter_values_bacteria)), columns=col_names_bacteria) ax = seaborn.barplot(x="Aminoacid", y="Count", data=df_bacteria) ax.set_title("Aminoacid Distribution Bacteria") # - ax = seaborn.histplot(data=df_bacteria, x="Count", bins=20) # Das Histogramm zeigt, wie viele Aminosäuren in welchen Count-Bereich liegen. # Are there difference? # There are differences in the Proteome of a Human and E. coli. In general, humans hava a larger Proteome and therefore the overall counts are way higher. Bacteria uses the Aminoacid Alanin (A) more frequently Humans use Serin (S) more often.Bacteria also use less of Cystein (C) but use Isoleucin (I) more frequently. # # Find special proteins # Find the most extreme proteins # * lowest / highest pI # * longest / shortest proteins # * highest density [mass / amino acid counts] # * most hydrophobic, hydrophic #
exercises/day 2/02.exercise_find_the_prots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Relatorio Entrega II cadeira Web-semantica @ Fcul 2020-2021 # Realizado por <NAME>(55018) e <NAME>(55841) # + Para a proxima fase da entrega do trabalho foram feitas as seguintes mudanças/funcionalidades 1) Revisão dos triplos após feedback do docente, nomeadamente: 1) Separaçao do primeiro e ultimo nome 2) Separaçao dos anos jogados do formato 1984-1986 para addicionar os anos todos jogados eg: 1984,1985,1986 3) tratamento do player id para não conter o nome 4) Tratamento do birth year e birth place 2) Ligaçao e utilizaçao do reasoner Fuseki para realizar as queries em vez de usar a biblioteca rdflib 3) Leitura e tratamento de um ficheiro com o nome e info as equipas para um melhor estruturamento da informaçao 4) Utilizaçao do namespace Schema.org para uniformação da informaçao. Nesta caso usamos o https://schema.org/Person , https://schema.org/athlete e https://schema.org/SportsTeam utilizando as suas proprieadades sempre que possivel (excepcao termos muito especificos ao futebol Americano) nos triplos. Por exemplo usando o schema addicionamos os jogadores a equipa através da propriedade "athlete" do Sports Team alterando a forma como o estavamos a fazer anteriormente. 5) Usando um SPARQLWrapper fazemos um query á dbpedia usando a propriedade rdfs:label = "nome de equipa" pedindo informaçao extra nomedamente 1) Treinador da equipa 2) CEO da equipa 3) Abstracto da equipa 4) Número de "superbowls" conquistadas Após feito o query tratamos a informaçao recebida em JSON e acrescentamo-la atráves de triplos aos dados de cada equipa. 6) Regras 7) Reasoner
RelatorioII.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#############################################################
# Author(s): Debaditya, Anwesha, Anna                       #
#############################################################

# +
# Download the three Steinmetz data files (skipped if already on disk).
import os, requests

fname = []
for j in range(3):
    fname.append('steinmetz_part%d.npz'%j)
url = ["https://osf.io/agvxh/download"]
url.append("https://osf.io/uv3mw/download")
url.append("https://osf.io/ehmw2/download")
for j in range(len(url)):
    if not os.path.isfile(fname[j]):
        try:
            r = requests.get(url[j])
        except requests.ConnectionError:
            print("!!! Failed to download data !!!")
        else:
            # Non-200 responses are reported but not retried.
            if r.status_code != requests.codes.ok:
                print("!!! Failed to download data !!!")
            else:
                with open(fname[j], "wb") as fid:
                    fid.write(r.content)

# +
# Setup dependencies
# Dependencies:
#   numpy
#   matplotlib
import numpy as np
from matplotlib import rcParams
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec

rcParams['figure.figsize'] = [20, 10]
rcParams['axes.spines.top'] = False
rcParams['axes.spines.right'] = False
rcParams['figure.autolayout'] = True

# +
# Groupings of brain regions: `regions[i]` names the coarse group whose member
# areas are listed in `brain_groups[i]`.  The last entry, "other", has no list;
# areas not found in any group fall through to it (see generate_metadata).
regions = ["vis ctx", "thal", "hipp", "other ctx", "midbrain", "basal ganglia", "cortical subplate", "other"]
brain_groups = [["VISa", "VISam", "VISl", "VISp", "VISpm", "VISrl"],  # visual cortex
                ["CL", "LD", "LGd", "LH", "LP", "MD", "MG", "PO", "POL", "PT", "RT", "SPF", "TH", "VAL", "VPL", "VPM"],  # thalamus
                ["CA", "CA1", "CA2", "CA3", "DG", "SUB", "POST"],  # hippocampal
                ["ACA", "AUD", "COA", "DP", "ILA", "MOp", "MOs", "OLF", "ORB", "ORBm", "PIR", "PL", "SSp", "SSs", "RSP"," TT"],  # non-visual cortex
                ["APN", "IC", "MB", "MRN", "NB", "PAG", "RN", "SCs", "SCm", "SCig", "SCsg", "ZI"],  # midbrain
                ["ACB", "CP", "GPe", "LS", "LSc", "LSr", "MS", "OT", "SNr", "SI"],  # basal ganglia
                ["BLA", "BMA", "EP", "EPd", "MEA"]  # cortical subplate
                ]
n_regions = len(regions)

# +
# Load the data
def load_alldat():
    # Concatenate the 'dat' arrays of all downloaded parts into one 1-D
    # object array of session dicts.  Relies on the module-level `fname`.
    alldat = np.array([])
    for j in range(len(fname)):
        alldat = np.hstack((alldat, np.load('steinmetz_part%d.npz'%j, allow_pickle=True)['dat']))
    return alldat

alldat = load_alldat()

# +
# Print the keys of one session dict (all sessions share the same schema).
print("The keys in this dataset are:\n----")
for key in alldat[0].keys():
    print(key)

# +
# Print information about brain areas recorded in each session/mouse.
for idx, dat in enumerate(alldat):
    print(idx,dat['mouse_name'],dat['date_exp'],np.unique(dat['brain_area']))

# +
# Select session 11 and remember its time-bin size (used to convert
# spike counts per bin into firing rates in Hz).
dat = alldat[11]
dt = dat['bin_size']

# +
# Generate some metadata for the neuron regions.
def generate_metadata(dat):
    # For every recorded neuron, compute:
    #   region_index[n] — index into `regions` (defaults to n_regions, i.e. "other")
    #   group_index[n]  — index of the area within its region's brain_groups list
    # Returns (n_neurons, region_index, group_index).
    n_neurons = (len(dat['brain_area']))
    n_regions = len(regions)
    region_index = np.ones(n_neurons)*n_regions
    group_index = np.ones(n_neurons)*n_regions
    for region in range(len(regions)-1):
        region_index[np.isin(dat['brain_area'], brain_groups[region])] = region
        for group in range(len(brain_groups[region])):
            group_index[np.isin(dat['brain_area'],brain_groups[region][group])] = group
    return n_neurons, region_index, group_index

# +
# Define function to print plots.
# NOTE(review): this function reads `region_index`, `group_index` and `dt`
# from module scope; they must have been set by a prior generate_metadata
# call for the same session — confirm callers keep these in sync.
def pcolormesh_with_means(region,spikes,session):
    # For every area (group) of `region` that has at least one neuron, draw:
    #   - a trials x time raster of the summed spikes of its neurons,
    #   - the mean firing rate over time (averaged over neurons and trials),
    #   - the mean firing rate per trial (averaged over neurons and time).
    for group in range(len(brain_groups[region])):
        if np.sum(np.logical_and(region_index==region,group_index == group)):
            print(np.shape(spikes[:][np.logical_and(region_index==region,group_index == group),:]))
            fig = plt.figure(constrained_layout=False)
            fig.suptitle("Avg Spike rate of all neurons in region "+brain_groups[region][group]+" for session "+str(session))
            gs = fig.add_gridspec(4, 5)
            fig_raster= fig.add_subplot(gs[:3, :4])
            # Sum over neurons (axis 0) -> trials x time image.
            raster_plot = fig_raster.pcolormesh(np.sum(spikes[:][np.logical_and(region_index==region,group_index == group),:],axis=0))
            fig_raster.set_ylabel("Trials")
            fig_raster.set_xlabel("Binned Time (10ms)")
            fig_avg_trial = fig.add_subplot(gs[3,:4], sharex=fig_raster)
            # Average over neurons and trials; /dt converts counts/bin to Hz.
            fig_avg_trial.plot(np.mean(spikes[:][np.logical_and(region_index==region,group_index == group),:],axis=(0,1))/dt)
            fig_avg_trial.set_ylabel("Mean FR(Hz)")
            fig_avg_trial.set_xlabel("Binned Time (10ms)")
            fig_avg_trial.set_title("Mean Firing Rate Across Trials")
            fig_avg_time = fig.add_subplot(gs[:3,4],sharey=fig_raster)
            # Average over neurons and time -> one value per trial.
            fig_avg_time.plot(np.mean(spikes[:][np.logical_and(region_index==region,group_index == group),:],axis=(0,2))/dt,range(np.shape(spikes)[1]))
            fig_avg_time.set_ylabel("Trials")
            fig_avg_time.set_xlabel("Mean FR (Hz)")
            fig_avg_time.set_title("Mean Firing in Trial")
            plt.show()

# +
# Print all plots: iterate every session and every region.
# Note that the loop rebinds the module-level region_index/group_index that
# pcolormesh_with_means reads.
n_regions = len(regions)
for session, dat in enumerate(alldat):
    n_neurons, region_index, group_index = generate_metadata(dat)
    spikes = dat['spks']
    for region in range(n_regions-1):
        pcolormesh_with_means(region,spikes,session)

# +
# Normalize spikes by subtracting a passive-condition baseline.
# NOTE(review): np.mean(..., axis=(1,2)) yields one baseline per neuron, but
# the trailing [0] keeps only the FIRST neuron's value, so a single scalar is
# subtracted from every neuron — confirm this is intended.
print(np.shape(dat['spks']))
print(np.shape(dat['spks_passive']))
spikes = dat['spks']-np.mean(dat['spks_passive'],axis=(1,2))[0]
# -

# Plot the session/region mean firing-rate curves, three areas per figure.
for session in range(39):
    dat = alldat[session]
    spikes = dat['spks']-np.mean(dat['spks_passive'],axis=(1,2))[0]
    for idx, region in enumerate(np.unique(dat['brain_area'])):
        if np.mod(idx,3)==0:
            # Start a fresh 3-panel figure every third area.
            fig, axs = plt.subplots(3)
            axs[0].plot(np.mean(spikes[:][dat['brain_area']==region,:],axis=(0,1))/dt,label=str(session)+" "+region)
            axs[0].set_xlabel('Time Bins (10ms)')
            axs[0].set_ylabel('Mean Firing Rate Across Trials (Hz)')
            axs[0].legend()
        if np.mod(idx,3)==1:
            axs[1].plot(np.mean(spikes[:][dat['brain_area']==region,:],axis=(0,1))/dt,label=str(session)+" "+region)
            axs[1].set_xlabel('Time Bins (10ms)')
            axs[1].set_ylabel('Mean Firing Rate Across Trials (Hz)')
            axs[1].legend()
        if np.mod(idx,3)==2:
            axs[2].plot(np.mean(spikes[:][dat['brain_area']==region,:],axis=(0,1))/dt,label=str(session)+" "+region)
            axs[2].set_xlabel('Time Bins (10ms)')
            axs[2].set_ylabel('Mean Firing Rate Across Trials (Hz)')
            axs[2].legend()
    plt.show()
# plt.show()

# + jupyter={"source_hidden": true}
# Define function to print plots for a single neuron of each area.
# NOTE(review): like pcolormesh_with_means, this reads module-level
# region_index/group_index/dt — keep them in sync with the session plotted.
def pcolormesh_with_single_neuron(region,spikes,session,neuron):
    for group in range(len(brain_groups[region])):
        if np.sum(np.logical_and(region_index==region,group_index == group)):
            print(np.shape(spikes[:][np.logical_and(region_index==region,group_index == group),:]))
            fig = plt.figure(constrained_layout=False)
            fig.suptitle("Avg Spike rate of a single neurons in region "+brain_groups[region][group]+" for session "+str(session))
            gs = fig.add_gridspec(4, 5)
            fig_raster= fig.add_subplot(gs[:3, :4])
            # Restrict to this area's neurons, then pick one neuron's
            # trials x time slice for the raster.
            t_spikes = spikes[:][np.logical_and(region_index==region,group_index == group),:]
            raster_plot = fig_raster.pcolormesh(t_spikes[:][neuron,:])
            fig_raster.set_ylabel("Trials")
            fig_raster.set_xlabel("Binned Time (10ms)")
            fig_avg_trial = fig.add_subplot(gs[3,:4], sharex=fig_raster)
            # Average the chosen neuron over trials.
            fig_avg_trial.plot(np.mean(t_spikes[:][neuron,:],axis=(0))/dt)
            fig_avg_trial.set_ylabel("Mean FR(Hz)")
            fig_avg_trial.set_xlabel("Binned Time (10ms)")
            fig_avg_trial.set_title("Mean Firing Rate Across Trials")
            fig_avg_time = fig.add_subplot(gs[:3,4],sharey=fig_raster)
            # Average the chosen neuron over time -> one value per trial.
            fig_avg_time.plot(np.mean(t_spikes[:][neuron,:],axis=(1))/dt,range(np.shape(t_spikes)[1]))
            fig_avg_time.set_ylabel("Trials")
            fig_avg_time.set_xlabel("Mean FR (Hz)")
            fig_avg_time.set_title("Mean Firing in Trial")
            plt.show()
# -

# Plot neuron 0 of every area of session 11.
dat = alldat[11]
n_neurons, region_index, group_index = generate_metadata(dat)
spikes = dat['spks']
for region in range(n_regions-1):
    pcolormesh_with_single_neuron(region,spikes,11,0)

# The above results are not too useful. We have plotted the graphs once again,
# but this time we decided to plot for a single neuron and look at how it
# evolves over time. This does not seem to give a good indication of activity
# at all. We shall now be looking at how to plot the graphs based on only
# averages of all neurons on a trial wise basis.
# +
# Trial-resolved mean firing rate per brain area for one session/trial,
# three areas per figure.  The original cell triplicated the plotting code
# across np.mod(idx,3) branches; we index axs[idx % 3] once instead.
session = 10
dat = alldat[session]
# Baseline-correct using the first neuron's mean passive rate (a scalar),
# matching the normalization used by the other cells of this notebook.
spikes = dat['spks'] - np.mean(dat['spks_passive'], axis=(1, 2))[0]
n_neurons, region_index, group_index = generate_metadata(dat)
trial = 36  # the single trial to display
print(np.shape(spikes))
for idx, region in enumerate(np.unique(dat['brain_area'])):
    panel = idx % 3
    if panel == 0:
        # Start a fresh 3-panel figure every third area.
        fig, axs = plt.subplots(3)
    ax = axs[panel]
    # Average this area's neurons for the chosen trial; /dt converts to Hz.
    ax.plot(np.mean(spikes[dat['brain_area'] == region, trial, :], axis=0) / dt,
            label=str(session) + " " + region)
    ax.set_xlabel('Time Bins (10ms)')
    ax.set_ylabel('Mean Firing Rate for Trials ' + str(trial) + ' (Hz)')
    ax.legend()
plt.show()
# -

# As we can see, despite trying to plot the events trial-wise, they don't seem
# to look very nice. But as looks can be deceiving, we shall try to run a
# cross-correlation anyway.
#
# The correlation that we shall run will be a simple one.
# +
# Trial-averaged cross-correlation between the mean responses of every pair
# of brain areas for one session.
# Fixes vs. original: the duplicated `avg_area_response = avg_area_response =`
# assignment is removed, and the sum of n_trials correlation matrices is
# divided by n_trials (the original divided by n_trials+1, biasing every
# coefficient low).
session = 0
dat = alldat[session]
# Scalar passive baseline, as in the other cells of this notebook.
spikes = dat['spks'] - np.mean(dat['spks_passive'], axis=(1, 2))[0]
n_neurons, region_index, group_index = generate_metadata(dat)
n_trials = np.shape(spikes)[1]
areas = np.unique(dat['brain_area'])
avg_corrcoef = np.zeros((len(areas), len(areas)))
for trial in range(n_trials):
    # One mean firing-rate trace (Hz) per area for this trial.
    avg_area_response = np.zeros((len(areas), np.shape(spikes)[2]))
    for idx, region in enumerate(areas):
        avg_area_response[idx, :] = np.mean(spikes[dat['brain_area'] == region, trial, :], axis=0) / dt
    avg_corrcoef += np.corrcoef(avg_area_response)
avg_corrcoef /= n_trials  # true mean over the n_trials summed matrices

fig, ax = plt.subplots(1, 1)
c = ax.pcolor(avg_corrcoef)
plt.colorbar(c)
# Shift ticks to cell centres so the area labels line up with the cells.
ax.set_xticks(ax.get_xticks()[:-1] + 0.5)
ax.set_yticks(ax.get_yticks()[:-1] + 0.5)
ax.set_xticklabels(areas)
ax.set_yticklabels(areas)
plt.title("Corrcoef of regions for session " + str(session) + " across all trials.")
plt.show()
# -

# As you can see, the correlation coefficient is being calculated properly, so
# I think there is no issue with this. Let us try to plot the same for all
# sessions.
# Repeat the trial-averaged cross-correlation analysis for every session.
# Fixes vs. original: the duplicated `avg_area_response = avg_area_response =`
# assignment is removed, the sum of n_trials matrices is divided by n_trials
# (not n_trials+1, an off-by-one that biased every coefficient low), and the
# hard-coded range(39) is replaced with range(len(alldat)).
for session in range(len(alldat)):
    dat = alldat[session]
    # Scalar passive baseline, as in the other cells of this notebook.
    spikes = dat['spks'] - np.mean(dat['spks_passive'], axis=(1, 2))[0]
    n_neurons, region_index, group_index = generate_metadata(dat)
    n_trials = np.shape(spikes)[1]
    areas = np.unique(dat['brain_area'])
    avg_corrcoef = np.zeros((len(areas), len(areas)))
    for trial in range(n_trials):
        # One mean firing-rate trace (Hz) per area for this trial.
        avg_area_response = np.zeros((len(areas), np.shape(spikes)[2]))
        for idx, region in enumerate(areas):
            avg_area_response[idx, :] = np.mean(spikes[dat['brain_area'] == region, trial, :], axis=0) / dt
        avg_corrcoef += np.corrcoef(avg_area_response)
    avg_corrcoef /= n_trials  # true mean over the n_trials summed matrices

    fig, ax = plt.subplots(1, 1)
    c = ax.pcolor(avg_corrcoef)
    plt.colorbar(c)
    # Shift ticks to cell centres so the area labels line up with the cells.
    ax.set_xticks(ax.get_xticks()[:-1] + 0.5)
    ax.set_yticks(ax.get_yticks()[:-1] + 0.5)
    ax.set_xticklabels(areas)
    ax.set_yticklabels(areas)
    plt.title("Corrcoef of regions for session " + str(session) + " across all trials.")
    plt.show()

# Figures accumulate across 39 sessions; reclaim memory afterwards.
import gc
gc.collect()
code/.ipynb_checkpoints/prelimTest-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.5 64-bit
#     language: python
#     name: python3
# ---

# ## Question 1
# Given an array of meeting time intervals where ```intervals[i] = [starti, endi]```, determine if a person could attend all meetings within a 24-hour time period.
# > Meeting times are in hours on a 24-hour clock.
#
# Example 1:
# ```
# Input: intervals = [[0,3],[2,6],[13,16]]
# Output: false
# ```
# Example 2:
# ```
# Input: intervals = [[7,10],[2,4]]
# Output: true
# ```

# ## Question 2
# Design a class that takes
# - in a list of integers
# - stores the list
# - runs a sorting algorithm of your choosing
# - Run the sorting algorithm through a class object
# - Define the Big O of that algorithm

# ## Question 3
# Create a class named Person with properties for:
# - first_name
# - last_name
# - print_name method
#
# Create a class named Student:
# - Inherit the properties and methods from the Person class
# - Add a property called graduation_year

# ## Question 4
# Using two sentences explain the benefits of inheritance.

# ## Question 5
#
# Let's utilize our DataFrame from in class to make some visual graphs!
# - import matplotlib
# - read https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.scatter.html on how to make scatter plots
# - read https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.figure.html on how to make figures using matplotlib
# - create a figure of 4 scatter plots using grades from each quarter from our DataFrame as the y-axis and the Faculty Names as the x-axis.
#
# > Hints:
# > Use the add_subplot(rows, columns, position) method to add plots to your figure
# > Use set_title(string) method to add a title to your plots; ex: plot.set_title('First Quarter')

# ### Your scatterplots should look like this:
# ![TKH-Teachback](scatter_plots.jpg)

# +
import pandas as pd
import names
import random
import numpy as np

# Build a quick DataFrame of 100 students with random names and grades.
N_STUDENTS = 100


def random_grades(n, low=45, high=100):
    """Return n random integer grades, each drawn uniformly from [low, high]."""
    return [random.randint(low, high) for _ in range(n)]


# Random first names for the students.
student_names = [names.get_first_name() for _ in range(N_STUDENTS)]

# One grade list per quarter.  (The original repeated this loop four times.)
first_quarter = random_grades(N_STUDENTS)
second_quarter = random_grades(N_STUDENTS)
third_quarter = random_grades(N_STUDENTS)
fourth_quarter = random_grades(N_STUDENTS)

# Five faculty members; repeat each 20x so every faculty teaches 20 students,
# then shuffle to randomize which student gets which faculty member.
faculty_names = [names.get_first_name() for _ in range(5)]
faculty_names = list(np.repeat(faculty_names, 20))
random.shuffle(faculty_names)

# Assemble the school DataFrame from a column dictionary.
school_information = {
    'Student Names': student_names,
    'First Semester': first_quarter,
    'Second Semester': second_quarter,
    'Third Semester': third_quarter,
    'Fourth Semester': fourth_quarter,
    'Faculty Name': faculty_names,
}
school = pd.DataFrame(data=school_information)
school

# +
import matplotlib.pyplot as plt

# One scatter plot per semester: Faculty Name (x) vs. grades (y).
# (The original repeated the three plotting lines four times.)
figure = plt.figure(figsize=(15, 15))
semesters = ['First Semester', 'Second Semester', 'Third Semester', 'Fourth Semester']
for position, semester in enumerate(semesters, start=1):
    ax = figure.add_subplot(2, 2, position)
    ax.scatter(school['Faculty Name'], school[semester])
    ax.set_title(semester)
# -

# ### Bonus Question
#
# Using our DataFrame and the pandas library, create 5 separate DataFrames of each Faculty and their class. Then display which class has the highest average grade using our get_average method from the comparable module.
#
# > Hints:
# > Create a new DataFrame by comparing each student's Faculty member and separating them by the name of the Faculty member (there should be 20 students per faculty)

# BUG FIX: the faculty names are generated randomly on every run, so filtering
# on hard-coded names like 'Wendy' or 'Justin' (as the original did) almost
# always produces empty DataFrames.  Split on the names actually generated.
faculty_classes = {
    faculty: school[school['Faculty Name'] == faculty]
    for faculty in sorted(set(faculty_names))
}
for faculty, class_df in faculty_classes.items():
    display(class_df)
lab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yildirimyy/permutation-combination/blob/master/permu.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="MipXSa8-Kyax" colab_type="code" outputId="196017a6-b70b-4239-df1a-2451af874304" colab={"base_uri": "https://localhost:8080/", "height": 86}
def permutations(seq):
    """Return every permutation of the characters in seq, joined with `+`.

    seq is expected to be a sequence of strings (e.g. ``list('abc')``);
    each permutation is returned as one concatenated string.

    Fix vs. original: the parameter was named ``list``, which shadowed the
    built-in; the comments were also translated from Turkish to English.
    """
    # Empty sequence: nothing to permute.
    if len(seq) == 0:
        return []
    # Single element: the only permutation is the element itself.
    if len(seq) == 1:
        return list(seq)
    # More than one element: fix each element in turn as the head and
    # recursively permute the remainder.
    perm_list = []  # accumulates the permutations built so far
    for i in range(len(seq)):
        head = seq[i]
        rest = seq[:i] + seq[i+1:]
        for tail in permutations(rest):
            perm_list.append(head + tail)
    return perm_list


print(permutations(list('123')))
print("\t")
print(permutations(list('abc')))
print("\t")

# + id="iWd8zKRyK1Av" colab_type="code" colab={}
permu.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Image-classification challenge: train on (x_train, y_train), predict x_test.
import numpy as np
import matplotlib.pyplot as plt

with np.load('prediction-challenge-02-data.npz') as fh:
    x_train = fh['x_train']
    y_train = fh['y_train']
    x_test = fh['x_test']

# TRAINING DATA: INPUT (x) AND OUTPUT (y)
# 1. INDEX: IMAGE SERIAL NUMBER (6000)
# 2/3. INDEX: PIXEL VALUE (32 x 32)
# 4. INDEX: COLOR CHANNELS (3)
print(x_train.shape, x_train.dtype)
print(y_train.shape, y_train.dtype)

# TEST DATA: INPUT (x) ONLY
print(x_test.shape, x_test.dtype)

# TRAIN MODEL ON x_train, y_train
# -

# # Data Stuff

# NOTE(review): duplicate import — plt was already imported above.
from matplotlib import pyplot as plt

# Peek at one training image and its label.
i = 4
plt.imshow(x_train[i])
y_train[i]


def swap(x):
    # Move the channel axis from last to third-from-last:
    # (..., H, W, C) -> (..., C, H, W), the layout torch conv layers expect.
    return x.swapaxes(-1,-2).swapaxes(-2,-3)


from sklearn.model_selection import train_test_split

# Split channel-first training data into train/validation subsets.
x_training, x_validation, y_training, y_validation = train_test_split( swap(x_train), y_train )
y_training.shape

import torch

# # Network

# +
class DenseNet(torch.nn.Sequential):
    # Fully-connected stack: Linear(sizes[i] -> sizes[i+1]) for each
    # consecutive pair, with `activation` between all but the last pair.
    def __init__(self, *sizes, activation=torch.nn.ReLU()):
        layers = self._make_layers(*sizes, activation=activation)
        super().__init__(*layers)

    @staticmethod
    def _make_layers(*sizes, activation):
        # Build the alternating Linear/activation layer list.
        layers = []
        for i in range(len(sizes)-1):
            layers.append(torch.nn.Linear(sizes[i], sizes[i+1]))
            if i < len(sizes)-2:
                # No activation after the final Linear layer.
                layers.append(activation)
        return layers


# Smoke test of the layer construction.
DenseNet(128, 2, 12, 24)

# +
class RegularizedConvBlock(torch.nn.Sequential):
    # Conv -> activation -> BatchNorm -> MaxPool -> Dropout block.
    # The same kernel_size is used for both the conv and the pooling.
    def __init__( self, in_features, out_features, kernel_size=2, dropout=0.2, activation=torch.nn.ReLU() ):
        layers = [
            torch.nn.Conv2d(in_features, out_features, kernel_size),
            activation,
            torch.nn.BatchNorm2d(out_features),
            torch.nn.MaxPool2d(kernel_size),
            torch.nn.Dropout(dropout)
        ]
        super().__init__(*layers)

    # NOTE(review): this override just delegates to Sequential.forward and
    # could be removed without changing behavior.
    def forward(self, x):
        dx = super().forward(x)
        return dx


# Smoke test: check the output shape on a small batch.
RegularizedConvBlock(3,3)(torch.tensor(x_training[:10])).shape

# +
class Classifier(torch.nn.Sequential):
    # Three conv blocks (3->64->16->3 channels), then Flatten, a DenseNet head
    # from 27 features (presumably 3 channels x 3x3 spatial after the three
    # blocks — confirm against the smoke test below) to 3 classes, and
    # LogSoftmax (paired with NLLLoss during training).
    # NOTE(review): the `dropout` and `activation` parameters are accepted but
    # never used in this body — Classifier(..., dropout=0.5) below has no
    # effect on the conv blocks' default dropout of 0.2.
    def __init__(self, *hidden, dropout=0.5, activation=torch.nn.ReLU()):
        layers = []
        layers.append(RegularizedConvBlock(3,64))
        layers.append(RegularizedConvBlock(64,16))
        layers.append(RegularizedConvBlock(16,3))
        layers.append(torch.nn.Flatten())
        layers.append(DenseNet(27, *hidden, 3))
        layers.append(torch.nn.LogSoftmax(dim=1))
        super().__init__(*layers)


# Smoke test: check the output shape on a small batch.
Classifier(128,128)(torch.tensor(x_training[:10])).shape
# -

class NaiveClassifier(torch.nn.Sequential):
    # Baseline: flatten the raw 3x32x32 image into a DenseNet head.
    def __init__(self, *hidden):
        layers = []
        layers.append(torch.nn.Flatten())
        layers.append(DenseNet(3*32*32, *hidden, 3))
        layers.append(torch.nn.LogSoftmax(dim=1))
        super().__init__(*layers)


# # Training

# NOTE(review): device is hard-coded to the first GPU; this cell fails on a
# CPU-only machine.
device = torch.device("cuda:0")
classifier = Classifier(512, 64, dropout=0.5)
#classifier = NaiveClassifier(2048,1024,256,16)
classifier.to(device)
#train_loader.to(device)
#validation_loader.to(device)


def accuracy(x,y):
    # Fraction of rows of logits/log-probs x whose argmax equals label y.
    return (torch.argmax(x, dim=-1) == y).sum()/len(y)


optim = torch.optim.Adam(classifier.parameters(), lr=5e-4)
loss = torch.nn.NLLLoss()  # expects log-probabilities (LogSoftmax output)
n_epoch = 200

from torch.utils.data import DataLoader, TensorDataset

# Copy a numpy array to the training device as float32.
to_tensor = lambda x: torch.tensor(x, dtype=torch.float32, device=device)

for epoch in range(n_epoch):
    # NOTE(review): both loaders (and their device-resident tensor copies) are
    # rebuilt every epoch; hoisting them above the loop would avoid the
    # repeated host->device transfer.
    train_loader = DataLoader(
        TensorDataset(
            to_tensor(x_training),
            to_tensor(y_training).to(dtype=int)
        ),
        batch_size=64,
        shuffle=True
    )
    validation_loader = DataLoader(
        TensorDataset(
            to_tensor(x_validation),
            to_tensor(y_validation).to(dtype=int)
        ),
        batch_size=64,
        shuffle=False
    )
    all_nlls = []
    all_accuracies = []
    for x,y in train_loader:
        # Standard optimization step: zero grads, forward, loss, backward, step.
        optim.zero_grad()
        prediction = classifier(x)
        nll = loss(prediction, y)
        nll.backward()
        optim.step()
        # Live per-batch accuracy, overwritten in place via carriage return.
        print(accuracy(prediction,y).item(), end="\r")
        all_nlls.append(nll.item())
        all_accuracies.append(accuracy(prediction,y).item())
    train_string = ( "Train. Accuracy {:.2f} Error {:.4f}".format( np.mean(all_accuracies), np.mean(all_nlls) ) )
    # validation
    with torch.no_grad():
        classifier.train(False)  # eval mode: freeze BatchNorm stats, disable Dropout
        all_nlls = []
        all_accuracies = []
        for x,y in validation_loader:
            prediction = classifier(x)
            nll = loss(prediction, y)
            all_nlls.append(nll.item())
            all_accuracies.append(accuracy(prediction,y).item())
        test_string = ( "Val. Accuracy {:.2f} Error {:.4f}".format( np.mean(all_accuracies), np.mean(all_nlls) ) )
        classifier.train(True)  # back to training mode for the next epoch
    print(f"{epoch+1:2d}/{n_epoch} | {train_string} | " + test_string)

# Predict class indices for the test set (channel-first, back to numpy).
prediction = classifier(to_tensor(swap(x_test))).argmax(dim=-1).detach().cpu().numpy()
prediction

# +
# 0:cat, 1:dog, 2:frog
# -

# Spot-check one test image against its predicted label.
i = 12
plt.imshow(x_test[i])
prediction[i]

# +
# MAKE SURE THAT YOU HAVE THE RIGHT FORMAT
assert prediction.ndim == 1
assert prediction.shape[0] == 300

# AND SAVE EXACTLY AS SHOWN BELOW
np.save('prediction.npy', prediction.astype(int))

# MAKE SURE THAT THE FILE HAS THE CORRECT FORMAT
def validate_prediction_format():
    # Re-load the saved file and verify shape, dtype, and label range 0..2.
    loaded = np.load('prediction.npy')
    assert loaded.shape == (300, )
    assert loaded.dtype == int
    assert (loaded <= 2).all()
    assert (loaded >= 0).all()

validate_prediction_format()
# -
Worksheet04/CIFAR_tutor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: TF-GPU # language: python # name: mldl_env # --- # <h1>Deep-dentistry model for decay analysis</h1> # + #Setting up resources and libraries required import matplotlib.pyplot as plt # %matplotlib inline import keras from keras import backend as K from keras.models import Sequential from keras.layers import Activation from keras.layers.normalization import BatchNormalization from keras.optimizers import Adam from keras.metrics import categorical_crossentropy from keras.preprocessing.image import ImageDataGenerator from keras.layers.normalization import BatchNormalization from keras.layers.core import Flatten, Dense from keras.layers.convolutional import * import numpy as np import itertools from sklearn.metrics import confusion_matrix # - #Local and relative paths to the files are used.. train_path = 'Data/train' valid_path = 'Data/validate' test_path = 'Data/test' train_bconvert = ImageDataGenerator().flow_from_directory(train_path,target_size=(64,64),classes=['DECAYED','DECAYFREE'],batch_size=20) valid_bconvert = ImageDataGenerator().flow_from_directory(valid_path,target_size=(64,64),classes=['DECAYED','DECAYFREE'],batch_size=10) test_bconvert = ImageDataGenerator().flow_from_directory(test_path,target_size=(64,64),classes=['DECAYED','DECAYFREE'],batch_size=20) # Function plots images with labels def plots(ims, figsize=(24,12), rows=1, interp=False, titles=None): if type(ims[0]) is np.ndarray: ims = np.array(ims).astype(np.uint8) if (ims.shape[-1] != 3): ims = ims.transpose((0,2,3,1)) f = plt.figure(figsize=figsize) cols = len(ims)//rows if len(ims) % 2 == 0 else len(ims)//rows + 1 for i in range(len(ims)): sp = f.add_subplot(rows, cols, i+1) sp.axis('Off') if titles is not None: sp.set_title(titles[i], fontsize=16) plt.imshow(ims[i], interpolation=None if interp else 'none') imgs, labels = 
next(train_bconvert) plots(imgs, titles=labels) # <h2>Network Architecture</h2> # + input1 = keras.layers.Input(shape=(64, 64, 3)) C1 = Conv2D(32, (3, 3), strides=(2, 2), activation='linear', input_shape=(64,64,3), kernel_regularizer=regularizers.l2(9.999999747378752e-05), padding='valid')(input1) BN1 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C1) A1 = Activation('relu')(BN1) C2=Conv2D(64, (3, 3), strides=(1, 1), activation='linear', input_shape=(64,64,3), kernel_regularizer=regularizers.l2(9.999999747378752e-05), padding='same')(A1) BN2 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C2) A2 = Activation('relu')(BN2) C_sep_1 = SeparableConv2D(64, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A2) BN3 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_1) add_1 = keras.layers.add([A2,BN3]) A3 = Activation('relu')(add_1) #Bottleneck 1 C3 = Conv2D(128, (7, 7), strides=(1, 1), activation='linear', input_shape=(64,64,3), kernel_regularizer=regularizers.l2(9.999999747378752e-05), padding='same')(A3) BN4 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C3) A4 = Activation('relu')(BN4) C_sep_2 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), 
pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A4) BN5 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_2) add_2= keras.layers.add([A4,BN5]) A5 = Activation('relu')(add_2) C_sep_3 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A5) BN6 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_3) add_3= keras.layers.add([A5,BN6]) A6 = Activation('relu')(add_3) C_sep_4 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A6) BN7 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_4) add_4= keras.layers.add([A6,BN7]) A7 = Activation('relu')(add_4) C_sep_5 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A7) BN8 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_5) add_5= keras.layers.add([A4,BN8]) A8 = Activation('relu')(add_5) 
#Bottleneck 2 MP1 = MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='valid',data_format='channels_last')(A8) #To tackle overfitting as acc=0.998 and val_acc = 0.66 in previous version DO1 = keras.layers.Dropout(rate=0.3, noise_shape=None, seed=None) (MP1) C_sep_6 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(DO1) BN9 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_6) add_6= keras.layers.add([BN9,DO1]) A9 = Activation('relu')(add_6) C4 = Conv2D(128, (7, 7), strides=(1, 1), activation='linear', input_shape=(64,64,3), kernel_regularizer=regularizers.l2(9.999999747378752e-05), padding='same')(A9) BN10 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C4) A10 = Activation('relu')(BN10) C_sep_7 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A10) BN11 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_7) add_7= keras.layers.add([A10,BN11]) A11 = Activation('relu')(add_7) C_sep_8 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, 
pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A11) BN12 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_8) add_8= keras.layers.add([A11,BN12]) A12 = Activation('relu')(add_8) C_sep_9 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A12) BN13 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_9) add_9= keras.layers.add([A12,BN13]) A13 = Activation('relu')(add_9) C_sep_10 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A13) BN14 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_10) add_10= keras.layers.add([A10,BN14]) A14 = Activation('relu')(add_10) #Bottleneck3 MP2 = MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='valid',data_format='channels_last')(A14) #To tackle overfitting as acc=0.95 and val_acc = 0.69 in previous version DO2 = keras.layers.Dropout(rate=0.3, noise_shape=None, seed=None) (MP2) C_sep_11 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', 
kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(DO2) BN15 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_11) add_11= keras.layers.add([BN15,DO2]) A15 = Activation('relu')(add_11) C5 = Conv2D(128, (7, 7), strides=(1, 1), activation='linear', input_shape=(64,64,3), kernel_regularizer=regularizers.l2(9.999999747378752e-05), padding='same')(A15) BN16 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C5) A16 = Activation('relu')(BN16) #Big Node 2 C_sep_12 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A16) BN17 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_12) add_12= keras.layers.add([A16,BN17]) #add_7 A17 = Activation('relu')(add_12) C_sep_13 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A17) BN18 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_13) add_13= keras.layers.add([A17,BN18]) #add_8 A18 = Activation('relu')(add_13) C_sep_14 = SeparableConv2D(128, (3, 
3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A18) BN19 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_14) add_14= keras.layers.add([A18,BN19]) #add_9 A19 = Activation('relu')(add_14) C_sep_15 = SeparableConv2D(128, (3, 3),dilation_rate=(1, 1), depth_multiplier=1, strides=(1, 1),padding='same', activation='linear',depthwise_initializer='Zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05), use_bias=True, pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=regularizers.l2(9.999999747378752e-05), pointwise_regularizer=regularizers.l2(9.999999747378752e-05))(A19) BN20 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C_sep_15) add_15= keras.layers.add([A16,BN20]) #add_9 A20 = Activation('relu')(add_15) C6 = Conv2D(128, (7, 7), strides=(1, 1), activation='linear', input_shape=(64,64,3), kernel_regularizer=regularizers.l2(9.999999747378752e-05), padding='same')(A20) BN21 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(C6) A21 = Activation('relu')(BN21) F1 = Flatten() (A21) D1 = keras.layers.Dense(units=128, activation='linear', use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=regularizers.l2(9.999999747378752e-05))(F1) BN22 = BatchNormalization(axis=-1, epsilon=0.001, mode=0, momentum=0.99, weights=None)(D1) A22 = Activation('relu')(BN22) DO3 = keras.layers.Dropout(rate=0.5, noise_shape=None, seed=None)(A22) out = keras.layers.Dense(units=2, activation='softmax', use_bias=True, kernel_initializer='glorot_uniform', 
bias_initializer='zeros', kernel_regularizer=None)(DO3) model = keras.models.Model(inputs=[input1], outputs=out) # - model.compile(Adam(lr=.0001), loss='categorical_crossentropy', metrics=['accuracy']) model.summary() for layer in model.layers: g=layer.get_config() h=layer.get_weights() print (g) print (h) model.fit_generator(train_bconvert, steps_per_epoch=55, validation_data=valid_bconvert, validation_steps=25, epochs=5, verbose=2) # <h2>Results</h2> test_bconf = ImageDataGenerator().flow_from_directory(test_path,target_size=(64,64),classes=['DECAYED','DECAYFREE'],batch_size=100) test_imgs, test_labels = next(test_bconf) test_labels = test_labels[:,0] test_labels predictions = model.predict_generator(test_bconf, steps=1, verbose=0) predictions predictions=np.round(predictions,0) cm = confusion_matrix(test_labels, predictions[:,0]) def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') cm_plot_labels = ['DECAYED','DECAYFREE'] plot_confusion_matrix(cm, cm_plot_labels, title='Confusion Matrix')
MDnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: python3_7_6 # language: python # name: py3_7_6 # --- # # 6. 예외 처리 # ### 06-1 구문 오류와 예외 # - 구문 오류, 예외(런타임 에러), 기본 예외 처리, try except 구문 # # # ### 오류의 종류 (2 가지) # - 1) 프로그램 실행 전에 발생하는 오류 # - 2) 프로그램 실행 중에 발생하는 오류 # # # #### 구문오류 # - 괄호의 개수, 들여쓰기 등으로 프로그램이 실행되기 전 발생하는 오류 # + # 프로그램 시작 print("# 프로그램이 시작되었습니다!") # 구문 오류 발생 코드 print("# 예외를 강제로 발생시켜 볼게요!) # 따옴표 열고 닫기 주의 # + # 프로그램 시작 print("# 프로그램이 시작되었습니다!") # 구문 오류 발생 코드 print("# 예외를 강제로 발생시켜 볼게요!") # 따옴표 열고 닫기 주의 # - # #### 예외 # - 예외 또는 런타임 오류는 실행 중에 발생하는 오류 # + # 프로그램 시작 print("# 프로그램이 시작되었습니다!") # 예외 발생 코드 list_a[1] # + # 프로그램 시작 print("# 프로그램이 시작되었습니다!") # 예외 발생 코드 list_a = [1,2,3,4,5] # 정의해줌 list_a[1] # - # ### 기본 예외 처리 # - 예외 처리: 예외를 해결하는 모든 것 (2 가지) # - 조건문 사용 # - try 구문 사용 # #### 예외 상황 확인 # + # 숫자 입력 number_input_a = int(input("정수입력> ")) # 출력 print("원의 반지름:", number_input_a) print("원의 둘레:", 2*3.14*number_input_a) print("원의 넓이:", 3.14*number_input_a*number_input_a) # + # 숫자 입력 number_input_a = int(input("정수입력> ")) # 출력 print("원의 반지름:", number_input_a) print("원의 둘레:", 2*3.14*number_input_a) print("원의 넓이:", 3.14*number_input_a*number_input_a) # - # #### 조건문으로 예외 처리하기 # ##### 문자열의 isdigit() 숫자로만 구성되었는지 # + # 숫자 입력 user_input_a = input("정수입력> ") # 사용자 입력이 숫자로만 구성되어 있을 때 if user_input_a.isdigit(): # 숫자로 변환 number_input_a = int(user_input_a) # 출력 print("원의 반지름:", number_input_a) print("원의 둘레:", 2*3.14*number_input_a) print("원의 넓이:", 3.14*number_input_a*number_input_a) else: print("정수를 입력하지 않았습니다.") # + # 숫자 입력 user_input_a = input("정수입력> ") # 사용자 입력이 숫자로만 구성되어 있을 때 if user_input_a.isdigit(): # 숫자로 변환 number_input_a = int(user_input_a) # 출력 print("원의 반지름:", number_input_a) print("원의 둘레:", 2*3.14*number_input_a) print("원의 넓이:", 3.14*number_input_a*number_input_a) else: print("정수를 입력하지 않았습니다.") # - # ### try except 구문 # - 예외를 처리할 수 있는 구문 제공 # # # - try: # 
- ㅁㅁㅁㅁ예외가 발생할 가능성이 있는 코드 # - except: # - ㅁㅁㅁㅁ예외가 발생했을 때 실행할 코드 # # # - 어떤 상황에 예외가 발생하는지 완벽하게 이해하고 있지 않아도 프로그램이 강제로 죽어 버리는 상황을 막을 수 있음. # ##### try except 구문 # try except 구문으로 예외 처리 try: # 숫자로 변환 number_input_a = int(input("정수 입력> ")) # 예외가 발생할 가능성이 있는 구문 # 출력 print("원의 반지름:", number_input_a) print("원의 둘레:", 2*3.14*number_input_a) print("원의 넓이:", 3.14*number_input_a*number_input_a) except: print("무언가 잘못되었습니다.") # 예외가 발생했을 때 실행할 구문 # ### try except 구문과 pass 키워드 조합 # - try: # - ㅁㅁㅁㅁ예외가 발생할 가능성이 있는 코드 # - except: # - ㅁㅁㅁㅁpass # ##### 숫자로 변환되는 것들만 리스트에 넣기 # + # 변수 선언 list_input_a = ["52", "273", "32", "스파이", "103"] # 반복 적용 list_number = [] for item in list_input_a: # 숫자로 변환해서 리스트에 추가 try: float(item) # 예외가 발생하면 알아서 다음으로 진행은 안 되겠지? list_number.append(item) # 예외 없이 통과했으면 리스트에 넣어줘! except: pass # 출력 print("{} 내부에 있는 숫자는".format(list_input_a)) print("{}입니다.".format(list_number)) # - # ### try except else 구문 # - try: # - ㅁㅁㅁㅁ예외가 발생할 가능성이 있는 코드 # - except: # - ㅁㅁㅁㅁ예외가 발생했을 때 실행할 코드 # - else: # - ㅁㅁㅁㅁ예외가 발생하지 않았을 때 실행할 코드 # ##### try except else 구문 try: # 숫자로 변환 number_input_a = int(input("정수 입력> ")) except: print("정수를 입력하지 않았습니다.") else: # 출력합니다. print("원의 반지름:", number_input_a) print("원의 둘레:", 2*3.14*number_input_a) print("원의 넓이:", 3.14*number_input_a*number_input_a) # try except else 구문 try: # 숫자로 변환 number_input_a = int(input("정수 입력> ")) except: print("정수를 입력하지 않았습니다.") else: # 출력합니다. print("원의 반지름:", number_input_a) print("원의 둘레:", 2*3.14*number_input_a) print("원의 넓이:", 3.14*number_input_a*number_input_a) # ### finally 구문 # - try: # - ㅁㅁㅁㅁ예외가 발생할 가능성이 있는 코드 # - except: # - ㅁㅁㅁㅁ예외가 발생했을 때 실행할 코드 # - else: # - ㅁㅁㅁㅁ예외가 발생하지 않았을 때 실행할 코드 # - finally: # - ㅁㅁㅁㅁ반드시 무조건 실행할 코드 # ##### try except 구문으로 예외 처리 try: # 숫자로 변환 number_input_a = int(input("정수 입력> ")) # 출력합니다. 
print("원의 반지름:", number_input_a) print("원의 둘레:", 2*3.14*number_input_a) print("원의 넓이:", 3.14*number_input_a*number_input_a) except: print("정수를 입력해달라고 했잖아요?!") else: print("예외가 발생하지 않았습니다.") finally: print("일단 프로그램이 어떻게든 끝났습니다.") # try except 구문으로 예외 처리 try: # 숫자로 변환 number_input_a = int(input("정수 입력> ")) # 출력합니다. print("원의 반지름:", number_input_a) print("원의 둘레:", 2*3.14*number_input_a) print("원의 넓이:", 3.14*number_input_a*number_input_a) except: print("정수를 입력해달라고 했잖아요?!") else: print("예외가 발생하지 않았습니다.") finally: print("일단 프로그램이 어떻게든 끝났습니다.") # - try 구문은 단독으로 사용하지 못하며, except나 finally 구문과 함께 사용한다. # - else 구문은 반드시 except 구문 뒤에 사용한다. # ### finally에 대한 오해 # ##### 파일이 제대로 닫혔는지 확인 # + # try except 구문 try: # 파일 열기 file = open("info.txt", "w") # 여러가지 처리 수행 # 파일 닫기 file.close() except Exception as e: print() print("# 파일이 제대로 닫혔는지 확인") print("file.closed:", file.closed) # - # ##### 파일 처리 중간에 예외 발생 # + # try except 구문 try: # 파일 열기 file = open("info.txt", "w") # 여러가지 처리 수행 예외.발생하라() # 파일 닫기 file.close() except Exception as e: print(e) print("# 파일이 제대로 닫혔는지 확인") print("file.closed:", file.closed) # - # ##### finally 구문 사용해 파일 닫기 # + # try except 구문 try: # 파일 열기 file = open("info.txt", "w") # 여러가지 처리 수행 예외.발생하라() except Exception as e: print(e) finally: # 파일 닫기 file.close() print("# 파일이 제대로 닫혔는지 확인") print("file.closed:", file.closed) # - # ##### try except 구문 끝난 후 파일 닫기 # + # try except 구문 try: # 파일 열기 file = open("info.txt", "w") # 여러가지 처리 수행 예외.발생하라() except Exception as e: print(e) finally: # 파일 닫기 file.close() print("# 파일이 제대로 닫혔는지 확인") print("file.closed:", file.closed) # - # ### try 구문 내부에서 return 키워드 사용하는 경우 # ##### finally 구문은 반복문 또는 함수 내부에 있을 때 위력 발휘 # + # test() 함수 선언 def test(): print("test() 함수의 첫 줄입니다.") try: print("try 구문이 실행되었습니다.") return print("try 구문의 return 키워드 뒤 입니다.") except: print("except 구문이 실행되었습니다.") else: print("else 구문이 실행되었습니다.") finally: print("finally 구문이 실행되었습니다.") print("test() 함수의 마지막 줄 입니다") # test() 함수 호출 test() # - # ##### finally 키워드 활용 # + # 
함수 선언 def write_text_file(filename, text): # try except 구문 사용 try: # 파일 열기 file = open(filename, "w") # 파일에 텍스트 입력 file.write(text) # 여러가지 처리 수행 return except Exception as e: print(e) finally: # 파일 닫기 file.close() # 함수 호출 write_text_file("test.txt", "안녕하세요!") # - # ##### 반복문과 함께 사용하는 경우 # + print("프로그램이 시작되었습니다.") while True: try: print("try 구문이 실행되었습니다.") break print("try 구문의 break 키워드 뒤 입니다.") except: print("except 구문이 실행되었습니다.") finally: print("finally 구문이 실행되었습니다.") print("while 반복문의 마지막 줄 입니다") print("프로그램이 종료되었습니다.") # - # ### 핵심포인트 # - 구문오류: 프로그램의 문법적인 오류로 프로그램이 실행조차 되지 않게 만드는 오류 # - 예외(런타임 에러): 프로그램 실행 중 발생하는 오류. try catch 구문 등으로 처리할 수 있다. 반대로 구문 오류는 실행 자체가 안 되므로 try catch 구문으로 처리할 수 없다. # - 기본 예외 처리: 조건문 등을 사용해 예외를 처리하는 기본적인 방법 # - try except 구문: 예외 처리에 특화된 구문 # ### 확인문제 # + # 리스트 내부에서 특정 값이 어디 있는지 확인 시 리스트.index() 사용 numbers = [52, 273, 32, 103, 90, 10, 275] numbers.index(103) # + # 해당 값이 여러 개 있을 경우 첫 번째의 값 출력 numbers = [1,1,1,1,1,1,1] numbers.index(1) # + # 리스트의 없는 값에 접근하려고 할 때 ValueError 예외를 발생한다. numbers = [52, 273, 32, 103, 90, 10, 275] numbers.index(1000000) # + # 문제 numbers = [52, 273, 32, 103, 90, 10, 275] print("# (1) 요소 내부에 있는 값 찾기") print("- {}는 {} 위치에 있습니다.".format(52, numbers.index(52))) print() print("# (2) 요소 내부에 없는 값 찾기") number = 10000 if number in numbers: print("- {}는 {} 위치에 있습니다.".format(number, numbers.index(number))) else: print("- 리스트 내부에 없는 값입니다.") print() print("--- 정상적으로 종료되었습니다. ---") # + # 구문 오류, 예외 발생 예상되는 것 찾기 output = 10 + "개" # 예외, 숫자와 문자열의 연산은 불가능 # - int("안녕하세요") # 예외, int(숫자), str("문자열") cursor.close) # 구문 오류 [1,2,3,4,5][10] # 예외, 인덱스 밖
hongong Python/review_self study Python 06_2021.04.05.mon.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # language: python # name: python38564bitbasecondaf46bf9a5e2cf42a38718902410e977be # --- # + import matplotlib.pyplot as plt import numpy as np from pomegranate import * def plot_data(data, ax, bins=np.linspace(0., 1., 100), name=None): ax.hist(data, bins, alpha=0.5, density=True, label=name) def plot_density(bins, pdf, ax, name=None): assert len(bins) == len(pdf) ax.plot(bins, pdf, label=name) # - # ## sample() and probability() # + d = TrueBetaDistribution(5, 2, x_eps=0) samples = d.sample(60000) bins = np.linspace(0., 1., 100) pdf = d.probability(bins) fig, ax = plt.subplots(1, 1, figsize=(5, 4)) plot_data(samples, ax, bins=bins, name='data') plot_density(bins, pdf, ax, name='density') plt.legend() plt.show() # - # ## from_samples() # + samples = np.random.beta(5, 3, size=50000) d = TrueBetaDistribution.from_samples(samples) bins = np.linspace(0., 1., 100) pdf = d.probability(bins) print(d) fig, ax = plt.subplots(1, 1, figsize=(5, 4)) plot_data(samples, ax, bins=bins, name='data') plot_density(bins, pdf, ax, name='density') plt.legend() plt.show() # - # ## Beta Mixture Model # + c1 = np.random.beta(2, 5, size=20000) c2 = np.random.beta(5, 0.8, size=10000) mixture = np.concatenate((c1, c2), axis=0) model = GeneralMixtureModel.from_samples(TrueBetaDistribution , n_components=2, X=mixture.reshape(-1, 1)) print(model) bins = np.linspace(0., 1., 100) pdf = model.probability(bins) fig, ax = plt.subplots(1, 1, figsize=(5, 4)) plot_data(mixture, ax, bins=bins, name='data') plot_density(bins, pdf, ax, name='mixture_density') plt.legend() plt.show() # -
examples/test_true_beta.ipynb
-- ---
-- jupyter:
--   jupytext:
--     text_representation:
--       extension: .hs
--       format_name: light
--       format_version: '1.5'
--     jupytext_version: 1.14.4
--   kernelspec:
--     display_name: Haskell
--     language: haskell
--     name: haskell
-- ---

-- # Column-based functions
--
-- When building a pipeline, it is often the case that the top-level dataframe is complex and changing, but that the work focuses on transforming _columns_ of this dataframe. Karps provides a way to express and compose complex functions on columns without having to run computations. Behind the scenes, Karps is able to take these functions and translate them into sequences of queries without having to deal with the details of collecting, joining and broadcasting data.
--
-- As an example, we are going to build functions that take a numerical dataset and that produce a _centered_ version (the mean is subtracted) and a _scaled_ version (the variance is scaled to 1). Of course, such basic
-- operations are already built into Spark, but it is instructive to see how one would implement similar
-- functions in practice.
--
-- We will see that thanks to laziness and determinism, Karps is able to reuse some computations, and provide a high-level, lazy API.

:load KarpsDisplays KarpsDagDisplay

:extension DeriveGeneric
:extension FlexibleContexts
:extension OverloadedStrings
:extension GeneralizedNewtypeDeriving
:extension FlexibleInstances
:extension MultiParamTypeClasses

-- +
import Spark.Core.Dataset
import Spark.Core.Context
import Spark.Core.Column
import Spark.Core.ColumnFunctions
import Spark.Core.Functions
import Spark.Core.Row
import Spark.Core.Types
import Spark.Core.Try

import qualified Data.Vector as V
import qualified Data.Text as T
import Data.Text(Text)

import GHC.Generics
-- -

-- We will start with an extremely simple dataset:

let ds = dataset [-1, 1] :: Dataset Int

-- A column of data containing integers

let myData = asCol ds

-- Here is a first function that computes the mean of the data in a column.
-- Giving some names to the elements is not necessary but helps when looking at the DAG of computations.
--
-- Note that we can use all the usual operators (+, /, etc.) even if the computation is lazy.
--
-- Also, note that all the operations are strongly typed: unlike SQL, the casting is almost always explicit
-- since it can lead to loss of precision (or worse) otherwise.

myMean :: Column ref Int -> LocalData Double
myMean col =
  let cnt = asDouble (countCol col) @@ "mean_count"
      s = asDouble (sumCol col) @@ "mean_sum"
  in (s / cnt)

-- Now, if we apply it to our data, the result is rather anti-climactic: we just get a `LocalData` out:

myMean myData

-- Let's build on this to make the centering function, which simply subtracts the mean, and the scaling function, which builds on the other two:
--
-- Note that again, we need to cast the column, it is not going to be done for us.
--
-- Note: due to a Haskell limitation, the `-` operation is replaced by a `.-`. This is because Haskell does not allow to mix different types together (here a column and an observable). This restriction is going to be lifted in the future.

-- +
myCenter :: Column ref Int -> Column ref Double
myCenter col =
  let m = (myMean col) @@ "center_mean"
  in (asDoubleCol col) .- m

myScaler :: Column ref Int -> Column ref Double
myScaler col =
  let cnt = asDouble (countCol col) @@ "count"
      centered = myCenter col
      stdDev = sumCol (centered * centered) / cnt @@ "std_dev"
  in centered ./ stdDev
-- -

-- What does the transform look like if we apply it? Let's run `showGraph` on our simple dataset:

-- make a new scaled column:
let scaled = myScaler myData
-- pack it into a dataset to visualize it:
let out = pack1 scaled
showGraph out

-- This graph is pretty complicated, and you should click around to see what each node corresponds to.
-- A couple of points are noteworthy:
--
--  - Karps handles automatically and seamlessly the broadcasting and the reduction of the variables.
--    In fact, Karps can broadcast pretty much anything that is understood by Spark dataframes.
--
--  - Karps tries to reuse computations as much as possible: even if we did not make any attempt for it,
--    the count of the dataset is reused between the calculation of the mean and of the variance.
--    This is only possible because of laziness.
--
--  - thanks to naming, even if the functions happen to be nested, we can still quickly relate one
--    operator to the function that generated it.
--
-- Now, let's execute all of that:

-- +
conf = defaultConf {
         confEndPoint = "http://10.0.2.2",
         confRequestedSessionName = "col_ops6" }

createSparkSessionDef conf
-- -

exec1Def (collect scaled)

-- As a preview of the next chapter, here is the function to display the RDDs generated by Spark when running this command.
--
-- Each element comes from the graph before. You can see which ones are missing (they have been optimized away
-- by Spark). When you click on a box, you can see the sequence of RDDs that was generated in the process.

displayRDD "0"
notebooks/06_Column_operations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Part 2 - 2D arrays as images # # Now we'll look at how we can view arrays as images. # %load_ext autoreload # %autoreload 2 from dtoolbioimage import Image import numpy as np # Let's create an empty array of zeroes: my_image = np.zeros((256, 256), dtype=np.uint8).view(Image) # Now we can display this: my_image # As you can see, zero is black. For normal images white is 255 and the values in between are grayscale. We can use the methods we've already learned for setting array values to "draw" on our array/image: my_image[20:50,20:50] = 255 my_image my_image[20:50,70:100] = 127 my_image # The image is still a matrix, so functions that operate on matrix like transpose will work: transposed = np.transpose(my_image) transposed # ### Exercises # # 1. Draw a hollow box. Here's an empty canvas to get you started: hollow_box = np.zeros((256, 256), dtype=np.uint8).view(Image) # 2. Can you draw a line from the top left of the image to the bottom right? Think about how you might do this. diagonal_line = np.zeros((256, 256), dtype=np.uint8).view(Image) # Hint: This will need a bit more programming! You'll need a loop that looks something like this: for x in range(256): print(x), # Do something here (modify the statement above! # ### Bonus exercise # # Draw a checkerboard. Here you'll need more than one loop... checkerboard = np.zeros((256, 256), dtype=np.uint8).view(Image) # Extra bonus point - can you make the individual squares bigger?
examples/Part3New.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:bodo] # language: python # name: conda-env-bodo-py # --- # # Create IPyParallel Profile # # Borrowed from https://gist.github.com/basnijholt/c375ea2d1df6702492b619e0873d6c7c # # And https://docs.bodo.ai/latest/source/ipyparallel.html # # And lotsa lotsa googling. # # Overtime this should be automated with QHub HPC and provide better slurm submission scripts # ! ipython profile create --parallel --profile=slurm # + # %%writefile ~/.ipython/profile_slurm/ipcontroller_config.py c.HubFactory.ip = u'*' c.HubFactory.registration_timeout = 600 # + # %%writefile ~/.ipython/profile_slurm/ipengine_config.py c.IPEngineApp.wait_for_url_file = 300 c.EngineFactory.timeout = 300 # + #HOSTFILE=$(pwd)/hostfile.$SLURM_JOB_ID #scontrol show hostnames > $HOSTFILE # + # %%writefile ~/.ipython/profile_slurm/ipcluster_config.py c.IPClusterStart.controller_launcher_class = 'SlurmControllerLauncher' c.IPClusterEngines.engine_launcher_class = 'SlurmEngineSetLauncher' # this is based on 4GB per core c.SlurmEngineSetLauncher.batch_template = """#!/bin/sh #SBATCH --ntasks={n} #SBATCH --job-name=ipy-engine- #SBATCH --mem=4G #SBATCH --cpus-per-task=1 HOSTFILE=$(pwd)/hostfile.$SLURM_JOB_ID scontrol show hostnames > $HOSTFILE mpiexec -n {n} -machinefile $HOSTFILE python -m ipyparallel.engine --mpi --profile-dir ~/.ipython/profile_slurm --cluster-id '' """
files/examples/runonce-ipyparallel-setup.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.11 64-bit (''ldpc'': conda)'
#     name: python3
# ---

# # Decoder setup
#
# To decode using belief propagation, first load an instance of the `ldpc.bp_decoder` class.

# +
import numpy as np
from ldpc.codes import rep_code
from ldpc import bp_decoder

H = rep_code(3)  # parity check matrix for the length-3 repetition code
n = H.shape[1]   # the codeword length

bpd = bp_decoder(
    H,  # the parity check matrix
    error_rate=0.1,  # the error rate on each bit
    max_iter=n,  # the maximum iteration depth for BP
    bp_method="product_sum",  # BP method. The other option is `minimum_sum'
    channel_probs=[None]  # per-bit channel probabilities; overrides the error rate when set
)
# -

# # Received vector decoding
#
# Given a corrupted codeword, `bp_decoder.decode` will provide an estimate of its unerrored form. For example, consider the case where we are encoding via a three-bit repetition code:

codeword = np.array([1, 1, 1])

# If the above codeword is subject to an error on its first bit the received vector is given by

received_vector = np.array([0, 1, 1])

# The above vector can be corrected using `bp_decoder.decode` as follows:

# +
decoded_codeword = bpd.decode(received_vector)
print(decoded_codeword)
# -

# # Syndrome decoding
# In syndrome decoding, the error syndrome is input to the `bp_decoder.decode` function. This is useful in settings where the codeword cannot be directly measured, e.g. in quantum error correction. The output of the syndrome recovery is an estimate of the error.

error = np.array([0, 1, 0])
syndrome = H@error % 2
decoding = bpd.decode(syndrome)
print(f"Error: {error}")
print(f"Syndrome: {syndrome}")
print(f"Decoding: {decoding}")

# # Asymmetric error channels
# If the code bits are subject to different error rates, a channel probability vector can be provided instead of the error rate.

# +
bpd = bp_decoder(
    H,
    max_iter=n,
    bp_method="product_sum",
    channel_probs=[0.1, 0, 0.1]  # per-bit channel probabilities; overrides the error rate
)

error = np.array([1, 0, 1])
syndrome = H@error % 2
decoding = bpd.decode(syndrome)
print(f"Error: {error}")
print(f"Syndrome: {syndrome}")
print(f"Decoding: {decoding}")
# -

# # Example: error correction over the binary symmetric channel

# +
import numpy as np
from ldpc.codes import rep_code
from ldpc import bp_decoder

n = 13
error_rate = 0.3
runs = 5
H = rep_code(n)

# BP decoder class. Make sure this is defined outside the loop
bpd = bp_decoder(H, error_rate=error_rate, max_iter=n, bp_method="product_sum")

error = np.zeros(n).astype(int)  # error vector

for _ in range(runs):
    # Draw an i.i.d. error pattern from the binary symmetric channel.
    for i in range(n):
        if np.random.random() < error_rate:
            error[i] = 1
        else:
            error[i] = 0
    syndrome = H@error % 2  # calculates the error syndrome
    print(f"Error: {error}")
    print(f"Syndrome: {syndrome}")
    decoding = bpd.decode(syndrome)
    # Bug fix: this previously printed `error` again instead of the decoder's estimate.
    print(f"Decoding: {decoding}\n")
docs/source/bp_decoding_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:ml] # language: python # name: conda-env-ml-py # --- # # Cross validation # So far, we have train one model with one parameter setting. Ususally we want to compare different models. We don't want to use our test set for parameter optimization, so we can do yet another split, splitting the training data into a training set and validation set, and use the latter for paramater optimization. A more sophisticated way to do this is *cross validation*, here we split our data into N parts, for example `X1, X2, X3`. Then we use `X1+X2` for training and `X3` for validating, `X1+X3` for training and `X2` for validating and `X2+X3` for training and `X1` for validating. # # ### Exercise # - Why do we not want to use the test set for parameter optimization? # - What are advantages or disadvantages of cross validation over a single train-validation split? # # Luckily, cross validation is really easy in scikit-learn and requires little coding, especially if we already have the pipeline as we had earlier. Let's make that pipeline again from sklearn.preprocessing import MinMaxScaler from sklearn.neighbors import KNeighborsClassifier from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV import pandas as pd pipe = Pipeline([ ('scale', MinMaxScaler()), ('model', KNeighborsClassifier()) # Now we leave out the parameter that we are going to tune! ]) # Let's see what parameters we could in theory tune: pipe.get_params() # There are many parameters here, but the one we are interested is the number of neighbors, denoted here as: `model__n_neighbors` (with default value 5). 
#
# We give all the parameters we want to tune, with a list of the possible values, in the `param_grid`:

model = GridSearchCV(estimator=pipe, cv = 3,
                     param_grid = {
                         'model__n_neighbors': [1,2,3,4,5,6,7,8,9,10]
                     })

# Read in our training data again
weather_train = pd.read_csv('data/weather_train.csv')
features = weather_train.columns[1:]
X = weather_train[features]
y = weather_train['MONTH']

# Fit the model
model.fit(X,y)

# We can inspect the results as follows:

cv_results = pd.DataFrame(model.cv_results_)
cv_results

# This is a lot of information, but it basically tells us, for each parameter value, the score on each cross-validation split. By default, this score is the mean accuracy, but we could provide a different metric here.
#
# We see that k=4 gives the best performance, although the scores are quite far apart. We can plot the scores for each value of k:

import matplotlib.pyplot as plt
plt.scatter(cv_results['param_model__n_neighbors'], cv_results['mean_test_score'])
plt.errorbar(cv_results['param_model__n_neighbors'], cv_results['mean_test_score'],
             yerr=cv_results['std_test_score'], fmt=' ')

# ### Exercise: different models
# 1. Look at the sklearn documentation and choose a different model. Create a pipeline, looping over different parameters. What do you find? Can you improve over the KNeighbors Classifier?
#
# 2. Create visualizations to understand the relationship between the parameter values and the model performance. What do you learn from the plots?
#
# 3. Apply your best model to the test set. Are you confident that the model works well on new data?
#
# # Bonus exercise 1:
# Look at the documentation of [model evaluation in scikit-learn](https://scikit-learn.org/stable/modules/model_evaluation.html#). Are there other metrics than accuracy that make sense to calculate for this problem? Apply this metric and explain how you interpret it.
# # Bonus exercise 2: # Some machine learning models, such as Random Forests or Linear Regression, allow you to inspect the *feature importance*. Look into the scikit-learn documentation for one of these models and inspect the feature importance. Which feature is the most important to the model?
notebooks/4-CrossValidation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Three Qubit Circuits

# ###### Latest python notebook is available [here](https://github.com/q4quanta/quantum-circuits)

# Useful additional packages
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
from math import pi

# +
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from qiskit.tools.visualization import circuit_drawer
from qiskit.quantum_info import state_fidelity
from qiskit import BasicAer

# Unitary simulator: returns the full unitary matrix of the circuit
# instead of measurement counts.
backend = BasicAer.get_backend('unitary_simulator')
# -

# ### U - gates

# #### 3. Three qubit circuit

# Base states: |000>,|001>,|010>,|011>,|100>,|101>,|110>,|111>
#
# Note: in the tensor-product headings below, qubit q[0] is the rightmost
# factor (a U on q[0] is I x I x U), matching the 3.1-3.3 examples.

# ###### 3.1. $I \otimes I \otimes U$

q = QuantumRegister(3)
qc = QuantumCircuit(q)
qc.u3(pi/2,pi/2,pi/2,q[0])
qc.draw(output='mpl')

job = execute(qc, backend)
job.result().get_unitary(qc, decimals=3)

# ###### 3.2. $I \otimes U \otimes I$

q = QuantumRegister(3)
qc = QuantumCircuit(q)
qc.u3(pi/2,pi/2,pi/2,q[1])
qc.draw(output='mpl')

job = execute(qc, backend)
job.result().get_unitary(qc, decimals=3)

# ###### 3.3. $U \otimes I \otimes I$

q = QuantumRegister(3)
qc = QuantumCircuit(q)
qc.u3(pi/2,pi/2,pi/2,q[2])
qc.draw(output='mpl')

job = execute(qc, backend)
job.result().get_unitary(qc, decimals=3)

# ###### 3.4. $I \otimes U \otimes U$

q = QuantumRegister(3)
qc = QuantumCircuit(q)
qc.u3(pi/2,pi/2,pi/2,q[0])
qc.u3(pi/2,pi/2,pi/2,q[1])
qc.draw(output='mpl')

job = execute(qc, backend)
job.result().get_unitary(qc, decimals=3)

# ###### 3.5. $U \otimes U \otimes U$

q = QuantumRegister(3)
qc = QuantumCircuit(q)
qc.u3(pi/2,pi/2,pi/2,q[0])
qc.u3(pi/2,pi/2,pi/2,q[1])
qc.u3(pi/2,pi/2,pi/2,q[2])
qc.draw(output='mpl')

job = execute(qc, backend)
job.result().get_unitary(qc, decimals=3)

# ###### 3.6. $(U \times U) \otimes (U \times U) \otimes (U \times U)$
#
# Two successive u3 gates on each qubit (the product of two single-qubit
# unitaries per tensor factor).

q = QuantumRegister(3)
qc = QuantumCircuit(q)
qc.u3(pi/2,pi/2,pi/2,q[0])
qc.u3(pi/2,pi/2,pi/2,q[0])
qc.u3(pi/4,pi/4,pi/4,q[1])
qc.u3(3*pi/4,3*pi/4,3*pi/4,q[1])
qc.u3(pi/6,pi/6,pi/6,q[2])
qc.u3(5*pi/6,5*pi/6,5*pi/6,q[2])
qc.draw(output='mpl')

job = execute(qc, backend)
job.result().get_unitary(qc, decimals=3)
6-Three-qubit-circuit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import imgaug import numpy as np import os import pandas as pd import time import pickle import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import cv2 from tqdm.notebook import tqdm from IPython.display import display, HTML from pathlib import Path #image augmentation: import PIL from PIL import Image from imgaug import augmenters as iaa import imgaug.augmenters as augs # - IMAGE_SIZE = 96 import matplotlib as mpl mpl.rcParams['figure.dpi'] = 300 # Set device to GPU if available. device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device class BaselineTokenCNN(nn.Module): def __init__(self, num_classes): super(BaselineTokenCNN, self).__init__() self.conv1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=7) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=5) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3) self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2) self.fc1 = nn.Linear(16 * 9 * 9, 600) self.fc2 = nn.Linear(600, 200) self.fc3 = nn.Linear(200, num_classes) def forward(self, x): x = x.float() x = self.pool1(F.relu(self.conv1(x))) x = self.pool2(F.relu(self.conv2(x))) x = self.pool3(F.relu(self.conv3(x))) x = x.view(-1, 16 * 9 * 9) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x # + [markdown] id="7v80WZkqFPlo" # # Identifying Expressions # + id="8YFiiwzwFPlq" import cv2 class ExpressionImageDataset(Dataset): """ An expression-level dataset. 
""" def __init__(self, pickle_file, transform=None): """ Args: pickle_file (string): Path to dataset pickle file. transform (callable, optional): Optional transform to be applied on a sample. """ with open(pickle_file, 'rb') as f: self.df_data = pd.DataFrame(pickle.load(f)) # print(self.df_data) self.transform = transform def __len__(self): return len(self.df_data) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() row = self.df_data.iloc[idx] traces_data = row['traces_data'] img_path = row['img_path'] tokens = row['tokens'] latex = row['latex'] # CV2 will read the image with white being 255 and black being 0, but since # our token-level training set uses binary arrays to represent images, we # need to binarize our image here as well. image_raw = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) image_binarized = cv2.threshold(image_raw, 127, 255, cv2.THRESH_BINARY)[1] image_bitmap = image_binarized / 255.0 sample = { 'image': image_binarized, 'image_bitmap': image_bitmap, 'traces_data': traces_data, 'tokens': tokens, 'latex': latex } if self.transform: sample = self.transform(sample) return sample # + id="wFyDl84OFPly" train_exp_path = os.path.join(os.getcwd(), 'data', 'train=2011,2013_test=2012', 'train', 'train.pickle') test_exp_path = os.path.join(os.getcwd(), 'data', 'train=2011,2013_test=2012', 'test', 'test.pickle') # print('train') train_exp_set = ExpressionImageDataset(train_exp_path) # print('test') test_exp_set = ExpressionImageDataset(test_exp_path) # + id="BUUn6dTXFPl8" def image_to_components(img): """ Identifies the connected components (groups of black pixels) within an image. Returns a list of tuples. The first element of each tuple is the component image, and the second element is a tuple representing the bounding box of the component (relative to the original image dimensions). """ # Invert the image because for CV2, foreground objects should be white, and the background black. 
img_flipped = cv2.bitwise_not(img) # cv2.connectedComponents returns a mask of the input where each element is a number from 0 # to num_labels. All the 0's are considered the same component, 1's together, so on and # so forth. num_labels, labeled_img = cv2.connectedComponents(img_flipped) components = [] # Skip the first component because it will be the background. for l in range(1, num_labels): mask = np.zeros(labeled_img.shape, dtype=np.uint8) mask[labeled_img == l] = 255 rect = cv2.boundingRect(mask) # # Draw bounding box to make sure it's correct. # cv2.rectangle(mask, (x, y), (x + w, y + h), (36,255,12), 2) mask = cv2.bitwise_not(mask) components.append((mask, rect)) components.sort(key=lambda c: (c[1][0], c[1][1])) # for mask, rect in components: # plt.imshow(mask, cmap='gray') # plt.show() return components # + id="-f0h-Ud4FPmJ" def process_component(image, bounding_rect): """ Processes a component image so it can be fed into the CNN for as input. It crops the component, pads the cropped image so that it's square, and then resizes it to the shape the CNN expects. Args: image - Image containing a single component. bounding_rect - Tuple defining the bounding box of the component. """ x, y, w, h = bounding_rect # Crop the image to just the bounding box. image = image[y: y + h, x: x + w] # Pad the image so it's square. if w > h: pad = int((w - h) / 2) image = cv2.copyMakeBorder(image, top=pad, bottom=pad, left=0, right=0, borderType=cv2.BORDER_CONSTANT, value=255) else: pad = int((h - w) / 2) image = cv2.copyMakeBorder(image, top=0, bottom=0, left=pad, right=pad, borderType=cv2.BORDER_CONSTANT, value=255) # Scale down to the correct size. Seems like cv2.INTER_AREA is the best # interpolation method for maintaining stroke wdith. image = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA) # During the resize some pixels will get scaled to be between 0 (black) and 255 (white). 
# In order to maintain stroke width, we set all non-white pixels to full black. # If we don't do this the resulting image ends up looking rather sparse. # We also convert the image to binary so that it's ready for the model. image = (image == 255).astype(int) return image # + colab={"base_uri": "https://localhost:8080/", "height": 591} id="_llurYHxFPmR" outputId="969e4ba4-75f6-40da-8843-d1862b022226" # Example code. test_img = train_exp_set[0]['image'] print(test_img.shape) plt.imshow(test_img, cmap='gray') plt.show() components = image_to_components(test_img) for img, rect in components: print(rect) out = process_component(img, rect) plt.imshow(out, cmap='gray') plt.show() # - # # Testing Expression-Level Detection # + test_traces_data = train_exp_set[2]['traces_data'] def get_traces_data_stats(traces_data): all_coords = [] for pattern in traces_data: for trace in pattern['trace_group']: all_coords.extend(trace) all_coords = np.array(all_coords) x_min, y_min = np.min(all_coords, axis=0) width, height = np.max(all_coords, axis=0) - [x_min, y_min] + 1 return x_min, y_min, width, height def get_trace_group_bounding_box(trace_group): all_coords = [] for t in trace_group: all_coords.extend(t) all_coords = np.array(all_coords) x_min, y_min = np.min(all_coords, axis=0) width, height = np.max(all_coords, axis=0) - [x_min, y_min] + 1 return x_min, y_min, width, height def draw_traces_data(traces_data): im_x_min, im_y_min, width, height = get_traces_data_stats(traces_data) # Scale the image down. max_dim = 1000 # Maximum dimension pre-pad. sf = 1000 / max(height, width) scaled_height = int(height * sf) scaled_width = int(width * sf) image = np.ones((scaled_height, scaled_width)) # Pad the scaled image. We pad the image first before we draw the traces, because it's possible that # for certain stroke widths, part of the expression would be cut off. Adding (enough) padding before # drawing prevents this issue. 
pad_factor = 0.05 pad_amount = int(max(pad_factor * scaled_width, pad_factor * scaled_height)) pad_width = ((pad_amount, pad_amount), (pad_amount, pad_amount)) image = np.pad(image, pad_width=pad_amount, mode='constant', constant_values=1) # Draw the traces on the scaled/padded image. for pattern in traces_data: for trace in pattern['trace_group']: trace = np.array(trace) trace -= np.array([im_x_min, im_y_min]) trace = (trace.astype(np.float64) * sf).astype(int) trace += int(pad_factor * scaled_width) for coord_idx in range(1, len(trace)): cv2.line(image, tuple(trace[coord_idx - 1]), tuple(trace[coord_idx]), color=(0), thickness=5) # Binarize. image = (image > 0).astype(int) # Open CV wants images to be between 0 and 255. image *= 255 image = image.astype(np.uint8) boxes = [] # Get bounding boxes. for pattern in traces_data: trace_group = pattern['trace_group'] rect_x_min, rect_y_min, rect_width, rect_height = get_trace_group_bounding_box(trace_group) rect_x_min = (rect_x_min - im_x_min) * sf + pad_width[1][0] rect_y_min = (rect_y_min - im_y_min) * sf + pad_width[0][0] rect_width *= sf rect_height *= sf # Convert bounding box coords to integers. rect_x_min = int(rect_x_min) rect_y_min = int(rect_y_min) rect_width = int(rect_width) rect_height = int(rect_height) # Draw the rectangle. # image = cv2.rectangle(image, # (int(rect_x_min), int(rect_y_min)), # (int(rect_x_min + rect_width), int(rect_y_min + rect_height)), # (0), # 5) boxes.append((rect_x_min, rect_y_min, rect_width, rect_height)) # plt.imshow(image, cmap='gray') # plt.show() return image, boxes image, boxes = draw_traces_data(test_traces_data) plt.imshow(image, cmap='gray') print(image.shape) print(boxes) # + def intersect_area(rect1, rect2): """ rect1 and rect2 are tuples of the form (x, y, width, height). 
""" r1_x_min, r1_y_min, r1_width, r1_height = rect1 r2_x_min, r2_y_min, r2_width, r2_height = rect2 dx = min(r1_x_min + r1_width, r2_x_min + r2_width) - max(r1_x_min, r2_x_min) dy = min(r1_y_min + r1_height, r2_y_min + r2_height) - max(r1_y_min, r2_y_min) if (dx>=0) and (dy>=0): return dx*dy return 0 def get_iou(rect1, rect2): rect1_area = rect1[2] * rect1[3] rect2_area = rect2[2] * rect2[3] intersect = intersect_area(rect1, rect2) union = rect1_area + rect2_area - intersect return intersect / union # + def invert(img): return ~img def segmentation_erosion(img, k=5): kernel = np.ones((k,k),np.uint8) img = invert(img) img = cv2.erode(img, kernel, iterations = 1) img = invert(img) return img def segmentation_dilation(img, k=5): kernel = np.ones((k,k),np.uint8) img = invert(img) img = cv2.dilate(img, kernel, iterations = 1) img = invert(img) return img def segmentation_opening(img, k=5): kernel = np.ones((k,k),np.uint8) img = invert(img) opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel) img = invert(img) return img # + img, _ = draw_traces_data(test_exp_set[10]['traces_data']) k = 4 plt.imshow(img, cmap='gray') plt.title('Original') plt.show() plt.imshow(segmentation_erosion(img, k=k), cmap='gray') plt.title(f'Erosion(k={k})') plt.show() # plt.imshow(segmentation_dilation(img, k=k), cmap='gray') # plt.title(f'Dilation(k={k})') # plt.show() # plt.imshow(segmentation_opening(img, k=k), cmap='gray') # plt.title(f'Opening(k={k})') # plt.show() # + colab={"referenced_widgets": ["ba358a119274444fbd817d6c4e356b76"]} id="kUfZzG3HFPmd" outputId="12fa1037-335c-4e96-db69-7ea12f59081c" from collections import defaultdict def evaluate(model, classes, morph_transform=None): num_exp_correct = 0 num_tokens_wrong_dict = {} all_precision = defaultdict(list) all_recall = defaultdict(list) correct_per_token = defaultdict(lambda: defaultdict(int)) total_per_token = defaultdict(lambda: defaultdict(int)) # We evaluate on test_exp_set, which is generated only using data from 2012. 
This is to avoid data leakage # with the token-level training set, which is generated on data from 2011 and 2013. for data in tqdm(test_exp_set, leave=False): image, true_boxes = draw_traces_data(data['traces_data']) # Apply a morphological transform to help with segmentation, e.g. erosion/dilation. if morph_transform: image = morph_transform(image) # Get the components present in the image. components = image_to_components(image) # Generate 96x96 input images for each component. component_images_processed = [process_component(comp_img, comp_rect) for comp_img, comp_rect in components] # Construct CNN input tensor. features_numpy = np.array(component_images_processed) features_numpy = np.expand_dims(features_numpy, 1) features_tensor = torch.from_numpy(features_numpy).to(device) outputs = model(features_tensor) _, predicted = torch.max(outputs.data, dim=1) # For classification. predicted_tokens = classes[predicted.cpu()] actual_tokens = data['tokens'] # print('actual_tokens:', actual_tokens) # For bounding box quality. predicted_boxes = [comp_rect for comp_img, comp_rect in components] # Calculate average precision at different iou thresholds. 
iou_thresholds = [0.05, 0.5, 0.80] for thresh in iou_thresholds: true_pos = 0 false_pos = 0 true_neg = 0 false_neg = 0 # if thresh not in correct_per_token: # correct_per_token[thresh] = {} # if thresh not in total_per_token: # total_per_token[thresh] = {} # total_per_token[thresh] = {} for true_box, true_token in zip(true_boxes, actual_tokens): for pred_box, pred_token in zip(predicted_boxes, predicted_tokens): iou = get_iou(true_box, pred_box) if iou >= thresh: if pred_token == true_token: true_pos += 1 correct_per_token[thresh][true_token] += 1 else: false_pos += 1 for true_token in actual_tokens: total_per_token[thresh][true_token] += 1 curr_precision = true_pos / (true_pos + false_pos) if true_pos + false_pos > 0 else 0 curr_recall = true_pos / len(actual_tokens) all_precision[thresh].append(curr_precision) all_recall[thresh].append(curr_recall) # Calculate number of tokens predicted correctly. num_tokens_correct = 0 for p, a in zip(predicted_tokens, actual_tokens): p = p.replace('\\', '') a = a.replace('\\', '') if p == a: num_tokens_correct += 1 num_tokens_correct += np.max(len(actual_tokens) - len(predicted_tokens), 0) num_tokens_wrong = len(actual_tokens) - num_tokens_correct if num_tokens_wrong == 0: num_exp_correct += 1 num_tokens_wrong_dict[num_tokens_wrong] = num_tokens_wrong_dict.get(num_tokens_wrong, 0) + 1 results = [] for thresh in iou_thresholds: results.append({ 'IoU Thresh.': thresh, 'Precision': np.mean(all_precision[thresh]), 'Recall': np.mean(all_recall[thresh]) }) # print(f'Average precision at {thresh}: {np.mean(all_precision[thresh])}') # print(f'Average recall at {thresh}: {np.mean(all_recall[thresh])}') # print() precision_per_token = {} for thresh in iou_thresholds: precision_per_token[thresh] = {} for token in total_per_token[thresh].keys(): precision_per_token[thresh][token] = correct_per_token[thresh].get(token, 0) / total_per_token[thresh][token] precision_per_token_rows = [] for token in classes: row = {'token': token} for thresh 
in iou_thresholds: if token in precision_per_token[thresh]: row[thresh] = precision_per_token[thresh][token] else: row[thresh] = None precision_per_token_rows.append(row) return results, precision_per_token_rows # true_image = np.array(image, copy=True) # for box in true_boxes: # rect_x_min, rect_y_min, rect_width, rect_height = box # true_image = cv2.rectangle(true_image, # (int(rect_x_min), int(rect_y_min)), # (int(rect_x_min + rect_width), int(rect_y_min + rect_height)), # (0), # 5) # pred_image = np.array(image, copy=True) # for box in predicted_boxes: # rect_x_min, rect_y_min, rect_width, rect_height = box # pred_image = cv2.rectangle(pred_image, # (int(rect_x_min), int(rect_y_min)), # (int(rect_x_min + rect_width), int(rect_y_min + rect_height)), # (0), # 5) # print('Image with true boxes:') # plt.imshow(true_image, cmap='gray') # plt.show() # print('Image with predicted boxes:') # plt.imshow(pred_image, cmap='gray') # plt.show() # for thresh in iou_thresholds: # print(f'Precision at {thresh}: {all_precision[thresh]}') # print(f'Recall at {thresh}: {all_recall[thresh]}') # + def run_expression_level_experiment(name, model_name, model_path, int_to_token_path, morph_transform): model = BaselineTokenCNN(num_classes=101) model.load_state_dict(torch.load(model_path)) model.to(device) model.eval() with open(int_to_token_path, 'rb') as f: int_to_token = pickle.load(f) classes = np.array(list(int_to_token.values())) results, prec_per_token = evaluate(model, classes, morph_transform=morph_transform) print(f'Model \'{model_name}\' Results:') df_results = pd.DataFrame(results) display(df_results) print() # print('Precision per token:') df_prec_per_token = pd.DataFrame(prec_per_token) # display(df_prec_per_token) save_dir = os.path.join(os.getcwd(), 'experiments', 'full_expression', name, model_name) Path(save_dir).mkdir(parents=True, exist_ok=True) df_results.to_csv(os.path.join(save_dir, 'results.csv'), index=False) df_prec_per_token.to_csv(os.path.join(save_dir, 
'prec_per_token.csv'), index=False) # - models = [ { 'name': 'CNN-I', 'model_path': os.path.join(os.getcwd(), 'experiments', 'token_cnn', 't=5', 'checkpoints', 'epoch=6_valLoss=0.5558.pt'), 'int_to_token_path': os.path.join(os.getcwd(), 'data', 'tokens','b=96_train=2011,2013_test=2012_c=all_t=5','int_to_token.pickle') }, { 'name': 'CNN-I-R', 'model_path': os.path.join(os.getcwd(), 'experiments', 'token_cnn', 't=5_rotate', 'checkpoints', 'epoch=12_valLoss=0.1704.pt'), 'int_to_token_path': os.path.join(os.getcwd(), 'data', 'tokens','b=96_train=2011,2013_test=2012_c=all_t=5','int_to_token.pickle') }, { 'name': 'CNN-I-S', 'model_path': os.path.join(os.getcwd(), 'experiments', 'token_cnn', 't=5_shear', 'checkpoints', 'epoch=10_valLoss=0.1434.pt'), 'int_to_token_path': os.path.join(os.getcwd(), 'data', 'tokens','b=96_train=2011,2013_test=2012_c=all_t=5','int_to_token.pickle') }, { 'name': 'CNN-II', 'model_path': os.path.join(os.getcwd(), 'experiments', 'token_cnn', 't=3,5,7', 'checkpoints', 'epoch=6_valLoss=0.205.pt'), 'int_to_token_path': os.path.join(os.getcwd(), 'data', 'tokens','b=96_train=2011,2013_test=2012_c=all_t=3,5,7','int_to_token.pickle') }, { 'name': 'CNN-II-R', 'model_path': os.path.join(os.getcwd(), 'experiments', 'token_cnn', 't=3,5,7_rotate', 'checkpoints', 'epoch=9_valLoss=0.1219.pt'), 'int_to_token_path': os.path.join(os.getcwd(), 'data', 'tokens','b=96_train=2011,2013_test=2012_c=all_t=3,5,7','int_to_token.pickle') }, { 'name': 'CNN-II-S', 'model_path': os.path.join(os.getcwd(), 'experiments', 'token_cnn', 't=3,5,7_shear', 'checkpoints', 'epoch=12_valLoss=0.1077.pt'), 'int_to_token_path': os.path.join(os.getcwd(), 'data', 'tokens','b=96_train=2011,2013_test=2012_c=all_t=3,5,7','int_to_token.pickle') }, { 'name': 'CNN-III', 'model_path': os.path.join(os.getcwd(), 'experiments', 'token_cnn', 't=1,3,5,7,9', 'checkpoints', 'epoch=0_valLoss=4.929.pt'), 'int_to_token_path': os.path.join(os.getcwd(), 'data', 
'tokens','b=96_train=2011,2013_test=2012_c=all_t=1,3,5,7,9','int_to_token.pickle') } ] def run_experiment_set(exp_name, morph_transform): for m in models: run_expression_level_experiment(name=exp_name, model_name=m['name'], model_path=m['model_path'], int_to_token_path=m['int_to_token_path'], morph_transform=morph_transform) # # No Transform run_experiment_set(exp_name='no_transform', morph_transform=None) # # Dilation run_experiment_set(exp_name='dilation_k=3', morph_transform=lambda img: segmentation_dilation(img, k=3)) run_experiment_set(exp_name='dilation_k=5', morph_transform=lambda img: segmentation_dilation(img, k=5)) run_experiment_set(exp_name='dilation_k=7', morph_transform=lambda img: segmentation_dilation(img, k=7)) run_experiment_set(exp_name='dilation_k=9', morph_transform=lambda img: segmentation_dilation(img, k=9)) run_experiment_set(exp_name='dilation_k=11', morph_transform=lambda img: segmentation_dilation(img, k=11)) # # Erosion run_experiment_set(exp_name='erosion_k=3', morph_transform=lambda img: segmentation_erosion(img, k=3)) run_experiment_set(exp_name='erosion_k=5', morph_transform=lambda img: segmentation_erosion(img, k=5)) run_experiment_set(exp_name='erosion_k=7', morph_transform=lambda img: segmentation_erosion(img, k=7)) # # Opening run_experiment_set(exp_name='opening_k=3', morph_transform=lambda img: segmentation_opening(img, k=3)) run_experiment_set(exp_name='opening_k=5', morph_transform=lambda img: segmentation_opening(img, k=5)) run_experiment_set(exp_name='opening_k=7', morph_transform=lambda img: segmentation_opening(img, k=7)) run_experiment_set(exp_name='opening_k=9', morph_transform=lambda img: segmentation_opening(img, k=9)) run_experiment_set(exp_name='opening_k=11', morph_transform=lambda img: segmentation_opening(img, k=11))
expression_level_experiments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <center>
# <b>CompEcon Toolbox:</b>
# <div style="font-size:175%;color:white; background-color: #0064b0;">DemApp01</div>
# <div style="font-size:250%;color:white; background-color: #0064b0;">Approximating functions on $R$</div>
#
# <b><NAME>, PhD</b>
# <br><br>
#
# </center>
#
# This demo is based on the original Matlab demo accompanying the <a href="https://mitpress.mit.edu/books/applied-computational-economics-and-finance">Computational Economics and Finance</a> 2001 textbook by <NAME> and <NAME>.
#
#
# <i>Last updated: 2020-Sep-08</i>

# ## About

# This demo illustrates how to use CompEcon Toolbox routines to construct and operate with an approximant for a function defined on an interval of the real line.
#
# In particular, we construct an approximant for $f(x)=\exp(-x)$ on the interval $[-1,1]$. The function used in this illustration possesses a closed form, which will allow us to measure approximation error precisely. Of course, in practical applications, the function to be approximated will not possess a known closed form.
#
# In order to carry out the exercise, one must first code the function to be approximated at arbitrary points.
# Let's begin:

# ## Initial tasks

if 'google.colab' in str(get_ipython()):
    print("This notebook is running on Google Colab. Installing the compecon package.")
    # !pip install compecon

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from compecon import BasisChebyshev, BasisSpline, demo

# ### Defining the functions

# Function to be approximated and derivatives

def f(x): return np.exp(-x)
def d1(x): return -np.exp(-x)
def d2(x): return np.exp(-x)

# Set degree and domain of interpolation

n, a, b = 10, -1, 1

# ### Choose an approximation scheme.
# #### Chebyshev interpolation

# In this case, let us use an order 10 Chebychev approximation scheme:

F = BasisChebyshev(n, a, b, f=f)

# One may now evaluate the approximant at any point x calling the basis:

x = 0
F(x)

# ... one may also evaluate the approximant's first and second derivatives at x:

print(f'1st derivative = {F(x, 1):.4f}, 2nd derivative = {F(x, 2):.4f}')

# ... and one may even evaluate the approximant's definite integral between the left endpoint a and x:

F(x, -1)

# ### Compare analytic and numerical computations

# +
# Use the fully qualified option key: the bare 'precision' pattern is
# ambiguous in pandas >= 2.0 (it also matches 'styler.format.precision')
# and raises an OptionError there; 'display.precision' works everywhere.
pd.set_option('display.precision', 8)

pd.DataFrame({
    'Numerical': [F(x), F(x, 1), F(x, 2), F(x, -1)],
    'Analytic': [f(x), d1(x), d2(x), np.exp(1) - 1]},
    index=['Function', 'First Derivative', 'Second Derivative', 'Definite Integral']
)
# -

# ### Plots of approximation errors

# One may evaluate the accuracy of the Chebychev polynomial approximant by computing the approximation error on a highly refined grid of points:

# +
nplot = 501                       # number of grid nodes
xgrid = np.linspace(a, b, nplot)  # generate refined grid for plotting

figures = []                      # to save all figures


def approx_error(true_func, appr_func, d=0, title=''):
    """Plot the pointwise error of appr_func's d-th derivative against true_func on xgrid.

    Args:
        true_func: callable giving the exact values to compare against
            (pass the analytic d-th derivative when d > 0).
        appr_func: basis approximant; appr_func(x, d) evaluates its d-th derivative.
        d: derivative order passed to the approximant (default 0, the function itself).
        title: plot title.
    """
    fig, ax = plt.subplots()
    ax.hlines(0, a, b, 'k', linestyle='--', linewidth=2)
    ax.plot(xgrid, appr_func(xgrid, d) - true_func(xgrid))
    ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax.set(title=title, xlabel='x', ylabel='Error')
    figures.append(plt.gcf())  # keep a handle so all figures can be saved later
# -

# #### Plot function approximation error

approx_error(f, F, title='Chebychev Approximation Error - Function')

# The plot indicates that an order 10 Chebychev approximation scheme produces approximation errors no bigger in magnitude than $6\times10^{-10}$. The approximation error exhibits the "Chebychev equioscillation property", oscillating relatively uniformly throughout the approximation domain.
#
# This commonly occurs when the function being approximated is very smooth, as is the case here, but should not be expected when the function is not smooth.
# Further notice how the approximation error is exactly 0 at the approximation nodes --- which is true by construction.

# #### Plot first derivative approximation error

approx_error(d1,F,1, title='Chebychev Approximation Error - First Derivative')

# #### Plot second derivative approximation error

approx_error(d2,F,2, title='Chebychev Approximation Error - Second Derivative')

# ### Cubic spline interpolation

# Let us repeat the approximation exercise, this time constructing a 21-function cubic spline approximant:

n = 21                          # order of approximation
S = BasisSpline(n, a, b, f=f)   # define basis
yapp = S(xgrid)                 # approximant values at grid nodes

# #### Plot function approximation error

approx_error(f,S,title='Cubic Spline Approximation Error - Function')

# The plot indicates that an order 21 cubic spline approximation scheme produces approximation errors no bigger in magnitude than $1.2\times10^{-6}$, about four orders of magnitude worse than with Chebychev polynomials.

# #### Plot first derivative approximation error

approx_error(d1,S,1, title='Cubic Spline Approximation Error - First Derivative')

# #### Plot second derivative approximation error

approx_error(d2,S,2, title='Cubic Spline Approximation Error - Second Derivative')

# ### Linear spline interpolation

# Let us repeat the approximation exercise, this time constructing a 31-function linear spline approximant:

n = 31
L = BasisSpline(n, a, b, k=1, f=f)

# #### Plot function approximation error

approx_error(f,L,title='Linear Spline Approximation Error - Function')

# The plot indicates that the order 31 linear spline approximation scheme produces approximation errors substantially larger than those of the cubic spline approximant, as one would expect from a lower-order piecewise-linear interpolant.

# ### Save all figures to disc

# +
#demo.savefig(figures,name="demapp01")
notebooks/app/01 Approximating function on R.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Reading CGMF Fission Events

# + code_folding=[]
### initializations and import libraries
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# %matplotlib inline
# %pylab inline

from CGMFtk import histories as fh

# + code_folding=[0]
### rcParams are the default parameters for matplotlib
import matplotlib as mpl

# Fixed typo in the banner string ("Matplotbib" -> "Matplotlib").
print("Matplotlib Version: ", mpl.__version__)

mpl.rcParams['font.size'] = 18
mpl.rcParams['font.family'] = 'Helvetica', 'serif'
#mpl.rcParams['font.color'] = 'darkred'
mpl.rcParams['font.weight'] = 'normal'
mpl.rcParams['axes.labelsize'] = 18.
mpl.rcParams['xtick.labelsize'] = 18.
mpl.rcParams['ytick.labelsize'] = 18.
mpl.rcParams['lines.linewidth'] = 2.

font = {'family' : 'serif',
        'color'  : 'darkred',
        'weight' : 'normal',
        'size'   : 18,
        }

mpl.rcParams['xtick.major.pad'] = '10'
mpl.rcParams['ytick.major.pad'] = '10'
mpl.rcParams['image.cmap'] = 'inferno'
# -

# The default output of a CGMF run is an ASCII file that contains characteristics of fission events. Once the **Histories** python class is uploaded, reading a CGMF output is straightforward:

hist = fh.Histories('98252sf.cgmf')

# The number of fission events and fission fragments can be easily extracted from the **Histories** class

print('This file contains ', str(hist.getNumberEvents()), ' events and ', str(hist.getNumberFragments()), ' fission fragments')

# With the option 'nevents', the number of fission events that are read can be specified:
#
#     hist = fh.Histories('92235_1MeV.cgmf', nevents=5000)
#
# This can be particularly useful for testing routines on a small number of events since reading the full history file could be time consuming

# Several python functions, part of the **Histories** class, make it easy to analyze those events.
# ### Accessing Fission Histories

# Although we rarely need to access directly the fission histories, it is possible by typing:

events = hist.getFissionHistories()

events[10]

# This history first list the characteristics of the fission fragment in mass,
# charge, excitation energy, spin, parity, and kinetic energy. It then provides
# the number of neutrons and gamma rays emitted in this event, followed by their
# characteristics in energy and direction.

# ### Accessing Fission Fragment Characteristics

# Fission fragment characteristics can be obtained using accessors such as:
# getA(), getZ(), getNu(), etc.

# +
A = hist.getA()
Z = hist.getZ()

# Use the explicit pyplot API instead of the deprecated %pylab-injected
# `figure(figsize(...))` helpers (consistent with the cell below).
fig = plt.figure(figsize=(14, 6))

# Fragment mass distribution (unit-width bins over the observed range).
plt.subplot(1, 2, 1)
plt.hist(A, bins=np.arange(min(A), max(A)+1), density=True)
plt.xlabel("Fission Fragment Mass")
plt.ylabel("Probability")

# Fragment charge distribution.
plt.subplot(1, 2, 2)
plt.hist(Z, bins=np.arange(min(Z), max(Z)+1), density=True)
plt.xlabel("Fission Fragment Charge")
plt.ylabel("Probability")

plt.tight_layout()
plt.show()
# -

# Quantities for the light and heavy fragments can be extracted

# +
U = hist.getU()
Ul = hist.getULF()
Uh = hist.getUHF()

# Shared unit-width binning so the three curves are directly comparable.
bx = np.arange(min(U), max(U)+1)
h, b = np.histogram(U, bins=bx, density=True)
hl, bl = np.histogram(Ul, bins=bx, density=True)
hh, bh = np.histogram(Uh, bins=bx, density=True)

fig = plt.figure(figsize=(8, 6))
plt.plot(b[:-1], h, 'o-', label='All fragments')
plt.plot(bl[:-1], hl, 'o-', label='Light fragments')
plt.plot(bh[:-1], hh, 'o-', label='Heavy fragments')
plt.legend()
plt.xlabel('Excitation Energy (MeV)')
plt.ylabel('Probability')
plt.show()
# -

# Plotting the initial excitation energy of the fragments as a function of its
# kinetic energy can be done by invoking getU() and getKEpre() (before neutron
# emission).
# +
U = hist.getU()
KE = hist.getKEpre()

# Unit-width bins over the observed ranges of both quantities.
bx = np.arange(min(U), max(U))
by = np.arange(min(KE), max(KE))

# Use the explicit pyplot API instead of the deprecated %pylab-injected
# `figure(figsize(...))` helpers.
fig = plt.figure(figsize=(10, 8))
plt.hist2d(U, KE, bins=(bx, by))
plt.xlim(0, 40)
plt.xlabel("Initial Excitation Energy (MeV)")
plt.ylabel("Initial Fragment Kinetic Energy (MeV)")
plt.show()
# -

# ### Summary table

# A table summarizing the main characteristics of all fission events, fission
# fragments, neutrons and gamma rays can be generated by using the
# **summaryTable()** function.

hist.summaryTable()
doc/rtd/nb_events.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import numpy as np import pandas as pd import scipy.stats import matplotlib.pyplot as plt from matplotlib import animation from matplotlib import rcParams rcParams['figure.dpi'] = 120 from IPython.display import HTML from IPython.display import YouTubeVideo from functools import partial YouTubeVideo_formato = partial(YouTubeVideo, modestbranding=1, disablekb=0, width=640, height=360, autoplay=0, rel=0, showinfo=0) # - # # Estadística inferencial # # La inferencia busca # # > Extraer **conclusiones** a partir de **hechos u observaciones** a través de un **método o premisa** # # En el caso particular de la **inferencia estadística** podemos realizar las siguientes asociaciones # # - Hechos: Datos # - Premisa: Modelo probabilístico # - Conclusión: Una cantidad no observada que es interesante # # Y lo que buscamos es # # > Cuantificar la incerteza de la conclusión dado los datos y el modelo # # La inferencia estadística puede dividirse en los siguientes tres niveles # # 1. Ajustar un modelo a nuestros datos # 1. Verificar que el modelo sea confiable # 1. Responder una pregunta usando el modelo # # En esta lección estudiaremos las herramientas más utilizadas asociadas a cada uno de estos niveles # # 1. **Estimador de máxima verosimilitud** # 1. **Bondad de ajuste** e **Intervalos de confianza** # 1. **Test de hipótesis** # ## Ajuste de modelos: Estimación de máxima verosimilitud # # En este nivel de inferencia se busca **ajustar** un modelo teórico sobre nuestros datos. En esta lección nos enfocaremos en **modelos de tipo parámetrico**. Un modelo parámetrico es aquel donde **se explicita una distribución de probabilidad**. # # Recordemos que una distribución tiene **parámetros**. 
Por ejemplo la distribución Gaussiana (univariada) se describe por su media $\mu$ y su varianza $\sigma^2$. Luego ajustar una distribución Gaussiana corresponde a encontrar el valor de $\mu$ y $\sigma$ que hace que el modelo se parezca lo más posible a la distribución empírica de los datos. # # A continuación veremos los pasos necesarios para ajustar una distribución a nuestros datos # ### ¿Qué distribución ajustar? # # Antes de ajustar debemos realizar un supuesto sobre la distribución para nuestro modelo. En general podemos ajustar cualquier distribución pero un mal supuesto podría invalidar nuestra inferencia # # Podemos usar las herramientas de **estadística descriptiva** para estudiar nuestros datos y tomar esta decisión de manera informada # # En el siguiente ejemplo, un histograma de los datos revela que un modelo gaussiano no es una buena decisión # # <img src="../img/stats6.png"> # # ¿Por qué? La distribución empírica es claramente asimétrica, su cola derecha es más pesada que su cola izquierda. La distribución Gaussiana es simétrica por lo tanto no es apropiada en este caso ¿Qué distribución podría ser más apropiada? # # # ### ¿Cómo ajustar mi modelo? 
Estimación de máxima verosimilitud # # A continuación describiremos un procedimiento para ajustar modelos paramétricos llamado *maximum likelihood estimation* (MLE) # # Sea un conjunto de datos $\{x_1, x_2, \ldots, x_N\}$ # # **Supuesto 1** Los datos siguen el modelo $f(x;\theta)$ donde $f(\cdot)$ es una distribución y $\theta$ son sus parámetros # # $$ # f(x_1, x_2, \ldots, x_N |\theta) # $$ # # **Supuesto 2** Las observaciones son independientes e idénticamente distribuidas (iid) # # - Si dos variables son independientes se cumple que $P(x, y) = P(x)P(y)$ # - Si son además idénticamente distribuidas entonces tienen **la misma distribución y parámetros** # # Usando esto podemos escribir # # $$ # \begin{align} # f(x_1, x_2, \ldots, x_N |\theta) &= f(x_1|\theta) f(x_2|\theta) \ldots f(x_N|\theta) \nonumber \\ # & = \prod_{i=1}^N f(x_i|\theta) \nonumber \\ # & = \mathcal{L}(\theta) # \end{align} # $$ # # donde $\mathcal{L}(\theta)$ se conoce como la verosimilitud o probabilidad inversa de $\theta$ # # Si consideramos que los datos son fijos podemos buscar el valor de $\theta$ de máxima verosimilitud # # $$ # \begin{align} # \hat \theta &= \text{arg} \max_\theta \mathcal{L}(\theta) \nonumber \\ # &= \text{arg} \max_\theta \log \mathcal{L}(\theta) \nonumber \\ # &= \text{arg} \max_\theta \sum_{i=1}^N \log f(x_i|\theta) # \end{align} # $$ # # El segundo paso es valido por que el máximo de $g(x)$ y $\log(g(x))$ es el mismo. El logaritmo es monoticamente creciente. Además aplicar el logaritmo es muy conveniente ya que convierte la multiplicatoria en una sumatoria. # # Ahora sólo falta encontrar el máximo. Podemos hacerlo # # - Analíticamente, derivando con respecto a $\theta$ e igualando a cero # - Usando técnicas de optimización iterativas como gradiente descedente # **Ejemplo:** La pesa defectuosa # # <img src="../img/garfield.png" width="250"> # # Su profesor quiere medir su peso pero sospecha que su pesa está defectuosa. 
Para comprobarlo mide su peso $N$ veces obteniendo un conjunto de observaciones $\{x_i\}$. ¿Es posible obtener un estimador del peso real $\hat x$ a partir de estas observaciones? # # Modelaremos las observaciones como # # $$ # x_i = \hat x + \varepsilon_i # $$ # # donde $\varepsilon_i$ corresponde al ruido o error del instrumento y asumiremos que $\varepsilon_i \sim \mathcal{N}(0, \sigma_\varepsilon^2)$, es decir que el ruido es **independiente** y **Gaussiano** con media cero y **varianza** $\sigma_\varepsilon^2$ **conocida** # # Entonces la distribución de $x_i$ es # # $$ # f(x_i|\hat x) = \mathcal{N}(\hat x, \sigma_\varepsilon^2) # $$ # # Para encontrar $\hat x$, primero escribimos el logaritmo de la **verosimilitud** # # $$ # \begin{align} # \log \mathcal{L}(\hat x) &= \sum_{i=1}^N \log f(x_i|\hat x) \nonumber \\ # &= \sum_{i=1}^N \log \frac{1}{\sqrt{2\pi\sigma_\varepsilon^2}} \exp \left ( - \frac{1}{2\sigma_\varepsilon^2} (x_i - \hat x)^2 \right) \nonumber \\ # &= -\frac{N}{2}\log(2\pi\sigma_\varepsilon^2) - \frac{1}{2\sigma_\varepsilon^2} \sum_{i=1}^N (x_i - \hat x)^2 \nonumber # \end{align} # $$ # Luego debemos resolver # # $$ # \begin{align} # \hat \theta &= \text{arg} \max_\theta \log \mathcal{L}(\theta) \nonumber \\ # &= \text{arg} \max_\theta - \frac{1}{2\sigma_\varepsilon^2} \sum_{i=1}^N (x_i - \hat x)^2 # \end{align} # $$ # # donde podemos ignorar el primer término de la verosimilitud ya que no depende de $\theta$. Para encontrar el máximo derivamos la expresión anterior e igualamos a cero # # $$ # -\frac{1}{2\sigma_\varepsilon^2} \sum_{i=1}^N 2(x_i - \hat x ) = 0. 
# $$ # # Finalmente si despejamos llegamos a que # # $$ # \hat x = \frac{1}{N} \sum_{i=1}^N x_i, # $$ # # que se conoce como el estimador de máxima verosimilitud **para la media de una Gaussiana** # # Recordemos que podemos comprobar que es un máximo utilizando la segunda derivada # ### Estimación MLE con `scipy` # # Como vimos en la lección anterior el módulo [`scipy.stats`](https://docs.scipy.org/doc/scipy/reference/stats.html) provee de un gran número de distribuciones teóricas organizadas como # # - continuas de una variable # - discretas de una variable # - multivariadas # # Las distribuciones comparten muchos de sus métodos, a continuación revisaremos los más importantes. A modo de ejemplo consideremos la distribución Gaussiana (Normal) # # ```python # from scipy.stats import norm # dist = norm() # Esto crea una Gaussiana con media 0 y desviación estándar (std) 1 # dist = norm(loc=2, scale=2) # Esto crea una Gaussiana con media 2 y std 2 # ``` # # **Crear una muestra aleatoria con `rvs`** # # Luego de crear un objeto distribución podemos obtener una muestra aleatoria usando el método el atributo `rvs` # # ```python # dist = norm(loc=2, scale=2) # dist.rvs(size=10, # Cantidad de números aleatorios generados # random_state=None #Semilla aleatoria # ) # ``` # # Esto retorna un arreglo de 10 números generados aleatoriamente a partir de `dist` # # **Evaluar la función de densidad de probabilidad** # # La función de densidad de la Gaussiana es # # $$ # f(x; \mu, \sigma^2) = \frac{1}{\sqrt{2\pi \sigma^2}} \exp \left( -\frac{1}{2\sigma^2} (x-\mu)^2 \right) # $$ # # La densidad de un objeto distribución continuo puede obtenerse con el método `pdf` el cual es función de `x` # # # ```python # dist = norm(loc=2, scale=2) # p = dist.pdf(x # Un ndrray que representa x en la ecuación superior # ) # plt.plot(x, p) # Luego podemos graficar la fdp # ``` # # De forma equivalente, si deseamos la función de densidad acumulada usamos el método `cdf` # # Para objetos distribución 
discretos debemos usar el atributo `pmf` # # # **Ajustar los parámetros con MLE** # # Para hacer el ajuste se usa el método `fit` # # ```python # params = norm.fit(data # Un ndarray con los datos # ) # ``` # # En el caso de la Gaussiana el vector `params` tiene dos componentes `loc` y `scale`. La cantidad de parámetros depende de la distribución que estemos ajustando. También es importante notar que para ajustar se usa `norm` (clase abstracta) y no `norm()` (instancia) # # Una vez que tenemos los parámetros ajustados podemos usarlos con # # ```python # dist = norm(loc=params[0], scale=params[1]) # ``` # # Para distribuciones que tienen más de dos parámetros podemos usar # # ```python # dist = norm(*params[:-2], loc=params[-2], scale=params[-1]) # ``` # ### Ejercicio # # Observe la siguiente distribución y reflexione ¿Qué características resaltan de la misma? ¿Qué distribución sería apropiado ajustar en este caso? df = pd.read_csv('../data/cancer.csv', index_col=0) df = df[["diagnosis", "radius1", "texture1"]] x = df["radius1"].values fig, ax = plt.subplots(figsize=(5, 3), tight_layout=True) ax.hist(x, bins=20, density=True) ax.set_xlabel('Radio del nucleo'); # - Seleccione una distribución de `scipy.stats` ajustela a los datos # - Grafique la pdf teórica sobre el histograma # ## Verificación de modelos: Tests de bondad de ajuste # # Una vez que hemos ajustado un modelo es buena práctica verificar que tan confiable es este ajuste. 
Las herramientas más típicas para medir que tan bien se ajusta nuestra distribución teórica son # # - el [test de Akaike](https://en.wikipedia.org/wiki/Akaike_information_criterion) # - los [gráficos cuantil-cuantil](https://es.wikipedia.org/wiki/Gr%C3%A1fico_Q-Q) (QQ plot) # - el test no-paramétrico de Kolmogorov-Smirnov (KS) # # A continuación revisaremos el test de KS para bondad de ajuste # # **El test de Kolmogorov-Smirnov** # # Es un test no-paramétrico que compara una muestra de datos estandarizados (distribución empírica) con una distribución de densidad acumulada (CDF) teórica. Este test busca refutar la siguiente hipótesis # # > **Hipótesis nula:** Las distribuciones son idénticas # # Para aplicar el test primero debemos **estandarizar** los datos. Estandarizar se refiere a la transformación # # $$ # z = \frac{x - \mu_x}{\sigma_x} # $$ # # es decir los datos estándarizados tienen media cero y desviación estándar uno # # Esto puede hacerse fácilmente con NumPy usando # # ```python # z = (x - np.mean(x))/np.std(x) # ``` # # ### Test de KS con `scipy` # # Podemos realizar el test de KS con la función [`scipy.stats.kstest`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html) donde # # ```python # scipy.stats.kstest(rvs, # Una muestra de observaciones estandarizadas # cdf, # Una distribución acumulada teórica, por ejemplo scipy.stats.norm.cdf # ... # ) # ``` # # Esta función retorna el valor del estadístico de KS y su *p-value* asociado. Mientras más cerca de cero sea el estadístico de KS mejor es el ajuste. # # Más adelante haremos un repaso de tests de hipótesis en detalle. 
De momento recordemos que si el *p-value* es menor que una confianza $\alpha=0.05$ entonces rechazamos la hipótesis nula con confianza $1-\alpha = 0.95$ o $95\%$ # # ### Ejercicio # # Considere la muestra de datos anterior # - Seleccione un conjunto de distribuciones teóricas # - Encuentra la que tiene mejor ajuste usando `kstest` # ## Responder preguntas con nuestro modelo: Test de hipótesis # # Se aplica un tratamiento nuevo a una muestra de la población # # - ¿Es el tratamiento efectivo? # - ¿Existe una diferencia entre los que tomaron el tratamiento y los que no? # # El test de hipótesis es un procedimiento estadístico para comprobar si el resultado de un experimento es significativo en la población # # Para esto formulamos dos escenarios cada uno con una hipótesis asociada # # - Hipótesis nula ($H_0$): Por ejemplo # - "El experimento no produjo diferencia" # - "El experimento no tuvo efecto" # - "Las observaciones son producto del azar" # - Hipótesis alternativa ($H_A$): Usualmente el complemento de $H_0$ # # > El test de hipótesis se diseña para medir que tan fuerte es la evidencia **en contra** de la hipótesis nula # ### Algoritmo general de un test de hipótesis # # El siguiente es el algoritmo general de un test de hipótesis paramétrico # # 1. Definimos $H_0$ y $H_A$ # 1. Definimos un estadístico $T$ # 1. Asumimos una distribución para $T$ dado que $H_0$ es cierto # 1. Seleccionamos un nivel de significancia $\alpha$ # 1. Calculamos el $T$ para nuestros datos $T_{data}$ # 1. 
Calculamos el **p-value** # - Si nuestro test es de una cola: # - Superior: $p = P(T>T_{data})$ # - Inferior: $p = P(T<T_{data})$ # - Si nuestro test es dos colas: $p = P(T>T_{data}) + P(T<T_{data})$ # # Finalmente: # # `Si` $p < \alpha$ # # > Rechazamos la hipótesis nula con confianza (1-$\alpha$) # # `De lo contrario` # # > No hay suficiente evidencia para rechazar la hipótesis nula # # El valor de $\alpha$ nos permite controlar el **[Error tipo I](https://es.wikipedia.org/wiki/Errores_de_tipo_I_y_de_tipo_II)**, es decir el error que cometemos si rechazamos $H_0$ cuando en realidad era cierta (falso positivo) # # Tipicamente se usa $\alpha=0.05$ o $\alpha=0.01$ # # **Errores de interpretación comunes** # # Muchas veces se asume que el p-value es la probabilidad de que $H_0$ sea cierta dado nuestras observaciones # # $$ # p = P(H_0 | T> T_{data}) # $$ # # Esto es un **grave error**. Formálmente el **p-value** es la probabilidad de observar un valor de $T$ más extremo que el observado, es decir # # $$ # p = P(T> T_{data} | H_0) # $$ # # Otro error común es creer que no ser capaz de rechazar $H_0$ es lo mismo que aceptar $H_0$ # # No tener suficiente evidencia para rechazar no es lo mismo que aceptar # ### Un primer test de hipótesis: El t-test de una muestra # # Sea un conjunto de $N$ observaciones iid $X = {x_1, x_2, \ldots, x_N}$ con media muestral $\bar x = \sum_{i=1}^N x_i$ # # El t-test de una muestra es un test de hipótesis que busca verificar si $\bar x$ es significativamente distinta de la **media poblacional** $\mu$, en el caso de que **no conocemos la varianza poblacional** $\sigma^2$ # # Las hipótesis son # # - $H_0:$ $\bar x = \mu$ # - $H_A:$ $\bar x \neq \mu$ (dos colas) # # El estadístico de prueba es # # $$ # t = \frac{\bar x - \mu}{\hat \sigma /\sqrt{N-1}} # $$ # # donde $\hat \sigma = \sqrt{ \frac{1}{N} \sum_{i=1}^N (x_i - \bar x)^2}$ es la desviación estándar muestral (sesgada) # # Si asumimos que $\bar x$ se distribuye $\mathcal{N}(\mu, 
\frac{\sigma^2}{N})$ entonces # $t$ se distribuye [t-student](https://en.wikipedia.org/wiki/Student%27s_t-distribution) con $N-1$ grados de libertad # # - Para muestras iid y $N$ grande el supuesto se cumple por teorema central del límite # - Si $N$ es pequeño debemos verificar la normalidad de los datos # # ### Aplicación de t-test para probar que la regresión es significativa # # En un modelo de regresión lineal donde tenemos $N$ ejemplos # # $$ # y_i = x_i \theta_1 + \theta_0, ~ i=1, 2, \ldots, N # $$ # # Podemos probar que la correlación entre $x$ es $y$ es significativa con un test sobre $\theta_1$ # # Por ejemplo podemos plantear las siguientes hipótesis # # - $H_0:$ La pendiente es nula $\theta_1= 0$ # - $H_A:$ La pendiente no es nula: $\theta_1\neq 0$ (dos colas) # # Y asumiremos que $\theta_1$ es normal pero que desconocemos su varianza. Bajo este supuesto se puede formular el siguiente estadístico de prueba # # $$ # t = \frac{(\theta_1-\theta^*) }{\text{SE}_{\theta_1}/\sqrt{N-2}} = \frac{ r\sqrt{N-2}}{\sqrt{1-r^2}}, # $$ # # donde $r$ es el coeficiente de correlación de Pearson (detalles más adelante) y la última expresión se obtiene reemplazando $\theta^*=0$ y $\text{SE}_{\theta_1} = \sqrt{ \frac{\frac{1}{N} \sum_i (y_i - \hat y_i)^2}{\text{Var}(x)}}$. # # El estadístico tiene distribución t-student con dos grados de libertad (modelo de dos parámetros) # # ## Ejercicio formativo: Regresión lineal # # En lecciones anteriores estudiamos el modelo de regresión lineal el cual nos permite estudiar si existe correlación entre variables continuas. También vimos como ajustar los parámetros del modelo usando el método de mínimos cuadrados. 
En este ejercicio formativo veremos como verificar si el modelo de regresión ajustado es correcto # # Luego de revisar este ejercicio usted habrá aprendido # # - La interpretación probabilística de la regresión lineal y la relación entre mínimos cuadrados ordinarios y la estimación por máxima verosimilitud # - El estadístico $r$ para medir la fuerza de la correlación entre dos variables # - Un test de hipótesis para verificar que la correlación encontrada es estadística significativa # # Usaremos el siguiente dataset de consumo de helados. Referencia: [A handbook of small datasets](https://www.routledge.com/A-Handbook-of-Small-Data-Sets/Hand-Daly-McConway-Lunn-Ostrowski/p/book/9780367449667), estudio realizado en los años 50 df = pd.read_csv('../data/helados.csv', header=0, index_col=0) df.columns = ['consumo', 'ingreso', 'precio', 'temperatura'] display(df.head()) # El dataset tiene la temperatura promedio del día (grados Fahrenheit), el precio promedio de los helados comprados (dolares), el ingreso promedio familiar semanal de las personas que compraron helado (dolares) y el consumo ([pintas](https://en.wikipedia.org/wiki/Pint) per capita). # # A continuación se muestra un gráfico de dispersión del consumo en función de las demás variables. ¿Cree usted que existe correlación en este caso? fig, ax = plt.subplots(1, 3, figsize=(8, 3), tight_layout=True, sharey=True) for i, col in enumerate(df.columns[1:]): ax[i].scatter(df[col], df["consumo"], s=10) ax[i].set_xlabel(col) ax[0].set_ylabel(df.columns[0]); # ### Interpretación probabilística y MLE de la regresión lineal # # Sea $y$ el consumo y $x$ la temperatura. 
# # Asumiremos errores gaussianos iid # # $$ # y_i = \hat y_i + \epsilon_i, \epsilon_i \sim \mathcal{N}(0, \sigma^2), # $$ # # y un modelo lineal de **dos parámetros** (linea recta) # # $$ # \hat y_i = \theta_0 + \theta_1 x_i # $$ # # Bajo estos supuestos el estimador de máxima verosimilitud es # # $$ # \begin{align} # \hat \theta &= \text{arg}\max_\theta \log \mathcal{L}(\theta) \nonumber \\ # &=\text{arg}\max_\theta - \frac{1}{2\sigma^2} \sum_{i=1}^N (y_i - \theta_0 - \theta_1 x_i)^2 \nonumber # \end{align} # $$ # # Es decir que el estimador de máxima verosimilitud es equivalente al de mínimos cuadrados ordanrios $\hat \theta= (X^T X)^{-1} X^T y$ que vimos anteriormente # # **Importante:** Cuando utilizamos la solución de mínimos cuadrados estamos asumiendo implicitamente que las observaciones son iid y que la verosimilitud es Gaussiana # # # Derivando con respecto a los parámetros e igualado a cero tenemos que # # $$ # \begin{align} # \sum_i y_i - N\theta_0 - \theta_1 \sum_i x_i &= 0 \nonumber \\ # \sum_i y_i x_i - \theta_0 \sum_i x_i - \theta_1 \sum_i x_i^2 &= 0 \nonumber # \end{align} # $$ # # Finalmente podemos despejar # # $$ # \begin{align} # \theta_0 &= \bar y - \theta_1 \bar x \nonumber \\ # \theta_1 &= \frac{\sum_i x_i y_i - N \bar x \bar y}{\sum_i x_i^2 - M \bar x^2} \nonumber \\ # &= \frac{ \sum_i (y_i - \bar y)(x_i - \bar x)}{\sum_i (x_i - \bar x)^2} = \frac{\text{COV}(x, y)}{\text{Var}(x)} # \end{align} # $$ # # de donde reconocemos las expresiones para la covarianza entre $x$ e $y$ y la varianza de $x$ # # ### Coeficiente de correlación de Pearson # # La fuerza de la correlación se suele medir usando # # $$ # r^2 = 1 - \frac{\sum_i ( y_i - \hat y_i)^2}{\sum_i ( y_i - \bar y)^2} = 1 - \frac{\frac{1}{M} \sum_i (y_i - \hat y_i)^2}{\text{Var}(y)} = \frac{\text{COV}^2(x, y)}{\text{Var}(x) \text{Var}(y)} # $$ # # donde $r = \frac{\text{COV}(x, y)}{\sqrt{\text{Var}(x) \text{Var}(y)}} \in [-1, 1]$ se conoce como [coeficiente de correlación de 
Pearson](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) # # donde # # - si $r=1$ existe una correlación lineal perfecta # - si $r=-1$ existe una anticorrelación lineal perfecta # - si $r=0$ no hay correlación lineal entre las variables # # En general un $r>0.5$ se considera una correlación importante # **Calculando $r$ con y los parámetros de la regresión lineal** # # Podemos usar el atributo de dataframe # # ```python # df.corr() # ``` # # Que retorna la matriz de correlaciones lineales df.corr() # Si queremos también el valor de los parámetros podemos usar la función de scipy # # ```python # scipy.stats.linregress(x, # Variable independiente unidimensional # y # Variable dependiente unidimensional # ) # ``` # # Esta función retorna una tupla con # # - Valor de la pendiente: $\theta_1$ # - Valor de la intercepta: $\theta_0$ # - Coeficiente de correlación $r$ # - p-value # - Error estándar del ajuste # + fig, ax = plt.subplots(1, 3, figsize=(8, 3), tight_layout=True, sharey=True) ax[0].set_ylabel(df.columns[0]); for i, col in enumerate(df.columns[1:]): res = scipy.stats.linregress(df[col], df["consumo"]) x_plot = np.linspace(np.amin(df[col]), np. amax(df[col]), num=100) ax[i].scatter(df[col], df["consumo"], label='datos', s=10) ax[i].plot(x_plot, res.slope*x_plot + res.intercept, lw=2, c='r', label='modelo'); ax[i].set_xlabel(col) ax[i].set_title(f"$r$: {res.rvalue:0.5f}") ax[i].legend() # - # Es decir que visualmente parece existir # # - una correlación positiva alta entre consumo y temperatura # - una correlación negativa moderada entre consumo y precio # - una correlación cercana a cero entre consumo e ingreso # ### Test de hipótesis y conclusiones # # La función `linregress` implementa el t-test sobre $\theta_1$ que vimos anteriormente. 
Usemos estos resultados para verificar si las correlaciones son estadísticamente significativas # + alpha = 0.05 for i, col in enumerate(df.columns[1:]): res = scipy.stats.linregress(df[col], df["consumo"]) print(f"{col}: \t p-value:{res.pvalue:0.4f} \t ¿Menor que {alpha}?: {res.pvalue < alpha}") # - # Como complemento visualizemos # # - las distribuciones bajo la hipótesis nula: linea azul # - los límites dados por $\alpha$: linea punteada negra # - El valor del observado para cada una de las variables: linea roja # + fig, ax = plt.subplots(1, 3, figsize=(8, 2), tight_layout=True, sharey=True) ax[0].set_ylabel(df.columns[0]); N = df.shape[0] t = np.linspace(-7, 7, num=1000) dist = scipy.stats.t(loc=0, scale=1, df=N-2) # dos grados de libertad for i, col in enumerate(df.columns[1:]): res = scipy.stats.linregress(df[col], df["consumo"]) t_data = res.rvalue*np.sqrt(N-2)/np.sqrt(1.-res.rvalue**2) ax[i].plot(t, dist.pdf(t)) ax[i].plot([dist.ppf(alpha/2)]*2, [0, np.amax(dist.pdf(t))], 'k--') ax[i].plot([dist.ppf(1-alpha/2)]*2, [0, np.amax(dist.pdf(t))], 'k--') ax[i].plot([t_data]*2, [0, np.amax(dist.pdf(t))], 'r-') ax[i].set_xlabel(col) # - # **Conclusión** # # Basado en los p-values y considerando $\alpha=0.05$ # # ¿Qué podemos decir de las correlaciones con el consumo de helados? # # > Rechazamos la hipótesis nula de que no existe correlación entre temperatura y consumo con un 95% de confianza # # Para las variables ingreso y precio no existe suficiente evidencia para rechazar $H_0$ # ### Reflexión final # # En el ejercicio anterior usamos t-test para una regresión lineal entre dos variables ¿Qué prueba puedo usar si quiero hacer regresión lineal multivariada? # # > Se puede usar [ANOVA](https://pythonfordatascience.org/anova-python/) # # ¿Qué pasa si... # # - mis datos tienen una relación que no es lineal? # - $\theta_1$ no es Gaussiano/normal? # - si el ruido no es Gaussiano? # - si el ruido es Gaussiano pero su varianza cambia en el tiempo? 
# # > En estos casos no se cumplen los supuestos del modelo o del test, por ende el resultado no es confiable # # Si mis supuestos no se cumplen con ninguna prueba parámetrica, la opión es utilizar pruebas no-paramétricas # ## Prueba no-paramétrica: *Bootstrap* # # Podemos estimar la incerteza de un estimador de forma no-paramétrica usando **muestreo tipo *bootstrap*** # # Esto consiste en tomar nuestro conjunto de datos de tamaño $N$ y crear $T$ nuevos conjuntos que "se le parezcan". Luego se calcula el valor del estimador que estamos buscando en los $T$ conjuntos. Con esto obtenemos una distribución para el estimador como muestra el siguiente diagrama # # # <img src="https://www.statisticshowto.datasciencecentral.com/wp-content/uploads/2016/10/bootstrap-sample.png"> # # # Para crear los subconjuntos podríamos suponer independencia y utilizar **muestreo con reemplazo**. Esto consiste en tomar $N$ muestras al azar permitiendo repeticiones, como muestra el siguiente diagrama # # <img src="../img/stats7.png" width="700"> # # Si no es posible suponer indepdencia se puede realizar bootstrap basado en residuos y bootstrap dependiente. Puedes consultar más detalles sobre [*bootstrap*](https://www.stat.cmu.edu/~cshalizi/402/lectures/08-bootstrap/lecture-08.pdf) [aquí](http://homepage.divms.uiowa.edu/~rdecook/stat3200/notes/bootstrap_4pp.pdf) y [acá](https://www.sagepub.com/sites/default/files/upm-binaries/21122_Chapter_21.pdf). A continuación nos enfocaremos en el clásico muestreo con reemplazo y como implementarlo en Python # ### Implementación con Numpy y Scipy # # La función `numpy.random.choice` permite remuestrear un conjunto de datos # # Por ejemplo para la regresión lineal debemos remuestrar las parejas/tuplas $(x_i, y_i)$ # # Luego calculamos y guardamos los parámetros del modelo para cada remuestreo. 
# En este ejemplo haremos $1000$ repeticiones del conjunto de datos

# +
df = pd.read_csv('../data/helados.csv', header=0, index_col=0)
df.columns = ['consumo', 'ingreso', 'precio', 'temperatura']
x, y = df["temperatura"].values, df["consumo"].values

# Ajuste de referencia sobre los datos originales (sin remuestreo).
params = scipy.stats.linregress(x, y)


def muestreo_con_reemplazo(x, y, rng=None):
    """Return one bootstrap resample (sampling with replacement) of the pairs (x, y).

    Parameters
    ----------
    x, y : 1-d arrays of equal length (paired observations).
    rng : numpy.random.Generator, optional
        Source of randomness for reproducible resampling. If None (default,
        backward compatible) the global NumPy RNG is used.
    """
    N = len(x)
    if rng is None:
        idx = np.random.choice(N, size=N, replace=True)
    else:
        idx = rng.choice(N, size=N, replace=True)
    return x[idx], y[idx]


def boostrap_linregress(x, y, T=100, rng=None):
    """Fit T bootstrap linear regressions on resamples of (x, y).

    Returns a (T, 3) array whose columns are [intercept (t0), slope (t1), r].
    Pass `rng` (a numpy.random.Generator) for reproducible results.
    """
    # Parámetros: t0, t1 y r
    params = np.zeros(shape=(T, 3))
    for t in range(T):
        res = scipy.stats.linregress(*muestreo_con_reemplazo(x, y, rng))
        params[t, :] = [res.intercept, res.slope, res.rvalue]
    return params


boostrap_params = boostrap_linregress(x, y, T=1000)
# -

# ### Intervalos de confianza empíricos
#
# Veamos la distribución empírica de $r$ obtenida usando bootstrap
#
# En la figura de abajo tenemos
#
# - Histograma azul: Distribución bootstrap de $r$
# - Linea roja: $r$ de los datos
# - Lineas punteadas negras: Intervalo de confianza empírico al 95%
#

# +
r_bootstrap = boostrap_params[:, 2]

fig, ax = plt.subplots(figsize=(4, 3), tight_layout=True)
hist_val, hist_lim, _ = ax.hist(r_bootstrap, bins=20, density=True)
# r observado en los datos originales (linea roja).
ax.plot([params.rvalue]*2, [0, np.max(hist_val)], 'r-', lw=2)

# Intervalo de confianza empírico al 95% (percentiles 2.5 y 97.5).
IC = np.percentile(r_bootstrap, [2.5, 97.5])
ax.plot([IC[0]]*2, [0, np.max(hist_val)], 'k--', lw=2)
ax.plot([IC[1]]*2, [0, np.max(hist_val)], 'k--', lw=2)

print(f"Intervalo de confianza al 95% de r: {IC}")
# -

# De la figura podemos notar que el 95% de la distribución empírica esta sobre $r=0.5$
#
# También podemos notar que la distribución empírica de $r$ no es simétrica, por
# lo que aplicar un t-test parámetrico sobre $r$ no hubiera sido correcto

# ### Visualizando la incerteza del modelo
#
# Usando la distribución empírica de los parámetros $\theta_0$ y $\theta_1$
# podemos visualizar la incerteza de nuestro modelo de regresión lineal
#
# En la figura de abajo tenemos
# - Puntos azules: Datos
# - Linea roja: Modelo de regresión lineal en los datos
# - Sombra rojo
# claro: $\pm 2$ desviaciones estándar del modelo en base a la distribución empírica

# +
fig, ax = plt.subplots(figsize=(4, 3), tight_layout=True)
ax.set_ylabel('Consumo')
ax.set_xlabel('Temperatura')
ax.scatter(x, y, zorder=100, s=10, label='datos')


def model(theta0, theta1, x):
    """Evaluate the linear model theta0 + theta1 * x (broadcasts over arrays)."""
    return x*theta1 + theta0


# Build the evaluation grid locally instead of reusing `x_plot` leaked from an
# earlier plotting loop (hidden-state bug: it only covered the temperature range
# because 'temperatura' happened to be the last column iterated there).
x_plot = np.linspace(np.amin(x), np.amax(x), num=100)

# Best-fit line from the original (non-bootstrap) regression.
ax.plot(x_plot, model(params.intercept, params.slope, x_plot),
        c='r', lw=2, label='mejor ajuste')

# One regression line per bootstrap resample -> empirical mean +- 2 std band.
dist_lines = model(boostrap_params[:, 0], boostrap_params[:, 1],
                   x_plot.reshape(-1, 1)).T
mean_lines, std_lines = np.mean(dist_lines, axis=0), np.std(dist_lines, axis=0)
ax.fill_between(x_plot, mean_lines - 2*std_lines, mean_lines + 2*std_lines,
                color='r', alpha=0.25, label='incerteza')
plt.legend();
# -
clases/unidad2/3_statistics/stats3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pipenv-sickkids # language: python # name: pipenv-sickkids # --- # Tutorial on Computing HFOs (Part 2) # ========================== # # In this tutorial, we will walk through how to compute HFOs on a sample dataset that is defined in [1]. # # We will demonstrate usage of the following detectors: # # - Line Length detector # - RMS detector # - Morphology detector (used in the paper) # # Dataset Preprocessing # --------------------- # Note that the data has been converted to BIDS to facilitate easy loading using ``mne-bids`` package. Another thing to note is that the authors in this dataset # reported HFOs detected using bipolar montage. In addition, they only analyzed HFOs for a subset of the recording channels. # # In order to compare results to a monopolar reference, we define an HFO to be "found" if there was an HFO in either of the corresponding bipolar contacts. # # References # ---------- # [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Resection of high frequency oscillations predicts seizure outcome in the individual patient. # Scientific Reports. 2017;7(1):13836. 
# https://www.nature.com/articles/s41598-017-13064-1 # doi:10.1038/s41598-017-13064-1 # + # first let's load in all our packages import matplotlib import matplotlib.pyplot as plt import numpy as np import sys import os import re import pandas as pd from sklearn.metrics import make_scorer from sklearn.model_selection import GridSearchCV from mne_bids import (read_raw_bids, BIDSPath, get_entity_vals, get_datatypes, make_report) from mne_bids.stats import count_events import mne from mne import make_ad_hoc_cov basepath = os.path.join(os.getcwd(), "../..") sys.path.append(basepath) from mne_hfo import LineLengthDetector, RMSDetector from mne_hfo.score import _compute_score_data, accuracy from mne_hfo.sklearn import make_Xy_sklearn, DisabledCV # - # # 1 Working with Real Data # We are now going to work with the dataset from Fedele et al. linked above # ## 1.1 Load in Real Data # ### 1.1.1 Define dataset paths and load the data # # The data is assumed to be in BIDS format. We have converted the dataset into BIDS, # which you can load using [mne-bids](https://github.com/mne-tools/mne-bids). 
# this may change depending on where you store the data root = '/Users/patrick/Dropbox/fedele_hfo_data' # print a boiler plate summary report using mne-bids report = make_report(root, verbose=False) print(report) # + # there are iEEG datatypes throughout the dataset datatypes = get_datatypes(root) # get all the subjects, sessions subjects = get_entity_vals(root, 'subject') sessions = get_entity_vals(root, 'session') # + # construct BIDSPath for dataset we will demo subjectID = subjects[0] sessionID = sessions[0] bids_path = BIDSPath(subject=subjectID, session=sessionID, datatype='ieeg', suffix='ieeg', extension='.vhdr', root=root) # get all matching datasets fpaths = bids_path.match() print(f'Found {len(fpaths)} matching filepaths for this subject and session.') # + # analyze the first run dataset_path = fpaths[0] print(dataset_path) # count the different HFO types (fr, ripple, frandr) that were detected using # the published Morphology detector counts_df = count_events(dataset_path) display(counts_df) # - # %%capture # load dataset into mne Raw object extra_params = dict(preload=True) raw = read_raw_bids(dataset_path, extra_params) # Optional - Change backend to interactive for below plot # Allows you to scroll dataset, highlight (or grey out) certain channels, zoom in/out, etc. 
# %matplotlib notebook # Plot the raw data with vertical colorbars to denote where HFOs were detected raw_plot = raw.plot() raw_plot.show() print('plotting channels with HFO events detected in ' 'the original publication in color.') # Optional - Change back to regular plots # %matplotlib inline # ### 1.1.2 Convert to bipolar referencing scheme # The Fedele paper seems to use bipolar referenced channels, so we do our best to compare here # def convert_to_bipolar(raw, drop_originals=True): original_ch_names = raw.ch_names ch_names_sorted = sorted(original_ch_names) ch_pairs = [] for first, second in zip(ch_names_sorted, ch_names_sorted[1:]): firstName = re.sub(r'[0-9]+', '', first) secondName = re.sub(r'[0-9]+', '', second) if firstName == secondName: ch_pairs.append((first,second)) for ch_pair in ch_pairs: raw = mne.set_bipolar_reference(raw, ch_pair[0], ch_pair[1], drop_refs=False) if drop_originals: raw = raw.drop_channels(original_ch_names) return raw # %%capture raw = convert_to_bipolar(raw) # ### 1.1.3 Load Annotated HFOs # All annotated HFO events for this file annotations = raw.annotations # + # The fedele bipolar names use the scheme CH#-#, but mne-bipolar uses the scheme CH#-CH#. Reconstructing # the fedele names to match mne names def reconstruct_channel_name_to_mne(ch_name): ch_split = ch_name.split("-") cont_name = re.sub(r'[0-9]+', '', ch_split[0]) ch_name_mne = f"{ch_split[0]}-{cont_name}{ch_split[1]}" return ch_name_mne # You can also go the other way around. 
# If we convert the mne names to the fedele names, you can use
# the mne.io.Raw.rename_channels function
def reconstruct_mne_channel_name_to_fedele(ch_name):
    """Convert an mne-style bipolar channel name (e.g. "AHL1-AHL2") back to
    the Fedele-style name (e.g. "AHL1-2").

    The alphabetic contact prefix is stripped from the second half so only
    its contact number remains.
    """
    ch_split = ch_name.split("-")
    # Alphabetic contact prefix of the first half, e.g. "AHL" from "AHL1".
    cont_name = re.sub(r'[0-9]+', '', ch_split[0])
    # BUG FIX: the original f-string nested the braces incorrectly
    # (f"{ch_split[0]-{...}}"), which parses the inner braces as a set
    # literal and raises TypeError ("str" - "set") at runtime.
    ch_name_fedele = f"{ch_split[0]}-{ch_split[1].replace(cont_name, '')}"
    return ch_name_fedele

# +
# Convert to convenient data structure (pandas DF).
# Annotation descriptions are encoded as "<label>_<channel>".
column_names = ["onset", "duration", "sample", "label", "channels"]
sfreq = raw.info["sfreq"]
rows = []
for annot in annotations:
    onset = float(annot.get("onset"))
    duration = float(annot.get("duration"))
    sample = onset * sfreq  # onset converted from seconds to samples
    trial_type = annot.get("description").split("_")[0]
    ch_name = annot.get("description").split("_")[1]
    ch_name = reconstruct_channel_name_to_mne(ch_name)
    annot_row = [onset, duration, sample, trial_type, ch_name]
    rows.append(annot_row)
# Build the frame once from the collected records. The original concatenated
# one single-row DataFrame per annotation, which is quadratic in the number
# of annotations; a single constructor call yields the identical frame.
gs_df = pd.DataFrame(rows, columns=column_names)
# -

# for now, lets just look at ripple events:
gs_df_ripple = gs_df[gs_df['label'].str.contains("ripple")]
gs_df_ripple = gs_df_ripple.dropna()
gs_df_ripple.reset_index(drop=True, inplace=True)

# ## 1.2 Detect HFOs
#
# ### 1.2.1 Line Length Detector

# Set Key Word Arguments for the Line Length Detector and generate the class object
kwargs = {
    'filter_band': (80, 250),  # (l_freq, h_freq)
    'threshold': 3,  # Number of st. deviations
    'win_size': 100,  # Sliding window size in samples
    'overlap': 0.25,  # Fraction of window overlap [0, 1]
    'hfo_name': "ripple"
}
ll_detector = LineLengthDetector(**kwargs)

# +
# %%capture
# Detect HFOs in the raw data using the LineLengthDetector method.
# Return the class object with HFOs added
ll_detector = ll_detector.fit(raw)

# Dictionary where keys are channel index and values are a list of tuples in the form of (start_samp, end_samp)
ll_chs_hfo_dict = ll_detector.chs_hfos_
# nCh x nWin ndarray where each value is the line-length of the data window per channel
ll_hfo_event_array = ll_detector.hfo_event_arr_
# Pandas dataframe containing onset, duration, sample trial, and trial type per HFO
ll_hfo_df = ll_detector.df_
# -

# ### 1.2.2 RMS Detector
#
# Set Key Word Arguments for the RMS Detector and generate the class object

kwargs = {
    'filter_band': (80, 250),
    'threshold': 3,
    'win_size': 100,
    'overlap': 0.25,
    'hfo_name': 'ripple',
}
rms_detector = RMSDetector(**kwargs)

# +
# %%capture
# Detect HFOs in the raw data using the RMSDetector method.
rms_detector = rms_detector.fit(raw)

rms_chs_hfo_dict = rms_detector.chs_hfos_
rms_hfo_event_array = rms_detector.hfo_event_arr_
rms_hfo_df = rms_detector.df_
# -

# ## 1.3 Compare Results

# ### 1.3.1 Find matches
# Now that our dataframes are in the same format, we can compare them. We will simply look at the matches for ripples, since that is what the detectors looked for. We will compute, for each detection, the accuracy, precision, true positive rate, false negative rate, and false discovery rate.

def scores_to_df(score_dict):
    """Collect per-detector scores into a tidy DataFrame.

    Parameters
    ----------
    score_dict : dict
        Maps detector name -> iterable of five scores in the order
        (accuracy, true positive rate, precision, false negative rate,
        false discovery rate).

    Returns
    -------
    pandas.DataFrame
        One row per detector. Scores are stored as strings, matching the
        original behaviour.
    """
    columns = ['detector', 'accuracy', 'true positive rate', 'precision',
               'false negative rate', 'false discovery rate']
    # Build all records first and construct the frame once. The original
    # called DataFrame.append in a loop, which was deprecated in pandas 1.4
    # and removed in pandas 2.0 (and was quadratic in the number of rows).
    records = [[detector_name] + [str(score) for score in scores]
               for detector_name, scores in score_dict.items()]
    return pd.DataFrame(records, columns=columns)

# +
# Note: Since we are computing every score at once, we take a shortcut by calling the internal
# function _compute_score_data, which gives the number of true positives, false positives,
# and false negatives.
# There are no true negatives in this dataset

def compute_detection_scores(tp, fp, fn):
    """Turn (true positive, false positive, false negative) counts into
    [accuracy, true positive rate, precision, false negative rate,
    false discovery rate] — the order expected by ``scores_to_df``.

    There are no true negatives in this dataset, so accuracy reduces to
    tp / (tp + fp + fn).
    """
    accuracy = tp / (tp + fp + fn)
    tpr = tp / (tp + fn)
    precision = tp / (tp + fp)
    fnr = fn / (fn + tp)
    fdr = fp / (fp + tp)
    return [accuracy, tpr, precision, fnr, fdr]

scores_dict = {}

# Gold standard vs LineLengthDetector
# (the same five metrics were previously computed inline and copy-pasted
# per detector; the helper above removes the duplication)
tp, fp, fn = _compute_score_data(gs_df, ll_hfo_df, method="match-total")
scores_dict["LineLengthDetector"] = compute_detection_scores(tp, fp, fn)

# Gold standard vs RMSDetector
tp, fp, fn = _compute_score_data(gs_df, rms_hfo_df, method="match-total")
scores_dict["RMSDetector"] = compute_detection_scores(tp, fp, fn)

scores_df = scores_to_df(scores_dict)
scores_df
# -

# # 2 Optimizing the Detectors
# The above detectors did decently well, but the hyperparameters were randomly set. Let's walk through the procedure
# for optimizing the hyperparameters based using GridSearch Cross Validation on the LineLengthDetector

# ## 2.1 Set up the data
# SKlearn requires some changes to the input data and true labels in order for the procedure to function. We
# provide some helper functions to assist in the data conversion

raw_df, y = make_Xy_sklearn(raw, gs_df_ripple)

# ## 2.2 Set up the GridSearchCV function
# We will be testing three possible threshold values and three possible win_size values, for a total of 9 tests.
# Accuracy will be the only test used for speed purposes, but multiple scoring functions can be passed in at once.
# Scoring callable for the grid search (wrapped by make_scorer below).
scorer = accuracy

# 3 threshold values x 3 window sizes = 9 candidate configurations.
parameters = {'threshold': [1, 2, 3], 'win_size': [50, 100, 250]}
kwargs = {
    'filter_band': (80, 250),
    'overlap': 0.25,
    'hfo_name': 'ripple',
}
detector = LineLengthDetector(**kwargs)
scorer = make_scorer(scorer)
# NOTE(review): DisabledCV appears to bypass cross-validation splitting so
# each candidate is fit/scored on the full data — confirm against mne_hfo docs.
cv = DisabledCV()
gs = GridSearchCV(detector, param_grid=parameters, scoring=scorer, cv=cv, verbose=True)

# ## 2.3 Perform the Search and Print Output

# %%time
# %%capture
gs.fit(raw_df, y, groups=None)

# Nicely display the output: one row per parameter combination with its accuracy.
pd.concat([pd.DataFrame(gs.cv_results_["params"]),pd.DataFrame(gs.cv_results_["mean_test_score"], columns=["Accuracy"])],axis=1)
docs/tutorials/compute_hfos_on_fedele_dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook shows the use of the Nashpy library for playing the Rock Paper Scissors game. Nashpy is a Python library used for the computation of equilibria in 2 player strategic form games. # # Nashpy: https://nashpy.readthedocs.io/en/stable/index.html # ## The game # Rock Paper Scissors is a common game where two players choose one of 3 options (in game theory we call these strategies): # # * Rock # * Paper # * Scissors # # The winner is decided according to the following: # # * Rock crushes scissors # * Paper covers Rock # * Scissors cuts paper # # This can be represented with a 3x3 matrix: # # | Throw | Rock | Paper | Scissors | # |---|---|---|---| # | Rock | 0 | -1 | 1 | # | Paper | 1 | 0 | -1 | # | Scissors | -1 | 1 | 0 | # # The matrix A<sub>ij</sub> shows the utility to the player controlling the rows when they play the *i* th row and their opponent (the column player) plays the *j* th column. For example, if the row player played Scissors (the 3rd strategy) and the column player played Paper (the 2nd strategy) then the row player gets: A<sub>32</sub>=1 because Scissors cuts Paper. 
# ## Imports import nashpy as nash import numpy as np # ## Find the equilibrium for a simple game A = np.array([[0, -1, 1], [1, 0, -1], [-1, 1, 0]]) rps = nash.Game(A) rps # ## Calculate the utility of a pair of strategies sigma_r = [0, 0, 1] # row player always plays the third strategy sigma_c = [0, 1, 0] # column player always plays the second stragegy rps[sigma_r, sigma_c] # The row player's strategy has more utility # Column player randomly plays rock (1) and paper (1) sigma_c = [1/2, 1/2, 0] rps[sigma_r, sigma_c] # Row player changes stragegy and "randomly" plays paper (2) and scissors (3) sigma_r = [0, 1/2, 1/2] rps[sigma_r, sigma_c] # ## Compute Nash equilibria # # Nash equilibria is (in two player games) a pair of strategies at which both players do not have an incentive to deviate. Nash equilibria is an important concept as it allows to gain an initial understanding of emergent behaviour in complex systems. eqs = rps.support_enumeration() list(eqs)
game_theory/Rock Paper Scissors (Nashpy).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/SanghunOh/test_machinelearning/blob/main/wholesale_decisiontreeclassification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="RoPgEUvqeJAg" outputId="0f2f84ac-5fff-4824-8715-52d50558a771" # !ls # + colab={"base_uri": "https://localhost:8080/"} id="3NsQxvznfXyB" outputId="5003006e-714e-43d5-c63c-45d60a53c512" # !ls -l # + colab={"base_uri": "https://localhost:8080/"} id="Iec7YevSfgyr" outputId="8476fc0d-db43-4a10-8a5f-3b95fc8fb4e1" # !pwd # + colab={"base_uri": "https://localhost:8080/"} id="ylhbaMdZfxPU" outputId="daf83d21-a8a7-47f6-a820-08c2c0c23cc4" # !ls -l ./sample_data # + colab={"base_uri": "https://localhost:8080/"} id="TOa_cbg1gugU" outputId="4fbbb358-a9a9-4cec-a056-13d523ad8bd3" # !ls -l ./wholesale.xls # + colab={"base_uri": "https://localhost:8080/", "height": 143} id="judvTZvcin6i" outputId="0bc8e974-c59f-4f3e-ad3f-80ca44c51235" import pandas as pd df = pd.read_excel('./wholesale.xls') df.head(3) # + colab={"base_uri": "https://localhost:8080/"} id="QRAGfGKH3JJ-" outputId="20478a28-6c7d-4f64-a5fb-c0c4f11a87ee" Y = df['label'] X = df.iloc[:,1:9] Y.shape, X.shape # + colab={"base_uri": "https://localhost:8080/"} id="7R8fVqE-9SOE" outputId="5a619004-fa09-4950-9e8d-472f6eb0dd48" from sklearn.tree import DecisionTreeClassifier dtree = DecisionTreeClassifier() dtree.fit(X,Y) # + colab={"base_uri": "https://localhost:8080/"} id="mvOxODuK9-SL" outputId="c9cf2bb6-785d-44d0-d82e-d1969a9df1d5" dtree.score(X,Y) # + id="tS3yUVg5-Fnr"
wholesale_decisiontreeclassification.ipynb