code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Scaling Examples # # ## Prerequisites # # * A kubernetes cluster with kubectl configured # * curl # * grpcurl # * pygmentize # # # ## Setup Seldon Core # # Use the setup notebook to [Setup Cluster](seldon_core_setup.ipynb) to setup Seldon Core with an ingress - either Ambassador or Istio. # # Then port-forward to that ingress on localhost:8003 in a separate terminal either with: # # * Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080` # * Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80` # !kubectl create namespace seldon # !kubectl config set-context $(kubectl config current-context) --namespace=seldon # ## Replica Settings # # A deployment that illustrate the settings for # # * `.spec.replicas` # * `.spec.predictors[].replicas` # * `.spec.predictors[].componentSpecs[].replicas` # # !pygmentize resources/model_replicas.yaml # !kubectl create -f resources/model_replicas.yaml # !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=test-replicas -o jsonpath='{.items[0].metadata.name}') # !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=test-replicas -o jsonpath='{.items[1].metadata.name}') # !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=test-replicas -o jsonpath='{.items[2].metadata.name}') # Check each container is running in a deployment with correct number of replicas # classifierReplicas=!kubectl get deploy test-replicas-example-0-classifier -o jsonpath='{.status.replicas}' classifierReplicas = int(classifierReplicas[0]) assert(classifierReplicas==2) # 
classifier2Replicas=!kubectl get deploy test-replicas-example-1-classifier2 -o jsonpath='{.status.replicas}' classifier2Replicas = int(classifier2Replicas[0]) assert(classifier2Replicas==3) # classifier3Replicas=!kubectl get deploy test-replicas-example2-0-classifier3 -o jsonpath='{.status.replicas}' classifier3Replicas = int(classifier3Replicas[0]) assert(classifier3Replicas==1) # !curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \ # -X POST http://localhost:8003/seldon/seldon/test-replicas/api/v1.0/predictions \ # -H "Content-Type: application/json" # !kubectl delete -f resources/model_replicas.yaml # ## Scale SeldonDeployment # !pygmentize resources/model_scale.yaml # !kubectl create -f resources/model_scale.yaml # !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=seldon-scale -o jsonpath='{.items[0].metadata.name}') # replicas=!kubectl get deploy seldon-scale-example-0-classifier -o jsonpath='{.status.replicas}' replicas = int(replicas[0]) assert(replicas==1) # !kubectl scale --replicas=2 sdep/seldon-scale # !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=seldon-scale -o jsonpath='{.items[0].metadata.name}') # replicas=!kubectl get deploy seldon-scale-example-0-classifier -o jsonpath='{.status.replicas}' replicas = int(replicas[0]) assert(replicas==2) # !curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \ # -X POST http://localhost:8003/seldon/seldon/seldon-scale/api/v1.0/predictions \ # -H "Content-Type: application/json" # !kubectl delete -f resources/model_scale.yaml
notebooks/scale.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nteract={"transient": {"deleting": false}} # The complete list of available packages can be found at: # https://aka.ms/azsdk/python/all # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1642542071268} pip install azure-search-documents # + gather={"logged": 1642605965319} import os from azure.core.credentials import AzureKeyCredential from azure.search.documents import SearchClient index_name = "margiedocs" # Get the service endpoint and API key from the environment endpoint = "https://maggietravelqa-astgra6nyukhoda.search.windows.net" key = "<KEY>" # Create a client credential = AzureKeyCredential(key) client = SearchClient(endpoint=endpoint, index_name=index_name, credential=credential) results = client.search(search_text="London + \"Buckingham Palace\"", include_total_count=True) print ('Total Documents Matching Query:', results.get_count()) for result in results: print("{}: {})".format(result["url"], result["id"])) # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
AzSearch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Autoeconders with tensorflow # In this Tutorial we will implement various autoencoder architectures on our beloved (Fashion) MNIST data. # + colab={} colab_type="code" id="gTS_QOUwOiGu" import numpy as np import tensorflow as tf # import cv as cv2 # + colab={} colab_type="code" id="vG7XLJ0VOnG2" (x_train, train_label), (x_test, test_label) = tf.keras.datasets.mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:]))) x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:]))) # - # # MLP Autoencoders # + colab={} colab_type="code" id="kKaHnGGi5mG7" from keras.layers import Input, Dense from keras.models import Model input_img = Input(shape=(784,)) encoded = Dense(128, activation='relu')(input_img) encoded = Dense(64, activation='relu')(encoded) encoded = Dense(32, activation='relu')(encoded) decoded = Dense(64, activation='relu')(encoded) decoded = Dense(128, activation='relu')(decoded) decoded = Dense(784, activation='sigmoid')(decoded) # + colab={} colab_type="code" id="APpX5ZWV7ukP" autoencoder = Model(input_img, decoded) # + colab={"base_uri": "https://localhost:8080/", "height": 385} colab_type="code" id="OcPAKCACvG65" outputId="067c948f-8e71-4936-f2a8-8eccd2c18788" autoencoder.compile(optimizer='adam', loss='binary_crossentropy') autoencoder.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 406} colab_type="code" id="EEuhUfKaxi6m" outputId="555c20ef-17ae-4fb1-ad66-aa5398280b4e" autoencoder.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True, validation_data=(x_test, x_test)) # + [markdown] colab_type="text" id="QsK7719mW8Zi" # ## Filter visualization # + colab={"base_uri": 
"https://localhost:8080/", "height": 451} colab_type="code" id="pU3Sg_FJmrrn" outputId="ef0ab7f9-eff5-48d4-810d-45c13f668f35" import matplotlib.pyplot as plt from PIL import * weights = autoencoder.get_weights() encodedfeature = np.reshape(weights[3], (8, 8)) plt.title('Original Sample Data') plt.imshow(x_test[1].reshape(28,28)) fig=plt.figure(figsize=(10, 5)) fig.add_subplot(2, 1, 1) plt.title('Encoded Weights') plt.imshow(weights[3].reshape(8, 8), cmap='gray') c = cv2.filter2D(x_test[1].reshape(28, 28), -1, encodedfeature) fig.add_subplot(2, 2, 1) plt.title('Weights over original image') plt.imshow(c, cmap='gray') plt.show() # + colab={} colab_type="code" id="PlYfSuPO7GOn" decoded_imgs = autoencoder.predict(x_test) # + colab={"base_uri": "https://localhost:8080/", "height": 216} colab_type="code" id="GG4A6EAlt09u" outputId="560cab55-48a6-46de-a286-b67dfa2210f6" import matplotlib.pyplot as plt n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() # + [markdown] colab_type="text" id="QTRIHSlOv_AN" # # Convolutional Autoencoders # + colab={"base_uri": "https://localhost:8080/", "height": 619} colab_type="code" id="snSB8am-9OL8" outputId="12f750b9-76ff-4a92-b73a-6814d8a72678" from keras.layers import * from keras.models import Model # from keras import backend as K input_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img) x = MaxPooling2D((2, 2), padding='same')(x) x = Conv2D(8, (3, 3), activation='relu', padding='same')(x) x = MaxPooling2D((2, 2), padding='same')(x) x = 
Conv2D(8, (3, 3), activation='relu', padding='same')(x) encoded = MaxPooling2D((2, 2), padding='same')(x) # at this point the representation is (4, 4, 8) i.e. 128-dimensional x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded) x = UpSampling2D((2, 2))(x) x = Conv2D(8, (3, 3), activation='relu', padding='same')(x) x = UpSampling2D((2, 2))(x) x = Conv2D(16, (3, 3), activation='relu')(x) x = UpSampling2D((2, 2))(x) decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x) Conv_autoencoder = Model(input_img, decoded) Conv_autoencoder.compile(optimizer='adam', loss='binary_crossentropy') Conv_autoencoder.summary() # + colab={} colab_type="code" id="AJBAKk32-GxG" (x_train, train_label), (x_test, test_label) = tf.keras.datasets.mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format # - # Run the cells below to see how the output of the model without training # + colab={"base_uri": "https://localhost:8080/", "height": 406} colab_type="code" id="C77oLgcg9nno" outputId="0e179b44-f1dc-4e1d-f05f-f7954fcfa501" Conv_autoencoder.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True, validation_data=(x_test, x_test)) # - decoded_imgs = Conv_autoencoder.predict(x_test) # + colab={"base_uri": "https://localhost:8080/", "height": 248} colab_type="code" id="OLYp-FEF9oPV" outputId="5fb386f5-6a59-479f-c738-3ec7096d0208" import matplotlib.pyplot as plt n = 10 # how many digits we will display plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + 1 + n) 
plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() # + colab={} colab_type="code" id="dj81mcoCcFUB" Conv_autoencoder.save('my_model1') # + colab={} colab_type="code" id="XFyzRmKbeOk1" import keras model = keras.models.load_model('my_model') # + [markdown] colab_type="text" id="ZMXtxNdkwMEt" # # Unsupervised Pretraining # + colab={"base_uri": "https://localhost:8080/", "height": 619} colab_type="code" id="aON7xS3LwLBV" outputId="25d7074e-367e-4b79-a5e2-c60441ac3d86" from keras.layers import * from keras.models import * model = keras.models.load_model('my_model') #Remove Decoder [model.layers.pop() for i in range(7, 15)] model.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 721} colab_type="code" id="qLfUKRC_S7kf" outputId="1e93fdfb-5ce4-4e0e-c458-c7adc6a43997" #Freeze Model model.trainables= True #Make model Sequential model = keras.Sequential([model]) model.add(Flatten()) model.add(Dense(units=10, activation='softmax')) # + colab={"base_uri": "https://localhost:8080/", "height": 184} colab_type="code" id="i_fCLGgIgAQm" outputId="57d30256-c001-4def-cb10-c863f50ead8c" model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary() # - # # Training Model with Random 100 samples # + (x_train, train_label), (x_test, test_label) = tf.keras.datasets.mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. 
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) idx = np.random.randint(6000, size=100) train_label = np.take(train_label, idx) x_train = np.take(x_train, idx, axis=0) # + colab={"base_uri": "https://localhost:8080/", "height": 486} colab_type="code" id="56rWlkwcBbSK" outputId="8e32a393-47ef-420a-8a33-c083a288841a" model.fit(x_train, train_label, epochs=50, batch_size=20, shuffle=True, validation_data=(x_test, test_label)) # + # Define Sequential model model = keras.Sequential( [ Conv2D(16, (3, 3), activation='relu', padding='same'), MaxPooling2D((2, 2), padding='same'), Conv2D(8, (3, 3), activation='relu', padding='same'), MaxPooling2D((2, 2), padding='same'), Conv2D(8, (3, 3), activation='relu', padding='same'), Flatten(), Dense(units=10, activation='softmax') ] ) x = model(Input(shape=(28, 28, 1))) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary() # + model.fit(x_train, train_label, epochs=50, batch_size=20, shuffle=True, validation_data=(x_test, test_label)) # - # 1. Transfer learning with same model.trainables = false, validation accurracy = 0.7466 # 2. Transfer learning with same model.trainables = True, validation accurracy = 0.7396 # 3. Same Model without transfer learning , validation accuracy = 0.7700 # Try a more interesting dataset?
Autoencoders.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: mydsp # language: python # name: mydsp # --- # [<NAME>](https://orcid.org/0000-0001-7225-9992), # Professorship Signal Theory and Digital Signal Processing, # [Institute of Communications Engineering (INT)](https://www.int.uni-rostock.de/), # Faculty of Computer Science and Electrical Engineering (IEF), # [University of Rostock, Germany](https://www.uni-rostock.de/en/) # # # Tutorial Signals and Systems (Signal- und Systemtheorie) # # Summer Semester 2021 (Bachelor Course #24015) # # - lecture: https://github.com/spatialaudio/signals-and-systems-lecture # - tutorial: https://github.com/spatialaudio/signals-and-systems-exercises # # WIP... # The project is currently under heavy development while adding new material for the summer semester 2021 # # Feel free to contact lecturer [<EMAIL>](https://orcid.org/0000-0002-3010-0294) # # ## DFT Task 11.2 import matplotlib.pyplot as plt import numpy as np from numpy.fft import fft X = fft(np.exp(+1j*2*np.pi/8 * 2.5 * np.arange(8))) print(np.abs(X)) print(np.angle(X)/np.pi*16) # + N = 8 k = np.arange(N) mu = np.arange(N) x = np.exp(+1j*2*np.pi/N * 2.5 * k) X = fft(x) Xabs = np.abs(X) Xangle = np.angle(X)/np.pi*(2*N) # + plt.figure(figsize=(6, 4)) plt.subplot(2, 1, 1) for tmp in np.arange(-1, 2, 1): plt.stem(mu+tmp*N, Xabs, use_line_collection=True, linefmt='C0:', markerfmt='C0o', basefmt='C0:') plt.stem(mu, Xabs, use_line_collection=True, linefmt='C1:', markerfmt='C1o', basefmt='C1:') plt.xticks(np.arange(-N, 2*N, 2)) plt.xlim(-N, 2*N-1) plt.yticks(np.arange(6)) # plt.xlabel(r'$\mu$') plt.ylabel(r'$|X[\mu]|$') plt.grid(True) plt.subplot(2, 1, 2) for tmp in np.arange(-1, 2, 1): plt.stem(mu+tmp*N, Xangle, use_line_collection=True, linefmt='C0:', markerfmt='C0o', basefmt='C0:') plt.stem(mu, Xangle, use_line_collection=True, linefmt='C1:', markerfmt='C1o', 
basefmt='C1:') plt.xticks(np.arange(-N, 2*N, 2)) plt.xlim(-N, 2*N-1) plt.yticks(np.arange(-N+1, N, 2)) plt.xlabel(r'$\mu$') plt.ylabel(r'$\angle X[\mu] \cdot \frac{16}{\pi}$') plt.grid(True) plt.savefig('DFT_Spectrum_0C30EB5E76.pdf') # + kp = np.arange(-2*N, 3*N) xp = np.exp(+1j*2*np.pi/N * 2.5 * kp) plt.figure(figsize=(6, 4)) plt.subplot(2, 1, 1) plt.plot(kp, np.real(xp), 'C7o-', ms=1) for tmp in np.arange(-2, 3, 1): plt.stem(k+tmp*N, np.real(x), use_line_collection=True, linefmt='C0:', markerfmt='C0o', basefmt='C0:') plt.stem(k, np.real(x), use_line_collection=True, linefmt='C1:', markerfmt='C1o', basefmt='C1:') plt.xticks(np.arange(-2*N, 3*N, 4)) plt.xlim(-2*N, 3*N-1) plt.yticks(np.arange(-1, 1+0.25, 0.25)) plt.ylim(-1.25, 1.25) plt.ylabel(r'$\Re\{x[k]\}$') plt.grid(True) plt.subplot(2, 1, 2) plt.plot(kp, np.imag(xp), 'C7o-', ms=1) for tmp in np.arange(-2, 3, 1): plt.stem(k+tmp*N, np.imag(x), use_line_collection=True, linefmt='C0:', markerfmt='C0o', basefmt='C0:') plt.stem(k, np.imag(x), use_line_collection=True, linefmt='C1:', markerfmt='C1o', basefmt='C1:') plt.xticks(np.arange(-2*N, 3*N, 4)) plt.xlim(-2*N, 3*N-1) plt.yticks(np.arange(-1, 1+0.25, 0.25)) plt.ylim(-1.25, 1.25) plt.xlabel(r'$k$') plt.ylabel(r'$\Im\{x[k]\}$') plt.grid(True) plt.savefig('Signal_0C30EB5E76.pdf') # - # ## Copyright # # This tutorial is provided as Open Educational Resource (OER), to be found at # https://github.com/spatialaudio/signals-and-systems-exercises # accompanying the OER lecture # https://github.com/spatialaudio/signals-and-systems-lecture. # Both are licensed under a) the Creative Commons Attribution 4.0 International # License for text and graphics and b) the MIT License for source code. # Please attribute material from the tutorial as *<NAME>, # Continuous- and Discrete-Time Signals and Systems - A Tutorial Featuring # Computational Examples, University of Rostock* with # ``main file, github URL, commit number and/or version tag, year``.
dft/dft_complex_signal_0C30EB5E76.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # DeepVision: Exploiting computer vision techniques to minimize CPU Utilization # # [![](https://img.shields.io/github/license/sourcerer-io/hall-of-fame.svg?colorB=ff0000)](https://github.com/akshaybahadur21/Autopilot/blob/master/LICENSE.txt) # # [![](https://img.shields.io/badge/Akshay-Bahadur-brightgreen.svg?colorB=ff0000)](https://akshaybahadur.com) # # This python notebook is for explanation of the core concepts used and the models developed for this webinar. # # ### Acknowledgement # # I would like to extend my gratitude towards Open Data Science Conference, Boston team for giving me this opportunity to showcase my findings especially Alena, Vimal and Rafael. # # ### <NAME> # # - Software engineer working with Symantec. # - ML Researcher # # #### Contact # # - [Portfolio](https://www.akshaybahadur.com/) # - [LinkedIN](https://www.linkedin.com/in/akshaybahadur21/) # - [GitHub](https://github.com/akshaybahadur21) # # ### Agenda # # # # # - Introduction # - Tania's Story # - MNIST # - Autopilot # - Malaria Detection # # # # # ### Tania's Story from IPython.display import YouTubeVideo YouTubeVideo('Oc_QMQ4QHcw') # ## MNIST Digit Recognition # %%HTML <iframe width="700" height="315" src="https://www.youtube.com/embed/MRNODXrYK3Q"></iframe> from keras import Sequential from keras.callbacks import ModelCheckpoint from keras.datasets import mnist import numpy as np import matplotlib.pyplot as plt from keras.layers import Flatten, Dense, Dropout from keras.utils import np_utils, print_summary from keras.models import load_model (x_train, y_train), (x_test, y_test) = mnist.load_data() def showData(x, label): pixels = np.array(x, dtype='uint8') pixels = pixels.reshape((28, 28)) plt.title('Label is {label}'.format(label=label)) plt.imshow(pixels, 
cmap='gray') plt.show() showData(x_train[0], y_train[0]) showData(x_train[24], y_train[24]) print(x_train[0].shape) print(x_train[0]) # ### Normalization # # Normalization is a technique often applied as part of data preparation for machine learning. The goal of normalization is to change the values of numeric columns in the dataset to a common scale, without distorting differences in the ranges of values # x_train_norm= x_train / 255. x_test_norm=x_test / 255. print(x_train_norm[0].shape) print(x_train_norm[0]) x_train_norm_mean_zero= x_train / 127.5 - 1. x_test_norm_mean_zero=x_test / 127.5 - 1. print(x_train_norm_mean_zero[0].shape) print(x_train_norm_mean_zero[0]) def preprocess_labels(y): labels = np_utils.to_categorical(y) return labels y_train = preprocess_labels(y_train) y_test = preprocess_labels(y_test) # + x_train = x_train.reshape(x_train.shape[0], 28, 28, 1) x_test = x_test.reshape(x_test.shape[0], 28, 28, 1) x_train_norm = x_train_norm.reshape(x_train_norm.shape[0], 28, 28, 1) x_test_norm = x_test_norm.reshape(x_test_norm.shape[0], 28, 28, 1) x_train_norm_mean_zero = x_train_norm_mean_zero.reshape(x_train_norm_mean_zero.shape[0], 28, 28, 1) x_test_norm_mean_zero = x_test_norm_mean_zero.reshape(x_test_norm_mean_zero.shape[0], 28, 28, 1) # - print("number of training examples = " + str(x_train.shape[0])) print("number of test examples = " + str(x_test.shape[0])) print("X_train shape: " + str(x_train.shape)) print("Y_train shape: " + str(y_train.shape)) # + def keras_model(image_x, image_y): num_of_classes = 10 model = Sequential() model.add(Flatten(input_shape=(image_x, image_y, 1))) model.add(Dense(512, activation='relu')) model.add(Dropout(0.6)) model.add(Dense(128, activation='relu')) model.add(Dropout(0.6)) model.add(Dense(num_of_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) filepath = "mnist_odsc.h5" checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, 
save_best_only=True, mode='max') callbacks_list = [checkpoint] return model, callbacks_list model, callbacks_list = keras_model(28, 28) print_summary(model) # - model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=1, batch_size=64, callbacks=callbacks_list) model.fit(x_train_norm, y_train, validation_data=(x_test_norm, y_test), epochs=1, batch_size=64, callbacks=callbacks_list) model.fit(x_train_norm_mean_zero, y_train, validation_data=(x_test_norm_mean_zero, y_test), epochs=1, batch_size=64, callbacks=callbacks_list) # ## Autopilot # # This code helps in getting the steering angle of self driving car. The inspiraion is taken from [Udacity Self driving car](https://github.com/udacity/CarND-Behavioral-Cloning-P3) module as well [End to End Learning for Self-Driving Cars](https://devblogs.nvidia.com/deep-learning-self-driving-cars/) module from NVIDIA # # The End to End Learning for Self-Driving Cars research paper can be found at (https://arxiv.org/abs/1604.07316) # This repository uses convnets to predict steering angle according to the road. # # 1) Autopilot Version 1 # 2) [Autopilot Version 2](https://github.com/akshaybahadur21/Autopilot/tree/master/Autopilot_V2) # # # ### Code Requirements # You can install Conda for python which resolves all the dependencies for machine learning. # # ### Description # An autonomous car (also known as a driverless car, self-driving car, and robotic car) is a vehicle that is capable of sensing its environment and navigating without human input. Autonomous cars combine a variety of techniques to perceive their surroundings, including radar, laser light, GPS, odometry, and computer vision. 
Advanced control systems interpret sensory information to identify appropriate navigation paths, as well as obstacles and relevant signage # # ## Autopilot V1 (Udacity Dataset based on Udacity Simulator) # # ### Dataset # You can get the dataset at [here](https://d17h27t6h515a5.cloudfront.net/topher/2016/December/584f6edd_data/data.zip) # # ## Autopilot V2 (NVIDIA Dataset based on real world) # # ### Dataset # Download the dataset at [here](https://github.com/SullyChen/driving-datasets) and extract into the repository folder # # # ### References: # # - <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. [End to End Learning for Self-Driving Cars](https://arxiv.org/abs/1604.07316) # - [Behavioral Cloning Project](https://github.com/udacity/CarND-Behavioral-Cloning-P3) # - This implementation also took a lot of inspiration from the Sully Chen github repository: https://github.com/SullyChen/Autopilot-TensorFlow # # # # # # # %%HTML <iframe width="700" height="315" src="https://www.youtube.com/embed/waLIPYy1Rdk"></iframe> from __future__ import division import cv2 import os import numpy as np import scipy import pickle import matplotlib.pyplot as plt from itertools import islice DATA_FOLDER = 'driving_dataset' TRAIN_FILE = os.path.join(DATA_FOLDER, 'data.txt') def showData(x, label): img = plt.imread(x) pixels = np.array(img, dtype='uint8') pixels = pixels.reshape((256, 455,3)) plt.title('Label is {label}'.format(label=label)) plt.imshow(pixels, cmap='gray') plt.show() showData("F:\\projects\\SIT_Sample\\AutoPilot\\driving_dataset\\500.jpg",1) showData("F:\\projects\\SIT_Sample\\AutoPilot\\driving_dataset\\595.jpg",1) def preprocess(img): resized = cv2.resize((cv2.cvtColor(img, cv2.COLOR_RGB2HSV))[:, :, 1], (100, 100)) return resized def showData_HSV(x, label): img = plt.imread(x) img=preprocess(img) pixels = np.array(img, dtype='uint8') pixels = pixels.reshape((100, 100)) plt.title('Label is 
{label}'.format(label=label)) plt.imshow(pixels, cmap='gray') plt.show() showData_HSV("F:\\projects\\SIT_Sample\\AutoPilot\\driving_dataset\\500.jpg",1) showData_HSV("F:\\projects\\SIT_Sample\\AutoPilot\\driving_dataset\\595.jpg",1) # + #Build the model import numpy as np from keras.layers import Dense, Activation, Flatten, Conv2D, Lambda from keras.layers import MaxPooling2D, Dropout from keras.utils import print_summary from keras.models import Sequential from keras.callbacks import ModelCheckpoint import keras.backend as K import pickle from sklearn.model_selection import train_test_split from sklearn.utils import shuffle # - # This excerpt of code collects images and the steering angle, does pre processing and stores in a pickle file def return_data(): X = [] y = [] features = [] with open(TRAIN_FILE) as fp: for line in islice(fp, LIMIT): path, angle = line.strip().split() full_path = os.path.join(DATA_FOLDER, path) X.append(full_path) # using angles from -pi to pi to avoid rescaling the atan in the network y.append(float(angle) * scipy.pi / 180) for i in range(len(X)): img = plt.imread(X[i]) features.append(preprocess(img)) features = np.array(features).astype('float32') labels = np.array(y).astype('float32') with open("features", "wb") as f: pickle.dump(features, f, protocol=4) with open("labels", "wb") as f: pickle.dump(labels, f, protocol=4) def loadFromPickle(): with open("features", "rb") as f: features = np.array(pickle.load(f)) with open("labels", "rb") as f: labels = np.array(pickle.load(f)) return features, labels features, labels = loadFromPickle() features, labels = shuffle(features, labels) train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=0, test_size=0.3) train_x = train_x.reshape(train_x.shape[0], 100, 100, 1) test_x = test_x.reshape(test_x.shape[0], 100, 100, 1) print("number of training examples = " + str(train_x.shape[0])) print("number of test examples = " + str(test_x.shape[0])) print("X_train shape: " + 
str(train_x.shape)) def showLoadedData(x, label): pixels = np.array(x, dtype='uint8') #pixels = pixels.reshape((100, 100)) plt.title('Label is {label}'.format(label=label)) plt.imshow(pixels, cmap='gray') plt.show() showLoadedData(train_x[0],train_y[0]) # + from keras.layers import BatchNormalization,Input from keras.layers.convolutional import Convolution2D import tensorflow as tf from keras.models import Model def atan(x): return tf.atan(x) # + #Lets look at the model for the original research paper def paper_model(): inputs = Input(shape=(66, 200, 3)) conv_1 = Convolution2D(24, 5, 5, activation='relu', name='conv_1', subsample=(2, 2))(inputs) conv_2 = Convolution2D(36, 5, 5, activation='relu', name='conv_2', subsample=(2, 2))(conv_1) conv_3 = Convolution2D(48, 5, 5, activation='relu', name='conv_3', subsample=(2, 2))(conv_2) conv_3 = Dropout(.5)(conv_3) conv_4 = Convolution2D(64, 3, 3, activation='relu', name='conv_4', subsample=(1, 1))(conv_3) conv_5 = Convolution2D(64, 3, 3, activation='relu', name='conv_5', subsample=(1, 1))(conv_4) flat = Flatten()(conv_5) dense_1 = Dense(1164)(flat) dense_1 = Dropout(.5)(flat) dense_2 = Dense(100, activation='relu')(dense_1) dense_2 = Dropout(.5)(flat) dense_3 = Dense(50, activation='relu')(dense_2) dense_3 = Dropout(.5)(flat) dense_4 = Dense(10, activation='relu')(dense_3) dense_4 = Dropout(.5)(flat) final = Dense(1, activation=atan)(dense_4) model = Model(input=inputs, output=final) return model # - model=paper_model() print_summary(model) def keras_model(image_x, image_y): model = Sequential() model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(image_x, image_y, 1))) model.add(Conv2D(16, (5,5), padding='same')) model.add(Activation('relu')) model.add(MaxPooling2D((5,5), padding='valid')) model.add(Conv2D(32, (5,5), padding='same')) model.add(Activation('relu')) model.add(MaxPooling2D((5,5), padding='valid')) model.add(Flatten()) model.add(Dropout(0.5)) model.add(Dense(128)) model.add(Dense(10)) model.add(Dense(1)) 
model.compile(optimizer='adam', loss="mse") filepath = "Autopilot.h5" checkpoint = ModelCheckpoint(filepath, verbose=1, save_best_only=True) callbacks_list = [checkpoint] return model, callbacks_list model, callbacks_list = keras_model(100, 100) print_summary(model) model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=1, batch_size=32, callbacks=callbacks_list) # ## Malaria Detection # + import matplotlib.pyplot as plt from PIL import Image import numpy as np from matplotlib.pyplot import imshow from scipy.misc import imread import cv2 def showMalariaData(x): image = Image.open(x) plt.imshow(np.asarray(image), cmap='gray') print(image.size) plt.show() showMalariaData("F:\\projects\\\Malaria_Detection\\cell_images\\Parasitized\\C33P1thinF_IMG_20150619_114756a_cell_179.png") # - showMalariaData("F:\\projects\\\Malaria_Detection\\cell_images\\Parasitized\\C39P4thinF_original_IMG_20150622_105335_cell_6.png") # Let's look at normal cells showMalariaData("F:\\projects\\\Malaria_Detection\\cell_images\\Uninfected\\C1_thinF_IMG_20150604_104722_cell_60.png") showMalariaData("F:\\projects\\\Malaria_Detection\\cell_images\\Uninfected\\C122P83ThinF_IMG_20151002_145014_cell_158.png") # + # Let's add some filters and see if we can remove some noise def showMalariaFiltered_HLS_Data(x): image = Image.open(x) image=np.asarray(image) hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HLS) plt.imshow(hsv, cmap='gray') print(hsv.size) plt.show() showMalariaFiltered_HLS_Data("F:\\projects\\\Malaria_Detection\\cell_images\\Parasitized\\C33P1thinF_IMG_20150619_114756a_cell_179.png") # - showMalariaFiltered_HLS_Data("F:\\projects\\\Malaria_Detection\\cell_images\\Uninfected\\C1_thinF_IMG_20150604_104722_cell_60.png") def showMalariaFiltered_HSV_Data(x): image = Image.open(x) image=np.asarray(image) hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) plt.imshow(hsv, cmap='gray') print(hsv.size) plt.show() 
showMalariaFiltered_HSV_Data("F:\\projects\\\Malaria_Detection\\cell_images\\Parasitized\\C33P1thinF_IMG_20150619_114756a_cell_179.png") showMalariaFiltered_HSV_Data("F:\\projects\\\Malaria_Detection\\cell_images\\Uninfected\\C1_thinF_IMG_20150604_104722_cell_60.png") def showMalariaFiltered_LAB_Data(x): image = Image.open(x) image=np.asarray(image) hsv = cv2.cvtColor(image, cv2.COLOR_BGR2LAB) plt.imshow(hsv, cmap='gray') print(hsv.size) plt.show() showMalariaFiltered_LAB_Data("F:\\projects\\\Malaria_Detection\\cell_images\\Parasitized\\C33P1thinF_IMG_20150619_114756a_cell_179.png") showMalariaFiltered_LAB_Data("F:\\projects\\\Malaria_Detection\\cell_images\\Uninfected\\C1_thinF_IMG_20150604_104722_cell_60.png")
DeepVision.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #!pip install FDB

import fdb
import os.path


def path_base(base_name, folder):
    """Build the absolute path of *base_name* inside the sibling *folder*.

    The notebook runs from a directory named 'notebook'; data files live in a
    sibling directory, so the path is derived by substituting 'notebook' with
    *folder* in the current working directory.

    Args:
        base_name (str): file name, e.g. 'meu_arquivo.FDB' or 'Pedidos.csv'.
        folder (str): sibling directory name, e.g. 'data'.

    Returns:
        str: Windows-style absolute path to the file.
    """
    # os.path.join with a single argument was a no-op; abspath alone suffices.
    current_dir = os.path.abspath(os.getcwd())
    print(current_dir)
    # NOTE(review): plain substring replace — assumes 'notebook' occurs
    # exactly once in the path; verify if the project is ever relocated.
    data_dir = current_dir.replace('notebook', folder)
    print(data_dir)
    data_base = data_dir + '\\' + base_name
    print(data_base)
    return data_base


# ### Connection

my_dsn = path_base('meu_arquivo.FDB', 'data')  # FILE FDB WITH PATH
my_user = 'SYSDBA'
# TODO(review): load credentials from environment/config; never commit them.
my_password = '<PASSWORD>'

con = fdb.connect(dsn=my_dsn, user=my_user, password=my_password)
cur = con.cursor()

# ### String with SQL command

SQL = '''SELECT column_1 || ';' || column_2 || ';' || column_3
FROM TABLE
WHERE 1=1
ORDER BY 1 DESC'''
print(SQL)

# ### Generating File with SQL fetch

cur.execute(SQL)
# BUG FIX: use a context manager so the CSV handle is closed even if a row
# write raises; the original left the file open on error.
with open(path_base('Pedidos.csv', 'data'), 'w') as arquivo:
    # Each fetched row is a single pre-joined ';'-separated string (see SQL).
    for row in cur.fetchall():
        arquivo.write(row[0] + '\n')
cur.close()
con.close()
notebook/Gerar_Arquivos_csv_from_Firebird.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Repaso Pandas LaLiga
#
# ## Ejercicio 1
#
# 1. Lee, en un DataFrame llamado ``df``, el archivo CSV de la liga que tienes en esta misma carpeta
# 2. Elimina la columna "Unnamed: 0" y muestra las 10 primeras filas

import pandas as pd

df = pd.read_csv("laliga.csv")
# FIX: the exercise asks to drop the spurious index column and show the
# first 10 rows; the previous cell instead filtered one season and sliced
# two arbitrary columns, which answers neither requirement.
df = df.drop(columns=["Unnamed: 0"])
df.head(10)

# ## Ejercicio 2
#
# 1. ¿Cuántas temporadas (season) diferentes hay? ¿Cuáles?
# 2. ¿Cuántas jornadas (round)?
# 3. Obtén todos los equipos que han participado en LaLiga durante las temporadas que se muestran en el DataFrame. ¿Cuántos han sido?

# NOTE(review): this only filters one season; it does not yet answer the
# three questions above (use nunique()/unique() on 'season' and 'round').
df[(df['season']=='2017-18')]

# ## Ejercicio 3
#
# Ahora empieza lo divertido:
# 1. Filtra el df para quedarte con la última temporada disponible.
# 2. Recrea la tabla final de la temporada donde deberás recoger:
#    - Equipo
#    - Goles totales marcados
#    - Goles totales recibidos
#    - Diferencia de goles
#    - Partidos ganados
#    - Partidos perdidos
#    - Partidos empatados
#    - Puntos
#
# 3. Ordena la tabla para recrear las posiciones finales de los equipos
#
# 4.
Asegúrate que el Equipo aparece como una columna y que el índice del DataFrame refleja la posición en la que han quedado # # Leyenda: # - LGAF: Local Goles A Favor # - LGEC: Local Goles En Contra # - VGAF: Visitante Goles A Favor # - VGEC: Visitante Goles En Contra # - GTAF: Goles Totales A Favor # - GTEC: Goles Totales En Contra # # - LG: Local partidos Ganados # - LE: Local partidos Empatados # - LP: Local partidos Perdidos # - VG: Visitante partidos Ganados # - VE: Visitante partidos Empatados # - VP: Visitante partidos Perdidos # - G: Total partidos Ganados # - E: Total partidos Empatados # - P: Total partidos Perdidos # # # - PTS: Puntos # # ## Ejercicio 4 # # Crea una función a partir de lo anterior para que reciba un parámetro season (con valores como los de la columna 'season') y sea capaz de devolver la tabla de clasificación tal cuál haya quedado. # # Utiliza esta función para calcular los descendidos por cada season (los 3 últimos) df1 = df.copy() df1 = df1[df1['season']=='2017-18'] df1 # ## Ejercicio 5 # # Analicemos ahora un poco los datos de la última temporada (puedes ayudarte de la tabla de clasificación generada):: # # 1. ¿Qué equipos han descendido? # 2. ¿Cuáles han entrado en Champions (4 primeros)? ¿Quién ha ganado? # 3. ¿Cuál ha sido el equipo más goleador? ¿Y el más goleado? # 4. ¿Cuál ha sido el equipo que menos goles ha encajado? # 5. ¿Cuáles han sido las medias de goles recibidos por equipo? ¿Y de goles marcados? # 6. ¿Qué equipo le ha sacado más rendimiento a los goles? Es decir, ¿qué equipo ha conseguido más puntos/gol? # 7. ¿Y cuál ha sido el que menos lo ha rentabilizado? # 8. ¿Cuál ha sido la media de anotación de todos los partidos? Saca el valor tanto para todos los resultados como para locales y visitantes # ## Ejercicio 6 # # Analicemos ahora el dataset original (puedes utilizar la función creada anteriormente): # # 1. ¿Qué temporada se han marcado más goles en total? # 2. 
¿Cuál es el equipo que más goles ha marcado en todas estas temporadas? # 3. ¿Cuál es la media anotadora de Mayo? ¿Y de Diciembre? # 4. ¿En qué jornada de todas las temporadas se han marcado más goles? ¿Cuántos? # 5. ¿En qué jornada de todas las temporadas se han marcado menos goles? ¿Cuántos? # 6. ¿Cuál ha sido la season con más goles? ¿Y la que menos? # ## Ejercicio 7 # # Hace unas semanas, <NAME> propuso un novedoso sistema de puntuación de los partidos de fútbol en función del resultado. Crea una nueva versión de la función que has realizado en el ejercicio 4 (o replícalo directamente imitando el ejercicio 3), donde se aplique esta nueva puntuación: # # ![image.png](attachment:b98d8634-2294-4a18-8bb2-93124835889e.png) # # Una vez la tengas construida, utilízala para ver qué equipos hubieran descendido las últimas 3 temporadas.
Repaso/Pandas/Repaso_Pandas_LaLiga.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import psycopg2 import pandas as pd import numpy as np import os import seaborn as sns import matplotlib.pyplot as plt sns.set(style="ticks", color_codes=True) import matplotlib import warnings import yellowbrick as yb from sklearn.preprocessing import OneHotEncoder, LabelEncoder from sklearn.metrics import f1_score from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC, NuSVC, SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import OneHotEncoder, LabelEncoder from sklearn.linear_model import LogisticRegressionCV, LogisticRegression, SGDClassifier from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, AdaBoostClassifier, RandomForestClassifier, GradientBoostingClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import GaussianNB from yellowbrick.classifier import ClassificationReport from yellowbrick.classifier import ConfusionMatrix from sklearn.model_selection import learning_curve matplotlib.style.use('ggplot') warnings.simplefilter(action='ignore', category=FutureWarning) # - conn = psycopg2.connect( host = 'project.cgxhdwn5zb5t.us-east-1.rds.amazonaws.com', port = 5432, user = 'postgres', password = '<PASSWORD>', database = 'postgres') cursor = conn.cursor() DEC2FLOAT = psycopg2.extensions.new_type( psycopg2.extensions.DECIMAL.values, 'DEC2FLOAT', lambda value, curs: float(value) if value is not None else None) psycopg2.extensions.register_type(DEC2FLOAT) # + cursor.execute('Select * from "ahshouseholdclass2"') rows = cursor.fetchall() col_names = [] for elt in cursor.description: col_names.append(elt[0]) df = pd.DataFrame(data=rows, columns=col_names ) # - df['HINCP'].hist() df['RATINGHS_BIN'].value_counts() # ### Class Imbalanced X = 
df[['HHAGE', 'HINCP', 'BATHROOMS', 'UTILAMT','PERPOVLVL', 'ELECAMT', 'GASAMT', 'TRASHAMT', 'WATERAMT', 'OMB13CBSA','UNITSIZE','NUMPEOPLE','STORIES', 'HHNATVTY']] y = df['RATINGHS_BIN'] X.hist(figsize=(20,15)) pd.Series(y).value_counts().plot.bar(color=['purple', 'orange', 'green', 'red']) from imblearn.over_sampling import SMOTE sm = SMOTE(random_state = 33) X_sm, y_sm = sm.fit_sample(X, y.ravel()) pd.Series(y_sm).value_counts().plot.bar(color=['purple', 'orange', 'green', 'red']) X_sm, y_sm = sm.fit_sample(X_sm, y_sm.ravel()) pd.Series(y_sm).value_counts().plot.bar(color=['purple', 'orange', 'green', 'red']) X_sm, y_sm = sm.fit_sample(X_sm, y_sm.ravel()) pd.Series(y_sm).value_counts().plot.bar(color=['purple', 'orange', 'green', 'red']) # Precision: It is implied as the measure of the correctly identified positive cases from all the predicted positive cases. Thus, it is useful when the costs of False Positives is high. # # Recall: It is the measure of the correctly identified positive cases from all the actual positive cases. It is important when the cost of False Negatives is high. # # Accuracy: One of the more obvious metrics, it is the measure of all the correctly identified cases. It is most used when all the classes are equally important. 
# # Create the train and test data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X_sm, y_sm, test_size=0.2) from sklearn import metrics def score_model(X, y, estimator, **kwargs): y = LabelEncoder().fit_transform(y) model = Pipeline([ ('one_hot_encoder', OneHotEncoder()), ('estimator', estimator) ]) model.fit(X_train, y_train.ravel(), **kwargs) expected = y_test predicted = model.predict(X_test) print("{}: {}".format(estimator.__class__.__name__, f1_score(expected, predicted, average='micro'))) # print("{} Accuracy Score: {}".format(estimator.__class__.__name__, metrics.accuracy_score(expected, predicted))) # + models = [ SVC(), NuSVC(), LinearSVC(), SGDClassifier(), KNeighborsClassifier(), ExtraTreesClassifier(), RandomForestClassifier(), DecisionTreeClassifier(), AdaBoostClassifier(), GradientBoostingClassifier() ] for model in models: score_model(X_train, y_train.ravel(), model) # - # ## Classification Report # + from yellowbrick.classifier import ClassificationReport # Instantiate the classification model and visualizer classes=['extremely satisfied','very satisfied','satisfied','not satisfied '] model = ExtraTreesClassifier() visualizer = ClassificationReport(model, classes=classes, size=(600, 420), support=True) visualizer.fit(X_train, y_train.ravel()) # Fit the visualizer and the model visualizer.score(X_test, y_test.ravel()) # Evaluate the model on the test data visualizer.show() # Finalize and show the figure # + # Instantiate the classification model and visualizer classes=['extremely satisfied','very satisfied','satisfied','not satisfied '] model = RandomForestClassifier() visualizer = ClassificationReport(model, classes=classes, size=(600, 420), support=True) visualizer.fit(X_train, y_train.ravel()) # Fit the visualizer and the model visualizer.score(X_test, y_test.ravel()) # Evaluate the model on the test data visualizer.show() # Finalize and show the figure # + from yellowbrick.classifier 
import ROCAUC classes=['extremely satisfied','very satisfied','satisfied','not satisfied '] visualizer = ROCAUC( RandomForestClassifier(), classes=classes, size=(1080, 720) ) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.show() # Draw the data # - # ### Confusion Matrixs # # + from yellowbrick.classifier import ConfusionMatrix model = ExtraTreesClassifier() cm = ConfusionMatrix(model, classes=['not satisfied ','satisfied','very satisfied','extremely satisfied']) # Fit fits the passed model. This is unnecessary if you pass the visualizer a pre-fitted model cm.fit(X_train, y_train.ravel()) cm.score(X_test, y_test.ravel()) cm.show() # + model = RandomForestClassifier() cm = ConfusionMatrix(model, classes=['not satisfied ','satisfied','very satisfied','extremely satisfied']) # Fit fits the passed model. This is unnecessary if you pass the visualizer a pre-fitted model cm.fit(X_train, y_train.ravel()) cm.score(X_test, y_test.ravel()) cm.show() # - # ### Cross Validation # + from sklearn.model_selection import StratifiedKFold from yellowbrick.model_selection import CVScores # Create a cross-validation strategy cv = StratifiedKFold(n_splits=12, random_state=42) # Instantiate the classification model and visualizer model = RandomForestClassifier() visualizer = CVScores( model, cv=cv, scoring='f1_weighted', size=(780, 520) ) visualizer.fit(X, y) visualizer.show() # + #With Class Balanced from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import StratifiedKFold from yellowbrick.model_selection import CVScores # Create a cross-validation strategy cv = StratifiedKFold(n_splits=12, random_state=42) # Instantiate the classification model and visualizer model = ExtraTreesClassifier() visualizer = CVScores( model, cv=cv, scoring='f1_weighted', size=(780, 520) ) visualizer.fit(X, y) visualizer.show() # - # ### GridSearchCV 
RandomForestClassifier().get_params() # + from sklearn.model_selection import GridSearchCV model = RandomForestClassifier() # TODO: Create a dictionary with the Ridge parameter options parameters = { 'n_estimators': [200, 300], 'max_features': ['auto', 'sqrt','log2'], 'min_samples_split':[2,4,6], 'n_jobs':[2,4] } clf = GridSearchCV(model, parameters, cv=5) clf.fit(X_train, y_train) print('If we change our parameters to: {}'.format(clf.best_params_)) print(clf.best_estimator_) # + models = [ ExtraTreesClassifier(n_estimators=100, max_features='log2', n_jobs=2, random_state=42), AdaBoostClassifier(learning_rate=0.7,n_estimators=200), GradientBoostingClassifier(learning_rate=.5,max_depth=4), RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=None, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=300, n_jobs=None, oob_score=False, random_state=None, verbose=0, warm_start=False) ] for model in models: score_model(X_train, y_train.ravel(), model) # + from sklearn.model_selection import learning_curve from sklearn.svm import SVC train_sizes, train_scores, valid_scores = learning_curve( RandomForestClassifier(), X, y, train_sizes=[50, 80, 110], cv=5) # - # A learning curve shows the validation and training score of an estimator for varying numbers of training samples. It is a tool to find out how much we benefit from adding more training data and whether the estimator suffers more from a variance error or a bias error. 
# + # Create CV training and test scores for various training set sizes train_sizes, train_scores, test_scores = learning_curve(RandomForestClassifier(), X, y, # Number of folds in cross-validation cv=12, # Evaluation metric scoring='accuracy', # Use all computer cores n_jobs=1, # 50 different sizes of the training set train_sizes=np.linspace(.1, 1.0, 100)) # Create means and standard deviations of training set scores train_mean = np.mean(train_scores, axis=1) train_std = np.std(train_scores, axis=1) # Create means and standard deviations of test set scores test_mean = np.mean(test_scores, axis=1) test_std = np.std(test_scores, axis=1) # Draw lines plt.plot(train_sizes, train_mean, '--', color="r", label="Training score") plt.plot(train_sizes, test_mean, color="g", label="Cross-validation score") # Draw bands plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std,color="r",) plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, color="g") # Create plot plt.title("Learning Curve") plt.xlabel("Training Set Size"), plt.ylabel("Accuracy Score"), plt.legend(loc="best") plt.tight_layout() plt.show() # - ''' from sklearn.model_selection import validation_curve # Create range of values for parameter param_range = np.arange(1, 250, 2) # Calculate accuracy on training and test set using range of parameter values train_scores, test_scores = validation_curve(RandomForestClassifier(), X, y, param_name="n_estimators", param_range=param_range, cv=3, scoring="accuracy", n_jobs=-1) # Plot mean accuracy scores for training and test sets plt.plot(param_range, train_mean, label="Training score", color="blue") plt.plot(param_range, test_mean, label="Cross-validation score", color="green") # Plot accurancy bands for training and test sets plt.fill_between(param_range, train_mean - train_std, train_mean + train_std, color="blue") plt.fill_between(param_range, test_mean - test_std, test_mean + test_std, color="gainsboro") # Create plot 
plt.title("Validation Curve With Random Forest") plt.xlabel("Number Of Trees") plt.ylabel("Accuracy Score") plt.tight_layout() plt.legend(loc="best") plt.show() ''' # ### Different Approach - 2 Classes # + # Labeling Rating column to 2 classes LABEL_MAP = { 1: "Less Satisfied", 2: "Less Satisfied", 3: "Less Satisfied", 4: "Less Satisfied", 5: "Less Satisfied", 6: "Less Satisfied", 7: "Less Satisfied", 8: "Less Satisfied", 9: "Highly Satisfied", 10: "Highly Satisfied" } # Convert class labels into text y = df['RATINGHS'].map(LABEL_MAP) y # - X = df[['HHAGE', 'HINCP', 'BATHROOMS', 'UTILAMT','PERPOVLVL', 'ELECAMT', 'GASAMT', 'TRASHAMT', 'WATERAMT', 'OMB13CBSA','NUMPEOPLE', 'STORIES', 'HHNATVTY']] #identify the classes balance pd.Series(y).value_counts().plot.bar(color=['blue', 'red']) # ### Split the data into 80 to 20 # Create the train and test data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2) from sklearn import metrics def score_model(X, y, estimator, **kwargs): y = LabelEncoder().fit_transform(y) model = Pipeline([ ('one_hot_encoder', OneHotEncoder()), ('estimator', estimator) ]) model.fit(X_train, y_train.ravel(), **kwargs) expected = y_test predicted = model.predict(X_test) print("{}: {}".format(estimator.__class__.__name__, f1_score(expected, predicted, average='micro'))) # + from sklearn.preprocessing import LabelEncoder # Encode our target variable encoder = LabelEncoder().fit(y) y = encoder.transform(y) y # + models = [ SVC(), NuSVC(), LinearSVC(), SGDClassifier(), KNeighborsClassifier(), LogisticRegression(), ExtraTreesClassifier(), RandomForestClassifier(), DecisionTreeClassifier(), AdaBoostClassifier(), GradientBoostingClassifier() ] for model in models: score_model(X_train, y_train.ravel(), model) # + #Classification Report classes=['highly satisfied', 'less satisfied'] model = GradientBoostingClassifier() visualizer = ClassificationReport(model, classes=classes, 
size=(600, 420), support=True) visualizer.fit(X_train, y_train.ravel()) # Fit the visualizer and the model visualizer.score(X_test, y_test.ravel()) # Evaluate the model on the test data visualizer.show() # + ## ROC Curve from yellowbrick.classifier import ROCAUC classes=['highly satisfied', 'less satisfied'] visualizer = ROCAUC( GradientBoostingClassifier(), classes=classes, size=(1080, 720) ) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.show() # + ## Confusion Matrix model = GradientBoostingClassifier() cm = ConfusionMatrix(model, classes=['highly satisfied', 'less satisfied']) # Fit fits the passed model. This is unnecessary if you pass the visualizer a pre-fitted model cm.fit(X_train, y_train.ravel()) cm.score(X_test, y_test.ravel()) cm.show() # + ## GridSearch for hyperparameter tuning from sklearn.model_selection import GridSearchCV model = AdaBoostClassifier() # TODO: Create a dictionary with the Ridge parameter options parameters = { 'learning_rate': [1,2,3], 'algorithm': ['SAMME', 'SAMME.R'], 'n_estimators': [100,200,300] } clf = GridSearchCV(model, parameters, cv=5) clf.fit(X_train, y_train) print('If we change our parameters to: {}'.format(clf.best_params_)) print(clf.best_estimator_) # + models = [ AdaBoostClassifier(algorithm= 'SAMME.R', learning_rate= 1, n_estimators=200), GradientBoostingClassifier(loss='exponential', max_features= 'auto',n_estimators=300), RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=None, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=300, n_jobs=None, oob_score=False, random_state=None, verbose=0, warm_start=False) ] for model in models: score_model(X_train, y_train.ravel(), model) # + # Create CV training and test scores for various training set sizes train_sizes, train_scores, test_scores = 
learning_curve(RandomForestClassifier(), X, y, # Number of folds in cross-validation cv=10, # Evaluation metric scoring='accuracy', # Use all computer cores n_jobs=1, # 50 different sizes of the training set train_sizes=np.linspace(.1, 1.0, 100)) # Create means and standard deviations of training set scores train_mean = np.mean(train_scores, axis=1) train_std = np.std(train_scores, axis=1) # Create means and standard deviations of test set scores test_mean = np.mean(test_scores, axis=1) test_std = np.std(test_scores, axis=1) # Draw lines plt.plot(train_sizes, train_mean, '--', color="r", label="Training score") plt.plot(train_sizes, test_mean, color="g", label="Cross-validation score") # Draw bands plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std,color="r",) plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, color="g") # Create plot plt.title("Learning Curve") plt.xlabel("Training Set Size"), plt.ylabel("Accuracy Score"), plt.legend(loc="best") plt.tight_layout() plt.show()
Modeling_Final.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Bonus Tutorial:Fitting to data # **Week 3, Day 1: Bayesian Decisions** # # **By Neuromatch Academy** # # __Content creators:__ <NAME>, <NAME> # # __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # --- # # Tutorial objectives # # In the first two tutorials, we learned about Bayesian models and decisions more intuitively, using demos. In this notebook, we will dive into using math and code to fit Bayesian models to data. # # We'll have a look at computing all the necessary steps to perform model inversion (estimate the model parameters such as $p_{common}$ that generated data similar to that of a participant). We will describe all the steps of the generative model first, and in the last exercise we will use all these steps to estimate the parameter $p_{common}$ of a single participant using simulated data. # # The generative model will be a Bayesian model we saw in Tutorial 2: a mixture of Gaussian prior and a Gaussian likelihood. # Steps: # # * First, we'll create the prior, likelihood, posterior, etc in a form that will make it easier for us to visualise what is being computed and estimated at each step of the generative model: # 1. Creating a mixture of Gaussian prior for multiple possible stimulus inputs # 2. Generating the likelihood for multiple possible stimulus inputs # 3. Estimating our posterior as a function of the stimulus input # 4. 
Estimating a participant response given the posterior # # * Next, we'll perform the model inversion/fitting: # 5. Create an distribution for the input as a function of possible inputs # 6. Marginalization # 7. Generate some data using the generative model provided # 8. Perform model inversion (model fitting) using the generated data and see if you recover the orignal parameters. # # --- # # Setup # # Please execute the cell below to initialize the notebook environment # + cellView="both" import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from scipy.optimize import minimize # + cellView="form" #@title Figure Settings import ipywidgets as widgets # %matplotlib inline # %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/NMA2020/nma.mplstyle") # + cellView="form" # @title Helper Functions def my_gaussian(x_points, mu, sigma): """ Returns a Gaussian estimated at points `x_points`, with parameters: `mu` and `sigma` Args : x_points (numpy arrays of floats)- points at which the gaussian is evaluated mu (scalar) - mean of the Gaussian sigma (scalar) - std of the gaussian Returns: Gaussian evaluated at `x` """ p = np.exp(-(x_points-mu)**2/(2*sigma**2)) return p / sum(p) def moments_myfunc(x_points, function): """ DO NOT EDIT THIS FUNCTION !!! 
Returns the mean, median and mode of an arbitrary function Args : x_points (numpy array of floats) - x-axis values function (numpy array of floats) - y-axis values of the function evaluated at `x_points` Returns: (tuple of 3 scalars): mean, median, mode """ # Calc mode of arbitrary function mode = x_points[np.argmax(function)] # Calc mean of arbitrary function mean = np.sum(x_points * function) # Calc median of arbitrary function cdf_function = np.zeros_like(x_points) accumulator = 0 for i in np.arange(x_points.shape[0]): accumulator = accumulator + function[i] cdf_function[i] = accumulator idx = np.argmin(np.abs(cdf_function - 0.5)) median = x_points[idx] return mean, median, mode def plot_myarray(array, xlabel, ylabel, title): """ Plot an array with labels. Args : array (numpy array of floats) xlabel (string) - label of x-axis ylabel (string) - label of y-axis title (string) - title of plot Returns: None """ fig = plt.figure() ax = fig.add_subplot(111) colormap = ax.imshow(array, extent=[-10, 10, 8, -8]) cbar = plt.colorbar(colormap, ax=ax) cbar.set_label('probability') ax.invert_yaxis() ax.set_xlabel(xlabel) ax.set_title(title) ax.set_ylabel(ylabel) ax.set_aspect('auto') return None def plot_my_bayes_model(model) -> None: """Pretty-print a simple Bayes Model (ex 7), defined as a function: Args: - model: function that takes a single parameter value and returns the negative log-likelihood of the model, given that parameter Returns: None, draws plot """ x = np.arange(-10,10,0.07) # Plot neg-LogLikelihood for different values of alpha alpha_tries = np.arange(0.01, 0.3, 0.01) nll = np.zeros_like(alpha_tries) for i_try in np.arange(alpha_tries.shape[0]): nll[i_try] = model(np.array([alpha_tries[i_try]])) plt.figure() plt.plot(alpha_tries, nll) plt.xlabel('p_independent value') plt.ylabel('negative log-likelihood') # Mark minima ix = np.argmin(nll) plt.scatter(alpha_tries[ix], nll[ix], c='r', s=144) #plt.axvline(alpha_tries[np.argmin(nll)]) plt.title('Sample Output') 
plt.show() return None def plot_simulated_behavior(true_stim, behaviour): fig = plt.figure(figsize=(7, 7)) ax = fig.add_subplot(1,1,1) ax.set_facecolor('xkcd:light grey') plt.plot(true_stim, true_stim - behaviour, '-k', linewidth=2, label='data') plt.axvline(0, ls='dashed', color='grey') plt.axhline(0, ls='dashed', color='grey') plt.legend() plt.xlabel('Position of true visual stimulus (cm)') plt.ylabel('Participant deviation from true stimulus (cm)') plt.title('Participant behavior') plt.show() return None # - # --- # # Introduction # # + cellView="form" #@title Video 1: Intro from IPython.display import YouTubeVideo video = YouTubeVideo(id='YSKDhnbjKmA', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # - # # ![Generative model](https://github.com/vincentvalton/figures_NMA_W2D1_T3/blob/master/Drawing%20Generative%20Model%20W2T3.png?raw=true) # # Here is a graphical representation of the generative model: # # 1. We present a stimulus $x$ to participants. # 2. The brain encodes this true stimulus $x$ noisily (this is the brain's representation of the true visual stimulus: $p(\tilde x|x)$. # 3. The brain then combine this brain encoded stimulus (likelihood: $p(\tilde x|x)$) with prior information (the prior: $p(x)$) to make up the brain's estimated position of the true visual stimulus, the posterior: $p(x|\tilde x)$. # 3. This brain's estimated stimulus position: $p(x|\tilde x)$, is then used to make a response: $\hat x$, which is the participant's noisy estimate of the stimulus position (the participant's percept). # # Typically the response $\hat x$ also includes some motor noise (noise due to the hand/arm move being not 100% accurate), but we'll ignore it in this tutorial and assume there is no motor noise. 
# # # # We will use the same experimental setup as in [tutorial 2](https://colab.research.google.com/drive/15pbgrfGjSKbUQoX51RdcNe3UXb4R5RRx#scrollTo=tF5caxVGYURh) but with slightly different probabilities. This time, participants are told that they need to estimate the sound location of a puppet that is hidden behind a curtain. The participants are told to use auditory information and are also informed that the sound could come from 2 possible causes: a common cause (95% of the time it comes from the puppet hidden behind the curtain at position 0), or an independent cause (5% of the time the sound comes from loud-speakers at more distant locations). # --- # # Section 1: Likelihood array # # First, we want to create a likelihood, but for the sake of visualization (and to consider all possible brain encodings) we will create multiple likelihoods $f(x)=p(\tilde x|x)$ (one for each potential encoded stimulus: $\tilde x$). We will then be able to visualize the likelihood as a function of hypothesized true stimulus positions: $x$ on the x-axis and encoded position $\tilde x$ on the y-axis. # # # Using the equation for the `my_gaussian` and the values in `hypothetical_stim`: # * Create a Gaussian likelihood with mean varying from `hypothetical_stim`, keeping $\sigma_{likelihood}$ constant at 1. # * Each likelihood will have a different mean and thus a different row-likelihood of your 2D array, such that you end up with a likelihood array made up of 1,000 row-Gaussians with different means. (_Hint_: `np.tile` won't work here. You may need a for-loop). # * Plot the array using the function `plot_myarray()` already pre-written and commented-out in your script # ###Exercise 1. 
Implement the auditory likelihood as a function of true stimulus position # + cellView="code" x = np.arange(-10, 10, 0.1) hypothetical_stim = np.linspace(-8, 8, 1000) def compute_likelihood_array(x_points, stim_array, sigma=1.): # initializing likelihood_array likelihood_array = np.zeros((len(stim_array), len(x_points))) # looping over stimulus array for i in range(len(stim_array)): ######################################################################## ## Insert your code here to: ## - Generate a likelihood array using `my_gaussian` function, ## with std=1, and varying the mean using `stim_array` values. ## remove the raise below to test your function raise NotImplementedError("You need to complete the function!") ######################################################################## likelihood_array[i, :] = ... return likelihood_array # Uncomment following lines to test your code # likelihood_array = compute_likelihood_array(x, hypothetical_stim) # plot_myarray(likelihood_array, # '$x$ : Potential true stimulus $x$', # 'Possible brain encoding $\~x$', # 'Likelihood as a function of $\~x$ : $p(\~x | x)$') # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial3_Solution_5883eb88.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=557 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D1_BayesianDecisions/static/W3D1_Tutorial3_Solution_5883eb88_0.png> # # # - # --- # # Section 2: Causal mixture of Gaussian prior # # + cellView="form" #@title Video 2: Prior array video = YouTubeVideo(id='F0IYpUicXu4', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # - # # As in Tutorial 2, we want to create a prior that will describe the participants' prior knowledge that, 95% of the time sounds come from a common position around 
the puppet, while during the remaining 5% of the time, they arise from another independent position. We will embody this information into a prior using a mixture of Gaussians. For visualization reasons, we will create a prior that has the same shape (form) as the likelihood array we created in the previous exercise. That is, we want to create a mixture of Gaussian prior as a function the the brain encoded stimulus $\tilde x$. Since the prior does not change as a function of $\tilde x$ it will be identical for each row of the prior 2D array. # # Using the equation for the Gaussian `my_gaussian`: # * Generate a Gaussian $Common$ with mean 0 and standard deviation 0.5 # * Generate another Gaussian $Independent$ with mean 0 and standard deviation 10 # * Combine the two Gaussians (Common + Independent) to make a new prior by mixing the two Gaussians with mixing parameter $p_{independent}$ = 0.05. Make it such that the peakier Gaussian has 95% of the weight (don't forget to normalize afterwards) # * This will be the first row of your prior 2D array # * Now repeat this for varying brain encodings $\tilde x$. Since the prior does not depend on $\tilde x$ you can just repeat the prior for each $\tilde x$ (hint: use np.tile) that row prior to make an array of 1,000 (i.e. `hypothetical_stim.shape[0]`) row-priors. 
# * Plot the matrix using the function `plot_myarray()` already pre-written and commented-out in your script # ### Exercise 2: Implement the prior array # + cellView="code" x = np.arange(-10, 10, 0.1) def calculate_prior_array(x_points, stim_array, p_indep, prior_mean_common=.0, prior_sigma_common=.5, prior_mean_indep=.0, prior_sigma_indep=10): """ 'common' stands for common 'indep' stands for independent """ prior_common = my_gaussian(x_points, prior_mean_common, prior_sigma_common) prior_indep = my_gaussian(x_points, prior_mean_indep, prior_sigma_indep) ############################################################################ ## Insert your code here to: ## - Create a mixture of gaussian priors from 'prior_common' ## and 'prior_indep' with mixing parameter 'p_indep' ## - normalize ## - repeat the prior array and reshape it to make a 2D array ## of 1000 rows of priors (Hint: use np.tile() and np.reshape()) ## remove the raise below to test your function raise NotImplementedError("You need to complete the function!") ############################################################################ prior_mixed = ... prior_mixed /= ... # normalize prior_array = np.tile(...).reshape(...) return prior_array p_independent=.05 # Uncomment following lines, once the task is complete. 
# prior_array = calculate_prior_array(x, hypothetical_stim, p_independent) # plot_myarray(prior_array, # 'Hypothesized position $x$', 'Brain encoded position $\~x$', # 'Prior as a fcn of $\~x$ : $p(x|\~x)$') # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial3_Solution_dddc3e14.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=555 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D1_BayesianDecisions/static/W3D1_Tutorial3_Solution_dddc3e14_0.png> # # # - # --- # # Section 3: Bayes rule and Posterior array # + cellView="form" #@title Video 3: Posterior array video = YouTubeVideo(id='HpOzXZUKFJc', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # - # We now want to calcualte the posterior using *Bayes Rule*. Since we have already created a likelihood and a prior for each brain encoded position $\tilde x$, all we need to do is to multiply them row-wise. That is, each row of the posterior array will be the posterior resulting from the multiplication of the prior and likelihood of the same equivalent row. # # Mathematically: # # \begin{eqnarray} # Posterior\left[i, :\right] \propto Likelihood\left[i, :\right] \odot Prior\left[i, :\right] # \end{eqnarray} # # where $\odot$ represents the [Hadamard Product](https://en.wikipedia.org/wiki/Hadamard_product_(matrices)) (i.e., elementwise multiplication) of the corresponding prior and likelihood row vectors `i` from each matrix. # # Follow these steps to build the posterior as a function of the brain encoded stimulus $\tilde x$: # * For each row of the prior and likelihood (i.e. each possible brain encoding $\tilde x$), fill in the posterior matrix so that every row of the posterior array represents the posterior density for a different brain encode $\tilde x$. 
# * Plot the array using the function `plot_myarray()` already pre-written and commented-out in your script # # Optional: # * Do you need to operate on one element--or even one row--at a time? NumPy operations can often process an entire matrix in a single "vectorized" operation. This approach is often much faster and much easier to read than an element-by-element calculation. Try to write a vectorized version that calculates the posterior without using any for-loops. _Hint_: look at `np.sum` and its keyword arguments. # ### Exercise 3: Calculate the posterior as a function of the hypothetical stimulus x # + cellView="code" def calculate_posterior_array(prior_array, likelihood_array): ############################################################################ ## Insert your code here to: ## - calculate the 'posterior_array' from the given ## 'prior_array', 'likelihood_array' ## - normalize ## remove the raise below to test your function raise NotImplementedError("You need to complete the function!") ############################################################################ posterior_array = ... posterior_array /= ... # normalize each row separately return posterior_array # Uncomment following lines, once the task is complete. 
# posterior_array = calculate_posterior_array(prior_array, likelihood_array) # plot_myarray(posterior_array, # 'Hypothesized Position $x$', # 'Brain encoded Stimulus $\~x$', # 'Posterior as a fcn of $\~x$ : $p(x | \~x)$') # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial3_Solution_3b290b41.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=555 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D1_BayesianDecisions/static/W3D1_Tutorial3_Solution_3b290b41_0.png> # # # - # --- # # Section 4: Estimating the position $\hat x$ # + cellView="form" #@title Video 4: Binary decision matrix video = YouTubeVideo(id='gy3GmlssHgQ', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # - # Now that we have a posterior distribution (for each possible brain encoding $\tilde x$)that represents the brain's estimated stimulus position: $p(x|\tilde x)$, we want to make an estimate (response) of the sound location $\hat x$ using the posterior distribution. This would represent the subject's estimate if their (for us as experimentalist unobservable) brain encoding took on each possible value. # # This effectively encodes the *decision* that a participant would make for a given brain encoding $\tilde x$. In this exercise, we make the assumptions that participants take the mean of the posterior (decision rule) as a response estimate for the sound location (use the function `moments_myfunc()` provided to calculate the mean of the posterior). # # Using this knowledge, we will now represent $\hat x$ as a function of the encoded stimulus $\tilde x$. This will result in a 2D binary decision array. To do so, we will scan the posterior matrix (i.e. row-wise), and set the array cell value to 1 at the mean of the row-wise posterior. 
# # **Suggestions** # * For each brain encoding $\tilde x$ (row of the posterior array), calculate the mean of the posterior, and set the corresponding cell of the binary decision array to 1. (e.g., if the mean of the posterior is at position 0, then set the cell with x_column == 0 to 1). # * Plot the matrix using the function `plot_myarray()` already pre-written and commented-out in your script # ### Exercise 4: Calculate the estimated response as a function of the hypothetical stimulus x # + cellView="code" def calculate_binary_decision_array(x_points, posterior_array): binary_decision_array = np.zeros_like(posterior_array) for i in range(len(posterior_array)): ######################################################################## ## Insert your code here to: ## - For each hypothetical stimulus x (row of posterior), ## calculate the mean of the posterior using the povided function ## `moments_myfunc()`, and set the corresponding cell of the ## Binary Decision array to 1. ## Hint: you can run 'help(moments_myfunc)' to see the docstring ## remove the raise below to test your function raise NotImplementedError("You need to complete the function!") ######################################################################## # calculate mean of posterior using 'moments_myfunc' mean, _, _ = ... # find the postion of mean in x_points (closest position) idx = ... binary_decision_array[i, idx] = 1 return binary_decision_array # Uncomment following lines, once the task is complete. 
# binary_decision_array = calculate_binary_decision_array(x, posterior_array) # plot_myarray(binary_decision_array, # 'Chosen position $\hat x$', 'Brain-encoded Stimulus $\~ x$', # 'Sample Binary Decision Array') # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial3_Solution_0cd39fa7.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=547 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D1_BayesianDecisions/static/W3D1_Tutorial3_Solution_0cd39fa7_0.png> # # # - # --- # # Section 5: Probabilities of encoded stimuli # + cellView="form" #@title Video 5: Input array video = YouTubeVideo(id='C1d1n_Si83o', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # - # Because we as experimentalists can not know the encoding $\tilde x$ of the stimulus $x$ that we do know, we had to compute the binary decision array for each possible encoding. # # First however, we need to calculate how likely each possible encoding is given the true stimulus. That is, we will now create a Gaussian centered around the true presented stimulus, with $\sigma = 1$, and repeat that gaussian distribution across as a function of potentially encoded values $\tilde x$. That is, we want to make a *column* gaussian centered around the true presented stimulus, and repeat this *column* Gaussian across all hypothetical stimulus values $x$. # # This, effectively encodes the distribution of the brain encoded stimulus (one single simulus, which we as experimentalists know) and enable us to link the true stimulus $x$, to potential encodings $\tilde x$. 
# # **Suggestions** # # For this exercise, we will assume the true stimulus is presented at direction 2.5 # * Create a Gaussian likelihood with $\mu = 2.5$ and $\sigma = 1.0$ # * Make this the first column of your array and repeat that *column* to fill in the true presented stimulus input as a function of hypothetical stimulus locations. # * Plot the array using the function `plot_myarray()` already pre-written and commented-out in your script # ###Exercise 5: Generate an input as a function of hypothetical stimulus x # + cellView="code" def generate_input_array(x_points, stim_array, posterior_array, mean=2.5, sigma=1.): input_array = np.zeros_like(posterior_array) ######################################################################## ## Insert your code here to: ## - Generate a gaussian centered on the true stimulus 2.5 ## and sigma = 1. for each column ## remove the raise below to test your function raise NotImplementedError("You need to complete the function!") ######################################################################## for i in range(len(x_points)): input_array[:, i] = ... return input_array # Uncomment following lines, once the task is complete. 
# input_array = generate_input_array(x, hypothetical_stim, posterior_array) # plot_myarray(input_array, # 'Hypothetical Stimulus $x$', '$\~x$', # 'Sample Distribution over Encodings:\n $p(\~x | x = 2.5)$') # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial3_Solution_f61fa492.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=557 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D1_BayesianDecisions/static/W3D1_Tutorial3_Solution_f61fa492_0.png> # # # - # --- # # Section 6: Normalization and expected estimate distribution # + cellView="form" #@title Video 6: Marginalization video = YouTubeVideo(id='5alwtNS4CGw', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # - # Now that we have a true stimulus $x$ and a way to link it to potential encodings, we will be able to calculate the distribution of encodings and ultimately estimates. To integrate over all possible hypothetical values of $\tilde x$ we marginalize, that is, we first compute the dot-product from the true presented stimulus and our binary decision array and then sum over x. # # Mathematically, this means that we want to compute: # # \begin{eqnarray} # Marginalization Array = Input Array \odot Binary Decision Array # \end{eqnarray} # # \begin{eqnarray} # Marginal = \int_{\tilde x} Marginalization Array # \end{eqnarray} # # Since we are performing integration over discrete values using arrays for visualization purposes, the integration reduces to a simple sum over $\tilde x$. # # **Suggestions** # # * For each row of the input and binary arrays, calculate product of the two and fill in the 2D marginal array. 
# * Plot the result using the function `plot_myarray()` already pre-written and commented-out in your script # * Calculate and plot the marginal over `x` using the code snippet commented out in your script # - Note how the limitations of numerical integration create artifacts on your marginal # ###Exercise 6: Implement the marginalization matrix # + cellView="code" def my_marginalization(input_array, binary_decision_array): ############################################################################ ## Insert your code here to: ## - Compute 'marginalization_array' by multiplying pointwise the Binary ## decision array over hypothetical stimuli and the Input array ## - Compute 'marginal' from the 'marginalization_array' by summing over x ## (hint: use np.sum() and only marginalize along the columns) ## remove the raise below to test your function raise NotImplementedError("You need to complete the function!") ############################################################################ marginalization_array = ... marginal = ... # note axis marginal /= ... # normalize return marginalization_array, marginal # Uncomment following lines, once the task is complete. 
# marginalization_array, marginal = my_marginalization(input_array, binary_decision_array) # plot_myarray(marginalization_array, 'estimated $\hat x$', '$\~x$', 'Marginalization array: $p(\^x | \~x)$') # plt.figure() # plt.plot(x, marginal) # plt.xlabel('$\^x$') # plt.ylabel('probability') # plt.show() # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial3_Solution_3560aec0.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=553 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D1_BayesianDecisions/static/W3D1_Tutorial3_Solution_3560aec0_0.png> # # <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D1_BayesianDecisions/static/W3D1_Tutorial3_Solution_3560aec0_1.png> # # # - # --- # # Generate some data # # We have seen how to calculate the posterior and marginalize to remove $\tilde x$ and get $p(\hat{x} \mid x)$. Next, we will generate some artificial data for a single participant using the `generate_data()` function provided, and mixing parameter $p_{independent} = 0.1$. # # Our goal in the next exercise will be to recover that parameter. These parameter recovery experiments are a powerful method for planning and debugging Bayesian analyses--if you cannot recover the given parameters, something has gone wrong! Note that this value for $p_{independent}$ is not quite the same as our prior, which used $p_{independent} = 0.05.$ This lets us test out the complete model. # # Please run the code below to generate some synthetic data. You do not need to edit anything, but check that the plot below matches what you would expect from the video. 
# + cellView="form" #@title #@markdown #### Run the 'generate_data' function (this cell) def generate_data(x_stim, p_independent): """ DO NOT EDIT THIS FUNCTION !!! Returns generated data using the mixture of Gaussian prior with mixture parameter `p_independent` Args : x_stim (numpy array of floats) - x values at which stimuli are presented p_independent (scalar) - mixture component for the Mixture of Gaussian prior Returns: (numpy array of floats): x_hat response of participant for each stimulus """ x = np.arange(-10,10,0.1) x_hat = np.zeros_like(x_stim) prior_mean = 0 prior_sigma1 = .5 prior_sigma2 = 3 prior1 = my_gaussian(x, prior_mean, prior_sigma1) prior2 = my_gaussian(x, prior_mean, prior_sigma2) prior_combined = (1-p_independent) * prior1 + (p_independent * prior2) prior_combined = prior_combined / np.sum(prior_combined) for i_stim in np.arange(x_stim.shape[0]): likelihood_mean = x_stim[i_stim] likelihood_sigma = 1 likelihood = my_gaussian(x, likelihood_mean, likelihood_sigma) likelihood = likelihood / np.sum(likelihood) posterior = np.multiply(prior_combined, likelihood) posterior = posterior / np.sum(posterior) # Assumes participant takes posterior mean as 'action' x_hat[i_stim] = np.sum(x * posterior) return x_hat # Generate data for a single participant true_stim = np.array([-8, -4, -3, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 4, 8]) behaviour = generate_data(true_stim, 0.10) plot_simulated_behavior(true_stim, behaviour) # - # --- # #Section 7: Model fitting # + cellView="form" #@title Video 7: Log likelihood video = YouTubeVideo(id='jbYauFpyZhs', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # - # Now that we have generated some data, we will attempt to recover the parameter $p_{independent}$ that was used to generate it. 
#
# We have provided you with an incomplete function called `my_Bayes_model_mse()` that needs to be completed to perform the same computations you have performed in the previous exercises but over all the participant's trial, as opposed to a single trial.
#
# The likelihood has already been constructed; since it depends only on the hypothetical stimuli, it will not change. However, we will have to implement the prior matrix, since it depends on $p_{independent}$. We will therefore have to recompute the posterior, input and the marginal in order to get $p(\hat{x} \mid x)$.
#
# Using $p(\hat{x} \mid x)$, we will then compute the negative log-likelihood for each trial and find the value of $p_{independent}$ that minimizes the negative log-likelihood (i.e. maximises the log-likelihood. See the model fitting tutorial from W1D3 for a refresher).
#
# In this experiment, we assume that trials are independent from one another. This is a common assumption--and it's often even true! It allows us to define negative log-likelihood as:
#
# \begin{eqnarray}
# -LL = - \sum_i \log p(\hat{x}_i \mid x_i)
# \end{eqnarray}
#
# where $\hat{x}_i$ is the participant's response for trial $i$, with presented stimulus $x_i$
#
# * Complete the function `my_Bayes_model_mse`, we've already pre-completed the function to give you the prior, posterior, and input arrays on each trial
# * Compute the marginalization array as well as the marginal on each trial
# * Compute the negative log likelihood using the marginal and the participant's response
# * Using the code snippet commented out in your script to loop over possible values of $p_{independent}$
#

# ###Exercise 7: Fitting a model to generated data
#
#

# +
def my_Bayes_model_mse(params):
    """
    Compute the fit objective of the Bayesian model from this tutorial.

    NOTE: despite the historical `_mse` suffix, this function returns a
    negative log-likelihood, not a mean squared error. It also reads the
    notebook globals `x`, `hypothetical_stim`, `likelihood_array`,
    `true_stim` and `behaviour` defined in earlier cells.

    Args :
      params (list of positive floats): parameters used by the model
      (params[0] = p_independent, the mixture weight of the
      independent-cause component of the prior)

    Returns :
      (scalar) negative log-likelihood : negative sum of the log
      probabilities of the participant's responses across trials
    """
    # Create the prior array
    p_independent=params[0]
    prior_array = calculate_prior_array(x,
                                        hypothetical_stim,
                                        p_independent,
                                        prior_sigma_indep= 3.)

    # Create posterior array
    posterior_array = calculate_posterior_array(prior_array, likelihood_array)

    # Create Binary decision array
    binary_decision_array = calculate_binary_decision_array(x, posterior_array)

    # we will use trial_ll (trial log likelihood) to register each trial
    trial_ll = np.zeros_like(true_stim)

    # Loop over stimuli
    for i_stim in range(len(true_stim)):

        # create the input array with true_stim as mean
        input_array = np.zeros_like(posterior_array)
        for i in range(len(x)):
            input_array[:, i] = my_gaussian(hypothetical_stim, true_stim[i_stim], 1)
            input_array[:, i] = input_array[:, i] / np.sum(input_array[:, i])

        # calculate the marginalizations
        marginalization_array, marginal = my_marginalization(input_array,
                                                             binary_decision_array)

        action = behaviour[i_stim]
        # index of the response on the x grid (closest grid point)
        idx = np.argmin(np.abs(x - action))

        ########################################################################
        ## Insert your code here to:
        ##        - Compute the log likelihood of the participant
        ## remove the raise below to test your function
        raise NotImplementedError("You need to complete the function!")
        ########################################################################
        # Get the marginal likelihood corresponding to the action
        marginal_nonzero = ... + np.finfo(float).eps  # avoid log(0)
        trial_ll[i_stim] = np.log(marginal_nonzero)

    neg_ll = - trial_ll.sum()

    return neg_ll


# Uncomment following lines, once the task is complete.
# plot_my_bayes_model(my_Bayes_model_mse)

# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D1_BayesianDecisions/solutions/W3D1_Tutorial3_Solution_fe350657.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=559 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D1_BayesianDecisions/static/W3D1_Tutorial3_Solution_fe350657_0.png>
#
#
# -

# # Section 8: Summary

# + cellView="form"
#@title Video 8: Outro
video = YouTubeVideo(id='F5JfqJonz20', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
# -

# Congratulations! You found $p_{independent}$, the parameter that describes how much weight subjects assign to the same-cause vs. independent-cause origins of a sound. In the preceding notebooks, we went through the entire Bayesian analysis pipeline:
#
# * developing a model
# * simulating data, and
# * using Bayes' Rule and marginalization to recover a hidden parameter from the data
#
# This example was simple, but the same principles can be used to analyze datasets with many hidden variables and complex priors and likelihoods. Bayes' Rule will also play a crucial role in many of the other techniques you will see later this week.
#
# ---
#
# If you're still intrigued as to why we decided to use the mean of the posterior as a decision rule for a response $\hat{x}$, we have an extra Bonus Tutorial 4 which goes through the most common decision rules and how these rules correspond to minimizing different cost functions.
# --- source notebook boundary: tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial3.ipynb ---
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # **Tutorial 08: Practical Examples of OOP Paradigm** 👀 # - # Computer games are quite suitable to practice **object oriented modelling** and **design**. Making simple games can demonstrate the effectiveness of **object orientation** to easily scale projects and build larger software components. In these special themed mini-projects, we will develop simple games using **object oriented approach**. # <br><br><a id='t1cls'></a> # ## ▙▂ **Part 2: Blackjack Game ▂▂** # In the second mini-project, we will practice more with inheritance and method overriding. We will show how to design and build blackjack game components, such as card, then deck and finally hand. Combining those with the 'abstract' class `Game` from the previous mini-project, we will make a console based blackjack game. # <a id='t4p2toc'></a> # #### Contents: #### # - [`Card` Class and Card Constants](#t4p2card) # - [Test and Experiment](#t4p2tecard) # - [`Deck` Class](#t4p2deck) # - [Test and Experiment](#t4p2tedeck) # - [`Hand` Class](#t4p2hand) # - [Test and Experiment](#t4p2tehand) # - [`Game` Class](#t4p2game) # - [Assembling the Pieces Together](#t4p2assembling) # - [`Blackjack` Class](#t4p2blackjack) # # <a id='t4p2card'></a> # #### **▇▂ `Card` Class and Card Constants ▂▂** # We will use cards from standard 52 card deck. Each card is depicted by a suit and a rank. Card suits are: spades (♠), hearts (♥), diamonds (♦), clubs (♣). # # To make printouts consistent and of same length, we will use one character to represent the suit and another character to represent the rank. 
For a suit, we will use the capital first letter of that suit: ```SUITS = ('S', 'H', 'D', 'C')```, while ranks will be given as: ```RANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')```. <br> # Note: For 10 we opted for character 'T'. # # In blackjack the value of a card is determined by its rank only, so cards with ranks 2 - 9 have the same value, while every other card is worth 10 points. The only exception is the ace, which is worth either 11 points or 1 point, always what is more favourable for the player. The objective of blackjack is to have a hand of the highest value, up to and including 21. Any value above 21 is considered as bust and means losing the round. Therefore, if the hand value would be below or equal to 21, ace would count as 11. In case of a potential bust, ace would count as 1. # # Therefore, we can define constants like below: ## DEFINING GLOBALS FOR CARDS SUITS = ('S', 'H', 'D', 'C') RANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K') VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10} # A card class has only two basic purposes: (i) to be a container for suit and rank, (ii) to return a nicely formatted string, ready for printout, representing that card. Below you can find UML class diagram for `Card` class and its implementation in Python. # ![game2-card1.png](attachment:game2-card1.png) # + ## CARD CLASS class Card: """ Just a container for suit and rank values. Can return string representing those values as ASCII text or Unicode. Call get_symbol and pass the desired type. 
""" def __init__(self, suit, rank): if (suit in SUITS) and (rank in RANKS): self.suit = suit self.rank = rank else: self.suit = None self.rank = None print ("Invalid card: ", suit, rank) def get_suit(self): return self.suit def get_rank(self): return self.rank def __str__(self): return self._representation(1) def _representation(self, type=1): if type == 1: return self.suit + self.rank if type == 2: symbols = ('♠', '♥', '♦', '♣') return symbols[SUITS.index(self.suit)] + self.rank if type == 3: first_unicode_card = 0x0001F0A1 card_unicode = first_unicode_card + RANKS.index(self.rank) if RANKS.index(self.rank) >= 11: card_unicode += 1 card_unicode += SUITS.index(self.suit) * 16 return chr(card_unicode) if type == 4: return chr(0x0002592) * 2 if type == 5: return chr(0x0001F0A0) print("Invalid type passed to Card-->_representation()") return None def get_symbol(self, type=2): """ Returns 'symbol' representing the card's suit and rank. type = 1: returns "ST" for spade 10 type = 2: returns "♣T" for spade 10 type = 3: returns "🂪" for spade 10 type = 4: returns "▒▒" for any card (two characters representing back) type = 5: returns "🂠" for any card (one Unicode card back symbol) """ return self._representation(type) # - # Note how we check in initialization if the given parameters make a valid card. Robust checks at early stages reduce the time wasted in debugging in later stages of software development. As `__str__` is Python specific overload, used for early debugging only, we chose not to include it in UML class diagram. # # Essentially, everything this class does is accept suit and rank, report error if those are invalid, return via methods suit and rank and return a 'symbol' representing the card. Notice how we want to give more options to the users of our class, so we allow different symbols to be returned depending on the `type` passed. By default it is set to 2. 
# <br>[back to top ↥](#t4p2toc) # <a id='t4p2tecard'></a> # ##### **Test and Experiment** # Let's make some card objects and see how they work. Feel free to add more of your own. c1 = Card('S', 'A') # ace of spades c2 = Card('H', '2') # two of hearts c3 = Card('D', '5') # five of diamonds c4 = Card('C', 'T') # ten of clubs c5 = Card('H', 'Q') # queen of hearts # Try passing non-existent rank / suit values. w1 = Card('s', '2') # suit should be upcase w2 = Card('H', 'three') # rank written as text w3 = Card('D', 3) # integer passed as rank # Use `get` methods to get suit, rank or both. print(c1.get_suit()) print(c2.get_rank()) print(c3.get_suit(), c3.get_rank()) # These methods can be used by game logic to identify what card is that and how to process it. But, for a human, a different formatting is preferred. For that purpose, we use `get_symbol` method. <br> **Important:** we **don't** to **print** the card directly from this class. Doing so would limit our options later, when making the game, and deciding how to format the output. Therefore, we only need to return a string that is ready for printing. print(c1.get_symbol()) print(c2.get_symbol()) print(c3.get_symbol()) # Try using types 1 - 3. # + print(c1.get_symbol(1)) print(c1.get_symbol(2)) print(c1.get_symbol(3)) print() print(c2.get_symbol(1)) print(c2.get_symbol(2)) print(c2.get_symbol(3)) print() print(c3.get_symbol(1)) print(c3.get_symbol(2)) print(c3.get_symbol(3)) print() # - # Tip: aim to design your classes modular and scalable. Perhaps a game will require to print a card with face down? For that, we included types 4 and 5. # + print(c1.get_symbol(4)) print(c1.get_symbol(5)) print() print(c2.get_symbol(4)) print(c2.get_symbol(5)) print() print(c3.get_symbol(4)) print(c3.get_symbol(5)) print() # - # Keep experimenting on your own. 
# <br>[back to top ↥](#t4p2toc) # <a id='t4p2deck'></a> # #### **▇▂ `Deck` Class ▂▂** # `Deck` class is just a container to store `Card` objects, therefore the natural relation between `Deck` and `Card` classes is **aggregation**. # ![game2-deck1.png](attachment:game2-deck1.png) # As can be seen from UML diagram above, `Deck` class contains an attribute `cards` which is a list of `Card` objects. The rest are the methods to add, remove or manage cards on the deck. # # To add a card `add_cardobject()` is used, which takes a `Card` object and appends it to `cards` list. To get a card, there are two methods. `peek_card()` will return the first `Card` object in the list, but not remove it, while `draw_card()` also returns the first `Card` object, but removes it from the list. Those can be combined in `deal()` method, that will remove the first `Card` object from one `Deck` and append it at the end of another. `shuffle()` is used to shuffle the order of `Card` objects in `cards`. # # Since we expect most of the operations with `Deck` will start by making either a 52 or 32 card deck, two methods `make_deck_52()` and `make_deck_32()` are added to speed up that task. # # Finally, we have maintenance methods: `clear_deck()`, `get_card_num()`, `get_all_cards()` and for printing and debugging `get_card_str()`. 
# +
# random module is needed for shuffle
import random as rnd

## DECK CLASS
class Deck:
    """Container for Card objects.

    Cards are stored in `self.cards` in insertion order; index 0 is the
    "top" of the deck used by peek_card / draw_card / deal.
    """

    def __init__(self):
        self.cards = []

    def add_cardobject(self, cardobject):
        """ Append a Card object to the bottom (end) of the deck. """
        self.cards.append(cardobject)

    def make_deck_52(self):
        """ Rebuild the deck as a full 52-card deck (every suit x every rank). """
        self.cards = []
        for s in SUITS:
            for r in RANKS:
                new_card = Card(s, r)
                self.add_cardobject(new_card)

    def make_deck_32(self):
        """ Rebuild the deck as a 32-card (piquet) deck: A, 7, 8, 9, T, J, Q, K of each suit.

        Bug fix: the previous version iterated RANKS[6:], which is only the
        7 ranks '7'..'K' (28 cards) — it dropped the ace. A 32-card deck
        keeps the ace plus seven through king (8 ranks x 4 suits = 32).
        """
        self.cards = []
        for s in SUITS:
            for r in RANKS[:1] + RANKS[6:]:  # ('A',) + ('7', ..., 'K') = 8 ranks
                new_card = Card(s, r)
                self.add_cardobject(new_card)

    def clear_deck(self):
        """ Remove all cards from the deck. """
        self.cards = []

    def get_card_num(self):
        """ Return the number of cards currently in the deck. """
        return len(self.cards)

    def get_all_cards(self):
        """ returns card object array that can be iterated by other functions """
        return self.cards

    def get_card_str(self, separator = ' ', type = 2):
        """
        Returns all cards as a string ready for printing.
        Choose separator and type (1-5) to format the output.
        (`type` shadows the builtin but is kept for interface compatibility.)
        """
        return separator.join([c.get_symbol(type) for c in self.cards])

    def __str__(self):
        # Debug-friendly representation: ASCII symbols, space separated.
        return self.get_card_str(type=1)

    def shuffle(self):
        """ Shuffle the deck in place. """
        rnd.shuffle(self.cards)

    def peek_card(self):
        """ Get the first card from the deck, but DO NOT remove it. """
        if len(self.cards) > 0:
            return self.cards[0]
        return None

    def draw_card(self):
        """ Get the first card from the deck and remove it. Returns None if empty. """
        if len(self.cards) > 0:
            return self.cards.pop(0)
        return None

    def deal(self, target_deck):
        """ Removes the first card from the deck and adds it to the target.
        Silently does nothing when the deck is empty. """
        if len(self.cards) > 0:
            target_deck.add_cardobject(self.draw_card())
# -

# <br>[back to top ↥](#t4p2toc)

# <a id='t4p2tedeck'></a>
# ##### **Test and Experiment**
#
# Create a deck and add previously created cards c1-c5 to it. Then, print the deck.

# +
deck = Deck()
deck.add_cardobject(c1)
deck.add_cardobject(c2)
deck.add_cardobject(c3)
deck.add_cardobject(c4)
deck.add_cardobject(c5)

print(deck.get_card_str())
# -

# Print the same deck again with type = 3 and type = 5.
print(deck.get_card_str(type=3))

print(deck.get_card_str(type=5))

# Next, create 52 and 32 card decks and print all cards with print type of your choosing.

d52 = Deck()
d52.make_deck_52()
print(d52.get_card_str(type=3))

d32 = Deck()
d32.make_deck_32()
print(d32.get_card_str(separator="",type=3))

# Try shuffling d52. If cards are not easily visible, switch to type=2.

d52.shuffle()
print(d52.get_card_str(type=3))

# Print the number of cards in the first deck and d52. Then deal 3 cards from d52 to the first deck. Print the number of cards afterwards and the contents of those decks.

# +
print("Deck 'deck' contain", deck.get_card_num(),"cards.")
print("Deck 'd52' contain", d52.get_card_num(),"cards.")

print("\n","-"*30,"\n\tDealing 3 cards.\n","-"*30,"\n")
for _ in range(3):
    d52.deal(deck)

print("Deck 'deck' contain", deck.get_card_num(),"cards.")
print("Deck 'd52' contain", d52.get_card_num(),"cards.")

print("\n","-"*30,"\n\tDeck 'deck'\n","-"*30,"\n")
print(deck.get_card_str(type=3))
print("\n","-"*30,"\n\tDeck d52\n","-"*30,"\n")
print(d52.get_card_str(type=3))
# -

# Keep experimenting on your own.

# <br>[back to top ↥](#t4p2toc)
# <a id='t4p2hand'></a>
# #### **▇▂ `Hand` Class ▂▂**
# We will further customize `Deck` class for the specific purposes of blackjack game and call it `Hand` class. As we have pretty much everything needed in `Deck` already, we will only add a method `get_value()` to return the number of points of that hand, according to blackjack rules, and two methods to directly print to the console, as this is the final class, which will be exposed to the user. <br>
# For possible extensions in the future, we added the attribute `name`.
# ![game2-hand1.png](attachment:game2-hand1.png)

# +
## HAND CLASS
class Hand(Deck):
    """ Extended deck class for Blackjack game.
        Added methods for easier printing and value calculation of a hand.
    """

    def __init__(self, name):
        super().__init__()
        # owner's display name, used as a prefix by the print methods
        self.name = name

    def print_hand(self):
        """ Prints all cards in the hand to the console. Uses type=2. """
        print("{}'s cards:".format(self.name))
        if len(self.cards) > 0:
            print(self.get_card_str(separator=' ', type=2))
        else:
            print("(empty)")

    def print_face_down(self, reveal_number = 1):
        """ Prints all cards face down, except the first reveal_number cards.
            Uses type=2 for face up and type=4 for face down.
        """
        # slice semantics make out-of-range values safe: a reveal_number
        # larger than the hand shows every card face up; 0 shows none.
        separator = ' '
        face_up = separator.join(
            [c.get_symbol(2) for c in self.cards[:reveal_number]])
        face_down = separator.join(
            [c.get_symbol(4) for c in self.cards[reveal_number:]])
        print("{}'s cards:".format(self.name))
        print(face_up + separator + face_down)

    def get_value(self):
        """ Returns BlackJack value of a hand. """
        # aces are first counted as 1 (their VALUES entry); afterwards a
        # single ace is upgraded to 11 when that cannot bust (val <= 11)
        val = 0
        has_ace = False
        for c in self.cards:
            if VALUES[c.get_rank()] == 1:
                has_ace = True
            val += VALUES[c.get_rank()]
        if has_ace and (val <= 21-10):
            val += 10
        return val
# -

# <br>[back to top ↥](#t4p2toc)
# <a id='t4p2tehand'></a>
# ##### **Test and Experiment**
# Recreate 52 card deck. Create 3 players. Deal few cards to each player and print their hands, as well as blackjack hand values.

# +
# making a new 52 card deck and shuffling it
d52 = Deck()
d52.make_deck_52()
d52.shuffle()

# creating 3 players
ply1 = Hand("Andrej")
ply2 = Hand("Babak")
ply3 = Hand("Aleksandra")

# deal 3 cards to ply1, 2 cards to ply2 and 4 cards to ply3
for _ in range(3):
    d52.deal(ply1)
for _ in range(2):
    d52.deal(ply2)
for _ in range(4):
    d52.deal(ply3)

# print contents of hand; check if value is calculated correctly
ply1.print_hand()
print("ply1 hand value:", ply1.get_value())
print()
ply2.print_hand()
print("ply2 hand value:", ply2.get_value())
print()
ply3.print_hand()
print("ply3 hand value:", ply3.get_value())
print()
# -

# Make sure to test special cases and outliers. In this example, those are hands containing aces.
card1 = Card('S', 'A')
card2 = Card('H', 'A')
card3 = Card('D', '3')
card4 = Card('C', '7')
card5 = Card('C', 'A')
card6 = Card('D', 'K')

# +
test1 = Hand("test1")
test1.add_cardobject(card1)
test1.add_cardobject(card3)
test1.add_cardobject(card4)

test1.print_hand()
print(test1.get_value())

# +
test1.add_cardobject(card2)

test1.print_hand()
print(test1.get_value())

# +
test2 = Hand("test2")
test2.add_cardobject(card3)
test2.add_cardobject(card4)
test2.add_cardobject(card6)
test2.add_cardobject(card2)

test2.print_hand()
print(test2.get_value())
# -

# Depending on the situation, you may want to reveal only some cards and print others as face down. For that purpose, `print_face_down()` method is used. It expects an integer used to determine the first *n* number of cards to be printed as face up. Every other card from *n+1* will be printed face down.

# +
ply1.print_face_down()        # default value
ply2.print_face_down(1000000) # big number on purpose to test if it breaks the method

ply3.print_face_down(3)       # reveal first 3 cards
ply3.print_face_down(0)       # print all cards as face down
# -

# Keep experimenting on your own.

# <br>[back to top ↥](#t4p2toc)
# <a id='t4p2game'></a>
# #### **▇▂ `Game` Class ▂▂**
# We will reuse `Game` template class from the previous mini-project (Guess the number game). It is copy-pasted here for further reference.
# ![game1-gameclass1.png](attachment:game1-gameclass1.png)

class Game:
    """ Game is a base, template class, implementing a simple game loop.
        Create a child class and override the needed methods.
    """

    def __init__(self):
        # name-mangled flag (_Game__running); subclasses control it only
        # through start() / quit()
        self.__running = True

    def get_state(self):
        """ Returns True while the game loop is running. """
        return self.__running

    def start(self):
        """ Internal message to start the game. """
        self.__running = True

    def quit(self):
        """ Internal message to end the game. """
        self.__running = False

    def draw(self):
        """ Renders the game state once per loop iteration. Override. """
        pass

    def user_input(self):
        """ Expects input from the user and returns the result.
            Override."""
        print("Enter Q or QUIT to terminate the game")
        res = input("Please enter your command:")
        return res

    def update(self, keys):
        """ Processes given input. By default quits on Q or QUIT. Override. """
        if keys.upper() == "QUIT" or keys.upper() == "Q":
            self.__running = False

    def intro(self):
        """ The first method to be called (once) when the game starts. Override. """
        pass

    def outro(self):
        """ The last method to be called (once) when the game ends. Override. """
        pass

    def run(self):
        """ The main 'workhorse' method. Calls all other methods according to the game loop. """
        self.intro()
        while self.__running:
            self.draw()
            usr = self.user_input()
            self.update(usr)
        self.outro()

# <br>[back to top ↥](#t4p2toc)
# <a id='t4p2assembling'></a>
# #### **▇▂ Assembling the Pieces Together ▂▂**
# All that is left to do is assemble the pieces together and make a new `Blackjack` class, that will be used to play the game.
#
# To organize input and possible decisions both human and computer (dealer) can make, let us see how the game is played. First, two cards are dealt to player and dealer. Player's cards are given as face up, while dealer only reveals the first card. Then, the player has the option to either **HIT** or **STAND**. If **HIT** was selected, the player will be dealt another card and then the choice goes to the dealer, who can also choose one of the two options. Once either chooses to **STAND**, that player will no longer be drawing cards. If anyone goes over 21, it is considered a **BUST** and that entity automatically loses and can no longer draw cards. When everyone stops getting the cards, either by **STAND**ing or **BUST**ing, the game ends, and the entity with the most points, up to and including 21, wins.
#
# The dealer's choice depends on the cards of the player. If player goes over 21 (**BUSTED**), the dealer will no longer be drawing cards and will choose to **STAND**. Else, if dealer's hand is worth less than 17 points, he will choose to **HIT**. Otherwise, the dealer's choice will be to **STAND**.
#
# We can implement this easily as an extension to our `Hand` class.

# +
## Implementation of dealer specific for our blackjack game logic
class Dealer(Hand):
    """ Hand subclass implementing the dealer's fixed drawing strategy. """

    def __init__(self):
        super().__init__("dealer")

    def choice_hit(self):
        """ Returns True while the dealer wants another card
            (hand value below 17, the classic dealer rule). """
        # the comparison already yields a bool; no ternary needed
        return self.get_value() < 17
# -

# Similar to the GuessTheNumber game we implemented in the previous mini-project, we can implement `Blackjack` class, by inheriting from `Game` class template, and overriding the methods as needed.
#
# The design we have chosen is given in the class diagram below.
# ![game2-blackjack1.png](attachment:game2-blackjack1.png)
# If you can, try to implement the game based on the suggested design. The comments in the code are meant to guide you, but feel free to take a different approach if you prefer.

# +
## TO BE IMPLEMENTED
# NOTE: `pass` placeholders were added to every comment-only method so this
# template cell is valid Python and can be executed as-is before you start.
class Blackjack(Game):

    def __init__(self):
        # implement initialization
        # Assign and initialize deck, player and dealer
        pass  # replace with your implementation

    def new_game(self):
        """ Resets all game objects to starting values. """
        # This method will be called by intro(), but can also be used independently to restart the game

        # Set game flag to running.
        # Clear all aggregated objects from values of the previous game.
        # Make and shuffle 52 card deck
        # Deal 2 cards to player and dealer (one at a time)
        # ... implement ...

        # we will use these flags to track previous choices
        # (if entity picks STAND, that entity will not be asked to make a choice for the rest of the game)
        self.player_choice = ""
        self.dealer_choice = ""

    def intro(self):
        """ OVERRIDDEN: displays instructions and restarts the game """
        # print game instructions
        # print()
        # ... add what is needed ...

        # call new game (resets all game objects)
        self.new_game()

    def draw(self):
        """ OVERRIDDEN: draw game objects in console """
        # output (print) cards from dealer and player
        # dealer reveals only the first card
        # ... implement ...
        pass  # replace with your implementation

    def _user_choice(self):
        """ helper function to process user input """
        # this method was taken out of user_input() to make it clearer
        # it sets valid choices, presents them to the player, and loops until one is selected.
        # then it returns a string representing the player's choice: "HIT" or "STAND"
        # It is a good practice to let player terminate the game as he pleases, so add "QUIT" choice as well.

        # define valid input characters
        # ... implement ...

        # show user valid choices and keep in loop until one is given
        # ... implement ...

        # set player's choice flag according to the input
        # ... add what is missing and implement ...
        #if ... return "HIT"
        #if ... return "STAND"
        #if ... return "QUIT"
        pass  # replace with your implementation

    def user_input(self):
        """ OVERRIDDEN: get and process user's input.
            Note: this function is to be called until user chooses STAND.
            If that happens, dealer plays as much as he wants according to his logic
        """
        # If the previous choice was not STAND, ask user for a new choice
        # and set the flag accordingly (call _user_choice())
        # ... implement ...

        # This method is also expected to return a value (string)
        # so return the same flag (str value) you have set just above
        # ... implement ...
        pass  # replace with your implementation

    def update(self, keys):
        """ OVERRIDDEN: updates the game according to the choices user and dealer made """
        # deal one more card to the player if HIT was selected
        # ... implement ...

        # dealer doesn't play if user busted
        # if not, call dealer's choice and update the flag accordingly
        # ... implement ...

        # if dealer opted to draw one more card, deal it
        # ... implement ...

        # check conditions to stop the game
        # 1 - user terminated the game; 2 - both players chose to STAND
        # 3 - player BUSTED
        # stop the game by sending the appropriate message (to change 'running' flag)
        # ... implement ...
        pass  # replace with your implementation

    def outro(self):
        """ OVERRIDDEN: prints cards, points and announces winner """
        # Announce that the game has ended
        # Print cards of dealer and player
        # Determine who wins and how (by points, opponent busted, ...)
        pass  # replace with your implementation
        # Display points and the outcome
# -

# <br>[back to top ↥](#t4p2toc)
# <a id='t4p2blackjack'></a>
# #### **▇▂ `Blackjack` Class - Solution ▂▂**
# It is critical that you are able to solve the problem on your own. Remember, software design doesn't have absolute right or wrong. Some designs have advantages in terms of implementation, other for maintenance, some others for scalability and so on. You learn by trial and error. Practice through coding.
#
# After you are successful, compare your approach with the one we took.

# +
## BLACKJACK CLASS
class Blackjack(Game):
    """ Console Blackjack game assembled from the Game loop template,
        a Deck, a player's Hand and a Dealer. """

    def __init__(self):
        super().__init__()
        self.deck = Deck()
        self.player = Hand("player")
        self.dealer = Dealer()

    def new_game(self):
        """ Resets all game objects to starting values. """
        # set game flag: running = True
        self.start()
        # clear decks if used in the previous game
        self.player.clear_deck()
        self.dealer.clear_deck()
        self.deck.clear_deck()
        # make a new 52 card deck and shuffle it
        self.deck.make_deck_52()
        self.deck.shuffle()
        # Deal two cards to player and dealer. One at a time to each.
        for _ in range(2):
            self.deck.deal(self.player)
            self.deck.deal(self.dealer)
        # set flags to be read by game logic later.
        # "" = no choice yet; later "HIT"/"STAND"/"QUIT" (player only)
        self.player_choice = ""
        self.dealer_choice = ""

    def intro(self):
        """ OVERRIDDEN: displays instructions and restarts the game """
        # print game instructions
        print("Welcome to Blackjack!\n")
        print("Your objective is to beat the dealer's hand.")
        print("Aces count as 1 or 11 (whatever is more favourable).")
        print("The hand with higher value wins.")
        print("Values over 21 are considered as BUST.\n\n")
        # call new game (resets all game objects)
        self.new_game()

    def draw(self):
        """ OVERRIDDEN: draw game objects in console """
        # output cards from dealer and player
        # dealer reveals only the first card
        self.dealer.print_face_down(1)
        self.player.print_hand()
        print()

    def _user_choice(self):
        """ helper function to process user input.
            Loops until a valid entry and returns "HIT", "STAND" or "QUIT". """
        # define valid input characters
        hit_choices = ('1', 'H', 'HIT')
        stand_choices = ('2', 'S', 'STAND')
        quit_choices = ('0', 'Q', 'QUIT')
        valid_choices = hit_choices + stand_choices + quit_choices

        # show user valid choices and keep in loop until one is given
        choice = ""
        while not choice.upper() in valid_choices:
            print("Please enter your choice:")
            print("(1) - Hit")
            print("(2) - Stand")
            print("-----------")
            print("(0) - Quit")
            choice = input()

        # set player's choice flag according to the input
        if choice.upper() in hit_choices:
            return "HIT"
        if choice.upper() in stand_choices:
            return "STAND"
        if choice.upper() in quit_choices:
            return "QUIT"

    def user_input(self):
        """ OVERRIDDEN: get and process user's input.
            Note: this function is to be called until user chooses STAND.
            If that happens, dealer plays as much as he wants according to his logic
        """
        # If the previous choice was not STAND, ask user for a new choice
        # and set the flag accordingly
        if not (self.player_choice == "STAND"):
            self.player_choice = self._user_choice()
        return self.player_choice

    def update(self, keys):
        """ OVERRIDDEN: updates the game according to the choices user and dealer made """
        # deal one more card to the player if HIT was selected
        if self.player_choice == "HIT":
            self.deck.deal(self.player)

        # dealer doesn't play if user busted
        # if not, call dealer's choice and update the flag accordingly
        # (the dealer's choice is re-evaluated on every round)
        self.dealer_choice = "STAND"
        if self.player.get_value() <= 21 and self.dealer.choice_hit():
            # dealer opted to draw one more card
            self.dealer_choice = "HIT"
            self.deck.deal(self.dealer)

        # check conditions to stop the game
        # 1 - user terminated the game; 2 - both players chose to STAND
        # 3 - player BUSTED
        if keys == "QUIT":
            self.quit()
        if self.player_choice == "STAND" and self.dealer_choice == "STAND":
            self.quit()
        if self.player.get_value() > 21:
            self.quit()

    def outro(self):
        """ OVERRIDDEN: prints cards, points and announces winner """
        print("\nThe game ended.\n")
        self.dealer.print_hand()
        self.player.print_hand()
        print()

        if self.player.get_value() > 21:
            print("Player busted! Dealer wins.")
        elif self.dealer.get_value() > 21:
            print("Dealer busted! Player wins.")
        elif self.player.get_value() > self.dealer.get_value():
            print("Dealer scores {}, player scores {}. Player wins.".format(
                self.dealer.get_value(), self.player.get_value()))
        else:
            # equal scores fall through to this branch: ties favour the dealer
            print("Dealer scores {}, player scores {}. Dealer wins.".format(
                self.dealer.get_value(), self.player.get_value()))
# -

# Run the game and enjoy the spoils of your hard work!

game = Blackjack()
game.run()

# <br>[back to top ↥](#t4p2toc)
ipynb/T08-02-Practical-Examples-OOP-Part-2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.3 64-bit # name: python3 # --- import spotipy from spotipy.oauth2 import SpotifyClientCredentials import os import sys import pandas as pd # %store -r os.environ['SPOTIPY_CLIENT_ID'] = spotify_id os.environ['SPOTIPY_CLIENT_SECRET'] = spotify_secret os.environ['SPOTIPY_REDIRECT_URI'] = 'http://localhost:8080' # + birdy_uri = 'spotify:artist:1vyhD5VmyZ7KMfW5gqLgo5' spotify = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials()) results = spotify.artist_albums(birdy_uri, album_type='album') albums = results['items'] while results['next']: results = spotify.next(results) albums.extend(results['items']) for album in albums: print(album['name']) # + if len(sys.argv) > 1: name = ' '.join(sys.argv[1:]) else: name = 'Bad Bunny' results = spotify.search(q='artist:' + name, type='artist') items = results['artists']['items'] if len(items) > 0: artist = items[0] print(artist['name'], artist['images'][0]['url']) # + lz_uri = 'spotify:artist:4q3ewBCX7sLwd24euuV69X' spotify = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials()) results = spotify.artist_top_tracks(lz_uri) for track in results['tracks'][:10]: print('track : ' + track['name']) print('audio : ' + track['preview_url']) print('cover art: ' + track['album']['images'][0]['url']) print() # - # https://open.spotify.com/artist/4q3ewBCX7sLwd24euuV69X?si=VDYE-tAdQle6qIW9ZO9oaw&dl_branch=1 # + from spotipy.oauth2 import SpotifyOAuth scope = "user-library-read" sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope)) results = sp.current_user_saved_tracks(limit = 50) for idx, item in enumerate(results['items']): track = item['track'] print(idx, track['artists'][0]['name'], " – ", track['name']) # - added_tracks = pd.DataFrame(results['items']) track_info = pd.json_normalize(added_tracks['track']) 
pd.json_normalize(track_info['artists']) # Necesito escapar la fecha en que agregue la cancion. # Y ver que otras variables me sirven. track_info.columns pd.json_normalize(track_info['artists'][1]) track_info['artists'][1] track_info track_analysis = pd.json_normalize(sp.audio_analysis('6mm3K0yWp6uzfOMuipM9Zh')) track_analysis beats = pd.json_normalize(track_analysis['beats'][0]) beats # + import matplotlib.pyplot as plt plt.plot(beats.start/60, beats.confidence) plt.title('Tan Bonita - Piso 21') plt.xlabel('Min') plt.ylabel('Beat') plt.show() # - track_features = pd.json_normalize(sp.audio_features('6mm3K0yWp6uzfOMuipM9Zh')) track_features
00_get_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + init_cell=false run_control={"marked": false} # Enable in-notebook generation of plots # %matplotlib inline # - # # Experiments collected data # Data required to run this notebook are available for download at this link: # # https://www.dropbox.com/s/q9ulf3pusu0uzss/SchedTuneAnalysis.tar.xz?dl=0 # # This archive has to be extracted from within the LISA's results folder. # ## Initial set of data # + hidden=true hide_input=false res_dir = '../../results/SchedTuneAnalysis/' # !tree {res_dir} # + hidden=true noboost_trace = res_dir + 'trace_noboost.dat' boost15_trace = res_dir + 'trace_boost15.dat' boost25_trace = res_dir + 'trace_boost25.dat' # trace_file = noboost_trace trace_file = boost15_trace # trace_file = boost25_trace # - # ## Loading support data collected from the target # + hidden=true import json # Load the platform information with open('../../results/SchedTuneAnalysis/platform.json', 'r') as fh: platform = json.load(fh) print "Platform descriptio collected from the target:" print json.dumps(platform, indent=4) # + hidden=true from trappy.stats.Topology import Topology # Create a topology descriptor topology = Topology(platform['topology']) # - # # Trace analysis # + [markdown] hidden=true # We want to ensure that the task has the expected workload:<br> # - LITTLE CPU bandwidth of **[10, 35 and 60]%** every **2[ms]** # - activations every **32ms** # - always **starts on a big** core # + [markdown] hidden=true # ## Trace inspection # + [markdown] hidden=true # ### Using kernelshark # + hidden=true # Let's look at the trace using kernelshark... 
# !kernelshark {trace_file} 2>/dev/null # + [markdown] hidden=true # - Requires a lot of interactions and hand made measurements # - We cannot easily annotate our findings to produre a sharable notebook # + [markdown] hidden=true # ### Using the TRAPpy Trace Plotter # + [markdown] hidden=true # An overall view on the trace is still useful to get a graps on what we are looking at. # + code_folding=[] hidden=true # Suport for FTrace events parsing and visualization import trappy # NOTE: The interactive trace visualization is available only if you run # the workload to generate a new trace-file trappy.plotter.plot_trace(trace_file)#, execnames="task_ramp")#, pids=[2221]) # + [markdown] hidden=true # ## Events Plotting # + [markdown] hidden=true # The **sched_load_avg_task** trace events reports this information # + [markdown] hidden=true # ### Using all the unix arsenal to parse and filter the trace # + hidden=true # Get a list of first 5 "sched_load_avg_events" events sched_load_avg_events = !(\ grep sched_load_avg_task {trace_file.replace('.dat', '.txt')} | \ head -n5 \ ) print "First 5 sched_load_avg events:" for line in sched_load_avg_events: print line # + [markdown] hidden=true # A graphical representation whould be really usefuly! 
# + [markdown] hidden=true # ### Using TRAPpy generated DataFrames # + [markdown] hidden=true # #### Generate DataFrames from Trace Events # + hidden=true # Load the LISA::Trace parsing module from trace import Trace # Define which event we are interested into trace = Trace(trace_file, [ "sched_switch", "sched_load_avg_cpu", "sched_load_avg_task", "sched_boost_cpu", "sched_boost_task", "cpu_frequency", "cpu_capacity", ], platform) # + [markdown] hidden=true # #### Get the DataFrames for the events of interest # + hidden=true # Trace events are converted into tables, let's have a look at one # of such tables load_df = trace.data_frame.trace_event('sched_load_avg_task') load_df.head() # + hidden=true df = load_df[load_df.comm.str.match('k.*')] # df.head() print df.comm.unique() # + hidden=true cap_df = trace.data_frame.trace_event('cpu_capacity') cap_df.head() # + [markdown] hidden=true # #### Plot the signals of interest # + code_folding=[] hidden=true # Signals can be easily plot using the ILinePlotter trappy.ILinePlot( # FTrace object trace.ftrace, # Signals to be plotted signals=[ 'cpu_capacity:capacity', 'sched_load_avg_task:util_avg' ], # Generate one plot for each value of the specified column pivot='cpu', # Generate only plots which satisfy these filters filters={ 'comm': ['task_ramp'], 'cpu' : [2,5] }, # Formatting style per_line=2, drawstyle='steps-post', marker = '+', sync_zoom=True, group="GroupTag" ).view() # + [markdown] hidden=true # ### Use a set of standard plots # + [markdown] hidden=true # A graphical representation can always be on hand # - trace = Trace(boost15_trace, ["sched_switch", "sched_overutilized", "sched_load_avg_cpu", "sched_load_avg_task", "sched_boost_cpu", "sched_boost_task", "cpu_frequency", "cpu_capacity", ], platform, plots_prefix='boost15_' ) # + [markdown] hidden=true # Usually a common set of plots can be generated which capture the most useful information realted to a workload we are analysing # + [markdown] hidden=true # #### 
Example of task realted signals # + hidden=true trace.analysis.tasks.plotTasks( tasks=['task_ramp'], signals=['util_avg', 'boosted_util', 'sched_overutilized', 'residencies'], ) # - # #### Example of Clusters related singals trace.analysis.frequency.plotClusterFrequencies() # + [markdown] hidden=true # #### Take-away # + [markdown] hidden=true # In a single plot we can aggregate multiple informations which makes it easy to verify the expected behaviros. # # With a set of properly defined plots we are able to condense mucy more sensible information which are easy to ready because they are "standard".<br> # We immediately capture what we are interested to evaluate! # # Moreover, all he produced plots are available as high resolution images, ready to be shared and/or used in other reports. # + hidden=true # !tree {res_dir} # + [markdown] hidden=true # ## Behavioral Analysis # + [markdown] hidden=true # ### Is the task starting on a big core? # + [markdown] hidden=true # We always expect a new task to be allocated on a big core. # # To verify this condition we need to know what is the topology of the target. # # This information is **automatically collected by LISA** when the workload is executed.<br> # Thus it can be used to write **portable tests** conditions. # + [markdown] hidden=true # #### Create a SchedAssert for the specific topology # + hidden=true from bart.sched.SchedMultiAssert import SchedAssert # Create an object to get/assert scheduling pbehaviors sa = SchedAssert(trace_file, topology, execname='task_ramp') # + [markdown] hidden=true # #### Use the SchedAssert method to investigate properties of this task # + hidden=true # Check on which CPU the task start its execution if sa.assertFirstCpu(platform['clusters']['big']):#, window=(4,6)): print "PASS: Task starts on big CPU: ", sa.getFirstCpu() else: print "FAIL: Task does NOT start on a big CPU!!!" # + [markdown] hidden=true # ### Is the task generating the expected load? 
# + [markdown] hidden=true # We expect 35% load in the between 2 and 4 [s] of the execution # + [markdown] hidden=true # #### Identify the start of the first phase # + hidden=true # Let's find when the task starts start = sa.getStartTime() first_phase = (start, start+2) print "The task starts execution at [s]: ", start print "Window of interest: ", first_phase # + [markdown] hidden=true # #### Use the SchedAssert module to check the task load in that period # + hidden=true import operator # Check the task duty cycle in the second step window if sa.assertDutyCycle(10, operator.lt, window=first_phase): print "PASS: Task duty-cycle is {}% in the [2,4] execution window"\ .format(sa.getDutyCycle(first_phase)) else: print "FAIL: Task duty-cycle is {}% in the [2,4] execution window"\ .format(sa.getDutyCycle(first_phase)) # + [markdown] hidden=true # This test fails because we have not considered a scaling factor due running at a lower OPP. # # To write a portable test we need to account for that condition! 
# + [markdown] hidden=true # #### Take OPP scaling into consideration # + hidden=true # Get LITTLEs capacities ranges: littles = platform['clusters']['little'] little_capacities = cap_df[cap_df.cpu.isin(littles)].capacity min_cap = little_capacities.min() max_cap = little_capacities.max() print "LITTLEs capacities range: ", (min_cap, max_cap) # Get min OPP correction factor min_little_scale = 1.0 * min_cap / max_cap print "LITTLE's min capacity scale: ", min_little_scale # + hidden=true # Scale the target duty-cycle according to the min OPP target_dutycycle = 10 / min_little_scale print "Scaled target duty-cycle: ", target_dutycycle target_dutycycle = 1.01 * target_dutycycle print "1% tolerance scaled duty-cycle: ", target_dutycycle # + [markdown] hidden=true # #### Write a more portable assertion # + hidden=true # Add a 1% tolerance to our scaled target dutycycle if sa.assertDutyCycle(1.01 * target_dutycycle, operator.lt, window=first_phase): print "PASS: Task duty-cycle is {}% in the [2,4] execution window"\ .format(sa.getDutyCycle(first_phase) * min_little_scale) else: print "FAIL: Task duty-cycle is {}% in the [2,4] execution window"\ .format(sa.getDutyCycle(first_phase) * min_little_scale) # + [markdown] hidden=true # ### Is the task migrated once we exceed the LITTLE CPUs capacity? 
# + [markdown] hidden=true # #### Check that the task is switching the cluster once expected # + hidden=true # Consider a 100 [ms] window for the task to migrate delta = 0.1 # Defined the window of interest switch_window=(start+4-delta, start+4+delta) if sa.assertSwitch("cluster", platform['clusters']['little'], platform['clusters']['big'], window=switch_window): print "PASS: Task switches to big within: ", switch_window else: print "PASS: Task DOES NO switches to big within: ", switch_window # + [markdown] hidden=true # #### Check that the task is running most of its time on the LITTLE cluster # + hidden=true import operator if sa.assertResidency("cluster", platform['clusters']['little'], 66, operator.le, percent=True): print "PASS: Task exectuion on LITTLEs is {:.1f}% (less than 66% of its execution time)".\ format(sa.getResidency("cluster", platform['clusters']['little'], percent=True)) else: print "FAIL: Task run on LITTLE for MORE than 66% of its execution time" # + [markdown] hidden=true # ### Check that the util estimation is properly computed and CPU capacity matches # + hidden=true start = 2 last_phase = (start+4, start+6) analyzer_config = { "SCALE" : 1024, "BOOST" : 15, } # Verify that the margin is properly computed for each event: # margin := (scale - util) * boost margin_check_statement = "(((SCALE - sched_boost_task:util) * BOOST) // 100) == sched_boost_task:margin" # + hidden=true from bart.common.Analyzer import Analyzer # Create an Assertion Object a = Analyzer(trace.ftrace, analyzer_config, window=last_phase, filters={"comm": "task_ramp"}) # + hidden=true if a.assertStatement(margin_check_statement): print "PASS: Margin properly computed in : ", last_phase else: print "FAIL: Margin NOT properly computed in : ", last_phase # + [markdown] hidden=true # #### Check that the CPU capacity matches the task boosted value # + hidden=true # Get the two dataset of interest df1 = trace.data_frame.trace_event('cpu_capacity')[['cpu', 'capacity']] df2 = 
trace.data_frame.trace_event('boost_task_rtapp')[['__cpu', 'boosted_util']] # Join the information from these two df3 = df2.join(df1, how='outer') df3 = df3.fillna(method='ffill') df3 = df3[df3.__cpu == df3.cpu] #df3.ix[start+4:start+6,].head() # + hidden=true len(df3[df3.boosted_util >= df3.capacity]) # + [markdown] hidden=true # ##### Do it the TRAPpy way # + hidden=true # Create the TRAPpy class trace.ftrace.add_parsed_event('rtapp_capacity_check', df3) # Define pivoting value trace.ftrace.rtapp_capacity_check.pivot = 'cpu' # Create an Assertion a = Analyzer(trace.ftrace, {"CAP" : trace.ftrace.rtapp_capacity_check}, window=(start+4.1, start+6)) a.assertStatement("CAP:capacity >= CAP:boosted_util") # + [markdown] hidden=true # ## Going further on events processing # + [markdown] hidden=true # ### What are the relative residency on different OPPs? # + [markdown] hidden=true # We are not limited to the usage of pre-defined functions. We can exploit the full power of PANDAS to process the DataFrames to extract all kind of information we want. 
# + [markdown] hidden=true
# #### Use PANDAs APIs to filter and aggregate events

# + hidden=true
import pandas as pd

# NOTE(review): `trace` is created earlier in this notebook (not visible here);
# presumably a TRAPpy/LISA Trace object — confirm against the preceding cells.
# Focus on cpu_frequency events for CPU0
df = trace.data_frame.trace_event('cpu_frequency')
df = df[df.cpu == 0]

# Compute the residency on each OPP before switching to the next one
df.loc[:,'start'] = df.index
df.loc[:,'delta'] = (df['start'] - df['start'].shift()).fillna(0).shift(-1)

# Group by frequency and sum-up the deltas
freq_residencies = df.groupby('frequency')['delta'].sum()
print "Residency time per OPP:"
df = pd.DataFrame(freq_residencies)
df.head()

# Compute the relative residency time
tot = sum(freq_residencies)
#df = df.apply(lambda delta : 100*delta/tot)
for f in freq_residencies.index:
    print "Freq {:10d}Hz : {:5.1f}%".format(f, 100*freq_residencies[f]/tot)

# + [markdown] hidden=true
# #### Use MathPlot Lib to generate all kind of plot from collected data

# + hidden=true
# Plot residency time
import matplotlib.pyplot as plt
fig, axes = plt.subplots(1, 1, figsize=(16, 5));
df.plot(kind='barh', ax=axes, title="Frequency residency", rot=45);

# + [markdown] hidden=true
# <br><br><br><br>
# Advanced DataFrame usage: filtering by columns/rows, merging tables, plotting data<br>
# [notebooks/tutorial/05_TrappyUsage.ipynb](05_TrappyUsage.ipynb)
# <br><br><br><br>
# -

# # Remote target connection and control

# + [markdown] hidden=true
# Using LISA APIs to control a remote device and run custom workloads

# + [markdown] hidden=true
# ## Configure the connection

# + hidden=true
# Setup a target configuration
conf = {

    # Target platform and board
    # Target is localhost
    "platform"    : 'linux',
    "board"       : "juno",

    # Login credentials
    "host"        : "192.168.0.1",
    "username"    : "root",
    "password"    : "",

    # Binary tools required to run this experiment
    # These tools must be present in the tools/ folder for the architecture
    "tools"       : ['rt-app', 'taskset', 'trace-cmd'],

    # Comment the following line to force rt-app calibration on your target
    "rtapp-calib" : {
        "0": 355, "1": 138, "2": 138, "3": 355, "4": 354, "5": 354
    },

    # FTrace events and buffer configuration
    "ftrace"  : {
         "events" : [
             "sched_switch",
             "sched_wakeup",
             "sched_wakeup_new",
             "sched_overutilized",
             "sched_contrib_scale_f",
             "sched_load_avg_cpu",
             "sched_load_avg_task",
             "sched_tune_config",
             "sched_tune_tasks_update",
             "sched_tune_boostgroup_update",
             "sched_tune_filter",
             "sched_boost_cpu",
             "sched_boost_task",
             "sched_energy_diff",
             "cpu_frequency",
             "cpu_capacity",
         ],
         "buffsize" : 10240
    },

    # Where results are collected
    "results_dir" : "SchedTuneAnalysis",

    # Devlib module required (or not required)
    'modules' : [ "cpufreq", "cgroups" ],
    #"exclude_modules" : [ "hwmon" ],

}

# + [markdown] hidden=true
# ## Setup the connection

# + hidden=true
# Support to access the remote target
from env import TestEnv

# Initialize a test environment using:
# the provided target configuration (my_target_conf)
# the provided test configuration (my_test_conf)
te = TestEnv(conf)
target = te.target

print "DONE"

# + [markdown] hidden=true
# ## Target control

# + [markdown] hidden=true
# ### Run custom commands

# + hidden=true
# Enable Energy-Aware scheduler
target.execute("echo ENERGY_AWARE > /sys/kernel/debug/sched_features");
target.execute("echo UTIL_EST > /sys/kernel/debug/sched_features");

# Check which sched_feature are enabled
sched_features = target.read_value("/sys/kernel/debug/sched_features");
print "sched_features:"
print sched_features

# + [markdown] hidden=true
# ### Example CPUFreq configuration

# + hidden=true
target.cpufreq.set_all_governors('sched');

# Check which governor is enabled on each CPU
enabled_governors = target.cpufreq.get_all_governors()
print enabled_governors

# + [markdown] hidden=true
# ### Example of CGroups configuration

# + hidden=true
schedtune = target.cgroups.controller('schedtune')

# Configure a 25% boostgroup
boostgroup = schedtune.cgroup('/boosted')
boostgroup.set(boost=25)

# Dump the configuration of each group
cgroups = schedtune.list_all()
for cgname in cgroups:
    cgroup = schedtune.cgroup(cgname)
    attrs = cgroup.get()
    boost = attrs['boost']
    print '{}:{:<15} boost: {}'.format(schedtune.kind, cgroup.name, boost)
# -

# # Remote workloads execution

# + [markdown] hidden=true
# ## Generate RTApp configurations

# + hidden=true
# RTApp configurator for generation of PERIODIC tasks
from wlgen import RTA, Periodic, Ramp

# Create a new RTApp workload generator using the calibration values
# reported by the TestEnv module
rtapp = RTA(target, 'test', calibration=te.calibration())

# Ramp workload
ramp = Ramp(
    start_pct=10,
    end_pct=60,
    delta_pct=25,
    time_s=2,
    period_ms=32
)

# Configure this RTApp instance to:
rtapp.conf(
    # 1. generate a "profile based" set of tasks
    kind = 'profile',
    # 2. define the "profile" of each task
    params = {
        # 3. Composed task
        'task_ramp': ramp.get(),
    },
    #loadref='big',
    loadref='LITTLE',
    run_dir=target.working_directory
);

# + [markdown] hidden=true
# ## Execution and tracing

# + hidden=true
# NOTE(review): `logging` and `os` are presumably imported earlier in this
# notebook — confirm; they are not imported in the visible cells.
def execute(te, wload, res_dir, cg='/'):
    # Run `wload` on the target inside cgroup `cg` while collecting an FTrace
    # trace and (when an energy meter is available) energy samples.
    # Returns (energy report or None, platform description, its path, trace path).
    logging.info('# Setup FTrace')
    te.ftrace.start()

    if te.emeter:
        logging.info('## Start energy sampling')
        te.emeter.reset()

    logging.info('### Start RTApp execution')
    wload.run(out_dir=res_dir, cgroup=cg)

    if te.emeter:
        logging.info('## Read energy consumption: %s/energy.json', res_dir)
        nrg_report = te.emeter.report(out_dir=res_dir)
    else:
        nrg_report = None

    logging.info('# Stop FTrace')
    te.ftrace.stop()

    trace_file = os.path.join(res_dir, 'trace.dat')
    logging.info('# Save FTrace: %s', trace_file)
    te.ftrace.get_trace(trace_file)

    logging.info('# Save platform description: %s/platform.json', res_dir)
    plt, plt_file = te.platform_dump(res_dir)

    logging.info('# Report collected data:')
    logging.info(' %s', res_dir)
    # !tree {res_dir}

    return nrg_report, plt, plt_file, trace_file


# + hidden=true
nrg_report, plt, plt_file, trace_file = execute(te, rtapp, te.res_dir, cg=boostgroup.name)
# -

# # Regression testing support

# + [markdown] hidden=true
# Writing and running regression tests using the LISA API

# + [markdown] hidden=true
# ## Defined configurations to test and workloads

# + hidden=true
stune_smoke_test = '../../tests/stune/smoke_test_ramp.config'
# !cat {stune_smoke_test}

# + [markdown] hidden=true
# ## Write Test Cases

# + hidden=true
stune_smoke_test = '../../tests/stune/smoke_test_ramp.py'
# !cat {stune_smoke_test}

# + [markdown] hidden=true
# ## Tests execution

# + [markdown] hidden=true
# The execution of a test can be triggered from a LISA shell using nosetest with the test class as a parameter. This command:
#
# ```bash
# $ nosetests -v tests/stune/smoke_test_ramp.py
# ```
#
# will execute all the tests described in the **smoke_test_ramp.py** module and collect all the products in a timestamp named subfolder of the results folder.
# Tests PASS/FAILURE is reported after the completion of each test execution.

# + [markdown] hidden=true
# ## Results reporting
# -

# Detailed results of the experiments, which compare some base configurations with each test configuration, can be reported in a tabular format using this command:
#
# ```bash
# $ lisa-report --base noboost --tests '(boost15|boost30|boost60)'
# ```
#

# + [markdown] hidden=true
# <img src="SchedTune_SmokeTestResults.png"/>
ipynb/deprecated/tutorial/UseCaseExamples_SchedTuneAnalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + pycharm={"name": "#%%\n"}
import numpy as np

# classification
# Small hand-crafted sample (last column = label) kept for quick sanity checks.
# from models import *
data = np.array([[-4, -4, 0], [-3, -3, 0], [-2, -2, 0], [-1, -1, 0],
                 [1, 1, 0], [2, 2, 0], [3, 3, 0],
                 [1, -1, 1], [2, -2, 1], [3, -3, 3],
                 [-1, 1, 1], [-2, 2, 1], [-3, 3, 1]])
# -

from models import *  # provides split_data used by the tree builders below
import numpy as np
import pandas as pd

# +
from dataset import *
import pandas as pd

# Taiwan credit-risk data: features plus label packed into one array whose
# LAST column is the target (the convention used throughout this notebook).
dataset = load_dataset('taiwan_credit_risk', train_prop=0.8, valid_prop=0.1)
df = pd.DataFrame(dataset.X_train)
df['y'] = dataset.y_train
data = df.to_numpy()

# +
# Ames housing data: numeric columns only, 90/10 train/validation split.
dataset2 = pd.read_csv('datasets/ames_housing/ames_housing_training.csv', index_col=0)
df2 = pd.DataFrame(dataset2)
df2 = df2.select_dtypes([np.number]).dropna()
data2_train = df2.to_numpy()[:int(0.9 * len(df2))]
data2_val = df2.to_numpy()[int(0.9 * len(df2)):]

# +
from sklearn.model_selection import StratifiedKFold, KFold

kf = KFold(n_splits=5, shuffle=True, random_state=123)


def load_data(path):
    """Load a CSV, keep numeric columns, return (df, train, val) with a 90/10 split."""
    dataset = pd.read_csv(path, index_col=0)
    df = pd.DataFrame(dataset)
    # df = df.sample(frac=1).reset_index(drop=True)
    df = df.select_dtypes([np.number]).dropna()
    data_train = df.to_numpy()[:int(0.9 * len(df))]
    data_val = df.to_numpy()[int(0.9 * len(df)):]
    return df, data_train, data_val


def k_fold(path):
    """Load a CSV, keep numeric columns, return (df, train_folds, val_folds) for 5-fold CV."""
    train_folds = []
    val_folds = []
    dataset = pd.read_csv(path, index_col=0)
    df = pd.DataFrame(dataset)
    df = df.select_dtypes([np.number]).dropna()
    for train_index, test_index in kf.split(df):
        train_folds.append(df.to_numpy()[train_index])
        val_folds.append(df.to_numpy()[test_index])
    return df, train_folds, val_folds
# -

df_bike, b_train, b_val = k_fold('datasets/Bike-Sharing-Dataset/day.csv')
b_train[0]


# +
def check_purity_regression(data, threshold):
    """Return True when a node is small enough to become a leaf."""
    return len(data) < threshold


def calculate_mse(data_below, data_above):
    """MSE of a candidate split: pooled squared residuals around each side's mean."""
    below_mean = np.mean(data_below[:, -1])
    above_mean = np.mean(data_above[:, -1])
    res_below = data_below[:, -1] - below_mean
    res_above = data_above[:, -1] - above_mean
    r = np.concatenate((res_below, res_above), axis=None)
    return np.sum(r ** 2) / len(r)


def calc_mse_whole(data):
    """MSE of an unsplit node (variance of the target column)."""
    r = data[:, -1] - np.mean(data[:, -1])
    return np.sum(r ** 2) / len(r)


def get_potential_splits(data, n_columns, isForest=False):
    """Candidate split values per feature: midpoints between consecutive unique values.

    When isForest is True, sample n_columns distinct features at random
    (random-forest style); otherwise consider every feature column.
    The last column of `data` is the target and is never split on.
    """
    potential_splits = {}
    if isForest:
        features = []
        while len(features) < n_columns:
            index = randrange(data.shape[1] - 1)  # exclude the target column
            if index not in features:
                features.append(index)
    else:
        features = [i for i in range(n_columns - 1)]
    for column_index in features:
        potential_splits[column_index] = []
        unique_values = np.unique(data[:, column_index])
        for index in range(1, len(unique_values)):
            midpoint = (unique_values[index] + unique_values[index - 1]) / 2
            potential_splits[column_index].append(midpoint)
    return potential_splits


def determine_best_split(data, potential_splits):
    """Return (column, value) of the lowest-MSE split, or (None, None)
    when no candidate improves on the unsplit node."""
    overall_mse = calc_mse_whole(data)
    best_split_column = None
    best_split_value = None
    for column_index in potential_splits:
        for value in potential_splits[column_index]:
            data_below, data_above = split_data(data, split_column=column_index, split_value=value)
            current_overall_mse = calculate_mse(data_below, data_above)
            if current_overall_mse < overall_mse:
                overall_mse = current_overall_mse
                best_split_column = column_index
                best_split_value = value
    if best_split_column is None:
        return None, None
    return best_split_column, best_split_value


def regression_tree(x_train, y_train, threshold):
    """Build a regression tree from separate feature/target arrays.

    Fix: the original built the tree but never returned it (always None).
    """
    data = np.concatenate((x_train, y_train), axis=1)
    return regression_tree_helper(data, threshold)


def regression_tree_helper(data, threshold, isForest=False):
    """Recursively grow a tree on `data` (last column = target).

    Leaves keep the raw target values of the node so that arbitrary
    quantiles can be read off at prediction time.
    """
    if len(data) <= threshold:
        return data[:, -1]
    potential_splits = get_potential_splits(data, data.shape[1])
    split_column, split_value = determine_best_split(data, potential_splits)
    # Fix: guard against "no improving split" (the forest helper already did
    # this); previously split_data(data, None, None) crashed on such nodes.
    if split_column is None:
        return data[:, -1]
    data_below, data_above = split_data(data, split_column, split_value)

    # instantiate sub-tree
    question = "{} <= {}".format(split_column, split_value)
    sub_tree = {question: []}

    # find answers (recursion)
    yes_answer = regression_tree_helper(data_below, threshold)
    no_answer = regression_tree_helper(data_above, threshold)
    sub_tree[question].append(yes_answer)
    sub_tree[question].append(no_answer)
    return sub_tree


def classify_example(example, decision_tree, q):
    """Route one example down the tree; return the q-quantile of the leaf it reaches."""
    question = list(decision_tree.keys())[0]  # questions look like "3 <= 0.5"
    feature_name, comparison_operator, value = question.split()

    # ask question
    if example[int(feature_name)] <= float(value):
        answer = decision_tree[question][0]
    else:
        answer = decision_tree[question][1]

    # base case: a leaf is the array of training targets that fell in the node
    if not isinstance(answer, dict):
        return np.quantile(answer, q)
    # recursive part
    return classify_example(example, answer, q)


# +
def find_pred(main_tree, val):
    """Median (q=0.5) prediction of `main_tree` for each row of `val`."""
    return [classify_example(x[:-1], main_tree, 0.5) for x in val]
# -


def calc_mse_val(pred, y):
    """Mean squared error between predictions and targets."""
    return np.sum((np.asarray(pred) - y) ** 2) / len(y)


def calc_mae_val(pred, y):
    """Mean absolute error between predictions and targets."""
    return np.sum(abs(np.asarray(pred) - y)) / len(y)


def evaluation(val, main_tree):
    """Empirical coverage of the 95% prediction interval [q0.025, q0.975]."""
    covered = 0
    for x in val:
        top = classify_example(x[:-1], main_tree, 0.975)
        bottom = classify_example(x[:-1], main_tree, 0.025)
        if bottom <= x[-1] <= top:
            covered += 1
    return covered / len(val)


# + pycharm={"is_executing": true, "name": "#%%\n"}
# Baseline comparison of the quantile tree against sklearn's decision tree.
from sklearn.tree import DecisionTreeRegressor

cmp = 0
acc1 = 0
acc2 = 0
for i in range(len(b_train)):
    main_tree = regression_tree_helper(b_train[i], 10)
    p = find_pred(main_tree, b_val[i])
    regressor = DecisionTreeRegressor(random_state=0)
    regressor.fit(b_train[i][:, :-1], b_train[i][:, -1])
    p2 = [x for x in regressor.predict(b_val[i][:, :-1])]
    qt = calc_mse_val(p, b_val[i][:, -1])
    t = calc_mse_val(p2, b_val[i][:, -1])
    acc1 += qt
    acc2 += t
    print("qt = ", qt)
    print("t = ", t)
    print("accuracy = ", evaluation(b_val[i], main_tree))  # fixed label typo
    if qt < t:
        cmp += 1
print(cmp)
# mean over the folds
print("mean mse for quantile regression tree: ", acc1 / len(b_train))
print("mean mse for sklearn implementation: ", acc2 / len(b_train))

# +
from random import seed
from random import randrange
from math import sqrt
from sklearn.ensemble import RandomForestRegressor


def regression_forest_helper(data, threshold, n_columns):
    """Grow one randomized tree: like regression_tree_helper, but each split
    considers only n_columns randomly sampled features."""
    if len(data) <= threshold:
        return data[:, -1]
    potential_splits = get_potential_splits(data, n_columns, True)
    split_column, split_value = determine_best_split(data, potential_splits)
    if split_column is None:
        return data[:, -1]
    data_below, data_above = split_data(data, split_column, split_value)

    # instantiate sub-tree
    question = "{} <= {}".format(split_column, split_value)
    sub_tree = {question: []}

    # find answers (recursion)
    sub_tree[question].append(regression_forest_helper(data_below, threshold, n_columns))
    sub_tree[question].append(regression_forest_helper(data_above, threshold, n_columns))
    return sub_tree


# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(path, algorithm, *args):
    """Cross-validate `algorithm(train, val, *args)` on the CSV at `path`.

    Returns (MAE scores, MSE scores, baseline MSE scores) per fold; the
    baseline is sklearn's RandomForestRegressor with the same tree count.
    """
    df_cv, folds_train, folds_val = k_fold(path)
    scores_mae = []
    scores_mse = []
    scores_mse_base = []
    # args is (threshold, sample_size, n_trees, n_features) for random_forest;
    # args[2] is the tree count for the sklearn baseline.
    # Fix: the original read `n_trees` from a module-level global.
    n_trees = args[2]
    for i in range(len(folds_train)):
        # Baseline comparator.
        # Fix: the keyword is `n_estimators`; `n_estimator` raised a TypeError.
        regr = RandomForestRegressor(n_estimators=n_trees)
        regr.fit(folds_train[i][:, :-1], folds_train[i][:, -1])
        baseline_predict = [x for x in regr.predict(folds_val[i][:, :-1])]
        baseline_mse = calc_mse_val(baseline_predict, folds_val[i][:, -1])
        scores_mse_base.append(baseline_mse)
        # Our algorithm's prediction.
        predicted = algorithm(folds_train[i], folds_val[i], *args)
        actual = folds_val[i][:, -1]
        mae = calc_mae_val(predicted, actual)
        mse = calc_mse_val(predicted, actual)
        print("mae = ", mae)
        print("mse = ", mse)
        scores_mae.append(mae)
        scores_mse.append(mse)
    return scores_mae, scores_mse, scores_mse_base


# Create a random subsample from the dataset with replacement
def subsample(dataset, ratio):
    """Bootstrap sample: draw round(len(dataset) * ratio) rows with replacement."""
    sample = []
    n_sample = round(len(dataset) * ratio)
    while len(sample) < n_sample:
        sample.append(dataset[randrange(len(dataset))])
    return np.array(sample)


# Make a prediction with a list of bagged trees
def bagging_predict(trees, val):
    """Average the per-tree median predictions; one value per row of `val`."""
    predictions = [find_pred(tree, val) for tree in trees]
    return np.mean(np.array(predictions), axis=0)


# Random Forest Algorithm
def random_forest(train, test, threshold, sample_size, n_trees, n_features):
    """Train `n_trees` randomized trees on bootstrap samples and predict `test`."""
    trees = []
    for _ in range(n_trees):
        sample = subsample(train, sample_size)
        trees.append(regression_forest_helper(sample, threshold, n_features))
    return bagging_predict(trees, test)


# Test the random forest algorithm
seed(2)
# -

path = 'datasets/Bike-Sharing-Dataset/day.csv'
threshold = 1
sample_size = 1.0
# Fix: the feature count must come from the bike-sharing frame (df_bike),
# not the unrelated credit-risk frame `df` loaded earlier.
n_features = int(df_bike.shape[1] / 3)
for n_trees in [10, 100, 1000]:
    scores_mae, scores_mse, scores_mse_base = evaluate_algorithm(path, random_forest, threshold, sample_size, n_trees, n_features)
    print('Trees: %d' % n_trees)
    # Fix: the original '%.3f%' format strings raised
    # "ValueError: incomplete format" (a literal % must be written %%).
    print('Mean MSE: %.3f' % (sum(scores_mse) / float(len(scores_mse))))
    print('Mean MSE baseline: %.3f' % (sum(scores_mse_base) / float(len(scores_mse_base))))
    print('Mean MAE: %.3f' % (sum(scores_mae) / float(len(scores_mae))))
.ipynb_checkpoints/basic_decision_tree-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import pandas as pd
import numpy as np
import nupic
import matplotlib.pyplot as plt
import time

# +
# Names of the NAB traffic series; each is loaded from "<name>.csv" into a
# module-level variable of the same name via exec.
s = set()
s.add('occupancy_6005')
s.add('occupancy_t4013')
s.add('speed_6005')
s.add('speed_7578')
s.add('speed_t4013')
s.add('TravelTime_387')
s.add('TravelTime_451')

# Load each CSV into a DataFrame variable named after the series.
for x in s:
    exec(x + " = pd.read_csv('" + x + ".csv', parse_dates=True, index_col='timestamp')")

# Rename the generic 'value' column to the series name.
for x in s:
    exec(x +" = " + x + ".rename(columns={'value': x})")

for x in s:
    exec(x +".plot()")
# -

# Downsample every series to hourly means.
for x in s:
    exec(x + " = " + x + ".resample('H').mean()")

occ = pd.concat([occupancy_6005, occupancy_t4013], axis = 1)
spd = pd.concat([speed_6005, speed_7578, speed_t4013], axis = 1)
tti = pd.concat([TravelTime_387, TravelTime_451], axis = 1)

occ.plot()
spd.plot()
tti.plot()

# # NuPIC HTM Implementation

#Import from NuPIC library
from nupic.encoders import RandomDistributedScalarEncoder
from nupic.algorithms.spatial_pooler import SpatialPooler
from nupic.algorithms.temporal_memory import TemporalMemory
from nupic.algorithms.anomaly import Anomaly

# +
# Merge occupancy and speed series, fill gaps, and cut to the study window.
data_all = pd.concat([occupancy_6005, occupancy_t4013, speed_6005, speed_7578, speed_t4013], axis = 1)
dataseq = data_all.resample('H').bfill().interpolate()

import datetime as dt # Imports dates library
a = dt.datetime(2015, 9, 8, 12) # Fixes the start date
seldata = dataseq[a:] # Subsets the data
# -

# One scalar encoder per variable, with resolution sigma/5 (see note below).
Vars = set(["occupancy_6005", "occupancy_t4013", "speed_6005", "speed_7578", "speed_t4013"])
for x in Vars:
    exec("RDSE_"+ x +" = RandomDistributedScalarEncoder(resolution=seldata['"+ x +"'].std()/5)")

prueba = seldata['speed_6005']
seldata['speed_6005'][0]
prueba.plot()

# ### Encoding
# It is important to choose a resolution fine enough to discern the required changes in the variables <br>
# - In our case we have tried $ ^\sigma / _5$ <br>
#
# One should verify that the differences between the encodings are indeed significant when they are significant for the variables that were sampled

RDSE = RandomDistributedScalarEncoder(resolution=prueba.std()/5)
a = np.zeros(len(prueba)-1)
# Count differing bits between consecutive encodings.
# NOTE(review): at x=0 this compares prueba[0] with prueba[-1] (wraparound) —
# presumably unintended; confirm whether the loop should start at 1.
for x in range(len(prueba)-1):
    a[x] = sum(RDSE.encode(prueba[x]) != RDSE.encode(prueba[x-1]))
plt.plot(prueba)
plt.plot(a)

# Spatial pooler

# Define the input width
# NOTE(review): these two values are immediately overwritten below by the
# multivariate configuration — kept as in the original notebook.
encoder_width = RDSE.getWidth()
pooler_out = 2048

# Multivariate input width: sum of all per-variable encoder widths.
encoder_width = 0
for x in Vars:
    exec("encoder_width += RDSE_"+ x +".getWidth()")
pooler_out = 4096

sp = SpatialPooler(
    # How large the input encoding will be
    inputDimensions=(encoder_width),
    # Number of columns on the Spatial Pooler
    columnDimensions=(pooler_out),
    # Percent of the inputs that a column can be connected to, 1 means the column is connected to every other column
    potentialPct = 0.8,
    # Eliminates the topology
    globalInhibition = True,
    # Recall that there is only one inhibition area
    numActiveColumnsPerInhArea = pooler_out//50,
    # Velocity of synapse growth and degradation
    synPermInactiveDec = 0.005,
    synPermActiveInc = 0.04,
    synPermConnected = 0.1,
    # boostStrength controls the strength of boosting. Boosting encourages efficient usage of SP columns.
    boostStrength = 3.0,
    seed = 25,
    # Determines whether the encoder is cyclic or not
    wrapAround = False)

# +
# Smoke-test: feed the first sample through the Spatial Pooler.
activeColumns = np.zeros(pooler_out)
encoding = RDSE.encode(prueba[0])
sp.compute(encoding, True, activeColumns)
activeColumnIndices = np.nonzero(activeColumns)[0]
print activeColumnIndices
plt.plot(activeColumns)
# -

tm = TemporalMemory(
    # Must be the same dimensions as the SP
    columnDimensions=(pooler_out,),
    # How many cells in each mini-column.
    cellsPerColumn=5,
    # A segment is active if it has >= activationThreshold connected synapses that are active due to infActiveState
    activationThreshold=16,
    initialPermanence=0.21,
    connectedPermanence=0.5,
    # Minimum number of active synapses for a segment to be considered during
    # search for the best-matching segments.
    minThreshold=12,
    # The max number of synapses added to a segment during learning
    maxNewSynapseCount=20,
    permanenceIncrement=0.1,
    permanenceDecrement=0.1,
    predictedSegmentDecrement=0.0,
    maxSegmentsPerCell=128,
    maxSynapsesPerSegment=32,
    seed=25)

# Execute Temporal Memory algorithm over active mini-columns.
tm.compute(activeColumnIndices, learn=True)

activeCells = tm.getActiveCells()
print activeCells

# Univariate procedure

# +
activeColumns = np.zeros(pooler_out)

# NOTE(review): a mid-file __future__ import only works because each notebook
# cell is compiled separately — it would be a SyntaxError in a plain script.
from __future__ import division

# A_score[x] = fraction of currently active columns that were NOT predicted
# from the previous step (raw HTM anomaly score).
A_score = np.zeros(len(prueba))
for x in range(len(prueba)):
    encoding = RDSE.encode(prueba[x]) #encode each input value
    sp.compute(encoding, False, activeColumns) #Spatial Pooler
    activeColumnIndices = np.nonzero(activeColumns)[0]
    tm.compute(activeColumnIndices, learn=True)
    activeCells = tm.getActiveCells()
    if x > 0:
        inter = set(activeColumnIndices).intersection(predictiveColumns_prev)
        inter_l = len(inter)
        active_l = len(activeColumnIndices)
        A_score[x] = 1 - (inter_l/active_l)
    # Columns predicted for the next step (cell index // cellsPerColumn).
    predictiveColumns_prev = list(set([x//5 for x in tm.getPredictiveCells()]))
    #print ("intersection ", inter_l, ", active ", active_l, " ratio ", inter_l/active_l)

# +
# Multivariate procedure: concatenate the per-variable encodings.
activeColumns = np.zeros(pooler_out)

from __future__ import division

A_score = np.zeros(len(prueba))
for x in range(len(prueba)):
    encoding = []
    for y in Vars:
        exec("encoding_y = RDSE_" + y + ".encode(seldata['" + y + "'][x])")
        encoding = np.concatenate((encoding, encoding_y))
    #RDSE.encode(prueba[x]) #encode each input value
    sp.compute(encoding, False, activeColumns) #Spatial Pooler
    activeColumnIndices = np.nonzero(activeColumns)[0]
    tm.compute(activeColumnIndices, learn=True)
    activeCells = tm.getActiveCells()
    if x > 0:
        inter = set(activeColumnIndices).intersection(predictiveColumns_prev)
        inter_l = len(inter)
        active_l = len(activeColumnIndices)
        A_score[x] = 1 - (inter_l/active_l)
    predictiveColumns_prev = list(set([x//5 for x in tm.getPredictiveCells()]))
    #print ("intersection ", inter_l, ", active ", active_l, " ratio ", inter_l/active_l)
# -

plt.plot(seldata)
plt.figure()
plt.plot(A_score)

# ### Computes the anomaly likelihood
# We are now computing the likelihood that the system is in a current anomalous state, to do so we have to determine 2 windows:
# - W: 72 datapoints (three days), computes the normal error distribution
# - W_prim: 6 datapoints (6 hours), computes the mean error at the current state

from scipy.stats import norm

W = 72
# NOTE(review): the markdown above says 6 datapoints but W_prim is 5 — confirm
# the intended short-window length.
W_prim = 5
eps = 1e-6
AL_score = np.zeros(len(A_score))
for x in range(len(A_score)):
    if x > 0:
        W_vec = A_score[max(0, x-W): x]
        W_prim_vec = A_score[max(0, x-W_prim): x]
        # Two-sided tail probability of the short-window mean under the
        # long-window error distribution.
        AL_score[x] = 1 - 2*norm.sf(abs(np.mean(W_vec)-np.mean(W_prim_vec))/max(np.std(W_vec), eps))

plt.plot(seldata)
plt.figure()
plt.plot(AL_score)
plt.figure()
plt.plot(A_score)
Traffic/Traffic2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-block alert-info"> # <img style="float: right;" src="https://raw.githubusercontent.com/OpenSourceEconomics/ose-corporate-design/master/logos/OSE_logo_RGB.svg" width="150"> # <u><h1>respy showcase</h1></u> # </div> # + slideshow={"slide_type": "skip"} import io import copy import yaml import numpy as np import pandas as pd import respy as rp import seaborn as sns import matplotlib.pyplot as plt from IPython.display import Image from pathlib import Path from auxiliary.auxiliary import * # + [markdown] slideshow={"slide_type": "slide"} # ### Outline # # 1. <span style="color:#2C68B9">Package overview</span> # 2. Introductory tutorial # 2.1. Theoretical Framework # 2.2. Specifying a model # 2.3. Simulating data # 2.4. Extending the model # 3. Pre-implemented example models # 4. Outlook # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-block alert-info"> # <u><h2>1. Package overview</h2></u> # </div> # # # ### What is **respy**? 
# # - open source Python framework # - simulation and estimation of some finite-horizon discrete choice dynamic programming models # - class of models: Eckstein-Keane-Wolpin models (Aguirregabiria & Mira, 2009) # # + [markdown] slideshow={"slide_type": "subslide"} # ### Class of models # # - Eckstein-Keane-Wolpin models # - discrete choice dynamic life-cycle framework # - reward function structure: experience accumulation and wages # - economic applications: human capital investments, labor supply, occupational choice # # # **Model components** # - (any) number of choices and periods # - initial conditions: lagged choices, initial experience, observable characteristics # - unobserved heterogeneity: finite mixture models (types) # - measurement error # + [markdown] slideshow={"slide_type": "subslide"} # ### Use cases # # **For research ...** # - flexible model specification # - build and solve structural models in weeks or months # - extensive test suite # # **For learning & teaching ...** # - extensive online documentation: tutorials, explanations, how-to guides # - variety of pre-implemented example models that scale from toy examples to full empirical models # - sandbox for theses & student projects # - lecture materials: https://ekw-lectures.readthedocs.io # + [markdown] slideshow={"slide_type": "subslide"} # ### Steps of modeling # # 1. Theoretical development # 2. <span style="color:#2C68B9">Model implementation</span> # 2.1. <span style="color:#2C68B9">Specification</span> # 2.2. <span style="color:#2C68B9">Solution</span> # 2.3. <span style="color:#2C68B9">Simulation</span> # 3. <span style="color:#2C68B9">Calibration</span> # 3.1. <span style="color:#2C68B9">Likelihood- or simulation-based criterion</span> # 3.2. Optimization $\rightarrow$ [estimagic](https://estimagic.readthedocs.io/en/latest/) # 4. 
<span style="color:#2C68B9">Validation and policy evaluation</span> # + [markdown] slideshow={"slide_type": "slide"} # ### Source code & documentation # # - developed on GitHub: https://github.com/OpenSourceEconomics/respy # # # - online documentation: https://respy.readthedocs.io # # # **Installation via conda** # # # # ```bash # $ conda config --add channels conda-forge # $ conda install -c opensourceeconomics respy # ``` # + [markdown] slideshow={"slide_type": "subslide"} # #### Source code on GitHub: https://github.com/OpenSourceEconomics/respy # + slideshow={"slide_type": "fragment"} Image(filename='figures/respy-github.png') # + [markdown] slideshow={"slide_type": "subslide"} # #### Testing # + slideshow={"slide_type": "fragment"} Image(filename='figures/respy-ci.png') # + [markdown] slideshow={"slide_type": "subslide"} # #### Online documentation: https://respy.readthedocs.io # + slideshow={"slide_type": "fragment"} Image(filename='figures/respy-docs.png') # + [markdown] slideshow={"slide_type": "slide"} # ### Outline # # 1. Package overview # 2. <span style="color:#2C68B9">Introductory tutorial</span> # 2.1. Theoretical framework # 2.2. Specifying a model # 2.3. Simulating data # 2.4. Extending the model # 3. Pre-implemented example models # 4. Outlook # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-block alert-info"> # <u><h2>2. Introductory tutorial</h2></u> # </div> # # ### Situational setup # - <NAME> is stranded on a small island. # - He goes fishing to make ends meet and will relax in his hammock when he is tired. # - He cannot relax to often as he won't be able to eat otherwise. # - The more he fishes, the better his fishing skills become. # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-block alert-warning"> # <u><h3>2.1. 
Theoretical Framework: <NAME> on an island</h3></u> # </div> # # ### The economic problem # # - Robinson chooses every period $t = 0, \dots, T$ to either go fishing, $a = 0$, or spend the day in the hammock, $a = 1$. # # - If Robinson chooses to go fishing, he gains one additional unit of **experience** in the next period. Experience starts at zero. # # - The utility of a choice, $U(s_t, a_t)$, depends on the state $s_t$, which contains information on the individual's characteristics, and the chosen alternative $a_t$. # # - Robinson's utility for any given option can be denoted by # # $$\begin{align} # U(s_t, a_t) = \underbrace{W(s_t, a_t)}_{wage} + \underbrace{N(s_t, a_t)}_{non-pecuniary} # \end{align} # $$ # + slideshow={"slide_type": "subslide"} Image(filename="figures/tree_small.jpg") # + [markdown] slideshow={"slide_type": "subslide"} # ### Working alternatives # # - For working alternatives like fishing, utility consists of two components, a *wage* and a *non-pecuniary* component. # # **Wage** # # $$\begin{align} # W(s_t, a_t) &= r_{a} \exp\{x^w_{at} \beta^w_a + \epsilon_{at}\}\\ # \ln(W(s_t, a_t)) &= \ln(r_a) + x^w_{at} \beta^w_a + \epsilon_{at} # \end{align}$$ # # - Components: # - $r_a$ is a market rental price for skill units. # # - $x^w_{at}$ and $\beta^w_a$ are the choice- and time-dependent covariates and returns related to the wage signaled by superscript $w$. # # - $\epsilon_{at}$ is a choice-specific random shock from the shock vector $\epsilon_t \sim \mathcal{N}(0, \Sigma)$ for all choices. # + [markdown] slideshow={"slide_type": "subslide"} # **Non-pecuniary rewards** (for working alternatives) # # - Vector dot product of covariates $x_t^w$ and parameters $\beta^w$. # - Superscript $w$ signals that the components belong to working alternatives. 
# # $$\begin{align} # N^w(s_t, a_t) = x_t^w\beta^w # \end{align}$$ # + [markdown] slideshow={"slide_type": "subslide"} # ### Non-working alternatives # # **Wage** # $$\begin{align} # W(s_t, a_t) = 0 # \end{align}$$ # # **Non-pecuniary rewards** # # - Shocks enter the equation *additively* # - Superscript $n$ stands for non-pecuniary # $$ # N^n(s_t, a_t) = x_t^n\beta^n + \epsilon_{at} # $$ # + [markdown] slideshow={"slide_type": "notes"} # - Robinson's choice set thus consists of a "working" alternative which awards him a pecuniary compensation or wage and a "leisure" or non-working alternative which he derives a utility from, but no experience or wage. Experience in this basic model starts at zero and increases by one for every period $t$ in $1, ..., T$ where he chooses to go fishing. # + [markdown] slideshow={"slide_type": "subslide"} # ### Robinson's choice problem # # - Robinson is forward-looking and maximizes the expected present value of utility over the remaining lifetime $\rightarrow$ select the optimal sequence of choices $\{a_t\}^T_{t = 0}$. # # # - **Bellman equation** # # $$\begin{align} # V(s_{t})&= \max_{a_t} \, \{\underbrace{U(s_t, a_t)}_{\text{flow utility}}+ \delta \underbrace{\text{E}[ V(s_{t+1})]\}}_{\text{continuation value}} # \end{align}$$ # # - Realization of shocks becomes known in each period before Robinson makes his choice. # - As shocks in period $t + 1$ are unknown to the individual in period $t$, utility must be maximized given the joint distribution of shocks in period $t + 1$ which is a maximization problem over a two-dimensional integral. # # - Denote the non-stochastic part of a state as $s^-$. Then, Robinson maximizes. 
# # $$\begin{equation} # V(s_t) = \max_{a_t}\{ # U(s_t, a_t) + \delta \int_{\epsilon_{1, t + 1}} \int_{\epsilon_{0, t + 1}} # \max_{a_{t + 1}} V_{a_{t + 1}}(s^-_{t + 1}, \epsilon_{t + 1}) # f_\epsilon(\epsilon_{t + 1}) # d_{\epsilon_{0, t + 1}}, d_{\epsilon_{1, t + 1}} # \} # \end{equation}$$ # + [markdown] slideshow={"slide_type": "slide"} # ### Outline # # 1. Package overview # 2. Introductory tutorial # 2.1. Theoretical framework # 2.2. <span style="color:#2C68B9">Specifying a model</span> # 2.3. Simulating data # 2.4. Extending the model # 3. Pre-implemented example models # 4. Outlook # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-block alert-warning"> # <u><h3>2.2. Specifying the model</h3></u> # </div> # # **respy** models consists of two main inputs: # # - `params`: (estimable) model parameters # # # - `options` : model settings (number of periods, implementation details) # # + [markdown] slideshow={"slide_type": "subslide"} # ### Model parameters: ``params`` # # - Parameter vector of the model # # # - Specified as a [MultiIndex pandas.DataFrame](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html) with two index levels. # 1. *category*: which indicates certain parameters groups # 2. 
*name*: which indicates a specific parameter # # # - read in parameters from a .csv-file: # + slideshow={"slide_type": "fragment"} params = """category,name,value delta,delta,0.95 wage_fishing,exp_fishing,0.1 nonpec_fishing,constant,-1 nonpec_hammock,constant,2.5 nonpec_hammock,not_fishing_last_period,-1 shocks_sdcorr,sd_fishing,1 shocks_sdcorr,sd_hammock,1 shocks_sdcorr,corr_hammock_fishing,-0.2 lagged_choice_1_hammock,constant,1 """ # + slideshow={"slide_type": "subslide"} params_basic = pd.read_csv( io.StringIO(params), sep=",", index_col=["category", "name"] ) params_basic # + [markdown] slideshow={"slide_type": "subslide"} # ### Model options: ``options`` # # - dictionary which contains additional information # - number of periods in the model # - size of the simulated sample # - implementation details like seeds # - covariates # + slideshow={"slide_type": "fragment"} options_basic = { "n_periods": 10, "simulation_agents": 1000, "simulation_seed": 123, "covariates": { "constant": "1", "not_fishing_last_period": "lagged_choice_1 != 'fishing'", }, } options_basic # + [markdown] slideshow={"slide_type": "slide"} # ### Outline # # 1. Package overview # 2. Introductory tutorial # 2.1. Theoretical framework # 2.2. Specifying a model # 2.3. <span style="color:#2C68B9">Simulating data</span> # 2.4. Extending the model # 3. Pre-implemented example models # 4. Outlook # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-block alert-warning"> # <u><h3>2.3. Simulating data</h3></u> # </div> # # Using the `params` and `options` we can set up a simulator using the function `get_simulate_func` and subsequently simulate a data for our selected parametrization. 
# + slideshow={"slide_type": "fragment"} simulate = rp.get_simulate_func(params_basic, options_basic) df_basic = simulate(params_basic) df_basic.head(5) # + [markdown] slideshow={"slide_type": "subslide"} # **Choice shares** # + slideshow={"slide_type": "fragment"} plot_choice_shares(df_basic) # + [markdown] slideshow={"slide_type": "subslide"} # **Transition matrix** # + slideshow={"slide_type": "fragment"} data = pd.crosstab(df_basic.Lagged_Choice_1, df_basic.Choice, normalize=True) sns.heatmap(data, cmap="Blues", annot=True); # + [markdown] slideshow={"slide_type": "slide"} # ### Outline # # 1. Package overview # 2. Introductory tutorial # 2.1. Theoretical framework # 2.2. Specifying a model # 2.3. Simulating data # 2.4. <span style="color:#2C68B9">Extending the model</span> # 3. Pre-implemented example models # 4. Outlook # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-block alert-warning"> # <u><h3>2.4. Extending the Model</h3></u> # </div> # + [markdown] slideshow={"slide_type": "subslide"} # <div class="alert alert-block alert-success"> # <u><h4>2.4.1. Extension: Covariates</h4></u> # </div> # # **Covariates in the basic model** # # 1. The constant, which is the simplest covariate # 2. The indicator whether Robinson went fishing last period. # # **New covariate:** *age* # # - assume Robinson's return to fishing decreases as he gets older. # - assume that he arrives at the island with 16 years. # - assume his age in each period, which we assume for now is one year, increases by one. # + [markdown] slideshow={"slide_type": "subslide"} # I. Edit ``params`` # + slideshow={"slide_type": "fragment"} params_age = params_basic.copy() params_age.loc[("wage_fishing", "age"), "value"] = -0.08 params_age # + [markdown] slideshow={"slide_type": "subslide"} # II. 
Edit ``options`` # + slideshow={"slide_type": "fragment"} options_age = copy.deepcopy(options_basic) options_age["covariates"].update({"age": "period + 16"}) options_age # + [markdown] slideshow={"slide_type": "subslide"} # III. Simulate again # + slideshow={"slide_type": "fragment"} simulate = rp.get_simulate_func(params_age, options_age) df_age = simulate(params_age) # + [markdown] slideshow={"slide_type": "notes"} # $\rightarrow$ **Result:** Including the negative effect of age reduces the proportion of individuals that choose fishing, increasingly so in later periods. # + slideshow={"slide_type": "fragment"} plot_choice_shares(df_age) # + [markdown] slideshow={"slide_type": "subslide"} # <div class="alert alert-block alert-success"> # <u><h4>2.4.2. Extension: Adding Choices</h4></u> # </div> # # - after some time on the island, Robinson meets Friday who can help him improve his fishing. # # - add the choice alternative `"friday"` # # - affects the utility of fishing. # - choice should be available only once starting with the third period, and only after Robinson has been fishing before. # + [markdown] slideshow={"slide_type": "subslide"} # I. Edit `params` (loaded from .csv) # # - `wage_fishing` now includes a return to contemplation with Friday. # - There is a new category,`nonpec_friday`, which captures the non-pecuniary reward for of spending a period with Friday. # - The shock shock variance-covariance matrix has been altered to include the additional option. # # + slideshow={"slide_type": "subslide"} params_friday = pd.read_csv("params_files/robinson_crusoe_friday.csv", index_col=["category", "name"]) params_friday # + [markdown] slideshow={"slide_type": "subslide"} # II. Adjust `options` # # **Covariates** # - new covariate `"contemplation_with_friday"` is only affecting utility if .. 
# - Robinson is experienced in fishing # - only for one interaction with Friday # + slideshow={"slide_type": "fragment"} options_friday = yaml.safe_load(open("option_files/options_friday.yml")) options_friday["covariates"] # + [markdown] slideshow={"slide_type": "fragment"} # **Negative choice set** # - `"negative_choice_set"` can be used to restrict the choice Friday to the third and following periods # + slideshow={"slide_type": "fragment"} options_friday["negative_choice_set"] # + [markdown] slideshow={"slide_type": "subslide"} # III. Simulate # + slideshow={"slide_type": "fragment"} simulate = rp.get_simulate_func(params_friday, options_friday) df_friday = simulate(params_friday) plot_choice_shares(df_friday, friday=True) # + [markdown] slideshow={"slide_type": "subslide"} # <div class="alert alert-block alert-success"> # <u><h4>2.4.3. Extension: Initial Experience</h4></u> # </div> # # - example of initial conditions: characteristics individuals enter the model with # # # - assume in period $t=0$, Robinson will have $0$, $1$, or $2$ periods of experience in fishing. # # # - this introduces a source of heterogeneity between agents # + [markdown] slideshow={"slide_type": "subslide"} # I. Edit `params` # # - specify the distribution of initial experience via **probability mass functions** # # - keyword is `"initial_exp_fishing_*"` in the *category*-level of the index, where the asterisk needs to be replaced with the experience level. # # - the *name*-level, use `"probability"` to signal that the float in `"value"` is a probability. # # - specification: Robinson has equal probability to start out with 0, 1 or 2 periods of experience. 
# + slideshow={"slide_type": "subslide"} params_initial_exp = params_basic.copy() params_initial_exp.loc[ ("initial_exp_fishing_0", "probability"), "value"] = 0.33 params_initial_exp.loc[("initial_exp_fishing_1", "probability"), "value"] = 0.33 params_initial_exp.loc[ ("initial_exp_fishing_2", "probability"), "value"] = 0.34 params_initial_exp # + [markdown] slideshow={"slide_type": "subslide"} # II. Adjust `options` # # $\rightarrow$ not necessary in this case! # + [markdown] slideshow={"slide_type": "fragment"} # III. Simulate # + slideshow={"slide_type": "fragment"} simulate = rp.get_simulate_func(params_initial_exp, options_basic) df_exp = simulate(params_initial_exp) # + slideshow={"slide_type": "subslide"} plot_choice_prob_and_exp_level(df_basic) plot_choice_prob_and_exp_level(df_exp) # + [markdown] slideshow={"slide_type": "notes"} # Comparing the plots shows that the share of engagement in fishing increases compared to the baseline scenario, as two thirds of Robinsons land on the island with previous experience in fishing. # + [markdown] slideshow={"slide_type": "subslide"} # <div class="alert alert-block alert-success"> # <u><h4>2.4.4. Extension: Observables</h4></u> # </div> # # - add one observable characteristic to the baseline model, `"fishing_grounds"` # # # - assume Robinson can end up, with a certain probability, on the side of the island which has `"poor"` or `"rich"` fishing grounds. # # # - experiencing rich fishing grounds affects the **non-pecuniary reward** for fishing. # + [markdown] slideshow={"slide_type": "subslide"} # I. Add three additional rows to `params` # # - The probability with which Robinson will find himself in rich and in poor fishing grounds. # # - The return for being in a rich fishing ground, which here is set to be positive and constant. 
# + slideshow={"slide_type": "fragment"} params_fish_ground = params_basic.copy() params_fish_ground.loc[("observable_fishing_grounds_rich", "probability"), "value"] = 0.5 params_fish_ground.loc[("observable_fishing_grounds_poor", "probability"), "value"] = 0.5 params_fish_ground.loc[("nonpec_fishing", "rich_fishing_grounds"), "value"] = 0.5 params_fish_ground # + [markdown] slideshow={"slide_type": "subslide"} # II. Adjust covariates in `options` # + slideshow={"slide_type": "fragment"} options_fish_ground = options_basic.copy() options_fish_ground["covariates"] = { "constant": "1", "not_fishing_last_period": "lagged_choice_1 != 'fishing'", "rich_fishing_grounds": "fishing_grounds == 'rich'", } options_fish_ground # + [markdown] slideshow={"slide_type": "subslide"} # III. Simulate again # + slideshow={"slide_type": "fragment"} simulate = rp.get_simulate_func(params_fish_ground, options_fish_ground) df_fish_ground = simulate(params_fish_ground) # + [markdown] slideshow={"slide_type": "notes"} # As can be expected, Robinson engages in fishing more often if he is stranded on the side of the island with rich fishing grounds. # + slideshow={"slide_type": "fragment"} plot_fishing_grounds(df_fish_ground) # + [markdown] slideshow={"slide_type": "slide"} # ### Outline # # 1. Package overview # 2. Introductory tutorial # 2.1. Theoretical framework # 2.2. Specifying a model # 2.3. Simulating data # 2.4. Extending the model # 3. <span style="color:#2C68B9">Pre-implemented example models</span> # 4. Outlook # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-block alert-info"> # <u><h2>3. 
Pre-implemented example models</h2></u> # </div> # # # ```python # params, options, data = rp.get_example_model(<model>) # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ### Toy models # # - based on the story of <NAME> # # - used in the [tutorials](https://respy.readthedocs.io/en/latest/tutorials/index.html) in the documentation # # **Models** # # - `robinson_crusoe_basic` # # - `robinson_crusoe_extended` # # # **Advantages** # # - simple model structure # - computationally feasible due to the small number of available choices and low number of periods in the models # # $\rightarrow$ useful of learning & teaching # + slideshow={"slide_type": "subslide"} _, _, data = rp.get_example_model("robinson_crusoe_basic") plot_choice_shares(data) _, _, data = rp.get_example_model("robinson_crusoe_extended") plot_choice_shares(data, friday=True) # + [markdown] slideshow={"slide_type": "subslide"} # ### Keane and Wolpin (1994) # # # **Source** # # - <NAME>., & <NAME>. (1994). The Solution and Estimation of Discrete Choice Dynamic Programming Models by Simulation and Interpolation: Monte Carlo Evidence. *The Review of Economics and Statistics*, 648-672. # # **Characteristics** # - four mutually exclusive alternatives that individuals can choose in each period (work in sectors `a` or `b`, invest in education, or stay home) # - time horizon of 40 periods # # **Models** # # - `kw_94_one` # - `kw_94_two` # - `kw_94_three` # + slideshow={"slide_type": "subslide"} plot_choices_kw(example="1994") # + [markdown] slideshow={"slide_type": "subslide"} # ### <NAME> Wolpin (1997) # # **Source** # # * <NAME>., & <NAME>. (1997). The Career Decisions of Young Men. *Journal of Political Economy*, 105(3), 473-522. 
# # **Characteristics** # # * five mutually exclusive choice alternatives (occupations `white-collar`, `blue-collar`, `military`, school and staying home) # * basic and extended model specification with unobserved heterogeneity, measurement error, and initial experience # * time horizon of 50 periods # * empirical model calibrated to a sample of white men in the National Longitudinal Survey of Youth (NLSY) # # **Models** # # - `kw_97_basic` # # - `kw_97_extended` # # + slideshow={"slide_type": "subslide"} plot_choices_kw(example="1997") # + [markdown] slideshow={"slide_type": "subslide"} # ### <NAME> Wolpin (2000) # # **Source** # # - <NAME>., & <NAME>. (2000). Eliminating Race Differences in School Attainment and Labor Market Success. *Journal of Labor Economics*, 18(4), 614-652. # # **Model** # # - `kw_2000` # # The model is very similar to the extended model specification in Keane and Wolpin (1997). # + [markdown] slideshow={"slide_type": "slide"} # ### Outline # # 1. Package overview # 2. Introductory tutorial # 2.1. Theoretical framework # 2.2. Specifying a model # 2.3. Simulating data # 2.4. Extending the model # 3. Pre-implemented example models # 4. <span style="color:#2C68B9">Outlook</span> # + [markdown] slideshow={"slide_type": "slide"} # <div class="alert alert-block alert-info"> # <u><h2>4. Outlook</h2></u> # </div> # # #### Recent advancements # # - Exogenous processes (childbirth, health shocks etc.) # - Revision of the state space # - State specific choice sets # # #### Potential # - Consumption and saving decisions # - Continuous choices or time # + [markdown] slideshow={"slide_type": "subslide"} # ### Related software projects # # - [`estimagic`](https://estimagic.readthedocs.io/en/latest/) is a Python package that provides high-quality and user-friendly tools to fit large scale empirical models to data and make inferences about the estimated model parameters. 
#
# - [`ruspy`](https://ruspy.readthedocs.io/en/latest/) is an open-source Python package for the simulation and estimation of a prototypical infinite-horizon dynamic discrete choice model based on Rust (1987).
#
# - [`grmpy`](https://grmpy.readthedocs.io/en/latest/) is an open-source Python package for the simulation and estimation of the generalized Roy model.
#
# - [`econsa`](https://econsa.readthedocs.io/en/latest/) is an open-source Python package that facilitates uncertainty propagation and global sensitivity analysis of computational economic models.
#
# - [`robupy`](https://robupy.readthedocs.io/en/latest/) is an open-source Python package for robust decision making.
#
#
# - [`gettsim`](https://gettsim.readthedocs.io/en/stable/) is an open-source Python package developed by OSE members providing a depiction of the German Taxes and Transfers System.
#

# + [markdown] slideshow={"slide_type": "slide"}
# <div class="alert alert-block alert-info">
# <u><h2>Thank you!</h2></u>
# </div>
#
# More information on OpenSourceEconomics software projects at https://open-econ.org.
respy-showcase-slides.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd data = pd.read_csv('../dataset/transform/data.csv') data = data.fillna(0) data.head() y = data.modulation X = data.drop(columns = "modulation").to_numpy() # + from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier algorithms = { "RF" : (RandomForestClassifier(), { "criterion": ["entropy"], "max_depth": [5], "n_estimators": [100] }), "DT": (DecisionTreeClassifier(), { "criterion": ["entropy"], "max_depth": [5] }) } # + import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.model_selection import StratifiedKFold, GridSearchCV, KFold from sklearn.metrics import accuracy_score, make_scorer, confusion_matrix, classification_report, jaccard_score, balanced_accuracy_score kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=17) #kf = KFold(n_splits=10, shuffle=True, random_state=17) gskf = StratifiedKFold(n_splits=10, shuffle=True, random_state=17) #gskf = KFold(n_splits=10, shuffle=True, random_state=17) perf = accuracy_score correct = {} for algorithm in algorithms.keys(): correct[algorithm] = y.copy() cm = {} score = {} predicties = {} jaccard_scores = {} classification_reports = {} balanced_accuracy_scores = { } for algorithm in algorithms.keys(): score[algorithm] = [] predicties[algorithm] = [] jaccard_scores[algorithm] = [] cm[algorithm] = np.zeros([8, 8]) classification_reports[algorithm] = [] balanced_accuracy_scores[algorithm] = [] idx = ["8PSK", "16QAM", "64QAM", "BPSK", "CPFSK", "GFSK", "QPSK", "PAM4"] for algorithm, (clf, parameters) in algorithms.items(): for train, test in kf.split(X, y): prep = StandardScaler() prep.fit(X[train]) best = GridSearchCV(clf, parameters, cv=gskf, scoring="accuracy", return_train_score=True) 
best.fit(prep.transform(X[train]), y[train]) predicted = best.predict(prep.transform(X[test])) predicties[algorithm].append(predicted) cm[algorithm] = np.array(cm[algorithm]) + np.array(confusion_matrix(y[test], predicted, labels=idx)) jaccard_scores[algorithm].append(jaccard_score(y[test], predicted, average=None)) classification_reports[algorithm].append(classification_report(y[test], predicted, labels=idx)) balanced_accuracy_scores[algorithm].append(balanced_accuracy_score(y[test], predicted)) correct[algorithm].loc[test] = predicted == y[test] score[algorithm].append(perf(best.predict(X[test]), y[test])) # - print("Mean\n", pd.DataFrame.from_dict(score).mean()) print("\nStd\n", pd.DataFrame.from_dict(score).std()) # + import seaborn as sn import pandas as pd import matplotlib.pyplot as plt for algorithm in algorithms.keys(): cm_ = cm[algorithm].astype('float') / cm[algorithm].sum(axis=1)[:, np.newaxis] df_cm = pd.DataFrame(cm_, index = [i for i in idx], columns = [i for i in idx]) plt.figure(figsize = (11,8)) sn.heatmap(df_cm, annot=True) plt.xlabel('Rótulos Preditos', fontsize=16) plt.ylabel('Rótulos Originais', fontsize=16) # - import numpy as np from mlxtend.evaluate import mcnemar_table # + # The correct target (class) labels idx_modulation = {"8PSK":0, "16QAM":1, "64QAM":2, "BPSK":3, "CPFSK":4, "GFSK":5, "QPSK":6, "PAM4":7} y_target = y.replace(idx_modulation) # Class labels predicted by model RandomForestClassifier y_RF = pd.DataFrame.from_dict(correct).RF # Class labels predicted by model DecisionTreeClassifier y_DT = pd.DataFrame.from_dict(correct).DT tb = mcnemar_table(y_target=y_target, y_model1=y_RF, y_model2=y_DT) ct=pd.DataFrame(tb, columns=["RF positive", "RF negative"]) ct.index=["DT positive", "DT negative"] ct # + from io import StringIO clf_df_report = pd.DataFrame.from_dict(classification_reports) for algorithm in algorithms.keys(): for cfl_algo_report in clf_report[algorithm]: StringData = StringIO(cfl_algo_report) df = 
pd.read_csv(StringData, sep =" ") print(df.precision)
notebook/evaluationAndValidation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import nltk # # Sentence Tokenization # # Process of breaking paragragraph into sentences nltk.download('punkt') from nltk.tokenize import sent_tokenize text='Hello Miss.Swati, how are you doing today? Today the weather seems great, and bangalore city is awesome. What did you eat in breakfast today morning' tokenized_text=sent_tokenize(text) print(tokenized_text) # # Word Tokenization from nltk.tokenize import word_tokenize tokenized_word=word_tokenize(text) print(tokenized_word) # # Conerting to Lowecase tokens=[w.lower() for w in tokenized_word] print(tokens) # # Noise Removal # # Removes unnessecarry words from the data not_required=['doing'] def noise_removal(text): words=text.split() final_text=[word for word in words if word not in not_required] final_text=" ".join(final_text) return final_text noise_removal(text) # # removing punctuation words=[word for word in tokens if word.isalpha()] print(words) # # Removing stopwords nltk.download('stopwords') from nltk.corpus import stopwords stop_words=set(stopwords.words('english')) print(stop_words) #Removing stpowords from the text words=[word for word in words if not word in stop_words] print(words) # + words=word_tokenize(text) #Stemming from nltk.stem import PorterStemmer ps=PorterStemmer() stemmed_words=[] for w in words: stemmed_words.append(ps.stem(w)) print('Filtered Sentence:',tokenized_text) print('Stemmed Sentence:',stemmed_words) # + #Lexicon Normalization #Performing stemming and lemmatization nltk.download('wordnet') from nltk.stem.wordnet import WordNetLemmatizer lem=WordNetLemmatizer() from nltk.stem.porter import PorterStemmer stem=PorterStemmer() word='flying' print('Lemmatized Word:',lem.lemmatize(word,'v')) print('Stemmed Word:',stem.stem(word)) # - word='better' 
print('Lemmatized Word:',lem.lemmatize(word,'a')) print('Stemmed Word:',stem.stem(word))
nltk.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: anaconda_kernel # language: python # name: anaconda_kernel # --- # Can remove once logomaker is installed import sys sys.path.append('../../') # + # matplotlib import import matplotlib.pyplot as plt # %matplotlib inline # logomaker import import logomaker # - # # CRP energy logo crp_fig = logomaker.demo('fig1b') # # Splice site probability logo ss_fig = logomaker.demo('fig1c') # # WW domain information logo ww_fig = logomaker.demo('fig1d') # # ARS enrichment logo ars_fig = logomaker.demo('fig1e') # # Neural network saliency logo nn_fig = logomaker.demo('fig1f') # # Logomaker logo logo_fig = logomaker.demo('logo') # # Color schemes colorschemes_fig = logomaker.demo('colorschemes') # # Save figures for documentation # + # save figures crp_fig.savefig('logos/crp_energy_logo.png', transparent=True) ss_fig.savefig('logos/ss_probability_logo.png', transparent=True) ww_fig.savefig('logos/ww_information_logo.png', transparent=True) ars_fig.savefig('logos/ars_enrichment_logo.png', transparent=True) nn_fig.savefig('logos/nn_saliency_logo.png', transparent=True) logo_fig.savefig('logos/logomaker_logo.png', transparent=True) colorschemes_fig.savefig('logos/color_schemes.png', transparent=True) # signal end of notebook print('Done!') # -
venv/lib/python3.9/site-packages/logomaker/examples/examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# # !wget https://raw.githubusercontent.com/TensorSpeech/TensorFlowTTS/master/examples/hifigan/conf/hifigan.v2.yaml

# +
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# +
import sys

SOURCE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__name__)))
sys.path.insert(0, SOURCE_DIR)
# -

import malaya_speech
import malaya_speech.train
from malaya_speech.train.model import melgan, hifigan
from malaya_speech.train.model import stft
import malaya_speech.config
from malaya_speech.train.loss import calculate_2d_loss, calculate_3d_loss
import tensorflow as tf

hifigan_config = malaya_speech.config.hifigan_config

# Generator and the two discriminators that make up the HiFi-GAN setup.
generator = hifigan.Generator(
    hifigan.GeneratorConfig(**hifigan_config["hifigan_generator_params"]),
    name="hifigan_generator",
)
multiperiod_discriminator = hifigan.MultiPeriodDiscriminator(
    hifigan.DiscriminatorConfig(**hifigan_config["hifigan_discriminator_params"]),
    name="hifigan_multiperiod_discriminator",
)
# FIX: `name=` was previously passed into DiscriminatorConfig(...) instead of
# MultiScaleDiscriminator(...) because of a misplaced closing parenthesis —
# compare the multiperiod construction above, where `name=` is a constructor
# argument of the discriminator, not of its config.
multiscale_discriminator = melgan.MultiScaleDiscriminator(
    melgan.DiscriminatorConfig(**hifigan_config["melgan_discriminator_params"]),
    name="melgan_multiscale_discriminator",
)
discriminator = hifigan.Discriminator(multiperiod_discriminator, multiscale_discriminator)

# Placeholders: y = raw waveform batch, x = mel-spectrogram batch (80 bins).
y = tf.placeholder(tf.float32, (None, None))
x = tf.placeholder(tf.float32, (None, None, 80))

y_hat = generator(x)
p_hat = discriminator(y_hat)
p = discriminator(tf.expand_dims(y, -1))

y_hat
test/test-hifigan.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:babble] # language: python # name: conda-env-babble-py # --- # # Part II: Writing Explanations # In this notebook, we'll walk through how to create your own explanations that can be fed into Babble Labble. # # Creating explanations generally happens in five steps: # 1. View candidates # 2. Write explanations # 3. Get feedback # 4. Update explanations # 5. Apply label aggregator # # Steps 3-5 are optional; explanations may be submitted without any feedback on their quality. However, in our experience, observing how well explanations are being parsed and what their accuracy/coverage on a dev set are (if available) can quickly lead to simple improvements that yield significantly more useful labeling functions. Once a few labeling functions have been collected, you can use the label aggregator to identify candidates that are being mislabeled and write additional explanations targeting those failure modes. # # We'll walk through each of the steps individually with examples; at the end of the notebook is an area for you to iterate with your own explanations. # ## Step 0: Setup # Once again, we need to first load the data (candidates and labels) from the pickle. # %load_ext autoreload # %autoreload 2 # + import pickle DATA_FILE = 'data/tutorial_data.pkl' with open(DATA_FILE, 'rb') as f: Cs, Ys = pickle.load(f) # - # ## Step 1: View Candidates # We've combined most of the steps required for writing explanations into a single class for convenience: the `BabbleStream`. This will allow you to view candidates, submit explanations, analyze the resulting parses, save explanations that you're satisfied with, and generate label matrices from the parses you've saved so far. 
(The `Babbler` class seen in the Tutorial 1 is simply a subclass of `BabbleStream` that submits explanations as a batch and commits them immediately, for non-iterative workflows). # + from babble import BabbleStream babbler = BabbleStream(Cs, Ys, balanced=True, shuffled=True, seed=321) # - # Now that the `BabbleStream` has been initiated, we can run the cell below repeatedly to iterate through candidates for labeling. Some candidates will prove very difficult to give explanations for; **feel free to skip these**! The number of unlabeled candidates is often orders of magnitude larger than the number of explanations we need, so we can afford to skip the tricky ones. # # Since many explanations end up referring to distances between words, each candidate will be displayed in two ways: as a list of tokens, and as a single string. In both cases, curly brackets have been placed around the entities; these are shown for your convenience only and are not actually a part of the raw text. # + from babble.utils import display_candidate candidate = babbler.next() display_candidate(candidate) # - # ## Step 2: Write Explanations # Now, looking at candidates one by one, we can create `Explanation` objects. Each `Explanation` requires 3 things (with an optional 4th): # - A label: An integer (For this task, 1 if X and Y were/are/will soon be married, and 2 otherwise. # - A condition: See below for details. # - A candidate: This will be used by the filter bank inside to check for semantic consistency. # - A name: (Optional) Adding names can be helpful for bookkeeping if you have many explanations floating around. # # The condition should satisfy the following properties: # 1. **Complete Sentences**: Form a complete sentence when preceded by "I labeled it \[label\] because..." (i.e., instead of simply the phrase "his wife", it should be a statement like "'his wife' is in the sentence"). # 2. 
**X and Y**: Refer to the person who occurs first in the sentence as **X** and the second person as **Y**. (These can be overwritten with custom strings, but for now we'll stick with X and Y). # 3. **Valid Primitives**: Utilize primitives supported by the grammar. These include: # true, false, strings, ints, floats, tuples, lists, sets, and, or, not, any, all, none, =, !=, <, ≤, >, ≥, lowercase, uppercase, capitalized, all caps, starts with, ends with, substring, basic NER tags (person, location, date, number, organization), count, contains, intersection, map, filter, distances in words or characters, relative positions (left/right/between/within). # # The rule-based parser is naive, not comprehensive, and can certainly be improved to support more primitives. These are just some of the ones we found to be the most commonly used and easily supported. When tempted to refer to real-world concepts (e.g., the "last name" of X), see if you can capture something similar using the supported primitives (e.g., "the last word of X"). from babble import Explanation explanation = Explanation( name='LF_fiance_between', label=1, condition='The word "fiance" is between X and Y', candidate=candidate, ) # When we call `babbler.apply()`, our explanation is parsed into (potentially multiple) parses, which are then passed through the filter bank, removing any that fail. It returns a list of passing parses, and filtered ones. parses, filtered = babbler.apply(explanation) # You can view a pseudocode translation of your parse using the `view_parse()` method. babbler.view_parse(parses[0]) # At this point, if you're confident in the value of your explanation, you can go ahead and it to the set of parses to keep by calling `babbler.commit()`. But if you'd like to investigate its quality first, continue on to Step 3. 
# ## Step 3: Get Feedback # If you have a labeled dev set, you can evaluate your resulting parse's performance on that set to get an estimate of what it's accuracy and coverage are. You may be surprised at how good/bad/broad/narrow your explanations actually are. # # **NOTE:** There is a risk to doing this evaluation, however. The dev set is generally small; be careful not to overfit to it with your explanations! This is especially important if you use the same dev set for explanation validation and hyperparameter tuning. babbler.analyze(parses) # In this case, we see that our explanation yielded a labeling function that has rather low accuracy (~22%), and low coverage (~1%). # You can view examples of candidates your parse labeled correctly or incorrectly for ideas. Once the viewer is instantiated, you can simply rerun the cell with `viewer.view()` to move on to the next candidate. # + from babble.utils import CandidateViewer correct, incorrect = babbler.error_buckets(parses[0]) viewer = CandidateViewer(incorrect) # - viewer.view() # If you want to see what parses were filtered and why, there's a helper method for that as well. Because of the simplicity of the parser, even some seemingly simple explanations can be parsed incorrectly or failed to yield any valid parses at all. But be warned: in general, we find that time spent analyzing the parser's performance is rarely as productive as time spent simply producing more labeling functions, possibly varying the way you phrase your explanations or the types of signals you refer to. babbler.filtered_analysis(filtered) # ## Step 4: Update Explanations # If an explanation we propose has lower accuracy than we'd like, we can try tightening it up (reducing the number of false positives) by making it more specific. If it has lower coverage than we'd like, one simple way to boost it is to replace keywords with aliases. # # As was mentioned in Tutorial 1, aliases are sets of words that can be referred to with a single term. 
To add aliases to the babbler, we call `babbler.add_aliases` with a dictionary containing key-value pairs corresponding to the name of the alias and the set it refers to. babbler.add_aliases({'spouse': ['husband', 'wife', 'spouse', 'bride', 'groom', 'fiance']}) explanation = Explanation( name='LF_spouse_between', label=1, condition='A spouse word is between X and Y', candidate=candidate, ) parses, filtered = babbler.apply(explanation) babbler.analyze(parses) # We can see that broadening our explanation in this way improved our parse both in coverage and accuracy! We'll go ahead and commit this parse. babbler.commit() # In an ideal world, our parses would all have both high coverage and high accuracy. In practice, however, there is usually a tradeoff. When in doubt, we give a slight edge to accuracy over coverage, since the discriminative model can help with generalization, but it is unlikely to be much more precise than the model that generated its labels. # ## Step 5: Apply Label Aggregator # At any point, we can extract our growing label matrices to view the summary statistics of all the parses we've commited so far. # + from metal.analysis import lf_summary Ls = [babbler.get_label_matrix(split) for split in [0,1,2]] lf_names = [lf.__name__ for lf in babbler.get_lfs()] lf_summary(Ls[1], Ys[1], lf_names=lf_names) # - # Once we've committed parses (i.e., labeling functions) to our babbler, we can use them to train the label aggregator to see how we're doing overall. # + from metal import LabelModel from metal.tuners import RandomSearchTuner search_space = { 'n_epochs': [50, 100, 500], 'lr': {'range': [0.01, 0.001], 'scale': 'log'}, 'show_plots': False, } tuner = RandomSearchTuner(LabelModel, seed=123) label_aggregator = tuner.search( search_space, train_args=[Ls[0]], X_dev=Ls[1], Y_dev=Ys[1], max_search=20, verbose=False, metric='f1') # - # It may be somewhat suprising to see how quickly quality improves with the first few labeling functions you submit. 
But remember: each labeling function you provide results in tens or hundreds of labels, so your effective training set size can actually be growing quite quickly. But as with traditional labels, there will come a point when adding more labeling functions will yield diminishing returns, so it's good to check in on the overall quality of your label aggregator every once in a while.

# This process of iteratively tweaking explanations in response to parser feedback and dev-set performance is the core loop of writing explanations.

# # Your Turn!

# Now that you've seen the process, you can use this space to run your own iterative loop of explanation gathering.
#
# If you need ideas for explanations, you can browse the 200 examples written by graduate students under `tutorial/spouse/data/gradturk_explanations`. Note, however, that these were collected in a non-iterative setting (i.e., the explanations were collected without any feedback on their parseability or performance on a dev set), so many of them have fairly low coverage/accuracy and some may not parse at all.
#
# And remember--some candidates can be really tricky to come up with an explanation for, so feel free to skip!

# + from babble import BabbleStream babbler = BabbleStream(Cs, Ys, balanced=True, shuffled=True, seed=456) # - # ### Collection # + from babble.utils import display_candidate candidate = babbler.next() display_candidate(candidate) # - from babble import Explanation explanation = Explanation( name='', label=?, condition='', candidate=candidate, ) parses, filtered = babbler.apply(explanation) # ### Analysis babbler.analyze(parses) babbler.filtered_analysis(filtered) babbler.commit() # ### Evaluation # + from metal.analysis import lf_summary Ls = [babbler.get_label_matrix(split) for split in [0,1,2]] lf_names = [lf.__name__ for lf in babbler.get_lfs()] lf_summary(Ls[1], Ys[1], lf_names=lf_names) # + from metal import LabelModel from metal.tuners import RandomSearchTuner search_space = { 'n_epochs': [50, 100, 500], 'lr': {'range': [0.01, 0.001], 'scale': 'log'}, 'show_plots': False, } tuner = RandomSearchTuner(LabelModel, seed=123) label_aggregator = tuner.search( search_space, train_args=[Ls[0]], X_dev=Ls[1], Y_dev=Ys[1], max_search=20, verbose=False, metric='f1') # - # If you'd like to save the explanations you've generated, you can use the `ExplanationIO` object to write to or read them from file. # + from babble.utils import ExplanationIO FILE = "my_explanations.tsv" exp_io = ExplanationIO() exp_io.write(explanations, FILE) explanations = exp_io.read(FILE)
tutorial/Tutorial2_WriteExplanations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Studying the role of superspreading events in a simple example
#
# To study it in compartment models, we split them into many compartments. We generalize the contact matrix to the simple two sub-compartments case as
#
# $$
# \tilde{C}_{ij}^{\alpha\beta}=\begin{pmatrix}r^{N}\tilde{C}_{ij} &  & r^{C}(t)\,\tilde{C}_{ij}^{O}\\
# \\
# r^{C}(t)\tilde{C}_{ij}^{O} &  & \bar{r}^{N}\,\tilde{C}_{ij}^{O}
# \end{pmatrix}
# $$
#
# Thus, tuning the contact matrix by varying the parameter $r^C$ holds the key.

# %matplotlib inline
import numpy as np
import pyross
import matplotlib.pyplot as plt
from scipy.io import loadmat
np.set_printoptions(precision=2)
plt.rcParams.update({'font.size': 26})

# +
# get population in 4 age-groups: 0-20, 20-40, 40-60, 60-80
M0 = 16; Ni0 = pyross.utils.getPopulation("India", M0)
M = 4; Ni = np.zeros(M)
for i in range(M):
    Ni[i] = np.sum(Ni0[i*4:(i+1)*4])
N = np.sum(Ni)

# get contact matrix for M=4
CH0, CW0, CS0, CO0 = pyross.contactMatrix.India()
CH, CW, CS, CO = pyross.utils.get_summed_CM(CH0, CW0, CS0, CO0, M, M0, Ni, Ni0)
# -


def get_data(contactMatrix, x0, population=None):
    """Run a deterministic SEIR epidemic and return the symptomatic infectives.

    Parameters
    ----------
    contactMatrix : callable
        Function of time t returning the (8 x 8) contact matrix.
    x0 : np.array
        Initial condition: concatenation of (S, E, Ia, Is) over the 8 groups.
    population : np.array, optional
        Population of each of the 8 sub-groups. Defaults to the module-level
        ``Ni1`` (defined in the cells below), preserving the original
        notebook behaviour.

    Returns
    -------
    np.array
        Is(t), the symptomatic infectives over the 200-day simulation.
    """
    M = 8                # 4 age groups x 2 spreader types
    beta = 0.02          # probability of infection on contact
    gIa = 1./14          # removal rate of asymptomatic infectives
    gE = 1/4.72          # removal rate of exposeds
    gIs = 1./14          # removal rate of symptomatic infectives
    alpha = 0.           # asymptomatic fraction
    fsa = 1              # fraction by which symptomatic individuals do not self isolate

    # NOTE(review): the original implicitly relied on the global Ni1 defined
    # *below* this cell; the optional `population` argument makes that explicit.
    Ni_groups = Ni1 if population is None else population

    parameters = {'alpha': alpha, 'beta': beta, 'gIa': gIa, 'gIs': gIs, 'gE': gE, 'fsa': fsa}
    model = pyross.deterministic.SEIR(parameters, M, Ni_groups)

    # start simulation
    Tf, Nf = 200, 200
    data = model.simulator(x0, contactMatrix, Tf, Nf)
    return model.Is(data)


# +
# get new population for the two kinds of spreaders
rN = 0.2; brN = 1 - rN
rC = 0; M = 8
Ni1 = np.zeros(M); Ni1[0:4] = rN*Ni; Ni1[4:8] = brN*Ni

# block contact matrix: diagonal blocks carry full within-type contacts,
# off-diagonal blocks couple the two types through "other" contacts scaled by rC
CMS = np.zeros((M, M))
CMS[0:4, 0:4] = CH + CW + CS + CO
CMS[4:8, 0:4] = (CO)*rC/(rN)
CMS[0:4, 4:8] = (CO)*rC/(brN)
CMS[4:8, 4:8] = (CH + CW + CS + CO)


def contactMatrix(t):
    return CMS


print(CMS)

# initial conditions: infection seeded only in the first sub-compartment
Is_0 = np.zeros((M)); Is_0[0:4] = 2
E_0 = np.zeros((M)); E_0[0:4] = 4
x0 = np.concatenate((Ni1 - (Is_0 + E_0), E_0, Is_0*0, Is_0))
Is1 = get_data(contactMatrix, x0)

# initial conditions: infection seeded in both sub-compartments
Is_0 = np.zeros((M)); Is_0[0:8] = 1
E_0 = np.zeros((M)); E_0[0:8] = 2
x0 = np.concatenate((Ni1 - (Is_0 + E_0), E_0, Is_0*0, Is_0))
Is2 = get_data(contactMatrix, x0)

fig = plt.figure(num=None, figsize=(28, 8), dpi=80, facecolor='w', edgecolor='k')
# FIX: the two legend labels were swapped in the original -- Is1 is seeded in
# one sub-compartment only, Is2 is seeded in both.
plt.plot(np.sum(Is1, axis=1)/N, '-', lw=4, color='#A60628', label='infections only in one group', alpha=0.8);
plt.plot(np.sum(Is2, axis=1)/N, '-', lw=4, color='#A60628', label='infections in both groups', alpha=0.2);
plt.legend()
# -

# We can see that the initial condition makes a difference. Keeping the number of infections constant, we confine them to one group or to both. Also, if there are infected people in each group and $r^C=0$, the dynamics is the same as it would have been without the partitioning, since the two compartment values add up!

# +
# get new population for the two kinds of spreaders
rN = 0.2; brN = 1 - rN
rC = 0.1; M = 8
Ni1 = np.zeros(M); Ni1[0:4] = rN*Ni; Ni1[4:8] = brN*Ni

CMS = np.zeros((M, M))
CMS[0:4, 0:4] = CH + CW + CS + CO
CMS[4:8, 0:4] = (CO)*rC/(rN)
CMS[0:4, 4:8] = (CO)*rC/(brN)
CMS[4:8, 4:8] = (CH + CW + CS + CO)


def contactMatrix(t):
    return CMS


print(CMS)

# initial conditions: seeded only in the first sub-compartment
Is_0 = np.zeros((M)); Is_0[0:4] = 2
E_0 = np.zeros((M)); E_0[0:4] = 4
x0 = np.concatenate((Ni1 - (Is_0 + E_0), E_0, Is_0*0, Is_0))
IsC2 = get_data(contactMatrix, x0)

# +
# get new population for the two kinds of spreaders
rN = 0.2; brN = 1 - rN
rC = 1; M = 8
Ni1 = np.zeros(M); Ni1[0:4] = rN*Ni; Ni1[4:8] = brN*Ni

CMS = np.zeros((M, M))
CMS[0:4, 0:4] = CH + CW + CS + CO
CMS[4:8, 0:4] = (CO)*rC/(rN)
CMS[0:4, 4:8] = (CO)*rC/(brN)
CMS[4:8, 4:8] = (CH + CW + CS + CO)


def contactMatrix(t):
    return CMS


print(CMS)

# initial conditions: seeded only in the first sub-compartment
Is_0 = np.zeros((M)); Is_0[0:4] = 2
E_0 = np.zeros((M)); E_0[0:4] = 4
x0 = np.concatenate((Ni1 - (Is_0 + E_0), E_0, Is_0*0, Is_0))
IsC3 = get_data(contactMatrix, x0)

# +
fig = plt.figure(num=None, figsize=(28, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(np.sum(Is2, axis=1)/N, '-', lw=4, color='gray', label='basic SEIR', alpha=0.8)
plt.plot(np.sum(IsC2, axis=1)/N, '-', lw=4, color='#A60628', label='rC=0.1', alpha=0.6)
plt.plot(np.sum(IsC3, axis=1)/N, '-', lw=4, color='#A60628', label='rC=1.0', alpha=0.4)
plt.plot(np.sum(Is1, axis=1)/N, '--', lw=4, color='#A60628', label='rC=0.0', alpha=1)
plt.legend(fontsize=26, loc='upper right');
plt.grid()
plt.autoscale(enable=True, axis='x', tight=True)
plt.ylabel('Fraction of Infected individuals');
# -

# The initial condition is chosen such that the infection is seeded in only one of the sub-compartments. If the two classes do not talk, then the infection stays confined to this class; otherwise it runs through the whole population...
examples/contactMatrix/ex11-superSpreader.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.10 64-bit (''pymdp_env2'': conda)' # name: python3 # --- # # Tutorial on Active Inference with `pymdp` # This set of 3 tutorial notebooks aims to be an accessible introduction to discrete-state-space active inference modelling with the `pymdp` package. We assume no prerequisites other than a good grasp of Python and some basic mathematical knowledge (specifically some familiarity with probability and linear algebra). We assume no prior knowledge of active inference. Hopefully, by the end of this series of notebooks, you will understand active inference well enough to understand the recent literature, as well as implement your own agents! # These tutorials will walk you through the specification, and construction of an active inference agent which can solve a simple navigation task in a 2-dimensional grid-world environment. The goal here is to implement the agent 'from scratch'. Specifically, instead of just using magical library functions, we will show you an example of how these functions could be implemented from pure Python and numpy code. The goal at the end of these tutorials is that you understand at a fairly detailed level how active inference works in discrete state spaces and how to apply it, as well as how you could implement a simple agent without using the `pymdp` package. Once you understand what the `pymdp` package aims to abstract, we will go through the structure and functionality offered by the package, and show how you can construct complex agents in a simple and straightforward way. # # What is Active Inference? # Fundamentally, the core contention of active inference is that the brain (and agents in general) can be thought of as fundamentally performing (Bayesian) inference about the world. Specifically, an agent performs two functions. # # # 1.) **Perception**. 
An agent does not necessarily know the true state of the world, but instead must infer it from a limited set of (potentially ambiguous) observations. # # # 2.) **Action**. Typically, the agent can also perform the actions which change the state of the world. The agent can use these actions to drive the world towards a set of states that it desires. # # The theory of Active Inference argues that **both** perception and action can be represented and solved as Bayesian inference problems. # # What is Bayesian Inference? # Bayesian inference provides a recipe for performing *optimal* inference. That is, if you have some set of Hypotheses $H$ and some set of data $D$, then Bayesian inference allows you to compute the *best possible* update to your hypotheses given the data that you have. In other words, the best explanation $H$ that accounts for your actual data $D$. # # For instance, suppose you are a trader and you want to know whether some stock will go up tomorrow. And you have a set of information about that stock (for instance earnings reports, sales data, rumours a friend of a friend told you etc). Then Bayesian inference provides the *optimal way* to estimate the probability that the stock will go up. In this scenario, our "hypotheses" $H$ is that the stock will go up, or it will go down and our "data" $D$ is the various pieces of information we hold. # The fundamental equation in Bayesian inference is **Bayes Rule**. Which is # $$ # \begin{align} # p(H | D) = \frac{p(D | H)p(H)}{p(D)} # \end{align} # $$ # Here $p(H)$ etc are *probability distributions*. All a probability distribution is is a function that assigns a probability value (between 0 and 1) to a specific outcome. A probability distribution then represents the probability of that outcome for every possible outcome. For instance, take p(H). 
This is the probability distribution over our *hypothesis space*, which you can think of as our baseline assumptions about whether the stock tends to go up or down i.e. $p(H) = [p(stock\_goes\_up), p(stock\_goes\_down)]$, before we've encountered any data that provides evidence for/against either hypothesis. # # If we assume we have no idea whether the stock will go up or down, we can say that the probability in each case is 0.5 so that $p(H) = [0.5, 0.5]$. When there is a discrete set of possible outcomes or states, probability distributions can be simply represented as vectors with one element for each outcome - where the element itself is simply the probability of seeing that outcome. The sum of all the elements of a probability distribution must equal 1. The vector's elements encode the probabilities of *all possible events*, so one of them *must* occur -- i.e. the probability of seeing *some* outcome is 100%. # # There are three fundamental quantities in Bayesian inference: the **posterior**, the **likelihood** and the **prior**. The posterior is ultimately the goal of inference, it is $p(H | D)$. What this represents is the probability of each hypothesis in the hypothesis space *given the data $D$*. You can think of as you best guesses at the truth of each hypothesis after optimally integrating the data. Next is the **prior** $p(H)$ which represents your assumptions about how likely each hypothesis is *prior to seeing the data*. Finally, there is the likelihood $p(D | H)$ which quantifies how likely each outcome you see is, given the different hypotheses. The likelihood distribution can also be thought of as a *model* for how the data relates to the hypotheses. # # The key insight of Bayes rule is simply that the posterior probability -- how likely the hypothesis is, given the data -- is simply the likelihood times the prior. 
This multiplication can be expressed as computing the following: how likely is the data you actually saw, given the different hypotheses ($P(D = d | H)$), multiplied by the prior probability you assign to each hypothesis ($P(H)$). So the full posterior is a distribution over the different hypotheses - in the case of discrete / Categorical distributions, your posterior $p(H |D)$ will also be a vector of probabilities, e.g. $p(H | D) = [0.75, 0.25]$. # # Effectively, hypotheses are more likely if they can predict the data well, but are also weighted by their a-priori probability. The marginal likelihood $p(D)$ is basically just there to normalize the posterior (i.e. make sure it equals 1). # # Generative Models # In Active Inference we typically talk about *generative models* as a core component. But what are generative models? Technically a generative model is simply the product of a likelihood and a prior, for all the possible data points $D$ and hypotheses $H$. This is also known as a *joint distribution* $p(H,D) = p(D | H)p(H)$. The generative model, then, is simply the numerator of Bayes rule, and if we normalize it we can compute posterior probabilities. It is called a generative model because it allows you to *generate* samples of the data. To do so, we follow the following steps: # # 1.) Sample a hypothesis $h_i$ from the prior -- i.e. $p(H)$ # # 2.) Sample a datum $d_i$ from the likelihood distribution, given the particular hypothesis $h_i$ that you sampled i.e. sample $d_i$ from $p(D | H = h_i)$. # # Another way to think about a generative model is that it is simply a model, or set of beliefs/assumptions, of how observed data are generated. This is often a very helpful way to think about inference problems, since it aligns with our notion of causality -- i.e. there are unknown processes in the world which generate data. If we can imagine a process and then imagine the data generated by this process, then we are imagining a generative model. 
Inference of the posterior, on the other hand, is more difficult because it goes in the *reverse* direction -- i.e. you have some set of observations and want to reconstruct the process that gave rise to them. Fundamentally, all of Bayesian statistics can be broken down into two steps: # # 1.) Make a mathematical model of the data-generating process - the sort of environmental / world structures you think could give rise to the sort of data you have (i.e. come up with a generative model). Generative models are classically written down using a set of unknown parameters (e.g. parameters that describe probability distributions, like sufficient statistics). You want to "fit" (read: infer the values of) these parameters, given some data. # # 2.) Given your generative model and some data, compute the posterior distribution over the unknown parameters using Bayes rule, or some approximation of Bayes rule. # # 3.) Be happy! # # All the methods in Bayesian statistics essentially fall into two classes. Coming up with more expressive and powerful generative models and then figuring out algorithms to perform inference on them. # # Why is Bayesian Inference hard? # At this point, you may be wondering: Bayesian inference seems pretty easy. We known Bayes rule. We can invent generative models easily enough. Computing the posterior is just taking the generative model and dividing it by $p(D)$. Why all this fuss? Why do we need to make a whole academic field out of this anyway? Luckily this has a straightforward answer. Bayesian inference is hard for essentially just one reason: that computing $p(D)$, the normalizing constant, is hard. # # Let's think about why. $p(D)$, which is known as the **marginal likelihood**, is fundamentally just the probability of the data. What does that mean? How is there just some free-floating probability of the data? Fundamentally there isn't. $p(D)$ is the probability of the data *averaged over all possible hypotheses*. 
We can write this as, # $$ # \begin{align} # p(D) = \sum_h p(D | H)p(H) # \end{align} # $$ # # Effectively, $p(D)$ is the sum over all possible hypotheses of the probability of the data given that hypothesis, weighted by the prior probability of that hypothesis. This is challenging to compute in practice because you are often using really large (or indeed often infinite) hypothesis spaces. For instance, suppose your trader doesn't just want to know whether the stock will go up or down but *how much* it will go up or down. Now, with this simple change, you have an *infinite* amount of hypotheses: $p(H) = [p(stock\_goes\_up\_0.00001), p(stock\_goes\_up\_0.00002), p(stock\_goes\_up\_0.00003) ...]$. Then, if we want to compute the posterior in this case, we need to sum over every one of this infinite amount of hypotheses. You can see why this ends up being quite challenging in practice. # # Because of this intrinsic difficulty, there are a large number of special case algorithms which can solve (or usually approximate) the Bayesian posterior in various cases, and a large amount of work in statistics goes into inventing new ones or improving existing methods. Active Inference agents use one special class of approximate Bayesian inference methods called *variational methods* or *variational inference*. This will be discussed in much more detail in notebook 2. # # Beyond merely the difficulty of performing inference, another reason why Bayesian statistics is hard is that you often *don't know* the generative model. Or at least you are uncertain about some aspects of it. There is also a wide class of methods which let you perform inference and *learning* simultaneously, including for active inference, although they won't be covered in this tutorial. # # A Generative Model for an Agent # In Active Inference, we typically go a step beyond the simple cases of Bayesian inference described above, where we have a static set of hypotheses and some static data. 
We are instead interested in the case of an *agent* interacting with a *dynamic environment*. The key thing we need to add to the formalism to accomodate this is a notion of *time*. We consider an environment consisting of a set of *states* $[x_1, x_2, \dots x_t]$ evolving over time. Moreover, there is an agent which is in the environment over time. This agent receives a set of *observations* $[o_1, o_2 \dots o_t]$, which are a function of the state of the environment, and it can emit *actions* $[a_1, a_2 \dots a_t]$ which can change the state of the environment. The key step to get a handle on this situation mathematically is to define a *generative model* of it. # # To start, we must make some assumptions to simplify the problem. Specifically, we assume that the *state* of the environment $x_t$ only depends on the state at the previous timestep $x_{t-1}$ and the action emitted by the agent at the previous timestep $a_{t-1}$. Then, we assume that the observation $o_t$ at a given time-step is only a function of the environmental state at the current timestep $x_t$. Together, these assumptions are often called the **Markov Assumptions** and if the environment adheres to them it is often called a **Markov Decision Process**. # # The general computational "flow" of a Markov decision process can be thought of as following a sequence of steps. # # 1.) The state of the environment is $x_t$. # # 2.) The environment state $x_t$ generates an observation $o_t$. # # 3.) The agent receives an observation $o_t$, and based on it (or inferences derived thereof) decides to take some action $a_t$, which it executes in the environment. # # 4.) Given the current state $x_t$ and the agent's action $a_t$, the environment updates its own state to produce $x_{t+1}$ # # 5.) Go back to step 1. # # Now that we have this series of steps, we can try to define what it means mathematically. Specifically, we need to define two quantities. # # a.) 
We need to know how the state of the environment $x_t$ is reflected in the observation sent to the agent $o_t$. In Bayesian terms from the agent's perspective, the true state of the environment is unknown, and its various possible states can be thought of as "hypotheses", while the observations it receives are "data". The agent's generative model encodes some prior assumptions about each possible state $x_t$ (i.e. each hypothesis) relates to the probability of seeing each possible outcome $o_t$. This relationship (from the agent's perspective) is a function known as the **likelihood distribution** $p(D | H)$ or, in our new notation $p(o_t | x_t)$. # # b.) We need to know how the environment updates itself to form its new state $x_{t+1}$, given the old one $x_t$ and the action $a_t$. This can be thought of (from the perspective of the agent who does not observe it) as the **prior** since it can be thought of as the default expectation the agent can have about the state of the environment prior to receiving any observations. It can be written as $p(x_t | x_{t-1}, a_{t-1})$. This distribution is also known as the **transition distribution** since it specifies (the agent's assumptions about) how the environment transitions from one state to another. # # These two distributions $p(o_t | x_t)$ and $p(x_t | x_{t-1}, a_{t-1})$ are all that is needed to specify the evolution of the environment. At this point, it is necessary to make a distinction between the *actual* evolution of the environment -- known as the **generative process** -- and the *agent's model* of the evolution of the environment, which is known as the **generative model**. These are not necessarily the same, although in some sense the goal of the agent is to figure out a generative model that is as close to the true generative process as possible. 
In this example, we will consider a scenario in which the agent knows the true model, so the generative model and the generative process are the same, although this is not always the case. # # To make this concrete, all that is necessary to do is to specify precisely what the **likelihood** and **transition** distributions actually are. # # In the case of discrete state spaces, where it is possible to explicitly enumerate all states, a very generic way of representing these distributions is as *matrices*. Specifically, we can represent the likelihood distribution as a matrix denoted $\textbf{A}$, which is of shape $dimension\_of\_observation \times dimension\_of\_state$. The element $A_{i,j}$ of $\textbf{A}$ represents the probability that observation $i$ is generated by state $j$. # # Secondly, we can represent the transition distribution by a matrix $\textbf{B}$ of shape $state\_dim \times state\_dim \times action\_dim$ where element $\textbf{B}_{i,j,k}$ represents the probability of the environment moving to state $i$ given that it was previously in state $j$ and that action $k$ was taken. # # In the rest of this notebook, we will explicitly write down the generative process / generative model for a simple grid-world environment in code, to get a better handle on how the environment and the agent's model is specified. In the next notebook, we will turn to inference and action selection and discuss how active inference solves these two tricky problems. import numpy as np import matplotlib.pyplot as plt import seaborn as sns # ## Constructing a Generative Model # For the rest of this notebook, we will construct a simple generative model for an active inference agent navigating a 3x3 grid world environment. The agent can perform one of 5 movement actions at each time step: `LEFT, RIGHT, UP, DOWN, STAY`. The goal of the agent is to navigate to its preferred position. # # We will create matrices for both the environment as well as the agent itself. 
As we go up levels of abstraction, these environment and generative model matrices will be imported from classes - but this notebook is the lowest level representation of construction, to show how everything is built from the ground up. # ## Understanding the state space # The first thing to note is that we are in a 3x3 grid world which means we have 9 states in total. We can define the following mapping to better understand the space. state_mapping = {0: (0,0), 1: (1,0), 2: (2,0), 3: (0,1), 4: (1,1), 5:(2,1), 6: (0,2), 7:(1,2), 8:(2,2)} # All we're doing with this mapping dictionary is assigning a particular index (`0`, `1`, `2`, ..., `8`) to each grid position, which is defined as a pair of `(x, y)` coordinates ( `(0, 0), (1, 0)`, ..., `(2, 2)`). We will use the linear indices to refer to the grid positions in our probability distributions (e.g. `P(o_t | x_t = 5)`), so this `state_mapping` will allow us to easily move between these linear indices and the grid world indices. These kinds of mappings are very handy for intuition and visualization. # And the following heatmap just represents how the coordinates map to the real grid space grid = np.zeros((3,3)) for linear_index, xy_coordinates in state_mapping.items(): x, y = xy_coordinates grid[y,x] = linear_index # rows are the y-coordinate, columns are the x-coordinate -- so we index into the grid we'll be visualizing using '[y, x]' fig = plt.figure(figsize = (3,3)) sns.set(font_scale=1.5) sns.heatmap(grid, annot=True, cbar = False, fmt='.0f', cmap='crest') # ## Likelihood Matrix: A # The likelihood matrix represents $P(o_t | x_t)$ , the probability of an observation given a state. In a grid world environment, the likelihood matrix of the agent is identical to that of the environment. It is simply the identity matrix over all states (in this case 9 states, for a 3x3 grid world) which represents the fact that the agent has probability 1 of observing that it is occupying any state x, given that it is in state x. 
This just means that the agent has full transparency over its own location in the grid. A = np.eye(9) A # We can also plot the likelihood matrix as follows: labels = [state_mapping[i] for i in range(A.shape[1])] def plot_likelihood(A): fig = plt.figure(figsize = (6,6)) ax = sns.heatmap(A, xticklabels = labels, yticklabels = labels, cbar = False) plt.title("Likelihood distribution (A)") plt.show() plot_likelihood(A) # ## Transition matrix: B # The transition matrix determines how the agent can move around the gridworld given each of the 5 available actions (UP, DOWN, LEFT, RIGHT, STAY). # So the transition matrix will be a 9x9x5 matrix, where each entry corresponds to an end state, a starting state, and the action that defines that specific transition. # # To construct this matrix, we have to understand that when the agent is at the edges of the grid, it cannot move outward, so trying to move right at the right wall will cause the agent to stay still. # # We will start by constructing a dictionary which we call P, which maps each state to its next state given an action state_mapping # + P = {} dim = 3 actions = {'UP':0, 'RIGHT':1, 'DOWN':2, 'LEFT':3, 'STAY':4} for state_index, xy_coordinates in state_mapping.items(): P[state_index] = {a : [] for a in range(len(actions))} x, y = xy_coordinates '''if your y-coordinate is all the way at the top (i.e. y == 0), you stay in the same place -- otherwise you move one upwards (achieved by subtracting 3 from your linear state index''' P[state_index][actions['UP']] = state_index if y == 0 else state_index - dim '''f your x-coordinate is all the way to the right (i.e. x == 2), you stay in the same place -- otherwise you move one to the right (achieved by adding 1 to your linear state index)''' P[state_index][actions["RIGHT"]] = state_index if x == (dim -1) else state_index+1 '''if your y-coordinate is all the way at the bottom (i.e. 
y == 2), you stay in the same place -- otherwise you move one down (achieved by adding 3 to your linear state index)''' P[state_index][actions['DOWN']] = state_index if y == (dim -1) else state_index + dim ''' if your x-coordinate is all the way at the left (i.e. x == 0), you stay at the same place -- otherwise, you move one to the left (achieved by subtracting 1 from your linear state index)''' P[state_index][actions['LEFT']] = state_index if x == 0 else state_index -1 ''' Stay in the same place (self explanatory) ''' P[state_index][actions['STAY']] = state_index # - P # From here, we can easily construct the transition matrix num_states = 9 B = np.zeros([num_states, num_states, len(actions)]) for s in range(num_states): for a in range(len(actions)): ns = int(P[s][a]) B[ns, s, a] = 1 # B is a very large matrix, we can see its shape below, which is as expected: B.shape # We can also visualize B on the plots below. The x axis is the starting state, and the y axis is the ending state, and each plot corresponds to an action given by the title. fig, axes = plt.subplots(2,3, figsize = (15,8)) a = list(actions.keys()) count = 0 for i in range(dim-1): for j in range(dim): if count >= 5: break g = sns.heatmap(B[:,:,count], cmap = "OrRd", linewidth = 2.5, cbar = False, ax = axes[i,j], xticklabels=labels, yticklabels=labels) g.set_title(a[count]) count +=1 fig.delaxes(axes.flatten()[5]) plt.tight_layout() plt.show() # Now our generative model and environment are set up, and we can move on to Notebook 2, where we go through the core mechanics of how to perform inference and planning on this environment with this generative model.
examples/gridworld_tutorial_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rafi007akhtar/coursera-tensorflow/blob/master/Intro_to_Tensorflow.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="gbegZ1EGbWuy" colab_type="text"
# # Intro to Tensorflow
#
# Getting my hands dirty with Tensorflow for the first time.

# + [markdown] id="Upu3jqWvcaeZ" colab_type="text"
# ## Guess the number next in the series
#
# This notebook will try to guess the next number in the series that follows the following pattern.
#
# y = 2x - 1
#
# The first few numbers of this series, starting from `x = 0`, are:
#
# ```
# -------------------------------------------------
# | x =  0 | 1 | 2 | 3 | 4 | 5 |  6 |  7 |  8 |  9 |
# ---------+---+---+---+---+---+----+----+----+-----
# | y = -1 | 1 | 3 | 5 | 7 | 9 | 11 | 13 | 15 | 17 |
# -------------------------------------------------
# ```
#
# Accordingly, for `x = 10`, the neural network should return `y = 19`, or a value as close as that.

# + [markdown] id="PGExgSPoeKrL" colab_type="text"
# ## Import dependencies

# + id="oaVgGCz1eH0E" colab_type="code" colab={}
import tensorflow as tf
import numpy as np
from tensorflow import keras

# + [markdown] id="dwY28ghtmJ25" colab_type="text"
# ## Set up the network
#
# The following neural network is 1-layer deep consisting of one neuron only - the simplest kind of neural network!

# + colab_type="code" id="OULhawKLldTk" colab={}
# One Dense layer with a single neuron; `input_shape=[1]` because each
# example is a single scalar x.
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])

# + [markdown] id="BWJSjGp2lt4F" colab_type="text"
# In the above model, the layers are given in a sequence, hence the method `keras.Sequential` is used. Of course, there is only one layer, so there's that. More about this method [here](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential).
#
# Furthermore, the one layer present is supposed to have densely connected neurons, hence the method `keras.layers.Dense` is used. Of course there is only one neuron, again. For the same reason, the argument `input_shape` is supplied.
#
# This method implements the operation `activate(y)` where `y = x.w + b`. More about this method and its parameters in the official documentation [here](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense).

# + [markdown] id="3oEgS_ComkIp" colab_type="text"
# ## Define and compile the network
#
# The above model will be trained by the data previously described as the set of examples, in form of numpy lists.

# + id="3vY8PgCSmZae" colab_type="code" colab={}
x_train = np.array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=float)
y_train = np.array([-3, -1, 1, 3, 5, 7, 9, 11, 13, 15, 17], dtype=float)

# + [markdown] id="DKb2sIKEoKdA" colab_type="text"
# The next step is to compile it. A model cannot be trained if it is not compiled.

# + id="H-Sy9qkKoY_8" colab_type="code" outputId="3f5a6f2e-3220-4c7d-972c-81c5440e4f9b" colab={"base_uri": "https://localhost:8080/", "height": 88}
model.compile(optimizer="sgd", loss="mean_squared_error")

# + [markdown] id="myX-YXVcohEo" colab_type="text"
# The optimizer used is stochastic gradient descent, and the loss function is mean squared loss.

# + [markdown] id="xYxkeNc1oqny" colab_type="text"
# ## Train the model

# + [markdown] id="DwJUXe8GndP_" colab_type="text"
# The above data arrays will be fed into the model and trained using `model.fit` method, for 500 epochs.

# + id="2VlhWkT7naLb" colab_type="code" outputId="fc08e161-487a-4cfc-e582-8adbd8c758a3" colab={"base_uri": "https://localhost:8080/", "height": 17034}
model.fit(x_train, y_train, epochs=500)

# + [markdown] id="-vqCTTSUvqY-" colab_type="text"
# ## Test your model on new inputs

# + id="poulCTdInuGV" colab_type="code" outputId="7662283a-4572-43a9-926b-6484b43c90bd" colab={"base_uri": "https://localhost:8080/", "height": 119}
# Let's test the model on inputs from 10 to 15
x_test = [i for i in range(10, 16)]
y_test = model.predict(x_test)
# print(y_test)
for i in range(6):
    print(f"y({x_test[i]}) = {y_test[i][0]}")

# + [markdown] id="IatCXMPsymNM" colab_type="text"
# ## Utility functions
#
# The following utility functions will be needed for getting the accuracy of my model on the tested data.

# + id="Vi6ysRVjyuEo" colab_type="code" colab={}
def get_y(x_vals):
    """Return the ground-truth series values y = 2x - 1 for every x in `x_vals`."""
    return [2 * x - 1 for x in x_vals]


def accuracy(y_test, y):
    """Mean ratio of predicted to actual values (1.0 == perfect).

    > y_test: values of y obtained on testing the model; each entry may be
      a plain scalar or a 1-element row as returned by `model.predict`
    > y: actual values of y

    Raises ValueError if the two sequences differ in length or are empty.
    (Input validation uses explicit raises rather than `assert`, which is
    stripped when Python runs with -O.)
    """
    if len(y_test) != len(y):
        raise ValueError("y_test and y must have the same length")
    if len(y) == 0:
        raise ValueError("cannot compute accuracy of empty sequences")
    ratios = []
    for predicted, actual in zip(y_test, y):
        # model.predict returns a 2-D array; unwrap 1-element rows, but also
        # accept already-flat scalars so the helper is reusable.
        value = predicted[0] if hasattr(predicted, '__len__') else predicted
        ratios.append(value / actual)
    return sum(ratios) / len(ratios)

# + [markdown] id="tlYvU5vBz7rg" colab_type="text"
# ## Check the accuracy of the model

# + id="WXNHxbXXz0ib" colab_type="code" outputId="1817b0c7-6e4f-4277-b6ad-c5ec8d57a4e5" colab={"base_uri": "https://localhost:8080/", "height": 34}
y = get_y(x_test)
acc = accuracy(y_test, y)
print(f"Accuracy of the model: {acc}")

# + [markdown] id="PZkJsfYC0dyb" colab_type="text"
# Thus, the accuracy of the above model is around 99.99%, as defined by my simple `accuracy` function above, although there exist better ways to calculate accuracy.
#
# The accuracy isn't 100% because of many reasons, some of which might be:
# - the size of the dataset is too small
# - the number of epochs is too few
# - the neural network is too shallow
# - the optimizer and loss function are not the proper ones
#
# and so forth.
#
# There are several ways to increase the accuracy of the model, or in some cases, reduce the over-fitting.
#

# + id="2n-bsPm61o67" colab_type="code" colab={}
Intro_to_Tensorflow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Coal production in mines 2013 # # by: <NAME> # # Abstract: We did alot of analysis, came to good conclusion. # + # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import explained_variance_score, r2_score, mean_squared_error sns.set(); # - # ## Cleaned Data # # We cleaned this data in the notebook stored in: deliver/Data_cleaning.ipynb df1 = pd.read_csv("../data/cleaned_coalpublic2013.csv", index_col='MSHA ID') df1[['year', 'mine_name']].head() # # Predict the Production of coal mines df1['log_production'] = np.log(df1['production_(short_tons)']) df1.union_code.unique() # + features = ['average_employees', 'labor_hours'] categoricals = ['mine_state', 'mine_county', 'mine_status', 'mine_type', 'company_type', 'operation_type', 'operating_company', 'union_code', 'coal_supply_region'] target = 'log_production' # - sns.set_context('poster') fig = plt.subplots(figsize=(14,8)) sns.violinplot(y="company_type", x="log_production", data=df1, split=True, inner="stick") plt.tight_layout() plt.savefig("../figures/Coal_prediction_company_type_vs_log_production.png") dummy_categoricals = [] for categorical in categoricals: # Avoid the dummy variable trap (need to remove one)! 
drop_var = sorted(df1[categorical].unique())[-1] temp_df = pd.get_dummies(df1[categorical], prefix=categorical) df1 = pd.concat([df1, temp_df], axis=1) temp_df.drop('_'.join([categorical, str(drop_var)]), axis=1, inplace=True) dummy_categoricals += temp_df.columns.tolist() # # Random Forest Regressor train, test = train_test_split(df1, test_size=0.3) rf = RandomForestRegressor(n_estimators=100, oob_score=True) rf.fit(train[features + dummy_categoricals], train[target]) fig = plt.subplots(figsize=(8,8)) sns.regplot(test[target], rf.predict(test[features + dummy_categoricals])) plt.ylabel("Predicted Production") plt.xlim(0, 22) plt.ylim(0, 22) plt.tight_layout() plt.savefig("../figures/Coal_prediction_rf_predition.png") # + predicted = rf.predict(test[features + dummy_categoricals]) print "R^2 score: ", r2_score(test[target], predicted) print "MSE: ", mean_squared_error(test[target], predicted) # - rf_importances = pd.DataFrame({'name':train[features + dummy_categoricals].columns, 'importance':rf.feature_importances_ }).sort_values(by='importance', ascending=False).reset_index(drop=True) rf_importances.head(5) # # Conclusion # # A detailed and amazing conclusion
deliver/Coal_prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: DESI master
#     language: python
#     name: desi-master
# ---

# ## Everest

import os
import numpy as np
import fitsio
from astropy.table import Table

# +
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='talk', style='ticks', palette='deep', font_scale=1.3)#, rc=rc)
colors = sns.color_palette()
# %matplotlib inline
# -

# Catalog and figure directories for this production, rooted at $DESI_ROOT.
specprod = 'everest'
rootdir = os.path.join(os.getenv('DESI_ROOT'), 'spectro', 'fastspecfit')
catdir = os.path.join(rootdir, specprod, 'catalogs')
figdir = os.path.join(os.getenv('DESI_ROOT'), 'users', 'ioannis', 'fastspecfit', specprod)
print(catdir)
print(figdir)


def read_results(prefix='fastspec', survey='main', program='bright'):
    """Load one fastspecfit catalog together with its metadata table.

    Reads the FITS file named {prefix}-{specprod}-{survey}-{program}.fits
    from `catdir` and returns the (fit, metadata) table pair, restricted
    to rows whose redshift fit raised no warning (ZWARN == 0).
    """
    cat_path = os.path.join(catdir, '{}-{}-{}-{}.fits'.format(prefix, specprod, survey, program))
    # The metadata extension drives the row selection, so read it first.
    meta = Table(fitsio.read(cat_path, 'METADATA'))
    fast = Table(fitsio.read(cat_path, prefix.upper()))
    keep = meta['ZWARN'] == 0
    return fast[keep], meta[keep]


fastspec, metaspec = read_results('fastspec', 'sv3', 'bright')
fastspec

metaspec

fastphot, metaphot = read_results('fastphot', 'sv3', 'dark')

metaphot
doc/nb/everest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6 - AzureML
#     language: python
#     name: python3-azureml
# ---

import numpy as np
from tqdm import tqdm, trange
import sys
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies

print(sys.version)

# +
# Pin the Python/pip dependencies the scoring service needs and persist
# them as a conda environment file next to this notebook.
conda_dep = CondaDependencies().create(python_version='3.6.9')
conda_dep.add_pip_package("azureml-defaults")  # add this first
conda_dep.add_pip_package("torch")
conda_dep.add_pip_package("torchvision")
conda_dep.add_pip_package("tqdm")

env_file = 'environment_internal.yml'
with open(env_file, "w") as f:
    f.write(conda_dep.serialize_to_string())
print(conda_dep.serialize_to_string())
print("Saved dependency info in", env_file)

# +
import azureml.core
print("SDK version:", azureml.core.VERSION)

from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
# -

# Upload the training data and register it as a tabular dataset.
from azureml.core import Dataset
datastore = ws.get_default_datastore()
datastore.upload_files(files=['./chembl_1mil_dataset.csv'], target_path='./', overwrite=True)
input_dataset = Dataset.Tabular.from_delimited_files(path=[(datastore, '/mnt/batch/tasks/shared/LS_root/mounts/clusters/mid-compute/code/users/aryanmisra/rdkit_testing_ground/1000smiles.csv')])

model_path = "./generative_model_max"

# +
# registering model
from azureml.core.model import Model

model = Model.register(model_path=model_path,
                       model_name="gen_model_JAK2",
                       tags={'drug': "design", 'patient': "RL"},
                       description="Generate molecules",
                       workspace=ws)
# -

print('Name:', model.name)
print('Version:', model.version)

# +
# Build the runtime environment from the conda file written above
# (it lives at ./environment_internal.yml relative to this notebook).
environment = Environment.from_conda_specification('base_env', './environment_internal.yml')

# +
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice
from azureml.exceptions import WebserviceException

# BUGFIX: a first, dead InferenceConfig(runtime=..., conda_file='../environment_internal.yml')
# used to be created here and was immediately overwritten; it also pointed at a
# conda file one directory up, which does not exist (the file is written to
# './environment_internal.yml').  Only this Environment-backed config is deployed.
inference_config = InferenceConfig(entry_script='score.py', environment=environment)

# +
from azureml.core.compute import AksCompute, ComputeTarget

# Use the default configuration (you can also provide parameters to customize this).
# For example, to create a dev/test cluster, use:
prov_config = AksCompute.provisioning_configuration(cluster_purpose = AksCompute.ClusterPurpose.DEV_TEST)
# prov_config = AksCompute.provisioning_configuration()

aks_name = 'myaks2'
# Create the cluster
aks_target = ComputeTarget.create(workspace = ws,
                                  name = aks_name,
                                  provisioning_configuration = prov_config)

# Wait for the create process to complete
aks_target.wait_for_completion(show_output = True)
# -

# Attach to the existing dev/test AKS cluster rather than recreating it.
from azureml.core.compute import AksCompute, ComputeTarget
resource_group = ws.resource_group
cluster_name = "myaks"

attach_config = AksCompute.attach_configuration(resource_group = resource_group,
                                                cluster_name = cluster_name,
                                                cluster_purpose = AksCompute.ClusterPurpose.DEV_TEST)
aks_target = ComputeTarget.attach(ws, "myaks", attach_config)

# +
from azureml.core.webservice import AksWebservice, Webservice
from azureml.core.model import Model

# aks_target = AksCompute(ws,"myaks")
# If deploying to a cluster configured for dev/test, ensure that it was created with enough
# cores and memory to handle this deployment configuration. Note that memory is also used by
# things such as dependencies and AML components.
deployment_config = AksWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1)
service = Model.deploy(ws, "myservice1", [model], inference_config, deployment_config, aks_target)

service.wait_for_deployment(show_output = True)
print(service.state)
print(service.get_logs())
# -
scoring_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/misabhishek/gcp-iam-recommender/blob/main/IAM_recommender_demo_for_Google_Cloud_Storage_buckets.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="6jODJX4o5QiX"
# # IAM recommender demo for Google Cloud Storage buckets
#
# The purpose of this colab is to give you a glimpse of what you can do with GCS buckets' recommendations in achieving least privilege.
#
# You will learn how to get IAM Recommendations for GCS buckets using gcloud, BigQuery export, and Curl API.
#
# ## Before you begin
# 1. Be familiar with [API authentication flow](https://cloud.google.com/docs/authentication).
# 2. Understand the [IAM recommender](https://cloud.google.com/iam/docs/recommender-overview) and [enable its API](https://cloud.google.com/recommender/docs/enabling?hl=en).
# 3. Make sure you have the [IAM permissions](https://cloud.google.com/iam/docs/recommender-managing#required-permissions) that you need to work with the IAM recommender. **You need these permissions on a project, folder, or organization that contains the storage bucket whose recommendations you want to manage.**
# 4. Review the IAM recommender best practices.
#
# **Note:** Please take this [tutorial](https://colab.research.google.com/?utm_source=scs-index) if you are not familiar with colab.

# + [markdown] id="yHoO2dFabgfl"
# ## Authorize the colab
#
# We are using google_auth library to authorize. Please read more about it [here](https://google-auth.readthedocs.io/en/master/#google-auth).
#
# Note that this colab can also be used as a standalone jupyter notebook and can be used with service account authentications.

# + id="8qwoaPzfbejL"
from google.colab import auth as google_auth
google_auth.authenticate_user()
print("authorized")

# + cellView="form" id="7EUPP6qI7pza"
#@title Enter the configuration that will be used throughout the colab
#@markdown The project where the buckets reside:
project = "your-project-id" #@param {type:"string"}
#@markdown Location of the buckets:
location = "us" #@param {type:"string"}
#@markdown The project that you used for onboarding the private preview:
billing_project = "your-billing-project-id" #@param {type:"string"}

# + [markdown] id="rr8ss-Y6jExM"
# **Note:** To execute shell command in colab, just prepend it with exclamation mark (!). For example, one can execute `ls` command as below in colab.
#
# ```
#  # !ls
# ```
#
# We used this trick extensively to run `gcloud` and `curl` command.

# + [markdown] id="5gs-NOa9_-L7"
# # gcloud demo
#
# In this demo, we present how to access the IAM recommendations and IAM Policy insights for GCS buckets using gcloud.

# + [markdown] id="49XJ52-ynZrg"
# ## Recommendation

# + [markdown] id="lNstuvIt8CFG"
# ### All bucket recommendations for a given project and location

# + id="W42IgN6qllZx"
# !gcloud recommender recommendations list \
#   --location="{location}" \
#   --recommender=google.iam.policy.Recommender \
#   --project="{project}" \
#   --format="table[box,title='IAM Recommendations at GCS bucket level'](targetResources.basename().flatten():sort=1:label=BUCKET,content.overview.member,content.overview.removedRole,content.overview.addedRoles,priority)" \
#   --billing-project="{billing_project}" \

# + [markdown] id="xxWFZR5-8VBS"
# ### Recommendations to curtail public access on GCS buckets

# + id="WQYf6Idw8bN-"
# !gcloud recommender recommendations list \
#   --location="{location}" \
#   --recommender=google.iam.policy.Recommender \
#   --project="{project}" \
#   --billing-project="{billing_project}" \
#   --format="table[box,title='IAM Recommendations for curtailing public access of GCS buckets'](targetResources.basename().flatten():sort=1:label=BUCKET,content.overview.member,content.overview.removedRole,content.overview.addedRoles,priority)" \
#   --filter="content.overview.member:allUsers OR content.overview.member:allAuthenticatedUsers"

# + [markdown] id="yc0IVK5EBweu"
# ### Recommendations to curtail cross-project access

# + id="oKLwxA6K-9tK"
# !gcloud recommender recommendations list \
#   --location="{location}" \
#   --recommender=google.iam.policy.Recommender \
#   --project="{project}" --format=json \
#   --billing-project="{billing_project}" \
#   --format="table[box,title='IAM Recommendations for curtailing public access of GCS buckets'](targetResources.basename().flatten():sort=1:label=BUCKET,content.overview.member,content.overview.removedRole,content.overview.addedRoles,priority)" \
#   --filter="content.overview.member:project* AND NOT content.overview.member:{project}"

# + [markdown] id="th23mM0cB7Tr"
# ### Recommendations to curtail default access
#

# + id="1QufQo-uCDPa"
# !gcloud recommender recommendations list \
#   --location="{location}" \
#   --recommender=google.iam.policy.Recommender \
#   --project="{project}" --format=json \
#   --billing-project="{billing_project}" \
#   --format="table[box,title='IAM Recommendations for curtailing public access of GCS buckets'](targetResources.basename().flatten():sort=1:label=BUCKET,content.overview.member,content.overview.removedRole,content.overview.addedRoles,priority)" \
#   --filter="content.overview.member:project*{project}"

# + [markdown] id="HvinX5fgnh7K"
# ## Insight

# + [markdown] id="jNfOISg_9rLg"
# ### All bucket insights for a given project and location

# + id="W1mEXfd9zzuw"
# !gcloud recommender insights list \
#   --insight-type=google.iam.policy.Insight \
#   --project="{project}" \
#   --billing-project="{billing_project}" \
#   --location="{location}" \
#   --format="table[box,title='IAM Policy Insight at GCS bucket level'](targetResources.basename().flatten():sort=1:label=BUCKET,content.member,content.role,content.currentTotalPermissionsCount,content.exercisedPermissions.len(),content.exercisedPermissions.permission,severity)" \

# + [markdown] id="VyuwpBj_90IJ"
# ### Insights about public access

# + id="Cl9zxEPq99uk"
# !gcloud recommender insights list \
#   --insight-type=google.iam.policy.Insight \
#   --project="{project}" \
#   --billing-project="{billing_project}" \
#   --location="{location}" \
#   --format="table[box,title='IAM Policy Insight at GCS bucket level'](targetResources.basename().flatten():sort=1:label=BUCKET,content.member,content.role,content.currentTotalPermissionsCount,content.exercisedPermissions.len(),content.exercisedPermissions.permission,severity)" \
#   --filter="content.member:allUsers OR content.member:allAuthenticatedUsers"

# + [markdown] id="RTAlFCJMDMk8"
# ### Insight about cross-project access

# + id="zomRXY1UC2MA"
# !gcloud recommender insights list \
#   --insight-type=google.iam.policy.Insight \
#   --project="{project}" \
#   --billing-project="{billing_project}" \
#   --location="{location}" \
#   --format="table[box,title='IAM Policy Insight at GCS bucket level'](targetResources.basename().flatten():sort=1:label=BUCKET,content.member,content.role,content.currentTotalPermissionsCount,content.exercisedPermissions.len(),content.exercisedPermissions.permission,severity)" \
#   --filter="content.member:project* AND NOT content.member:{project}"

# + [markdown] id="ArRjylo9DR2D"
# ### Insight about default access

# + id="0c_lHmqxDXTi"
# !gcloud recommender insights list \
#   --insight-type=google.iam.policy.Insight \
#   --project="{project}" \
#   --billing-project="{billing_project}" \
#   --location="{location}" \
#   --format="table[box,title='IAM Policy Insight at GCS bucket level'](targetResources.basename().flatten():sort=1:label=BUCKET,content.member,content.role,content.currentTotalPermissionsCount,content.exercisedPermissions.len(),content.exercisedPermissions.permission,severity)" \
#   --filter="content.member:project*{project}"

# + [markdown] id="u6BnI0_Vn-9G"
# # BigQuery Export
#
# BigQuery export provides you a way to store a daily snapshot of all the recommendations in your organization into a BigQuery dataset. Please see this [link](https://cloud.google.com/recommender/docs/bq-export/export-recommendations-to-bq) for details.
#
# **Note** You can run all the queries below using BigQuery UI.
#
# ## For an organization level view of IAM recommendations and policy insights
#

# + [markdown] id="1J1mRSX5_ntr"
# 1. Please enter the project that is storing the BigQuery dataset for IAM Recommendations
# 2. Please enter the date (Ex - 2021-12-03)
# 3. Please enter the BigQuery dataset that stores the recommendations

# + id="e5GueCcB_I5j" cellView="form"
#@title Configuration for BigQuery export of GCS bucket recommendations
bigquery_export_project = "\u003Cyour-bigquery-export-project>" #@param{type:"string"}
#@markdown Enter the snapshot date (YYYY-MM-DD) of the recommendations:
# BUGFIX: the default used to embed the instruction text in the value itself,
# which would be interpolated verbatim into the DATE(...) filters below and
# make every query return no rows.
date = "2021-12-13" #@param{type:"string"}
bigquery_dataset = "\u003Cyour-bigquery-dataset>" #@param{type:"string"}

# + id="nAWAauthjlvl"
# Create a BigQuery client
from google.cloud import bigquery
bigquery_client = bigquery.Client(project=bigquery_export_project)

# + [markdown] id="QHcbi-64wE-e"
# ## Organization Level View
#
# The below query provides you the recommendations for all the storage buckets in the entire organization.

# + [markdown] id="608LaWsZiGoP"
#

# + id="-HmJ831coG8y"
query_to_see_all_recommendations_from_biqquery_export = f"""
SELECT
  ancestors.organization_id,
  ancestors.folder_ids,
  cloud_entity_id AS project_number,
  location,
  SPLIT(target_resources[OFFSET(0)], "/")[OFFSET(3)] AS bucket_name,
  JSON_QUERY(recommendation_details, "$.overview.member") AS user,
  JSON_QUERY(recommendation_details, "$.overview.removedRole") AS removed_role,
  JSON_QUERY(recommendation_details, "$.overview.addedRoles") AS added_roles,
  JSON_QUERY(primary_impact.security_projection.details_json,
    "$.revokedIamPermissionsCount") AS revoked_permission_count,
FROM `{bigquery_dataset}`
WHERE
  recommender = 'google.iam.policy.Recommender'
  AND location != "global"
  AND DATE(_PARTITIONTIME) = "{date}"
"""

# + id="Vs6BI9Y3CXjN"
(
    bigquery_client.query(
        query_to_see_all_recommendations_from_biqquery_export)
    .to_dataframe()
)

# + [markdown] id="vorElzLNv6hD"
# ## Order projects based on impact of GCS bucket recommendations
#
# The below query lists the projects in your organization ordered by the maximum over-granting of IAM permissions on the storage buckets.

# + id="dsBsn1T2kPK1"
query_to_see_overgranted_projects_for_GCS_bucket_recommendations = f"""
SELECT
  cloud_entity_id AS project_number,
  SUM(
    CAST(
      JSON_VALUE(primary_impact.security_projection.details_json,
        "$.revokedIamPermissionsCount") AS FLOAT64)
  ) AS revoked_permission_count,
FROM `{bigquery_dataset}`
WHERE
  recommender = 'google.iam.policy.Recommender'
  AND location != "global"
  AND DATE(_PARTITIONTIME) = "{date}"
GROUP BY cloud_entity_id
ORDER BY revoked_permission_count DESC
"""

# + id="2Hag1iRuDacw"
(
    bigquery_client.query(
        query_to_see_overgranted_projects_for_GCS_bucket_recommendations)
    .to_dataframe()
)

# + [markdown] id="NWFX4gKAwhFX"
# ## Track progress of least privilege
#
# This query gives you a time series of overgranting in your organization for Storage Bucket IAM policies. Use this to track your progress in achieving the principle of least privilege.

# + id="tks7CrqSwy10"
query_to_measure_progress_of_reduced_overgranting = f"""
SELECT
  DATE(_PARTITIONTIME) AS date,
  cloud_entity_id AS project_number,
  SUM(
    CAST(
      JSON_VALUE(primary_impact.security_projection.details_json,
        "$.revokedIamPermissionsCount") AS FLOAT64)
  ) AS revoked_permission_count,
FROM `{bigquery_dataset}`
WHERE
  recommender = 'google.iam.policy.Recommender'
  AND location != "global"
GROUP BY cloud_entity_id, DATE(_PARTITIONTIME)
"""

# + id="sz0_PgKbxpRU"
(
    bigquery_client.query(
        query_to_measure_progress_of_reduced_overgranting)
    .to_dataframe()
    .set_index("date")
    .groupby("project_number")["revoked_permission_count"]
    # Typo fix in the user-facing chart title ("privillege" -> "privilege").
    .plot(legend=True, figsize=(10,5), rot=45, ylabel="Excess permissions count",
          title="Least privilege of IAM policies at GCS bucket level")
);

# + [markdown] id="T70FogrwLOcV"
# # API using curl
#
#
# Use curl to get recommendations for a particular bucket

# + cellView="form" id="X5ah3xHWtiXu"
#@markdown Enter the name of your bucket for which you want recommendations:
# BUGFIX: as with `date` above, the default used to be the instruction text,
# which would be spliced directly into the request URL below.
bucket_name = "your-bucket-name" #@param{type:"string"}

# + id="XLiiIeiRfS3_"
# !curl -X GET \
#   -H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \
#   -H "x-goog-user-project: {billing_project}" \
#   "https://recommender.googleapis.com/v1alpha2/projects/{project}/locations/{location}/recommenders/google.iam.policy.Recommender/recommendations?"\
#   "pageSize=10&"\
#   "filter=targetResources://storage.googleapis.com/{bucket_name}"\
IAM_recommender_demo_for_Google_Cloud_Storage_buckets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Transcribe CRBLP-corpus WAV recordings with the Google Web Speech API.

import speech_recognition as speech

speech.__version__

# +
# First pass on a single recording, calibrating the recognizer against
# the clip's ambient noise before capturing the full audio.
recog = speech.Recognizer()
wavfile = speech.AudioFile('crblp_speech_corpus/wav/10032.wav')
with wavfile as source:
    recog.adjust_for_ambient_noise(source)
    audio = recog.record(source)

print(type(audio), audio)
recog.recognize_google(audio)
# -

# Now transcribe clips 10030 through 10039 one after another.
for idx in range(0, 10):
    wav_path = ''.join(['crblp_speech_corpus/wav/1003', str(idx), '.wav'])
    wavfile = speech.AudioFile(wav_path)
    with wavfile as source:
        # recog.adjust_for_ambient_noise(source)
        audio = recog.record(source)
    print(recog.recognize_google(audio))
recog_cloudapi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: py38tf
#     language: python
#     name: py38tf
# ---

# 1st trial: EEG_Biometrics_with_CNN-and-Ridge-Regression-regularisation
#

# +
# NOTE: the redundant bare `import tensorflow` (alongside the aliased import)
# was removed.
import tensorflow as tf
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
import os
from tensorflow import keras
from tensorflow.keras import layers
from keras.utils import np_utils
from IPython.utils import io
from sklearn.model_selection import train_test_split
import scipy.io as sio
from scipy import stats
from scipy.signal import butter, lfilter
import mne

print(tf.__version__)
# -

# root directory and path to the sample EDF recording
# (comment fixed: this is EEG data, not a cats-dogs archive)
path_dir = os.getcwd()  # determine root directory
folder_path = path_dir + '\\files1' + '\\S108R06.edf'  # add folder name to root dir

# +
# to read a sample file of edf format
# get root directory and create path to the file
path_dir = os.getcwd()
folder_path = path_dir + '\\files1' + '\\S108R06.edf'

# read the sample file
data = mne.io.read_raw_edf(folder_path)
raw_data = data.get_data()  # (channels, samples); the old `raw_data = raw_data` no-op was dropped
print(raw_data.shape)  # number of epoch for S108R06.edf protocol by 64 channels

# you can get the metadata included in the file and a list of all channels:
info = data.info
channels = data.ch_names

# +
# T0 = rest state, T1 = motion (left fist (runs 3,4,7,8,11 and 12), both fists (runs 5, 6, 9, 10, 13, and 14))
# T2 = right fist (in runs 3, 4, 7, 8, 11, and 12) or both feet (in runs 5, 6, 9, 10, 13, and 14)
event, eventid = mne.events_from_annotations(data)  # event or protocol type id
print(eventid)

# raw eeg waveform details
print(info)

# +
# create custom filter functions
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter.

    lowcut/highcut are in Hz, fs is the sampling rate in Hz; returns the
    (b, a) transfer-function coefficients, with the corner frequencies
    normalized by the Nyquist rate as scipy expects.
    """
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    b, a = butter(order, [low, high], btype='band')
    return b, a


def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Apply the band-pass filter designed above to `data` along its last axis."""
    b, a = butter_bandpass(lowcut, highcut, fs, order=order)
    y = lfilter(b, a, data)
    return y


# +
# get filtered signal (0.5-50 Hz pass band at the 160 Hz sampling rate)
Filtered = butter_bandpass_filter(raw_data, 0.5, 50, 160, order=5)

# filtered vs unfiltered plots by duration of recordings
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)

# by duration of recordings
ax1.plot(raw_data[0, :481])
ax1.set_title('EEG signal after 0 Hz and 80 Hz sinusoids')

# by duration of recordings
ax2.plot(Filtered[0, :481])
ax2.set_title('EEG signal after 0.5 to 50 Hz band-pass filter')
ax2.set_xlabel('Duration of Recordings [milliseconds]')

plt.tight_layout()
plt.show()

# +
# filtered vs unfiltered plots by duration of recordings
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)

# by channel of recordings
ax1.plot(raw_data.T[0, :481])
ax1.set_title('EEG signal after 0 Hz and 80 Hz sinusoids')

# by channel of recordings
ax2.plot(Filtered.T[0, :481])
ax2.set_title('EEG signal after 0.5 to 50 Hz band-pass filter')
ax2.set_xlabel('Number of Channels [64]')

plt.tight_layout()
plt.show()

# +
# Generate input and target pairs

# folder path
folder_path1 = path_dir + '\\files2'
n_class = os.listdir(folder_path1)

# create input and target classes; each EDF is resized to a fixed 64x64
# "image" so it can be fed to the 2-D CNN below
input_data = []
m = 64
n = 64
image_size = (m, n)
with io.capture_output() as captured:
    for i in n_class:
        fpath = os.path.join(folder_path1, i)
        cls_num = n_class.index(i)
        for imgs in os.listdir(fpath):
            if (imgs.endswith("edf")):
                data_egg = mne.io.read_raw_edf(os.path.join(fpath, imgs))
                raw_eeg = data_egg.get_data()
                raw_eeg = raw_eeg.T
                raw_eeg = cv2.resize(raw_eeg, image_size)
                filtered = butter_bandpass_filter(raw_eeg, 0.5, 50, 160, order=5)
                input_data.append([filtered, cls_num])

print(len(n_class))

# +
# Create Input(Features) and Target(Labels) data array
X = []  # input features
y = []  # input labels
m = 64
n = 64
for features, labels in input_data:
    X.append(features)
    y.append(labels)

# input and target array for train and test
X = np.array(X).reshape(-1, m, n, 1)  # add a trailing channel dim for Conv2D
y = np.array(np_utils.to_categorical(y))

# input shape
input_shape_X = X[0].shape
print(input_shape_X)

# total size of input and label pair
len(input_data)
print(len(n_class))

# +
# train and test datasets separation
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# declare noise and apply gaussian noise to z-scored data (light augmentation)
eps = 0.5

# normalisation
X_train = stats.zscore(X_train) + eps * np.random.random_sample(X_train.shape)
X_test = stats.zscore(X_test) + eps * np.random.random_sample(X_test.shape)

# output shape
print(len(y_test[1]))
print(X_train.shape)

# +
# create training batches in terms of tensors of input and target values
train_ds1 = tf.data.Dataset.from_tensor_slices(
    (X_train, y_train)).shuffle(1000).batch(32)
test_ds1 = tf.data.Dataset.from_tensor_slices(
    (X_test, y_test)).shuffle(1000).batch(32)

# add buffer for effective performance
train_ds = train_ds1.prefetch(buffer_size=32)
test_ds = test_ds1.prefetch(buffer_size=32)

# +
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras import regularizers

# Two conv blocks with L2 (ridge) regularisation, then a small classifier head.
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(input_shape_X), activation='relu',
                 kernel_regularizer=regularizers.l2((0.2))))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), activation='relu',
                 kernel_regularizer=regularizers.l2((0.2))))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(109, activation='relu'))
model.add(Dense(len(n_class), activation='softmax'))
model.summary()

# +
from keras.optimizers import SGD, Adam

epochs = 10
model.compile(
    optimizer=keras.optimizers.SGD(1e-3),
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
model.fit(
    train_ds,
    epochs=epochs,
    validation_data=test_ds,
)
EEG_Biometrics_with_CNN-and-Ridge-Regression-regularisation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.10 64-bit (''torch_py38'': conda)'
#     name: python3
# ---

# # Implementation of Quickselect algorithm

# [Quickselect](https://en.wikipedia.org/wiki/Quickselect) selects the kth smallest element in an unordered list.
#
# List of problems that require quickselect:
# 1. [973. K-closest points](../../leetcode/973_kclosest_points.ipynb)

# ## Procedural implementation

# +
import random


def partition(vector, left, right, pivotIndex):
    """Lomuto partition of vector[left:right+1] around vector[pivotIndex].

    Moves every element smaller than the pivot before it and returns the
    pivot's final index.
    """
    pivotValue = vector[pivotIndex]
    vector[pivotIndex], vector[right] = vector[right], vector[pivotIndex]  # Move pivot to end
    storeIndex = left
    for i in range(left, right):
        if vector[i] < pivotValue:
            vector[storeIndex], vector[i] = vector[i], vector[storeIndex]
            storeIndex += 1
    vector[right], vector[storeIndex] = vector[storeIndex], vector[right]  # Move pivot to its final place
    return storeIndex


def _select(vector, left, right, k):
    "Returns the k-th smallest, (k >= 0), element of vector within vector[left:right+1] inclusive."
    while True:
        # Random pivot gives expected O(n) regardless of input order.
        pivotIndex = random.randint(left, right)
        pivotNewIndex = partition(vector, left, right, pivotIndex)
        pivotDist = pivotNewIndex - left
        if pivotDist == k:
            return vector[pivotNewIndex]
        elif k < pivotDist:
            right = pivotNewIndex - 1
        else:
            # Discard the pivot and everything left of it.
            k -= pivotDist + 1
            left = pivotNewIndex + 1


def select(vector, k, left=None, right=None):
    """\
    Returns the k-th smallest, (k >= 0), element of vector within vector[left:right+1].
    left, right default to (0, len(vector) - 1) if omitted.

    Raises ValueError on invalid input.  (Validation was previously done with
    `assert`, which silently disappears under `python -O`.)
    """
    if left is None:
        left = 0
    lv1 = len(vector) - 1
    if right is None:
        right = lv1
    if not vector or k < 0:
        raise ValueError("Either null vector or k < 0 ")
    if not 0 <= left <= lv1:
        raise ValueError("left is out of range")
    if not left <= right <= lv1:
        raise ValueError("right is out of range")
    return _select(vector, left, right, k)


# +
# An example
v = [2, 4, 8, 1]
print([select(v, i) for i in range(len(v))])
# -

# ## With tail recursion

# +
def partition(arr, left, right, pivot):
    """Lomuto partition of arr[left:right+1] around arr[pivot], inplace.

    Returns the pivot's final index.  (The debug `print(arr)` that used to
    run on every call has been removed.)
    """
    sorted_i = left

    # Move pivot to the end
    pivot_value = arr[pivot]
    arr[pivot], arr[right] = arr[right], arr[pivot]

    for i in range(left, right):
        if arr[i] < pivot_value:
            arr[i], arr[sorted_i] = arr[sorted_i], arr[i]
            sorted_i += 1

    arr[sorted_i], arr[right] = arr[right], arr[sorted_i]
    return sorted_i


def quickselect(arr, left, right, k):
    """Return the k-th smallest element (absolute index k) of arr[left:right+1].

    Recursive variant with a middle-element pivot; partially reorders `arr`.
    Raises ValueError when k falls outside [left, right] (the old version
    asserted only k <= right and missed k < left).
    """
    if left == right:
        return arr[left]
    if not left <= k <= right:
        raise ValueError("k is out of bounds")

    pivot = (left + right) // 2
    pivot_ind = partition(arr, left, right, pivot)
    if k == pivot_ind:
        return arr[k]
    elif k < pivot_ind:
        return quickselect(arr, left, pivot_ind - 1, k)
    else:
        return quickselect(arr, pivot_ind + 1, right, k)


# -

inp = [2, 4, 8, 1]
print(quickselect(inp, 0, len(inp) - 1, 2))

# ## How to make selection even more faster?

import heapq
heapq.nsmallest(3, inp)
algorithms/sorting_and_selection/quickselect.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import json
import datetime
import math
from random import randint
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, PolynomialFeatures, StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LinearRegression, LassoCV
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import accuracy_score, classification_report

# ## Importing data

matches = pd.read_csv("../Data/matches.csv")
matches.head()

attributes = pd.read_csv("../Data/attributes.csv")
attributes.head()


def _load_json(path):
    """Load a JSON file, closing the handle afterwards (the original leaked file handles)."""
    with open(path) as fh:
        return json.load(fh)


batsmen_data = _load_json('../Data/batsmen.json')
bowlers_data = _load_json('../Data/bowlers.json')
invtmap_data = _load_json('../Data/invtmap.json')
scorecard_data = _load_json('../Data/scorecard.json')
region_data = _load_json('../Data/region.json')
tmap_data = _load_json('../Data/tmap.json')

# ## Model 1
# ## Making the Database


def get_matches(team_1, team_2, date, num_years=5):
    """Head-to-head matches between the two teams played in the `num_years`
    calendar years before `date` ('YYYY-MM-DD'), sorted oldest first.

    Bug fix: the original filtered a subset with masks built against the full
    `matches` frame (`matches_team[matches['Team_2']==team_2]`), relying on
    index alignment; a single combined mask is used instead.
    """
    head_to_head = (
        ((matches['Team_1'] == team_1) & (matches['Team_2'] == team_2))
        | ((matches['Team_1'] == team_2) & (matches['Team_2'] == team_1))
    )
    matches_team = matches[head_to_head].copy()
    matches_team['Date'] = pd.to_datetime(matches_team['Date'])
    get_date = datetime.datetime.strptime(date, '%Y-%m-%d')
    min_date = datetime.datetime(get_date.year - num_years, 1, 1)
    recent = matches_team[(matches_team['Date'] >= min_date) & (matches_team['Date'] < date)]
    return recent.sort_values(by='Date').reset_index(drop=True)


# --- shared scorecard extraction helpers (the two create_Dataset variants
# --- previously duplicated ~100 lines of this logic each) ---


def _toss_winner(matchdata):
    """1 if the side batting first also won the toss, else 2."""
    return 1 if matchdata['TOSS'] == matchdata['ORDER'][0] else 2


def _batting_stats(innings):
    """Index-aligned stat lists for the first ten rows of one batting card.

    Card row layout assumed: [name, ?, runs, balls, fours, sixes, strike_rate].
    """
    rows = innings[:10]
    return {
        'runs': [r[2] for r in rows],
        'balls': [r[3] for r in rows],
        'fours': [r[4] for r in rows],
        'sixes': [r[5] for r in rows],
        'strike_rate': [r[6] for r in rows],
    }


def _bowling_stats(innings):
    """Stat lists for one bowling card, padded with -1 sentinels to ten bowlers.

    Card row layout assumed: [name, overs, maidens, runs, wickets, economy].
    """
    padded = [list(row[1:6]) for row in innings[:10]]
    padded += [[-1] * 5 for _ in range(10 - len(padded))]
    overs, maidens, runs_given, wickets, economy = (list(col) for col in zip(*padded))
    return {'overs': overs, 'maidens': maidens, 'runs_given': runs_given,
            'wickets': wickets, 'economy': economy}


def _match_result(score):
    """1 / 2 for a win by the first / second innings side, 'Tie' otherwise."""
    if score[0] > score[1]:
        return 1
    if score[1] > score[0]:
        return 2
    return 'Tie'


def create_Dataset(Dataset):
    """Attach per-match scorecard features (boundaries, strike/economy rates,
    maidens, wickets, scores, result) to the matches frame and drop Date/GroundCode.

    Uses `.at` assignment instead of the original chained `.iloc[i] =`
    (SettingWithCopy) pattern.
    """
    feature_cols = ['Toss', 'Fours_team1', 'Fours_team2', 'Sixes_team1', 'Sixes_team2',
                    'Strike_rate_team1', 'Strike_rate_team2', 'Maidens_team1', 'Maidens_team2',
                    'Wickets_team1', 'Wickets_team2', 'Economy_rate_team1', 'Economy_rate_team2',
                    'Score_1', 'Score_2', 'Result']
    for col in feature_cols:
        Dataset[col] = ''
    for i, code in enumerate(Dataset['MatchCode'].tolist()):
        matchdata = scorecard_data[str(code)]
        bat1 = _batting_stats(matchdata['BATTING1'])
        bat2 = _batting_stats(matchdata['BATTING2'])
        bowl1 = _bowling_stats(matchdata['BOWLING1'])
        bowl2 = _bowling_stats(matchdata['BOWLING2'])
        score = matchdata['SCORES']
        Dataset.at[i, 'Toss'] = _toss_winner(matchdata)
        Dataset.at[i, 'Score_1'] = score[0]
        Dataset.at[i, 'Score_2'] = score[1]
        Dataset.at[i, 'Fours_team1'] = bat1['fours']
        Dataset.at[i, 'Fours_team2'] = bat2['fours']
        Dataset.at[i, 'Sixes_team1'] = bat1['sixes']
        Dataset.at[i, 'Sixes_team2'] = bat2['sixes']
        Dataset.at[i, 'Strike_rate_team1'] = bat1['strike_rate']
        Dataset.at[i, 'Strike_rate_team2'] = bat2['strike_rate']
        Dataset.at[i, 'Maidens_team1'] = bowl1['maidens']
        Dataset.at[i, 'Maidens_team2'] = bowl2['maidens']
        Dataset.at[i, 'Wickets_team1'] = bowl1['wickets']
        Dataset.at[i, 'Wickets_team2'] = bowl2['wickets']
        Dataset.at[i, 'Economy_rate_team1'] = bowl1['economy']
        Dataset.at[i, 'Economy_rate_team2'] = bowl2['economy']
        Dataset.at[i, 'Result'] = _match_result(score)
    return Dataset.drop(columns=['Date', 'GroundCode'])


# Example Input
team_1 = 'AUS'
team_2 = 'IND'
date = '2016-01-20'
Dataset = get_matches(team_1, team_2, date)
Dataset = create_Dataset(Dataset)
Dataset.head()

# ### Data for model


def get_model_data(Dataset):
    """Flatten the per-match feature lists into one vector column `new`;
    one-hot encode Result/Toss; return (X, y).

    The column-shuffling sequence below is preserved from the original so the
    resulting feature order is unchanged.
    """
    venue_encoding = {"Home": 0, "Away": 1, "Neutral": 2}
    Dataset.replace({'Venue': venue_encoding}, inplace=True)
    enc = OneHotEncoder(handle_unknown='ignore')
    enc_df = pd.DataFrame(enc.fit_transform(Dataset[['Result', 'Toss']]).toarray())
    Dataset = Dataset.join(enc_df)
    Dataset.rename(columns={0: 'Win1', 1: 'Win2', 2: 'Toss1', 3: 'Toss2'}, inplace=True)
    y = Dataset[['Result', 'Win1', 'Win2', 'Score_1', 'Score_2']]
    Dataset.drop(['Result', 'Score_1', 'Score_2'], inplace=True, axis=1)
    Dataset.drop(['Team_1', 'Team_2'], inplace=True, axis=1)
    temp = Dataset['Venue']
    Dataset.drop(['Win1', 'Win2'], inplace=True, axis=1)
    Dataset.drop(['Venue'], inplace=True, axis=1)
    Dataset['Venue'] = temp  # move Venue to the end
    Dataset.drop(['Toss'], inplace=True, axis=1)
    Dataset['new'] = ''
    for i in range(len(Dataset)):
        flat = []
        # Columns 1 .. len-5 are the per-player stat lists; flatten them.
        for j in range(1, len(Dataset.columns) - 4):
            cell = Dataset.iloc[i, j]
            if isinstance(cell, list):
                flat.extend(cell)
            else:
                flat.append(cell)
        Dataset.at[i, 'new'] = flat
    X = Dataset[['MatchCode', 'new', 'Toss1', 'Toss2', 'Venue']]
    return X, y


X, y = get_model_data(Dataset)

# +
# Test Train Split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/5, random_state=100)
# -

y_train = y
X

# ### Predict_data


def allBatsmen(batting):
    """Per-batsman history [date, *stats], sorted newest first."""
    performance = {}
    for player, entries in batting.items():
        for j in entries:
            performance.setdefault(player, []).append(
                [j[0], j[1][1], j[1][2], j[1][3], j[1][4], j[1][5]])
    for player, history in performance.items():
        performance[player] = sorted(history, reverse=True)
    return performance


def allBowlers(bowling):
    """Per-bowler history [date, *stats], sorted newest first."""
    performance = {}
    for player, entries in bowling.items():
        for j in entries:
            performance.setdefault(player, []).append(
                [j[0], j[1][0], j[1][1], j[1][2], j[1][3], j[1][4]])
    for player, history in performance.items():
        performance[player] = sorted(history, reverse=True)
    return performance


def _latest_performances(perform, team_players, date, num_years):
    """Entries for each listed player within `num_years` years before `date`.

    Shared implementation: getLatestPerf_Bat and getLatestPerf_Bowl were
    byte-identical duplicates in the original.
    """
    latest = {}
    end = datetime.datetime.strptime(date, '%Y-%m-%d')
    min_date = datetime.datetime(end.year - num_years, 1, 1)
    for player in team_players:
        for entry in perform[player]:
            played = datetime.datetime.strptime(entry[0], '%Y-%m-%d')
            # ISO date strings compare correctly lexicographically.
            if played >= min_date and entry[0] < date:
                latest.setdefault(player, []).append(entry)
    return latest


def getLatestPerf_Bat(perform, team_players, date, num_years=5):
    """Recent batting history per player (see _latest_performances)."""
    return _latest_performances(perform, team_players, date, num_years)


def getLatestPerf_Bowl(perform, team_players, date, num_years=5):
    """Recent bowling history per player (see _latest_performances)."""
    return _latest_performances(perform, team_players, date, num_years)


def getAvgBat(team, index):
    """Exponentially weighted average of stat `index` per player."""
    final_score = []
    for history in team.values():
        series = pd.Series([entry[index] for entry in history])
        smoothed = series.ewm(com=0.5, ignore_na=True, min_periods=1).mean()
        final_score.append(round(np.nanmean(smoothed), 3))
    return final_score


def getAvgBowl(team, index):
    """Like getAvgBat, but -1 padding values are treated as missing."""
    final_score = []
    for history in team.values():
        values = [entry[index] if entry[index] != -1 else np.nan for entry in history]
        smoothed = pd.Series(values).ewm(com=0.5).mean()
        final_score.append(round(np.nanmean(smoothed), 3))
    return final_score


def adjust_length(l):
    """Pad `l` in place with -1 up to ten entries; returns l for chaining."""
    if len(l) < 10:
        l.extend([-1] * (10 - len(l)))
    return l


def get_matches_for_test(matches):
    """Sample ten random match codes.

    Bug fix: the original used randint(0, len(matches)), whose inclusive upper
    bound could index one past the end and raise IndexError.
    """
    picks = [randint(0, len(matches) - 1) for _ in range(10)]
    return [matches['MatchCode'].iloc[i] for i in picks]


# +
def get_testing_data(X_test, match_codes, scorecard_data, matches):
    """Fill X_test with averaged recent-form features for each sampled match."""
    for i, match_code in enumerate(match_codes):
        scorecard = scorecard_data[str(match_code)]
        team_1_batsmen = [scorecard['BATTING1'][j][0] for j in range(10)]
        team_2_batsmen = [scorecard['BATTING2'][j][0] for j in range(10)]
        team_1_bowlers = [row[0] for row in scorecard['BOWLING1']]
        team_2_bowlers = [row[0] for row in scorecard['BOWLING2']]
        date = matches[matches['MatchCode'] == match_code]['Date'].iloc[0]
        toss = 1 if scorecard['TOSS'] == scorecard['ORDER'][0] else 2
        team1_bat = getLatestPerf_Bat(bat, team_1_batsmen, date, 5)
        team2_bat = getLatestPerf_Bat(bat, team_2_batsmen, date, 5)
        team1_bowl = getLatestPerf_Bowl(bowl, team_1_bowlers, date, 5)
        team2_bowl = getLatestPerf_Bowl(bowl, team_2_bowlers, date, 5)
        # Bug fix: the original summed team1 twice for stat 4 and team2 twice
        # for stat 5 (copy-paste); training features pair team1 with team2.
        # NOTE(review): the bowling stat indices (1, 2, 4) are kept as in the
        # original — verify they line up with the training feature columns.
        row = (adjust_length(getAvgBat(team1_bat, 3)) + adjust_length(getAvgBat(team2_bat, 3))
               + adjust_length(getAvgBat(team1_bat, 4)) + adjust_length(getAvgBat(team2_bat, 4))
               + adjust_length(getAvgBat(team1_bat, 5)) + adjust_length(getAvgBat(team2_bat, 5))
               + adjust_length(getAvgBowl(team1_bowl, 1)) + adjust_length(getAvgBowl(team2_bowl, 1))
               + adjust_length(getAvgBowl(team1_bowl, 2)) + adjust_length(getAvgBowl(team2_bowl, 2))
               + adjust_length(getAvgBowl(team1_bowl, 4)) + adjust_length(getAvgBowl(team2_bowl, 4)))
        X_test.at[i, 'new'] = row
        X_test.at[i, 'Toss'] = toss
        score = scorecard['SCORES']
        X_test.at[i, 'Result'] = 1 if score[0] > score[1] else (2 if score[1] > score[0] else 0)
        X_test.at[i, 'Score_1'] = score[0]
        X_test.at[i, 'Score_2'] = score[1]
    return X_test
# -

match_test = get_matches_for_test(matches)

bat = allBatsmen(batsmen_data)
print(len(bat))

bowl = allBowlers(bowlers_data)
print(len(bowl))

X_test = pd.DataFrame(match_test, columns=['MatchCode'])
for col in ('Toss', 'new', 'Score_1', 'Score_2', 'Result'):
    X_test[col] = ''
X_test = get_testing_data(X_test, match_test, scorecard_data, matches)
y_test = X_test[['Score_1', 'Score_2', 'Result']]
X_train = X

enc = OneHotEncoder(handle_unknown='ignore')
enc_df = pd.DataFrame(enc.fit_transform(X_test[['Toss']]).toarray())
X_test = X_test.join(enc_df)
X_test.rename(columns={0: 'Toss1', 1: 'Toss2'}, inplace=True)
X_test

len(X['new'].iloc[0])

# ### Model

x_train = list(np.array(X['new']))
x_test = list(np.array(X_test['new']))


def model_eval(model, x_train, y_train, target, x_test, y_test):
    """Fit `model` on the target column(s), report RMSE / R^2, return predictions.

    Bug fixes: RMSE of run totals was formatted as a percentage, and r2_score
    was called with the prediction as y_true (its arguments are not symmetric).
    """
    model.fit(x_train, y_train[target])
    predictions = model.predict(x_test)
    rms = sklearn.metrics.mean_squared_error(y_test[target], predictions) ** 0.5
    print('RMS : %.2f' % rms)
    r2 = sklearn.metrics.r2_score(y_test[target], predictions)
    print('R2 : %.2f' % r2)
    return np.asarray(predictions)


def winner_pred(model, X_train, Y_train, x_test, y_test):
    """Predict both innings scores, call the higher one the winner, print accuracy.

    Bug fix: the original ignored the Y_train parameter and read the
    module-level global y_train instead.
    """
    pred1 = model_eval(model, X_train, Y_train, ['Score_1'], x_test, y_test)
    pred2 = model_eval(model, X_train, Y_train, ['Score_2'], x_test, y_test)
    pred = pred1 - pred2
    for i in range(len(pred)):
        if pred[i] > 0:
            pred[i] = 1
        else:
            pred[i] = 2
    correct = 0  # avoid shadowing the builtin `sum`
    print("Model Accuracy is: ")
    for i in range(len(pred)):
        if pred[i] == y_test['Result'].iloc[i]:
            correct += 1
    print(correct / len(pred))


len(y_train)

model = DecisionTreeRegressor()
winner_pred(model, x_train, y_train, x_test, y_test)

model = LinearRegression()
winner_pred(model, x_train, y_train, x_test, y_test)

model = RandomForestRegressor()
winner_pred(model, x_train, y_train, x_test, y_test)

x_train = list(np.array(X_train['new']))
x_test = list(np.array(X_test['new']))

model = DecisionTreeRegressor()
winner_pred(model, x_train, y_train, x_test, y_test)

# ## Model 2


def create_Dataset1(Dataset):
    """Model-2 variant of create_Dataset: strike rate, wickets and economy only."""
    feature_cols = ['Toss', 'Strike_rate_team1', 'Strike_rate_team2',
                    'Wickets_team1', 'Wickets_team2',
                    'Economy_rate_team1', 'Economy_rate_team2',
                    'Score_1', 'Score_2', 'Result']
    for col in feature_cols:
        Dataset[col] = ''
    for i, code in enumerate(Dataset['MatchCode'].tolist()):
        matchdata = scorecard_data[str(code)]
        bat1 = _batting_stats(matchdata['BATTING1'])
        bat2 = _batting_stats(matchdata['BATTING2'])
        bowl1 = _bowling_stats(matchdata['BOWLING1'])
        bowl2 = _bowling_stats(matchdata['BOWLING2'])
        score = matchdata['SCORES']
        Dataset.at[i, 'Toss'] = _toss_winner(matchdata)
        Dataset.at[i, 'Score_1'] = score[0]
        Dataset.at[i, 'Score_2'] = score[1]
        Dataset.at[i, 'Strike_rate_team1'] = bat1['strike_rate']
        Dataset.at[i, 'Strike_rate_team2'] = bat2['strike_rate']
        Dataset.at[i, 'Wickets_team1'] = bowl1['wickets']
        Dataset.at[i, 'Wickets_team2'] = bowl2['wickets']
        Dataset.at[i, 'Economy_rate_team1'] = bowl1['economy']
        Dataset.at[i, 'Economy_rate_team2'] = bowl2['economy']
        Dataset.at[i, 'Result'] = _match_result(score)
    return Dataset.drop(columns=['Date', 'GroundCode'])


Dataset1 = get_matches(team_1, team_2, date)
Dataset1 = create_Dataset1(Dataset1)
Dataset1.head()


# +
def get_testing_data1(X_test, match_codes, scorecard_data, matches):
    """Model-2 variant of get_testing_data with the reduced feature set."""
    for i, match_code in enumerate(match_codes):
        scorecard = scorecard_data[str(match_code)]
        team_1_batsmen = [scorecard['BATTING1'][j][0] for j in range(10)]
        team_2_batsmen = [scorecard['BATTING2'][j][0] for j in range(10)]
        team_1_bowlers = [row[0] for row in scorecard['BOWLING1']]
        team_2_bowlers = [row[0] for row in scorecard['BOWLING2']]
        date = matches[matches['MatchCode'] == match_code]['Date'].iloc[0]
        toss = 1 if scorecard['TOSS'] == scorecard['ORDER'][0] else 2
        team1_bat = getLatestPerf_Bat(bat, team_1_batsmen, date, 5)
        team2_bat = getLatestPerf_Bat(bat, team_2_batsmen, date, 5)
        team1_bowl = getLatestPerf_Bowl(bowl, team_1_bowlers, date, 5)
        team2_bowl = getLatestPerf_Bowl(bowl, team_2_bowlers, date, 5)
        # Bug fix: the original used team2 strike rates twice (copy-paste);
        # NOTE(review): bowling indices (2, 4) kept as in the original.
        row = (adjust_length(getAvgBat(team1_bat, 5)) + adjust_length(getAvgBat(team2_bat, 5))
               + adjust_length(getAvgBowl(team1_bowl, 2)) + adjust_length(getAvgBowl(team2_bowl, 2))
               + adjust_length(getAvgBowl(team1_bowl, 4)) + adjust_length(getAvgBowl(team2_bowl, 4)))
        X_test.at[i, 'new'] = row
        X_test.at[i, 'Toss'] = toss
        score = scorecard['SCORES']
        X_test.at[i, 'Result'] = 1 if score[0] > score[1] else (2 if score[1] > score[0] else 0)
        X_test.at[i, 'Score_1'] = score[0]
        X_test.at[i, 'Score_2'] = score[1]
    return X_test
# -

X, y = get_model_data(Dataset1)
X_train = X
match_test = get_matches_for_test(matches)
X_test = pd.DataFrame(match_test, columns=['MatchCode'])
for col in ('Toss', 'new', 'Score_1', 'Score_2', 'Result'):
    X_test[col] = ''
X_test = get_testing_data1(X_test, match_test, scorecard_data, matches)
y_test = X_test[['Score_1', 'Score_2', 'Result']]
y_train = y

# ### Model

x_train = list(np.array(X['new']))
x_test = list(np.array(X_test['new']))

model = DecisionTreeRegressor()
winner_pred(model, x_train, y_train, x_test, y_test)

model = RandomForestRegressor()
winner_pred(model, x_train, y_train, x_test, y_test)

model = LinearRegression()
winner_pred(model, x_train, y_train, x_test, y_test)
analysis/Batting-Bowling-Combined.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Popularity Analysis of Free-to-Play App Categories # # The goal of this project is to do a basic data analysis of the summary data available from the Google Play Store and the Apple iOS Mobile App Store. This project asks the question: 'If we create a free app, what category provides the best opportunity for it to be frequently installed?'. # # These datasets are available for download here: # # * [Google Play Store Apps](https://www.kaggle.com/lava18/google-play-store-apps/home) # * [Apple iOS Store Apps](https://www.kaggle.com/ramamet4/app-store-apple-data-set-10k-apps/home) # # The basic steps are as follows: # # * Open the CSV files in the Python environment, do a cursory data exploration # * Review dataset discussion links, check for errors, remove duplicates, remove all paid apps # * Create frequency tables for average downloads per app by category, using reviews as a proxy # * Research categories that could field a low production cost app, find the best option # ### Data Exploration # + from csv import reader opened_file = open('C:\Datasets\AppleStore.csv', 'r', encoding='mbcs') read_file = reader(opened_file) ios_data = list(read_file) ios_header = ios_data[0] ios_data = ios_data[1:] opened_file = open('C:\Datasets\googleplaystore.csv', 'r', encoding='mbcs') read_file = reader(opened_file) ggl_data = list(read_file) ggl_header = ggl_data[0] ggl_data = ggl_data[1:] # + # creates a function to view selected rows of the dataset # or to print the dimensions of the dataset # function assumes the data set does not have a header def explore_data(dataset, start, end, print_count=False): data_slice = dataset[start:end] for row in data_slice: print(row) print('\n') # creates space between rows if print_count: print('Number of rows:', 
len(dataset)) print('Number of columns', len(dataset[0])) # - print(ggl_header) print(ggl_data[1:5]) print(ios_header) explore_data(ggl_data,0,2,print_count=True) explore_data(ios_data,0,2,print_count=True) # We've got a general idea about what these datasets contain, now we need to clean them up a bit. # ### Data Cleaning # After reading through the discussion pages connected to the datasets, it looks like there is one corrupted entry in the Google Play Store data set. We can check the length of the surrounding entries to confirm it is bad data, then remove it. # + print(len(ggl_data[10470])) print(len(ggl_data[10471])) print(len(ggl_data[10472])) print(len(ggl_data[10473])) print(len(ggl_data)) del ggl_data[10472] print(len(ggl_data)) # - # The discussion page related to the Google Play Store data shows there are some duplicate entries in that data set. We need to check for duplicates and remove every duplicate entry except for the most recent. We can use the number of reviews to determine the most recent data point. We should also check the iOS App data as well, even though there is nothing in the discussion page. # + unique_apps_ggl = [] duplicate_apps_ggl = [] for i in ggl_data: name = i[0] if name in unique_apps_ggl: duplicate_apps_ggl.append(name) else: unique_apps_ggl.append(name) print('# of unique google apps: ', len(unique_apps_ggl)) print('# of duplicate google apps: ', len(duplicate_apps_ggl)) unique_apps_ios = [] duplicate_apps_ios = [] for i in ios_data: name = i[0] if name in unique_apps_ios: duplicate_apps_ios.append(name) else: unique_apps_ios.append(name) print('# of unique apple apps: ', len(unique_apps_ios)) print('# of duplicate apple apps: ', len(duplicate_apps_ios)) # - for i in ggl_data: name = i[0] if name == 'Facebook': print(i) # It appears only the Google data set contains duplicate entries. Printing a likely duplicate shows the only column that differs is the review count. Here the examples show: 78,158,306 & 78,128,208. 
# # Because there are 1,181 duplicate entries in the Google data set, after their removal the length of the data set should be reduced from 10,840 to 9,659. # + reviews_max = {} for i in ggl_data: name = i[0] n_reviews = float(i[3]) if name in reviews_max and reviews_max[name] < n_reviews: reviews_max[name] = n_reviews elif name not in reviews_max: reviews_max.update({name:n_reviews}) print(len(reviews_max)) # - # Above, we created a dictionary and added entries to the 'reviews_max' dictionary. If the name was already in the dictionary, the review count was updated to the higher amount. If they were not in the dictionary, we added the name and review count. # # Next, we can create a new, clean data set using an empty list and a 'ggl_added' list to check if we've already added the data. The two lists should be the same length to make sure we've added everything properly, and they should match our expected length. # + ggl_clean = [] ggl_added = [] for i in ggl_data: name = i[0] n_reviews = float(i[3]) if n_reviews == reviews_max[name] and name not in ggl_added: ggl_clean.append(i) ggl_added.append(name) print(len(ggl_clean)) print(len(ggl_added)) # - # Because there were no errors or duplicates in the iOS data, we can create a copy of that data set as our 'ios_clean' variable for further use. ios_clean = ios_data # Both data sets also have apps that are not built primarily for English users. We want to remove these entries before doing further analysis. The easiest way to do this is to remove any row of data that has text symbols not used in English. # # In ASCII encoding, the range of commonly used English characters is 0 - 127. Some real apps may have a couple non-standard characters or emoji (🙄), so we only want to remove an entry if it contains more than three non-standard characters. 
def char_check(string): error_count = 0 for char in string: if ord(char) > 127: error_count +=1 if error_count > 3: return False return True print(len(ggl_clean)) print(len(ios_clean)) # + ios_english = [] ggl_english = [] for row in ggl_clean: name = row[0] if char_check(name) == True: ggl_english.append(row) for row in ios_clean: name = row[2] if char_check(name) == True: ios_english.append(row) print(len(ios_english)) print(len(ggl_english)) # - # Lastly, we want to isolate all the free apps to be our final data set. # + ios_free = [] ggl_free = [] for row in ggl_english: price = row[6] if price == 'Free': ggl_free.append(row) for row in ios_english: price = row[5] if price == '0': ios_free.append(row) print(len(ios_free)) print(len(ggl_free)) # - # We should now have two data sets. Each containing the free iOS and Google Play apps that are now free from errors, duplicates, non-English apps. # ### App Optimization - Frequency Tables # To optimize an app, we need to figure out what works in the app stores. Below, we build frequency tables for a few data categories. Specifically, the 'Genres' and 'Category' columns from the Google Play data set. def freq_table(dataset, index): freq_dict = {} percent_dict = {} num_entries = 0 for row in dataset: num_entries += 1 if row[index] in freq_dict: freq_dict[row[index]] += 1 else: freq_dict[row[index]] = 1 for i in freq_dict: percent = (float(freq_dict[i]) / num_entries) percent_dict[i] = percent * 100 return percent_dict # Next we need to view the frequency table in decending order. 
def display_table(dataset, index):
    """Print the percentage frequency table for column `index`, sorted descending.

    Builds (percentage, value) tuples so that `sorted` orders by percentage.
    """
    table = freq_table(dataset, index)
    table_disp = []
    for key in table:
        new_tuple = (table[key], key)
        table_disp.append(new_tuple)
    table_sorted = sorted(table_disp, reverse = True)
    for i in table_sorted:
        print(i)

# Lets look at some summary data:

display_table(ggl_free, 1) # Google Play 'Category' column

display_table(ios_free, -5) #iOS 'prime_genre' column

# It seems like games and entertainment apps are the most popular in both the iOS and Google Play store. Practical applications less so. However, all we've done is reveal the most commonly published apps, not the ones with the most users. We can find out the average number of downloads per category to draw a better conclusion.
#
# The iOS store doesn't list a download count, and the Google Play store only lists ranges, so we're going to use the number of ratings as a proxy.

# +
# Average rating count per iOS genre (ratings as a proxy for installs).
ios_genres = freq_table(ios_free, -5)

for genre in ios_genres:
    sum_installs = 0
    num_apps = 0
    for row in ios_free:
        if row[-5] == genre:
            installs = float(row[6])
            sum_installs += installs
            num_apps += 1
    avg_installs = (sum_installs / num_apps)
    print(genre,":",round(avg_installs))
# -

# +
# Average review count per Google Play category (reviews as a proxy for installs).
ggl_genres = freq_table(ggl_free, 1)

for genre in ggl_genres:
    sum_installs = 0
    num_apps = 0
    for row in ggl_free:
        if row[1] == genre:
            installs = float(row[3])
            sum_installs += installs
            num_apps += 1
    avg_installs = (sum_installs / num_apps)
    print(genre,":",round(avg_installs))
# -

# Another method for the Google Play store is to use the downloads numbers as they are and check against our previous results using the ratings.

# +
# Same computation using the install-range strings (e.g. '1,000,000+'),
# stripped of '+' and ',' so they can be treated as integers.
ggl_genres = freq_table(ggl_free, 1)

for genre in ggl_genres:
    sum_installs = 0
    num_apps = 0
    for row in ggl_free:
        if row[1] == genre:
            installs = row[5]
            installs = installs.replace('+','')
            installs = installs.replace(',','')
            sum_installs += int(installs)
            num_apps += 1
    avg_installs = (sum_installs / num_apps)
    print(genre,":",round(avg_installs))
# -

# This generally confirms the previous analysis. The download numbers are different, but the general order and relative magnitude are the same as our data table that used reviews as a proxy.
#
# Let's take a look at the most popular apps in a couple categories.

# ### Category Analysis
#
# Again, for the Google Play Store there are two ways to go about finding the most popular apps in each category.

# BUG FIX: the install buckets in the data always carry a trailing '+'
# ('500,000,000+', '100,000,000+'); the original compared against
# '500,000,000' and '100,000,000', which could never match.
for app in ggl_free:
    if app[1] == 'COMMUNICATION' and (app[5] == '1,000,000,000+' or app[5] == '500,000,000+' or app[5] == '100,000,000+'):
        print(app[0],':',app[5])

for app in ggl_free:
    if app[1] == 'COMMUNICATION' and int(app[3]) > 10000000:
        print(app[0],':',app[3])

# The above two sections attempt to pull the most popular apps in the Communication category. What we see is there are some Google apps that have a lot of downloads, but do not have as many total reviews as some other apps. This could be due to them being automatic downloads by the phone manufacturer or retailer.
#
# Either way, Communication apps seems like a crowded and highly competitive market.

# +
for app in ggl_free:
    if app[1] == 'FINANCE' and (app[5] == '100,000,000+' or app[5] == '50,000,000+' or app[5] == '10,000,000+'):
        print(app[0],':',app[5])

print("===================================")

for app in ggl_free:
    if app[1] == 'FINANCE' and int(app[3]) > 100000:
        print(app[0],':',app[3])
# -

# Finance apps seem to be mostly banking apps of similar popularities. Without a connected banking operations, it seems unlikely we could develop a low-cost finance app (such as a retirement planner) that would be highly downloaded.

# +
for app in ggl_free:
    if app[1] == 'NEWS_AND_MAGAZINES' and (app[5] == '100,000,000+' or app[5] == '50,000,000+' or app[5] == '10,000,000+'):
        print(app[0],':',app[5])

print("===================================")

for app in ggl_free:
    if app[1] == 'NEWS_AND_MAGAZINES' and int(app[3]) > 100000:
        print(app[0],':',app[3])
# -

# The interesting part of the 'News and Magazines' category is that it is not currently dominated by automatic downloads such as Google News. The most reviewed and most downloaded app appears to be Fox News, a non-technology company. This is closely followed by BBC and CNN.
#
# ### Conclusion
#
# Given that no major tech firm dominates this section with pre-installed apps, it could prove to be a good opportunity to develop a low-cost news aggregation app.
#
# By creating an app that displays a dashboard, links to other sites, and streams a list of headlines and AP photos, we could limit development costs and have the opportunity for substantial downloads in the future.
App Category Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Mining meta information about Wikipedia so we can mine Wikipedia

# We are looking for the templates that sometimes appear in text in the `...content.json` dataset. For example:

# > Denne norske filmrelaterte artikkelen er foreløpig kort eller mangelfull, og du kan hjelpe Wikipedia ved å utvide den.

# +
from pyspark.sql import SparkSession
# NOTE(review): array_contains is imported but never used below — confirm it can be dropped.
from pyspark.sql.functions import array_contains

spark = SparkSession \
    .builder \
    .appName("Analysing Wikipedia") \
    .getOrCreate()
# -

# Load the CirrusSearch dump of Norwegian (bokmål) Wikipedia.
df = spark.read.json("./nowiki-20210111-cirrussearch-general.json")

df.printSchema()

# We probably only need wikitext
df.select("content_model").distinct().show()

# About namespaces https://en.wikipedia.org/wiki/Wikipedia:Namespace
# Templates have their own namespace! It is number 10
df.select("namespace").distinct().show(50)

# Keep only wikitext template pages (namespace 10) and drop every column we do
# not need, leaving essentially title + text.
filtered_df = df \
    .filter( \
        (df["namespace"] == 10) & \
        (df["content_model"] == "wikitext")) \
    .drop("content_model", "language", "category", "coordinates", "defaultsort", \
          "external_link", "heading", "incoming_links", "namespace", "namespace_text", \
          "outgoing_link", "redirect", "text_bytes", "template", "wiki", \
          "wikibase_item", "version_type", "file_bits", "file_height", "file_media_type", \
          "file_resolution", "file_size", "file_text", "file_width", "index", \
          "file_mime", "ores_articletopic", "ores_articletopics", "score", "popularity_score", \
          "display_title", "auxiliary_text", "create_timestamp", "timestamp", "version", \
          "opening_text", "source_text")

# + tags=[]
# Which template pages contain the example stub text verbatim?
filtered_df \
    .filter( \
        df["text"] \
        .contains("Denne norske filmrelaterte artikkelen er foreløpig kort eller mangelfull, og du kan hjelpe Wikipedia ved å utvide den.") \
    )\
    .show(truncate=False)
# -

# The text column contained the template text, but also more. And the bottom template should show
#
# > Aktuell artikkel: Denne artikkelen omhandler en aktuell hendelse. Vær ekstra oppmerksom på at innholdet kan være utdatert eller feilaktig, og at hyppige redigeringer kan forekomme.
#
# but it is not that at all!

filtered_df.filter(df["title"] == "Aktuell").show(truncate=False)

# # Conclusion
#
# This was hard and I give up. I'll just keep the templates in.
words/Mining meta information about Wikipedia.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Descriptive statistics with pandas: reductions, NaN handling, cumulative
# operations, summaries, uniques and value counts.

import numpy as np
from pandas import DataFrame, Series

df = DataFrame([[1.4, np.nan], [7.1, -4.5],[np.nan, np.nan], [0.75, -1.3]],index=['a', 'b', 'c', 'd'],columns=['one', 'two'])
df

# Column-wise reductions skip NaN by default.
df.mean()

df.sum()

# axis=1 reduces across columns (one value per row).
df.mean(axis=1)

df.sum(axis=1)

# With skipna=False any NaN poisons the result.
df.mean(skipna=False)

df.mean(axis=1, skipna = False)

# Index label of the max/min per column.
df.idxmax()

df.idxmin()

df.cumsum()

df

df.cumsum(axis=1)

df.describe()

df = DataFrame([[1, 2], [3, 4],[5, 6], [7, 8]],index=['a', 'b', 'c', 'd'],columns=['one', 'two'])
df

df.describe()

obj = Series(['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c'])

obj.unique()

obj.value_counts()

obj.value_counts(sort=False)

data = DataFrame({'Qu1': [1, 3, 4, 3, 4],'Qu2': [2, 3, 1, 2, 3],'Qu3': [1, 5, 2, 4, 4]})
data

import pandas as pd

# FIX: the top-level pd.value_counts is deprecated (pandas 2.1+); apply the
# Series method instead — identical result, forward compatible.
data.apply(pd.Series.value_counts)
Pandas/4-Descriptive-finished.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Linear 2D solution

# ## Init symbols for *sympy*

# +
from sympy import *
from geom_util import *
from sympy.vector import CoordSys3D
import matplotlib.pyplot as plt
import sys
sys.path.append("../")

# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
# %aimport geom_util
# -

# +
# Any tweaks that normally go in .matplotlibrc, etc., should explicitly go here
# %config InlineBackend.figure_format='retina'
plt.rcParams['figure.figsize'] = (12, 12)

plt.rc('text', usetex=True)
plt.rc('font', family='serif')

init_printing()
# -

N = CoordSys3D('N')

# Curvilinear coordinates (alpha1 along the shell, alpha3 through the thickness).
alpha1, alpha2, alpha3 = symbols("alpha_1 alpha_2 alpha_3", real = True, positive=True)

# A — Lamé parameter, K — curvature, rho — density (all symbolic here).
A,K,rho = symbols("A K rho")

# Strain-displacement operator in curvilinear coordinates: maps the 12-component
# state vector to 9 displacement-gradient components.
B=Matrix([[0, 1/(A*(K*alpha3 + 1)), 0, 0, 0, 0, 0, 0, K/(K*alpha3 + 1), 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1/(A*(K*alpha3 + 1)), 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[-K/(K*alpha3 + 1), 0, 0, 0, 0, 0, 0, 0, 0, 1/(A*(K*alpha3 + 1)), 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
B

# E selects/sums gradient components into the 6 engineering strains.
E=zeros(6,9)
E[0,0]=1
E[1,4]=1
E[2,8]=1
E[3,1]=1
E[3,3]=1
E[4,2]=1
E[4,6]=1
E[5,5]=1
E[5,7]=1
E

# Isotropic elasticity: Lamé constants mu, lambda.
mu = Symbol('mu')
la = Symbol('lambda')
C_tensor = getIsotropicStiffnessTensor(mu, la)
C = convertStiffnessTensorToMatrix(C_tensor)
C

# Symbolic stiffness integrand S = B^T E^T C E B, weighted by the metric A*(1 + alpha3*K).
S=B.T*E.T*C*E*B*A*(1+alpha3*K)
S

# Mass integrand: rho on the three displacement components of the state vector.
M=Matrix([[rho, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, rho, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, rho, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
M=M*A*(1+alpha3*K)
M

# ## Cartesian coordinates

# +
import fem.geometry as g
import fem.model as m
import fem.material as mat
import fem.solver as s
import fem.mesh as me
import plot

# Compile the symbolic integrands into fast numpy callables.
stiffness_matrix_func = lambdify([A, K, mu, la, alpha3], S, "numpy")
mass_matrix_func = lambdify([A, K, rho, alpha3], M, "numpy")

def stiffness_matrix(material, geometry, x1, x2, x3):
    # Evaluate the stiffness integrand at a quadrature point (x1, x2, x3).
    A,K = geometry.get_A_and_K(x1,x2,x3)
    return stiffness_matrix_func(A, K, material.mu(), material.lam(), x3)

def mass_matrix(material, geometry, x1, x2, x3):
    # Evaluate the mass integrand at a quadrature point (x1, x2, x3).
    A,K = geometry.get_A_and_K(x1,x2,x3)
    return mass_matrix_func(A, K, material.rho, x3)

def generate_layers(thickness, layers_count, material):
    # Split the total thickness into `layers_count` equal layers, top to bottom.
    layer_top = thickness / 2
    layer_thickness = thickness / layers_count
    layers = set()
    for i in range(layers_count):
        layer = m.Layer(layer_top - layer_thickness, layer_top, material, i)
        layers.add(layer)
        layer_top -= layer_thickness
    return layers

def solve(geometry, thickness, linear, N_width, N_height):
    # NOTE(review): the `linear` parameter appears unused here, and the mesh is
    # built from the module-level `width`, not from `geometry` — confirm intended.
    layers_count = 1
    layers = generate_layers(thickness, layers_count, mat.IsotropicMaterial.steel())
    model = m.Model(geometry, layers, m.Model.FIXED_BOTTOM_LEFT_RIGHT_POINTS)
    mesh = me.Mesh.generate(width, layers, N_width, N_height, m.Model.FIXED_BOTTOM_LEFT_RIGHT_POINTS)
    lam, vec = s.solve(model, mesh, stiffness_matrix, mass_matrix)
    return lam, vec, mesh, geometry

# Shell parameters.
width = 2
curvature = 0.8
thickness = 0.05

corrugation_amplitude = 0.05
corrugation_frequency = 20

geometry = g.General(width, curvature, corrugation_amplitude, corrugation_frequency)

# Mesh resolution: elements along the width and through the thickness.
N_width = 600
N_height = 2

lam, vec, mesh, geometry = solve(geometry, thickness, False, N_width, N_height)

results = s.convert_to_results(lam, vec, mesh, geometry)

results_index = 0
plot.plot_init_and_deformed_geometry_in_cartesian(results[results_index], 0, width, -thickness / 2, thickness / 2, 0, geometry.to_cartesian_coordinates)

# Print the first (up to) 20 natural frequencies in Hz.
to_print = 20
if (len(results) < to_print):
    to_print = len(results)
for i in range(to_print):
    print(results[i].rad_per_sec_to_Hz(results[i].freq))
py/notebooks/LinearSol2DFEM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Clustering algorithms # check scikit-learn version import sklearn print(sklearn.__version__) from numpy import where from numpy import unique from matplotlib import pyplot #### Creating a random Dataset from sklearn.datasets import make_classification X, y = make_classification( n_samples=1000, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, random_state=0 ) # ## Basic Clustering # # Just showing the points of the two variables in a graph. # create scatter plot for samples from each class for class_value in range(2): # get row indexes for samples with this class row_ix = where(y == class_value) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## Affinity Propagation # # Affinity Propagation involves finding a set of exemplars that best summarize the data. # affinity propagation clustering from sklearn.cluster import AffinityPropagation # define the model model = AffinityPropagation(damping=0.9, random_state=None) # fit the model model.fit(X) # assign a cluster to each example yhat = model.predict(X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## Agglomerative Clustering # # Agglomerative clustering merges examples until the desired number of clusters is achieved. 
from sklearn.cluster import AgglomerativeClustering # define the model model = AgglomerativeClustering(n_clusters=4) # four clusters in this example # fit model and predict clusters yhat = model.fit_predict(X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## BIRCH # # Balanced Iterative Reducing and Clustering using Hierarchies involves constructing a tree structure from which cluster centroids are extracted. from sklearn.cluster import Birch # define the model model = Birch(threshold=0.01, n_clusters=4) # fit the model model.fit(X) # assign a cluster to each example yhat = model.predict(X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## DBSCAN # # Density-Based Spatial Clustering of Applications with Noise involves finding high-density areas in the domain and expanding those areas of the feature space around them as clusters. 
from sklearn.cluster import DBSCAN # define the model model = DBSCAN(eps=0.30, min_samples=9) # fit model and predict clusters yhat = model.fit_predict(X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## OPTICS # # Ordering Points To Identify the Clustering Structure) is a modified version of DBSCAN from sklearn.cluster import OPTICS # define the model model = OPTICS(eps=1, min_samples=20) # fit model and predict clusters yhat = model.fit_predict(X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## K-Means # # K-Means Clustering involves assigning examples to clusters in an effort to minimize the variance within each cluster. from sklearn.cluster import KMeans # define the model model = KMeans(n_clusters=4) # fit the model model.fit(X) # assign a cluster to each example yhat = model.predict(X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## Mini-Batch K-Means # # Mini-Batch K-Means is a modified version of k-means that makes updates to the cluster centroids using mini-batches of samples rather than the entire dataset, which can make it faster for large datasets, and perhaps more robust to statistical noise. 
from sklearn.cluster import MiniBatchKMeans # define the model model = MiniBatchKMeans(n_clusters=4) # fit the model model.fit(X) # assign a cluster to each example yhat = model.predict(X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## Mean Shift # # Mean shift clustering involves finding and adapting centroids based on the density of examples in the feature space. from sklearn.cluster import MeanShift # define the model model = MeanShift() # fit model and predict clusters yhat = model.fit_predict(X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## Gaussian Mixture Model # # As its name suggests, a Gaussian mixture model summarizes a multivariate probability density function with a mixture of Gaussian probability distributions. from sklearn.mixture import GaussianMixture # define the model model = GaussianMixture(n_components=4) # fit the model model.fit(X) # assign a cluster to each example yhat = model.predict(X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # show the plot pyplot.show() # ## Experiment 01 # # 1. Handle the dataset # 2. Converting all textual values into integer values representing their differences (unique values) or similariry (repeated values) # 3. 
# Use the handled dataset to cluster under distinct algorithms

import pandas as pd
import re

url = "https://raw.githubusercontent.com/waldeyr/Pedro_RED_SPL/main/RED_SPL.tsv"
df = pd.read_csv(url, sep='\t' )
df.sample(n=3)

# +
def setRegion(Region):
    """Map a chromosome label ('chr1'..'chr22', 'chrX', 'chrY', 'chrM') to a number.

    BUG FIX: the original returned ints for X/Y/M but *strings* for the
    numbered chromosomes, producing a mixed-type 'Region' column; now every
    numeric label is returned as an int.
    """
    if Region == 'chrX':
        return 23 # chromosome X
    if Region == 'chrY':
        return 24 # chromosome Y
    if Region == 'chrM':
        return 25 # Mitochondrial
    stripped = re.sub('chr', '', Region)
    # Non-numeric labels (e.g. unplaced scaffolds), if any, pass through unchanged.
    return int(stripped) if stripped.isdigit() else stripped

df['Region'] = df['Region'].apply(lambda x: setRegion(x))

# Encode the substitution strings as integer codes.
# NOTE(review): na_sentinel is deprecated since pandas 1.5 (use use_na_sentinel) —
# confirm the pandas version pinned for this notebook.
df.Nucleotide_Substitution_1 = pd.factorize(df.Nucleotide_Substitution_1, na_sentinel=None)[0]
df.Nucleotide_Substitution_2 = pd.factorize(df.Nucleotide_Substitution_2, na_sentinel=None)[0]
# -

partial_dataset = df[["Region", "Mutation_Position", "Freq_Edition_0", "Freq_Edition_1", "Nucleotide_Substitution_1", "Nucleotide_Substitution_2"]]
partial_dataset.sample(n=2)

from sklearn.cluster import KMeans

X = partial_dataset.values
X

# define the model
model = KMeans(n_clusters=2)
# fit the model
model.fit(X)
# assign a cluster to each example
yhat = model.predict(X)
# retrieve unique clusters
clusters = unique(yhat)
Clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_tensorflow2_p36 # language: python # name: conda_tensorflow2_p36 # --- # # Data preparation # ## 1. Import Libraries # + import boto3 import sagemaker sagemaker_session = sagemaker.Session() role = sagemaker.get_execution_role() print(role) bucket = 'eagle-eye-dataset' prefix = 'OD_using_TFOD_API/experiment2/tfrecords' # - # ## 2. Uploading To S3 s3_input = sagemaker_session.upload_data('data', bucket, prefix) print(s3_input) # ## 3. Copying data To Local Path # image_name = 'copying-ecr-expr2' # !sh ./docker/build_and_push.sh $image_name # + import os with open (os.path.join('docker', 'ecr_image_fullname.txt'), 'r') as f: container = f.readlines()[0][:-1] print(container) # -
experiment-2/1_Data_prep/prepare_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Pytorch
#     language: python
#     name: myenv
# ---

# # Data Scraping

# Importing Libraries
import pandas as pd
from twython import Twython
from tqdm import tqdm
import numpy as np

# NOTE(review): placeholder credentials — load the real keys from the
# environment or a config file; never commit them to the repository.
CONSUMER_KEY = "CONSUMER_KEY"
CONSUMER_SECRET = "CONSUMER_SECRET"
OAUTH_TOKEN = "OAUTH_TOKEN"
OAUTH_TOKEN_SECRET = "OAUTH_TOKEN_SECRET"

twitter = Twython( CONSUMER_KEY, CONSUMER_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

df = pd.read_csv("MeTooMMD_train.csv")
df.head(14)

len(df.TweetId)

# |Features|
# - # |Text|
# |in_reply_to_user_id (Loop for more data with same features as this one)|
# |Hash Tags with segmentation [ library](https://github.com/cbaziotis/ekphrasis)|
# |URL's|

# Main Dataset Dict
# Columns accumulated per tweet: API-fetched fields plus the labels copied
# over from the training CSV.
dictDF ={
    "TweetId":[],
    "text":[],
    "context":[],
    "in_reply_to_status_id":[],
    "hashtags":[],
    "urls":[],
    "Text_Only_Informative":[],
    "Image_Only_Informative":[],
    "Directed_Hate":[],
    "Generalized_Hate":[],
    "Sarcasm":[],
    "Allegation":[],
    "Justification":[],
    "Refutation":[],
    "Support":[],
    "Oppose":[]
}

with tqdm(total=len(df.TweetId)) as pbar:
    for i in range(0, len(df.TweetId)):
        try:
            # Fetch the tweet body and metadata for this id.
            tweet = twitter.show_status(id=df.TweetId[i])
            dictDF['TweetId'].append(df.TweetId[i])
            dictDF['text'].append(tweet['text'])
            dictDF['in_reply_to_status_id'].append(tweet['in_reply_to_status_id'])
            dictDF['context'].append(' ')
            # Flatten all hashtag texts into one space-separated string.
            t_hashTag=' '
            if(len(tweet['entities']['hashtags']) != 0):
                for j in tweet['entities']['hashtags']:
                    t_hashTag= j['text']+' '+t_hashTag
            dictDF['hashtags'].append(t_hashTag)
            # Flatten all expanded URLs into one space-separated string.
            t_urls=' '
            if(len(tweet['entities']['urls']) != 0):
                for j in tweet['entities']['urls']:
                    t_urls= j['expanded_url']+' '+t_urls
            dictDF['urls'].append(t_urls)
            # Copy the human-annotated labels straight from the CSV.
            dictDF['Text_Only_Informative'].append(df.Text_Only_Informative[i])
            dictDF['Image_Only_Informative'].append(df.Image_Only_Informative[i])
            dictDF['Directed_Hate'].append(df.Directed_Hate[i])
            dictDF['Generalized_Hate'].append(df.Generalized_Hate[i])
            dictDF['Sarcasm'].append(df.Sarcasm[i])
            dictDF['Allegation'].append(df.Allegation[i])
            dictDF['Justification'].append(df.Justification[i])
            dictDF['Refutation'].append(df.Refutation[i])
            dictDF['Support'].append(df.Support[i])
            dictDF['Oppose'].append(df.Oppose[i])
            pbar.update(1)
        except Exception:
            # Tweet deleted/protected or API error: skip it but keep the
            # progress bar moving. (Was a bare `except:`.)
            pbar.update(1)

# BUG FIX: `my_data` was never defined; build the DataFrame from the
# accumulated dict before writing it out.
my_data = pd.DataFrame(dictDF)
my_data.to_csv('dataset.csv', index = False)
Data_Scraping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Part A

# A lot of the graph/vertex class was taken directly from PS5 solution.
#
# **See README for part A description.**

from math import inf


class Vertex:
    """A graph vertex whose state (id plus BFS/DFS bookkeeping) lives in a dict."""

    def __init__(self,id):
        self.attributes = {}
        self.attributes['id'] = id

    def __str__(self):
        return str(self.attributes)

    def new_copy(self):
        # Fresh vertex with the same id and none of the traversal bookkeeping.
        return Vertex(self.attributes['id'])

    def set(self,key,value):
        self.attributes[key] = value

    def get(self,key):
        return self.attributes[key]


class Graph:
    """Adjacency-list graph; edge direction semantics are supplied by subclasses."""

    def __init__(self):
        self.vertices = {}         # Vertex -> list of adjacent Vertex objects
        self.id_to_v = {}          # id -> Vertex, for lookup by label
        self.edge_attributes = {}  # (v1, v2) -> attribute dict (used by DirectedGraph)

    def __str__(self):
        s = ''
        for v in self.vertices:
            s += str(v)
            s += '\n\n'
        return s

    def add_vertex(self,v):
        self.vertices[v] = []
        self.id_to_v[v.get('id')] = v

    def size_vertices(self):
        return len(self.vertices)

    def add_edge(self,v1,v2):
        pass  # subclasses decide directedness

    def adjacent(self,v):
        return self.vertices[v]

    def BFS(self,start):
        """Breadth-first search from `start` (CLRS); returns vertex ids in visit order.

        Side effects: sets 'color', 'd' (distance) and 'parent' on every vertex.
        """
        assert(isinstance(start,Vertex))
        from collections import deque  # O(1) popleft instead of O(n) list.pop(0)
        result = []
        start.set('color','gray')
        start.set('d',0)
        start.set('parent',None)
        for v in self.vertices:
            if v == start:
                continue
            v.set('color','white')
            v.set('d',inf)
            v.set('parent',None)
        queue = deque([start])
        while len(queue) != 0:
            u = queue.popleft()
            for v in self.adjacent(u):
                if v.get('color') == 'white':
                    v.set('color','gray')
                    v.set('d',u.get('d') + 1)
                    v.set('parent',u)
                    queue.append(v)
            u.set('color','black')
            result.append(u.get('id'))
        return result

    def DFS_Visit(self,u,time):
        """Recursive DFS visit (CLRS); returns the updated timestamp counter."""
        time += 1
        u.set('d',time)
        u.set('color','gray')
        for v in self.adjacent(u):
            if v.get('color') == 'white':
                v.set('parent',u)
                time = self.DFS_Visit(v,time)
        u.set('color','black')
        time += 1
        u.set('f',time)
        return time

    def DFS(self):
        """Full depth-first search; sets discovery 'd' and finish 'f' times."""
        for v in self.vertices:
            v.set('color','white')
            v.set('parent',None)
        time = 0
        for v in self.vertices:
            if v.get('color') == 'white':
                time = self.DFS_Visit(v,time)


class UndirectedGraph(Graph):
    def add_edge(self,v1,v2):
        # An undirected edge is stored in both adjacency lists.
        self.vertices[v1].append(v2)
        self.vertices[v2].append(v1)


class DirectedGraph(Graph):
    def add_edge(self,v1,v2, s):
        """Add directed edge v1 -> v2 labelled with sound `s`."""
        self.vertices[v1].append(v2)
        self.edge_attributes[(v1,v2)] = {}
        self.edge_attributes[(v1,v2)]['sound'] = s

    def transpose(self):
        """Return G^T (every edge reversed), preserving each edge's sound label.

        BUG FIX: the original called add_edge(u, v) without the required sound
        argument, raising TypeError on every edge. Note the transpose shares
        Vertex objects (and their attributes) with this graph.
        """
        GT = DirectedGraph()
        for v in self.vertices:
            GT.add_vertex(v)
        for v in self.vertices:
            for u in self.adjacent(v):
                GT.add_edge(u, v, self.edge_attributes[(v, u)]['sound'])
        return GT

    def acyclic(self):
        """Return True if the last DFS found no back edge (d/f interval test)."""
        found_BE = False
        for u in self.vertices:
            for v in self.adjacent(u):
                b1 = v.get('d') <= u.get('d')
                b2 = u.get('d') < u.get('f')
                b3 = u.get('f') <= v.get('f')
                if b1 and b2 and b3:
                    found_BE = True
        return not found_BE

    def topological_sort(self):
        """Return vertex ids in topological order, or None if the graph is cyclic."""
        self.DFS()
        if self.acyclic():
            result = sorted(self.vertices,key=lambda v: v.get('f'),reverse=True)
            return list(map(lambda v: v.get('id'),result))
        else:
            return None

    def checkPath(self, s, verticesTaken, current_vertex):
        """Greedily follow edges whose sound labels spell out `s`.

        Returns the list of vertex ids taken when `s` is fully consumed, or
        None if the walk gets stuck. Greedy: takes the first matching edge and
        never backtracks, so it can miss paths that a different branch would
        find. Debug prints are intentionally left in (see original notes).
        """
        print(s)
        print(current_vertex)
        print(verticesTaken)
        print("id To add to verticesTaken:", current_vertex.get('id'))
        # NOTE: append in place — `list = list.append(x)` would rebind to None.
        idToAddToVerticesTaken = current_vertex.get('id')
        verticesTaken.append(idToAddToVerticesTaken)
        print("Vertices Taken after append.", verticesTaken)
        # The sound is consumed one symbol per accepted edge; when it is empty
        # the accumulated path satisfies the whole sound.
        if len(s) == 0:
            return verticesTaken
        # For each adjacent vertex, check if the edge matches the next symbol s[0].
        for v in self.adjacent(current_vertex):
            if self.edge_attributes[(current_vertex,v)]['sound'] == s[0]:
                # Match: consume the first symbol and move along that edge.
                s = s[1:len(s)]
                current_vertex = v
                return self.checkPath(s, verticesTaken, current_vertex)
        # Falls through with an implicit None when no adjacent edge matches.

    def checkPathHelper(self, s, verticesTaken, current_vertex):
        """Wrap checkPath: return its path, or 'NO-SUCH-PATH' when it returns None."""
        thingToReturn = self.checkPath(s, verticesTaken, current_vertex)
        if thingToReturn == None:
            return 'NO-SUCH-PATH'
        else:
            return thingToReturn


def create_graph_4():
    """Build the small example automaton: 0-a->1, 1-b->3, 3-a->4, 0-a->2."""
    G = DirectedGraph()
    for i in ['0','1','2','3','4']:
        G.add_vertex(Vertex(i))
    for (v1,v2,sigma) in [('0','1','a'),('1','3','b'),('3','4','a'),('0','2','a')]:
        G.add_edge(G.id_to_v[v1],G.id_to_v[v2],sigma)
    return G

G4 = create_graph_4()
G4.checkPathHelper('aba', [], G4.id_to_v['0'])
partAWorkingSubmission.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Gesture Recognition # # ### Machine Learning # # Machine Learning is the field of study that gives computers the capability to learn without being explicitly programmed. "Machine Learning" emphasizes that the computer program (or machine) must do some work after it is given data. The Learning step is made explicit. Eventhough Machine Learning was started in use to recognize patterns, Researchers started applying Machine Learning to Robotics (reinforcement learning, manipulation, motion planning, grasping), to genome data, as well as to predict financial markets. # # <img src="./images/ml-eng.png"> # # ### Deep Learning # # Fast forward to today and what we’re seeing is a large interest in something called Deep Learning which is a subset of Machine Learning. Deep learning is a machine learning technique that teaches computers to do what comes naturally to humans: learn by example. Deep learning is a key technology behind driverless cars, enabling them to recognize a stop sign, or to distinguish a pedestrian from a lamppost. The most popular kinds of Deep Learning models, as they are using in large scale image recognition tasks, are known as Convolutional Neural Nets, or simply ConvNets. # # <img src="./images/traditional-ml-deep-learning-2.png"> # # #### Convolutional Neural Network # # A Convolutional Neural Network (ConvNet/CNN) is a Deep Learning algorithm which can take in an input image, assign importance (learnable weights and biases) to various aspects/objects in the image and be able to differentiate one from the other. The pre-processing required in a ConvNet is much lower as compared to other classification algorithms. 
While in primitive methods filters are hand-engineered, with enough training, ConvNets have the ability to learn these filters/characteristics. # # The architecture of a ConvNet is analogous to that of the connectivity pattern of Neurons in the Human Brain and was inspired by the organization of the Visual Cortex. Individual neurons respond to stimuli only in a restricted region of the visual field known as the Receptive Field. A collection of such fields overlap to cover the entire visual area. # # <img src="./images/Typical_cnn.png"> # ### How it works # # #### Input # # In the figure, we have an RGB image which has been separated by its three color planes — Red, Green, and Blue. There are a number of such color spaces in which images exist — Grayscale, RGB, HSV, CMYK, etc. # # <img src="./images/input-img.png"> # # You can imagine how computationally intensive things would get once the images reach dimensions, say 8K (7680×4320). The role of the ConvNet is to reduce the images into a form which is easier to process, without losing features which are critical for getting a good prediction. # # #### Convolution # # Think of convolution as applying a filter to our image. We pass over a mini image, usually called a kernel, and output the resulting, filtered subset of our image. # # <img src="./images/Convolution_schematic.gif"> # # The objective of the Convolution Operation is to extract the high-level features such as edges, from the input image. # # <img src="./images/convolution-layer.gif"> # # There are a few parameters that get adjusted here: # # * Kernel Size – the size of the filter. # * Kernel Type – the values of the actual filter. Some examples include identity, edge detection, and sharpen. # * Stride – the rate at which the kernel passes over the input image. A stride of 2 moves the kernel in 2-pixel increments. 
# * Padding – we can add layers of 0s to the outside of the image in order to make sure that the kernel properly passes over the edges of the image. # * Output Layers – how many different kernels are applied to the image. # # Output of the convolution process is called the “convolved feature” or “feature map.” # # #### ReLU # CNNs often add in a nonlinear function to help approximate such a relationship in the underlying data. ReLU (Rectified Linear Unit) is one such simple function. # # #### Max Pooling # # We pass over sections of our image and pool them into the highest value in the section. # # Similar to Convolution layer, the pooling layer decreases the computational power required to process the data through dimensionality reduction. Furthermore, it is useful for extracting dominant features which are rotational and positional invariant, thus maintaining the process of effectively training of the model. # # <img src="./images/max-pooling.png"> # # #### Fully Connected Layers # After the above preprocessing steps are applied, the resulting image (which may end up looking nothing like the original!) is passed into the traditional neural network architecture. # # After going through the above process, we have successfully enabled the model to understand the features. Moving on, we are going to flatten the final output and feed it to a regular Neural Network for classification purposes. # # ### PyTorch # # PyTorch is software, specifically a machine learning library for the programming language Python, based on the Torch library, used for applications such as deep learning and natural language processing. # # A replacement for NumPy to use the power of GPUs. # # #### Computational graphs # # The first thing to understand about any deep learning library is the idea of a computational graph. A computational graph is a set of calculations, which are called nodes, and these nodes are connected in a directional ordering of computation. 
# # A simple example of a computational graph for the calculation a=(b+c)∗(c+2) can be seen below. # # <img src="./images/Simple-graph-example.png"> # Lets construct a randomly initialized matrix. Run the snippet below. # + import torch x = torch.rand(5, 3) print(x) # - # PyTorch uses an imperative / eager paradigm. That is, each line of code required to build a graph defines a component of that graph. We can independently perform computations on these components itself, even before your graph is built completely. This is called “define-by-run” methodology. # # <img src="./images/pytorch-variable.gif"> # #### Tensors # # Tensors are nothing but multidimensional arrays. Tensors in PyTorch are similar to numpy’s ndarrays. PyTorch requires the data set to be transformed into a tensor so it can be consumed in the training and testing of the network. # + # define a tensor a = torch.FloatTensor([2]) b = torch.FloatTensor([3]) print(a + b) # - # ### Model Parameters (Constants) # # The batch size is a number of samples processed before the model is updated. # # The number of epochs is the number of complete passes through the training dataset. # # The learning rate or step size in machine learning is a hyperparameter which determines to what extent newly acquired information overrides old information. At the global minima we can be confident that the learning algorithm has achieved a high level of accuracy, and is sufficient for making predictions on test or other unseen data. # # <img src="./images/global-minima.png"> # # We must specify the batch size, number of epochs and learning rate for any learning algorithm. # EPOCHS = 10 BATCH_SIZE = 1 LEARNING_RATE = 0.003 # ### Loading the Data # # `.._DATA_PATH` will be the training and test folder images we saved earlier. 
# # + from os.path import dirname, abspath # Inside the script use abspath('') to obtain the absolute path of this script # Call os.path.dirname twice to get parent directory of this directory parent_directory = dirname(dirname(abspath(''))) TRAIN_DATA_PATH = parent_directory + "/data/train/" TEST_DATA_PATH = parent_directory + "/data/test/" print(TRAIN_DATA_PATH) print(TEST_DATA_PATH) # - # The transform parameter `TRANSFORM_IMG` can be used to preprocess the images. # # + from torchvision import transforms TRANSFORM_IMG = transforms.Compose([ transforms.Grayscale(num_output_channels=1), transforms.ToTensor() ]) print(TRANSFORM_IMG) # - # PyTorch ships with the torchvision package, which makes it easy to download and use datasets for CNNs. # # + import torch.utils.data as data import torchvision train_data = torchvision.datasets.ImageFolder(root=TRAIN_DATA_PATH, transform=TRANSFORM_IMG) train_data_loader = data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4) test_data = torchvision.datasets.ImageFolder(root=TEST_DATA_PATH, transform=TRANSFORM_IMG) test_data_loader = data.DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4) print(train_data) print(train_data_loader) print('') print(test_data) print(test_data_loader) # - # #### Autograd module # # PyTorch uses a technique called automatic differentiation. That is, we have a recorder that records what operations we have performed, and then it replays it backward to compute our gradients. This technique is especially powerful when building neural networks. # # x and y are image and target label respectively. 
# + from torch.autograd import Variable for step, (x, y) in enumerate(train_data_loader): b_x = Variable(x.float()) # batch x (image) b_y = Variable(y) # batch y (target) print(b_x) print(b_y) # - # #### nn module # # PyTorch autograd makes it easy to define computational graphs and take gradients, but raw autograd can be a bit too low-level for defining complex neural networks. This is where the nn (Neural Network) module of PyTorch comes into play. # # A Simple Neural Network will have the following format. # # ``` # # define model # model = torch.nn.Sequential( # torch.nn.Linear(input_num_units, hidden_num_units), # torch.nn.ReLU(), # torch.nn.Linear(hidden_num_units, output_num_units), # ) # loss_fn = torch.nn.CrossEntropyLoss() # ``` # # Now that you know the basic components of PyTorch, you can easily build your own neural network from scratch. # ### Designing your Neural Net # # We’ll be making use of four major functions in our CNN class: # # * torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding) – applies convolution # * torch.nn.relu(x) – applies ReLU # * torch.nn.MaxPool2d(kernel_size, stride, padding) – applies max pooling # * torch.nn.Linear(in_features, out_features) – fully connected layer (multiply inputs by learned weights) # # We will create a CNN class with one class method: forward. The forward() method computes a forward pass of the CNN, which includes the preprocessing steps we outlined above. # # + import torch.nn as nn class CNN(nn.Module): def __init__(self): """ In the constructor we instantiate two nn.Sequential (Convolution) modules and one nn.Linear (Fully connected) module and assign them as member variables. 
""" super(CNN, self).__init__() self.conv1 = nn.Sequential( # input shape (1, 28, 28) nn.Conv2d( in_channels=1, # input height out_channels=16, # n_filters kernel_size=5, # filter size stride=1, # filter movement/step padding=2, # if want same width and length of this image after Conv2d, # padding=(kernel_size-1)/2 if stride=1 ), # output shape (16, 28, 28) nn.ReLU(), # activation nn.MaxPool2d(kernel_size=2), # choose max value in 2x2 area, output shape (16, 14, 14) ) self.conv2 = nn.Sequential( # input shape (16, 14, 14) nn.Conv2d(16, 32, 5, 1, 2), # output shape (32, 14, 14) nn.ReLU(), # activation nn.MaxPool2d(2), # output shape (32, 7, 7) ) self.out = nn.Linear(32 * 7 * 7, 2) # fully connected layer, output 2 classes def forward(self, x): """ In the forward function we accept a Tensor of input data and we must return a Tensor of output data. We can use Modules defined in the constructor as well as arbitrary (differentiable) operations on Tensors. """ x = self.conv1(x) x = self.conv2(x) x = x.view(x.size(0), -1) # flatten the output of conv2 to (batch_size, 32 * 7 * 7) output = self.out(x) return output, x # return x for visualization #The Neural Net can then be initialized in a single line as. model = CNN() print(model) # - # We’ll also define our loss and optimizer functions that the CNN will use to find the right weights. We’ll be using Cross Entropy Loss (Log Loss) as our loss function, which strongly penalizes high confidence in the wrong answer. The optimizer is the popular Adam algorithm (not a person!). # + optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE) loss_func = nn.CrossEntropyLoss() print(optimizer) print(loss_func) # - # Also, to check if GPU is available and to initialize PyTorch on the right device, we can use # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) # ### Training the Neural Net # # Once we’ve defined the class for our CNN, we need to train the net itself. 
This is where neural network code gets interesting.Our basic flow is a training loop: each time we pass through the loop (called an “epoch”), we compute a forward pass on the network and implement backpropagation to adjust the weights. We’ll also record some other measurements like loss and time passed, so that we can analyze them as the net trains itself. # # Finally, we’ll define a function to train our CNN using a simple for loop. During each epoch of training, we pass data to the model in batches whose size we define when we call the training loop. Data is feature-engineered using the SimpleCNN class we’ve defined, and then basic metrics are printed after a few passes. During each loop, we also calculate the loss on our validation set. # for epoch in range(EPOCHS): for step, (x, y) in enumerate(train_data_loader): b_x = Variable(x.float()) # batch x (image) b_y = Variable(y) # batch y (target) output = model(b_x)[0] loss = loss_func(output, b_y) optimizer.zero_grad() loss.backward() optimizer.step() print('Current Epoch: ', epoch) print('Current Loss:', loss.data) # ### Testing Accuracy # # At the end of every training epoch we test the current accuracy of the model which will give a set of print statement for each EPOCH of which first one is training loss and the second one is validation loss. 
# for epoch in range(EPOCHS): for _, (tx, ty) in enumerate(test_data_loader): test_x = Variable(tx) test_y = Variable(ty) test_output, last_layer = model(test_x) pred_y = torch.max(test_output, 1)[1].data.squeeze() accuracy = sum(pred_y == test_y) / float(test_y.size(0)) print('Epoch: ', epoch, '| train loss: %.4f' % loss.data, '| test accuracy: %.2f' % accuracy) # ### Save and Load Model # # Saving torch.save({'state_dict': model.state_dict()}, '../../data/checkpoint.pth.tar') print(model.state_dict()) # Loading # + new_model = CNN() # Model will have different state_dict() at this point # print(new_model.state_dict()) checkpoint = torch.load('../../data/checkpoint.pth.tar') new_model.load_state_dict(checkpoint['state_dict']) print(new_model.state_dict()) # - # Now you have successfully trained your CNN and tested it. Now only thing left to do is use this trained model which will directly predict if the desired gesture is there in a given image stream or not.
notebooks/project/4.Recognizing Gestures.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: fengine # language: python # name: fengine # --- # ## Regression # # In this lecture, I will bring together various techniques for feature engineering that we have covered in this course to tackle a regression problem. This would give you an idea of the end-to-end pipeline to build machine learning algorithms for regression. # # I will: # - build a lasso # - use feature-engine for the feature engineering steps # - set up an entire engineering and prediction pipeline using a Scikit-learn Pipeline # # =================================================================================================== # # ## In this demo: # # We will use the House Prices dataset, please refer to lecture **Datasets** in Section 1 of the course for instructions on how to download the dataset. # ## House Prices dataset # + from math import sqrt import pandas as pd import numpy as np import matplotlib.pyplot as plt # for the model from sklearn.model_selection import train_test_split from sklearn.linear_model import Lasso from sklearn.pipeline import Pipeline from sklearn.metrics import mean_squared_error, r2_score # for feature engineering from sklearn.preprocessing import StandardScaler from feature_engine import imputation as mdi from feature_engine import discretisation as dsc from feature_engine import encoding as ce pd.pandas.set_option('display.max_columns', None) # - # ### Load Datasets # + # load dataset data = pd.read_csv('../houseprice.csv') print(data.shape) data.head() # - # ### Types of variables (section 2) # # Let's go ahead and find out what types of variables there are in this dataset # + # let's inspect the type of variables in pandas data.dtypes # - # There are a mixture of categorical and numerical variables. Numerical are those of type **int** and **float** and categorical those of type **object**. 
# + # we have an Id variable, that we should not use for predictions: print('Number of House Id labels: ', len(data.Id.unique())) print('Number of Houses in the Dataset: ', len(data)) # - # Id is a unique identifier for each of the houses. Thus this is not a variable that we can use. # # #### Find categorical variables # + # find categorical variables categorical = [var for var in data.columns if data[var].dtype=='O'] print('There are {} categorical variables'.format(len(categorical))) # - data[categorical].head() # #### Find temporal variables # # There are a few variables in the dataset that are temporal. They indicate the year in which something happened. We shouldn't use these variables straightaway for model building. We should instead transform them to capture some sort of time information. Let's inspect these temporal variables: # # + # make a list of the numerical variables first numerical = [var for var in data.columns if data[var].dtype!='O'] # list of variables that contain year information year_vars = [var for var in numerical if 'Yr' in var or 'Year' in var] year_vars # - data[year_vars].head() # We can see that these variables correspond to the years in which the houses were built or remodeled or a garage was built, or the house was indeed sold. It would be better if we captured the time elapsed between the time the house was built and the time the house was sold for example. We are going to do that in the feature engineering section later in the notebook. # # We have another temporal variable: MoSold, which indicates the month in which the house was sold. Let's inspect if the house price varies with the time of the year in which it is sold: # + # plot median house price per month in which it was sold data.groupby('MoSold')['SalePrice'].median().plot() plt.title('House price variation along the year') plt.ylabel('median House price') # - # The price seems to vary depending on the month in which the house is sold. 
# # #### Find discrete variables # # To identify discrete variables, I will select from all the numerical ones, those that contain a finite and small number of distinct values. See below. # + # let's visualise the values of the discrete variables discrete = [] for var in numerical: if len(data[var].unique()) < 20 and var not in year_vars: print(var, ' values: ', data[var].unique()) discrete.append(var) print() print('There are {} discrete variables'.format(len(discrete))) # - # #### Continuous variables # + # find continuous variables # let's remember to skip the Id variable and the target variable SalePrice # which are both also numerical numerical = [var for var in numerical if var not in discrete and var not in [ 'Id', 'SalePrice'] and var not in year_vars] print('There are {} numerical and continuous variables'.format(len(numerical))) # - # Perfect!! Now we have inspected and have a view of the different types of variables that we have in the house price dataset. Let's move on to understand the types of problems that these variables have. # # ### Types of problems within the variables (section 3) # # #### Missing values # + # let's output variables with NA and the percentage of NA for var in data.columns: if data[var].isnull().sum() > 0: print(var, data[var].isnull().mean()) # - # #### Outliers and distributions # + # let's make boxplots to visualise outliers in the continuous variables # and histograms to get an idea of the distribution for var in numerical: plt.figure(figsize=(6,4)) plt.subplot(1, 2, 1) fig = data.boxplot(column=var) fig.set_title('') fig.set_ylabel(var) plt.subplot(1, 2, 2) fig = data[var].hist(bins=20) fig.set_ylabel('Number of houses') fig.set_xlabel(var) plt.show() # - # The majority of the continuous variables seem to contain outliers. In addition, the majority of the variables are not normally distributed. As we are planning to build linear regression, we need to tackle these to improve the model performance. 
To tackle the 2 aspects together, I will do discretisation. I will follow discretisation with encoding of the intervals following the target mean, as we do in the **Discretisation plus encoding lecture** in section 8. # #### Outliers in discrete variables # # Now, let's identify outliers in the discrete variables. I will call outliers those values that are present in less than 5 % of the houses. This is exactly the same as finding rare labels in categorical variables. **Discrete variables can be pre-processed / engineered as if they were categorical**. Keep this in mind. # + # outlies in discrete variables for var in discrete: (data.groupby(var)[var].count() / np.float(len(data))).plot.bar() plt.ylabel('Percentage of observations per label') plt.title(var) plt.show() # - # Most of the discrete variables show values that are shared by a tiny proportion of houses in the dataset. # ### Monotonicity between discrete variables and target values # + # let's plot the median sale price per value of the discrete # variable for var in discrete: data.groupby(var)['SalePrice'].median().plot() plt.ylabel('Median house Price per label') plt.title(var) plt.show() # - # Some of the discrete variables show some sort of monotonic relationship and some don't. # # #### Number of labels: cardinality # # Let's go ahead now and examine the cardinality of our categorical variables. That is, the number of different labels. # + # plot number of categories per categorical variable data[categorical].nunique().plot.bar(figsize=(10,6)) plt.title('CARDINALITY: Number of categories in categorical variables') plt.xlabel('Categorical variables') plt.ylabel('Number of different categories') # - # Most of the variables, contain only a few labels. Then, we do not have to deal with high cardinality. That is good news! # # Very likely though, they contain rare labels. Why don't you go ahead and plot the frequency of the categories for each categorical variable? 
We learned how to do this in section 3 of the course. # ### Separate train and test set # + # Let's separate into train and test set X_train, X_test, y_train, y_test = train_test_split(data.drop(['Id', 'SalePrice'], axis=1), data['SalePrice'], test_size=0.1, random_state=0) X_train.shape, X_test.shape # - # **Now we will move on and engineer the features of this dataset. The most important part for this course.** # ### Temporal variables (Section 12) # # First, we will create those temporal variables we discussed a few cells ago # + # function to calculate elapsed time def elapsed_years(df, var): # capture difference between year variable and # year the house was sold df[var] = df['YrSold'] - df[var] return df # - for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']: X_train = elapsed_years(X_train, var) X_test = elapsed_years(X_test, var) X_train[['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']].head() # Instead of the "year", now we have the amount of **years that passed** since the house was built or remodeled and the house was sold. Next, we drop the YrSold variable from the datasets, because we already extracted its value. 
# drop YrSold X_train.drop('YrSold', axis=1, inplace=True) X_test.drop('YrSold', axis=1, inplace=True) # capture the column names for use later in the notebook final_columns = X_train.columns # ### Missing data imputation (section 4) # #### Continuous variables # + # print variables with missing data # keep in mind that now that we created those new temporal variables, we # are going to treat them as numerical and continuous: # remove YrSold from the variable list # because it is no longer in our dataset year_vars.remove('YrSold') # examine percentage of missing values for col in numerical+year_vars: if X_train[col].isnull().mean() > 0: print(col, X_train[col].isnull().mean()) # - # Imputation technique to use: **additional variable with NA + median imputation** # + # print variables with missing data for col in categorical: if X_train[col].isnull().mean() > 0: print(col, X_train[col].isnull().mean()) # - # Imputation technique to use: **Add missing label to categorical variables** # ## Putting it all together # + # I will treat discrete variables as if they were categorical # to treat discrete as categorical using Feature-engine # we need to re-cast them as object X_train[discrete] = X_train[discrete].astype('O') X_test[discrete] = X_test[discrete].astype('O') # - house_pipe = Pipeline([ # missing data imputation - section 4 ('missing_ind', mdi.AddMissingIndicator( variables=['LotFrontage', 'MasVnrArea', 'GarageYrBlt'])), ('imputer_num', mdi.MeanMedianImputer(imputation_method='median', variables=['LotFrontage', 'MasVnrArea', 'GarageYrBlt'])), ('imputer_cat', mdi.CategoricalImputer(variables=categorical)), # categorical encoding - section 6 ('rare_label_enc', ce.RareLabelEncoder( tol=0.05, n_categories=6, variables=categorical+discrete)), ('categorical_enc', ce.OrdinalEncoder( encoding_method='ordered', variables=categorical+discrete)), # discretisation + encoding - section 8 ('discretisation', dsc.EqualFrequencyDiscretiser( q=5, return_object=True, 
variables=numerical)), ('encoding', ce.OrdinalEncoder( encoding_method='ordered', variables=numerical)), # feature Scaling - section 10 ('scaler', StandardScaler()), # regression ('lasso', Lasso(random_state=0)) ]) # + # let's fit the pipeline house_pipe.fit(X_train, y_train) # let's get the predictions X_train_preds = house_pipe.predict(X_train) X_test_preds = house_pipe.predict(X_test) # - # a peek into the prediction values X_train_preds # + # check model performance: print('train mse: {}'.format(mean_squared_error(y_train, X_train_preds))) print('train rmse: {}'.format(sqrt(mean_squared_error(y_train, X_train_preds)))) print('train r2: {}'.format(r2_score(y_train, X_train_preds))) print() print('test mse: {}'.format(mean_squared_error(y_test, X_test_preds))) print('test rmse: {}'.format(sqrt(mean_squared_error(y_test, X_test_preds)))) print('test r2: {}'.format(r2_score(y_test, X_test_preds))) # + # plot predictions vs real value plt.scatter(y_test,X_test_preds) plt.xlabel('True Price') plt.ylabel('Predicted Price') # + # let's explore the importance of the features # the importance is given by the absolute value of the coefficient # assigned by the Lasso importance = pd.Series(np.abs(house_pipe.named_steps['lasso'].coef_)) importance.index = list(final_columns)+['LotFrontage_na', 'MasVnrArea_na', 'GarageYrBlt_na'] importance.sort_values(inplace=True, ascending=False) importance.plot.bar(figsize=(18,6)) # -
Section-13-Putting-it-altogether/13.02-Regression-house-prices.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Initializing a model # Example showing how to initialize a model with another model # + # %pip install pybamm -q import pybamm import pandas as pd import os os.chdir(pybamm.__path__[0]+'/..') # - # ## Solve a model with a drive cycle # Load model model = pybamm.lithium_ion.DFN() # Set up drive cycle # import drive cycle from file drive_cycle = pd.read_csv( "pybamm/input/drive_cycles/US06.csv", comment="#", header=None ).to_numpy() # create interpolant param = model.default_parameter_values timescale = param.evaluate(model.timescale) current_interpolant = pybamm.Interpolant(drive_cycle[:, 0], drive_cycle[:, 1], timescale * pybamm.t) # set drive cycle param["Current function [A]"] = current_interpolant # Create and run simulation using the CasadiSolver in "fast" mode, remembering to pass in the updated parameters sim_US06_1 = pybamm.Simulation( model, parameter_values=param, solver=pybamm.CasadiSolver(mode="fast") ) sol_US06_1 = sim_US06_1.solve() # ## Update initial conditions based on a solution and solve again # Now pre-charge with CCCV, update the initial conditions, and solve again with the US06 drive cycle # + experiment = pybamm.Experiment( ["Charge at 1 A until 4.1 V", "Hold at 4.1 V until 50 mA"] ) sim_cccv = pybamm.Simulation(model, experiment=experiment) sol_cccv = sim_cccv.solve() # MODEL RE-INITIALIZATION: ############################################################# # Now initialize the model with the solution of the charge, and then discharge with # the US06 drive cycle # We could also do this inplace by setting inplace to True, which modifies the original # model in place new_model = model.set_initial_conditions_from(sol_cccv, inplace=False) ######################################################################################## 
sim_US06_2 = pybamm.Simulation( new_model, parameter_values=param, solver=pybamm.CasadiSolver(mode="fast") ) sol_US06_2 = sim_US06_2.solve() # - # Plot both solutions, we can clearly see the difference now that initial conditions have been updated pybamm.dynamic_plot( [sol_US06_1, sol_US06_2], labels=["Default initial conditions", "Fully charged"] ) # ## Initialize using a different model # We can also initialize the model using the solution of a different model # + spm = pybamm.lithium_ion.SPM() sim_spm_cccv = pybamm.Simulation(spm, experiment=experiment) sol_spm_cccv = sim_spm_cccv.solve() # MODEL RE-INITIALIZATION: ############################################################# # Now initialize the model with the solution of the charge, and then discharge with # the US06 drive cycle # We could also do this inplace by setting inplace to True, which modifies the original # model in place new_dfn = model.set_initial_conditions_from(sol_spm_cccv, inplace=False) ######################################################################################## sim_US06_3 = pybamm.Simulation( new_dfn, parameter_values=param, solver=pybamm.CasadiSolver(mode="fast") ) sol_US06_3 = sim_US06_3.solve() # - # Now the model initialized by the DFN and the model initialized by the SPM give the same solution pybamm.dynamic_plot( [sol_US06_1, sol_US06_2, sol_US06_3], labels=["Default initial conditions", "Fully charged (from DFN)", "Fully charged (from SPM)"] ) # ## References # # The relevant papers for this notebook are: pybamm.print_citations()
examples/notebooks/initialize-model-with-solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: python36 # language: python # name: python36 # --- # + import os import torch from torch import nn, optim from torch.autograd import Variable from torch.utils.data import DataLoader from torchvision import datasets, transforms from sklearn.metrics import r2_score import pandas as pd from mlp import MLP from lantentDataset import LatentDataset learning_rate = 0.001 dataFile = os.path.join('.\\data', 'Pancancer_LatentVec_Drug+GeneExp(cgc+eliminated+sampledGene+unsampledDrug).txt') num=0 while num<6: num += 1 print("-----------------------------\n-------------"+str(num)+"---------------\n-----------------------------") data = pd.read_table(open(dataFile), sep='\t') data = data.sample(frac=1).reset_index(drop=True) print(data.head()) trainData = data.loc[: 12244, :] trainDataset = LatentDataset(trainData, train0val1test2=0) validationData = data.loc[12245: 12924, :] validationDataset = LatentDataset(validationData, train0val1test2=1) testData = data.loc[12925: 13604, :] testDataset = LatentDataset(testData, train0val1test2=2) trainLoader = DataLoader(trainDataset, batch_size=64, shuffle=True, drop_last=True) validationLoader = DataLoader(validationDataset, batch_size=680, drop_last=True) testLoader = DataLoader(testDataset, batch_size=680, drop_last=True) model = MLP() ''' for m in model.modules(): if isinstance(m, (nn.Conv2d, nn.Linear)): nn.init.kaiming_normal_(m.weight, mode='fan_in') ''' if torch.cuda.is_available(): model = model.cuda() criterion = nn.MSELoss() optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.000005) epoch = 0 bestR2 = -1 bestLoss = 200 bestEpoch = 0 path = '.\\trainedModels\\' while epoch < 2: model.train() for batch in trainLoader: geLatentVec, dLatentVec, target = batch # if geLatentVec.shape[0] != 50: # continue if torch.cuda.is_available(): 
geLatentVec = geLatentVec.cuda() dLatentVec = dLatentVec.cuda() target = target.cuda() else: geLatentVec = Variable(geLatentVec) dLatentVec = Variable(dLatentVec) target = Variable(target) out = model(geLatentVec, dLatentVec) loss = criterion(out, target) optimizer.zero_grad() loss.backward() optimizer.step() epoch += 1 if epoch % 2 == 0: model.eval() for batch in validationLoader: geLatentVec, dLatentVec, target = batch if torch.cuda.is_available(): geLatentVec = geLatentVec.cuda() dLatentVec = dLatentVec.cuda() target = target.cuda() out = model(geLatentVec, dLatentVec) loss = criterion(out, target) evalLoss = loss.data.item() out = out.data.cpu().numpy().tolist() target = target.cpu().numpy().tolist() r2 = r2_score(target, out) # SS_tot = torch.std(target) # SS_res = evalLoss print('epoch: {}, Validation Loss: {:.6f}, R2_Score: {:.6f}'.format(epoch, evalLoss, r2)) if (r2 > bestR2 and epoch > 20): bestLoss = evalLoss bestR2 = r2 bestEpoch = epoch torch.save(model.state_dict(), path + 'modelParameters.pt') print("Got a better model!") # print('epoch: {}, loss: {:.4}'.format(epoch, loss.data.item())) pass path = '.\\trainedModels\\' model.load_state_dict(torch.load(path + 'modelParameters.pt')) print('\nNow testing the best model on test dataset\n') for batch in testLoader: geLatentVec, dLatentVec, target = batch if torch.cuda.is_available(): geLatentVec = geLatentVec.cuda() dLatentVec = dLatentVec.cuda() target = target.cuda() out = model(geLatentVec, dLatentVec) loss = criterion(out, target) evalLoss = loss.data.item() # SS_tot = torch.std(target) # SS_res = evalLoss out = out.data.cpu().numpy().tolist() target = target.cpu().numpy().tolist() r2 = r2_score(target, out) print('epoch: {}, Validation Loss: {:.6f}, R2_Score: {:.6f}'.format(bestEpoch, bestLoss, bestR2)) print('Test Loss: {:.6f}, R2_Score: {:.6f}'.format(evalLoss, r2)) df = pd.read_table('.\\R2_Score_relu6(cgc+unsampledGene+unsampledDrug).txt', sep='\t') df = df.append({'id': int(len(df)), 'R2_test': 
r2, 'R2_val': bestR2, 'epoch': bestEpoch}, ignore_index=True) df.to_csv('.\\R2_Score_relu6(cgc+unsampledGene+unsampledDrug).txt', sep='\t', index=False) # -
Code/CancerML/mlps_drug_exp/.ipynb_checkpoints/steven_train-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook following sentdex's "Data Visualization with Matplotlib" series,
# one cell per video.

from matplotlib import pyplot as plt
import numpy as np
import random as rn
import csv
# FIX: `import urllib` alone does not expose `urllib.request` in Python 3;
# the download cells below would raise AttributeError on a fresh kernel.
import urllib.request
import matplotlib.dates as mdates

# +
# Video 1 - Introduction and Line
plt.plot([1, 2, 3], [5, 7, 4])
plt.show()

# +
# Video 2 - Legends, titles and labels
x1 = [1, 2, 3]
y1 = [5, 7, 4]
x2 = [1, 2, 3]
y2 = [10, 14, 12]

plt.plot(x1, y1, label = 'First line')
plt.plot(x2, y2, label = 'Second line')

plt.xlabel('X Axis here')
plt.ylabel('Y Axis here')
plt.title('Title here!\nSubtitle here!')
plt.legend()
plt.show()

# +
# Video 3 - Barcharts and Histograms
# (bar-chart variant from the video kept for reference)
# x1 = [2, 4, 6, 8, 10]
# y1 = [6, 7, 8, 2, 4]
# x2 = [1, 3, 5, 7, 9]
# y2 = [7, 8, 2, 4, 2]
# plt.bar(x1, y1, label = 'Bars1', color = 'green')
# plt.bar(x2, y2, label = 'Bars2', color = 'red')

population_ages = [22, 55, 62, 45, 21, 22, 34, 42, 42, 4, 99, 102, 110,
                   120, 121, 122, 130, 111, 115, 112, 80, 75, 65, 54, 44,
                   43, 42, 48]
# ids = [x for x in range(len(population_ages))]
bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130]

plt.hist(population_ages, bins = bins, histtype = 'bar', rwidth = 0.8, label ='Ages')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Interesting Graph!\ncredits: sentdex')
plt.legend()
plt.show()

# +
# Video 4 - Scatter plots
x = [x for x in range(8)]
y = [rn.randint(0, 10) for x in range(8)]

plt.scatter(x, y, label = 'skitscat', color = 'b', marker = '*', s = 100)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Interesting Graph!\ncredits: sentdex')
plt.legend()
plt.show()

# +
# Video 5 - Pie plots
# NOTE: days/sleeping/eating/playing/coding come from the video but the pie
# below only uses `slices`/`activities`; kept as-is to match the tutorial.
days = [x for x in range(5)]
sleeping = [7, 8, 6, 11, 7]
eating = [2, 3, 4, 3, 2]
playing = [7, 8, 7, 2, 2]
coding = [5, 3, 8, 6, 7]

slices = [7,2,2,13]
activities = ['sleeping','eating','working','playing']
cols = ['c','m','r','b']

plt.pie(slices, labels = activities,
        colors = cols, startangle = 90, shadow = True,
        explode = (0, 0.1, 0, 0), autopct = '%1.1f')

# plt.xlabel('x')
# plt.ylabel('y')
plt.title('Interesting Graph!\ncredits: sentdex')
# plt.legend()
plt.show()

# +
# Video 6 - Loading data from file
# Manual csv-module variant, superseded by np.loadtxt below:
# x = []
# y = []
# with open('data.txt', 'r') as csv_file:
#     plots = csv.reader(csv_file, delimiter = ',')
#     for row in plots:
#         x.append(int(row[0]))
#         y.append(int(row[1]))

x, y = np.loadtxt('data.txt', delimiter = ',', unpack = True)

plt.plot(x, y, label ='Loaded from file', color = 'g')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Interesting Graph!\ncredits: sentdex')
plt.legend()
plt.show()

# +
# Day 7 - Getting Data from Internet
def load_data():
    """Download the sample price CSV once and cache it in the
    module-global `source_code` that graph_data() reads."""
    global source_code
    stock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement'
    source_code = urllib.request.urlopen(stock_price_url).read().decode()

def graph_data():
    """Split the cached CSV text into per-row field lists.

    Plotting is still commented out at this stage of the series.
    """
    stock_data = []
    split_sources = source_code.split('\n')
    for i in range(1, len(split_sources)):
        stock_data.append(split_sources[i].split(','))
#     date, closep, highp, lowp, openp, volume = np.loadtxt(stock_data, delimiter = ',', unpack = True, converters = {0: bytespdate2num('%Y%m%d')})
#     plt.xlabel("x")
#     plt.ylabel("y")
#     plt.title('Data Visualization Course\nBy sentdex')
#     plt.legend()
#     plt.show()

# FIX: load_data() was never called, so graph_data() crashed with a
# NameError on the undefined global `source_code`.
load_data()
graph_data()

# +
# Day 8 - Converting data from Internet
def bytespdate2num(fmt, encoding='utf-8'):
    """Return a converter turning CSV byte-string dates into matplotlib
    date numbers.  (`fmt` is unused: mdates.datestr2num parses the
    string format directly.)"""
    def bytesconverter(b):
        s = b.decode(encoding)
        return (mdates.datestr2num(s))
    return bytesconverter

def graph_data(stock):
    """Download the sample price CSV and plot the closing price.

    NOTE: the `stock` argument is ignored — the tutorial URL always
    serves the same data set.
    """
    stock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement'
    source_code = urllib.request.urlopen(stock_price_url).read().decode()
    stock_data = []
    split_source = source_code.split('\n')
    for line in split_source[1:]:
        split_line = line.split(',')
        if len(split_line) == 7:
            # skip the header/label rows of the CSV
            if 'values' not in line and 'labels' not in line:
                stock_data.append(line)

    date, closep, highp, lowp, openp, adj_closep, volume = np.loadtxt(
        stock_data, delimiter = ',', unpack = True,
        converters = {0: bytespdate2num('%Y-%m-%d')})

    plt.plot_date(date, closep, '-', label = 'Price')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Interesting Graph\nCheck it out')
    plt.legend()
    plt.show()

graph_data('TSLA')

# +
# Day 9 - Basic Customizations, Rotating labels
def bytespdate2num(fmt, encoding='utf-8'):
    """Same converter factory as Day 8 (redefined per the video)."""
    def bytesconverter(b):
        s = b.decode(encoding)
        return (mdates.datestr2num(s))
    return bytesconverter

def graph_data(stock):
    """Day-8 plot redone on an explicit Axes with a grid and rotated
    x-tick labels.  (`stock` is still ignored, as in Day 8.)"""
    stock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement'
    source_code = urllib.request.urlopen(stock_price_url).read().decode()
    stock_data = []
    split_source = source_code.split('\n')
    for line in split_source[1:]:
        split_line = line.split(',')
        if len(split_line) == 7:
            if 'values' not in line and 'labels' not in line:
                stock_data.append(line)

    date, closep, highp, lowp, openp, adj_closep, volume = np.loadtxt(
        stock_data, delimiter = ',', unpack = True,
        converters = {0: bytespdate2num('%Y-%m-%d')})

    fig = plt.figure()
    ax = plt.subplot2grid((1,1), (0,0))

    ax.plot_date(date, closep, '-', label = 'Price')
    ax.grid(True)
    plt.xticks(rotation = 45)

    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Interesting Graph\nCheck it out')
    plt.legend()
    plt.show()

graph_data('TSLA')
# -
Courses/sentdex/Data Visualization with Matplotlib/Data Visualization codes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Housing Price Prediction Project
# <NAME> <<EMAIL>>
#
# Working through Ch2 on my own.

# +
import os
import tarfile
from six.moves import urllib

DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"

def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
    """Download housing.tgz from *housing_url* into *housing_path* and
    unpack it there (creating the directory if needed)."""
    if not os.path.isdir(housing_path):
        os.makedirs(housing_path)
    tgz_path = os.path.join(housing_path, "housing.tgz")
    urllib.request.urlretrieve(housing_url, tgz_path)
    housing_tgz = tarfile.open(tgz_path)
    housing_tgz.extractall(path=housing_path)
    housing_tgz.close()
    print("Fetched")
# -

# # Import Data

# +
import pandas as pd

def load_housing_data(housing_path=HOUSING_PATH):
    """Read the unpacked housing.csv into a DataFrame."""
    csv_path = os.path.join(housing_path, "housing.csv")
    return pd.read_csv(csv_path)
# -

fetch_housing_data()
housing = load_housing_data()
housing.head()

housing.info()

housing.describe()

housing["ocean_proximity"].value_counts()

# %matplotlib inline #ipython magic function
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
plt.show()

# # Set aside the test set
# And don't look at it again until evaluation time. (Avoid the data snooping bias)
#
# This shuffle method works ok for now, but if the data set is evolving over time we can't adapt to the new data and keep our existing test/train sets. A better way might be to use last digit of the hash of an ID column as a definition of test vs train membership. Then sort rows based on this membership test.

import numpy as np

def split_train_test(data, test_ratio):
    """Shuffle *data* with a fixed seed and return (train, test)
    DataFrames where the test set holds *test_ratio* of the rows."""
    np.random.seed(42)  # so we shuffle in a known repeatable way.
    shuffled_indices = np.random.permutation(len(data))
    test_set_size = int(len(data) * test_ratio)
    test_indices = shuffled_indices[:test_set_size]
    # FIX: was `shuffled_indices[test_set_size]` (a single scalar index),
    # which made the "train set" a single row instead of the remaining slice.
    train_indices = shuffled_indices[test_set_size:]
    return data.iloc[train_indices], data.iloc[test_indices]

random_train_set, random_test_set = split_train_test(housing, 0.2)

#Or we can use sci kit to just do this for us.
from sklearn.model_selection import train_test_split
train_set_sk, test_set_sk = train_test_split(housing, test_size=0.2, random_state=42)

# # Avoiding Sampling Bias
# If you randomly sample, you may not be getting a representive slice of the data in your test and training sets. Use "Stratified Sampling" to avoid this.
#
# Don't have too many strata, and make sure each is large enough.

# +
#Limit and discretize income categories
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)

# Label those above 5 as 5
# Where the predicate test is NOT true, then we replace with the value. Confusing.
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)

#Compare input vs output after we setup our strata
housing["income_cat"].value_counts()
# -

#Compare input vs output after we setup our strata
housing["median_income"].hist()

housing["income_cat"].hist()

# +
#time to do the stratified sampling
from sklearn.model_selection import StratifiedShuffleSplit

split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
    strat_train_set = housing.loc[train_index]
    strat_test_set = housing.loc[test_index]
# -

# # Compare sampling error of Strat vs Random

# +
#how did the strat split do, vs random.
# Build a small table comparing class proportions of the full data set
# against the stratified and the purely random splits.
sample_stats = pd.DataFrame()
sample_stats["overall"] = housing["income_cat"].value_counts() / len(housing)
sample_stats["strat"] = strat_train_set["income_cat"].value_counts() / len(strat_train_set)

#Rerun random sample, now that housing has income_cat
random_train_set, random_test_set = train_test_split(housing, test_size=0.2, random_state=42)
sample_stats["random"] = random_train_set["income_cat"].value_counts() / len(random_train_set)
sample_stats.sort_index()

#Compute error percentages (deviation of each split from the overall proportions)
sample_stats["Random % Error"] = (sample_stats["overall"] - sample_stats["random"]) / sample_stats["overall"] * 100
sample_stats["Strat % Error"] = (sample_stats["overall"] - sample_stats["strat"]) / sample_stats["overall"] * 100
sample_stats
# -

# Clearly, Strat has lower error percentages, so it is a better representation of the whole set.

#Finally, cleanup income_cat off the test/train data.
# NOTE: drops in place on both stratified sets; income_cat was only a
# helper column for the stratification above.
for set_ in (strat_train_set, strat_test_set):
    set_.drop("income_cat", axis=1, inplace=True)

# # Discover and Visualize the Data to Gain Insights
# Now that we've prepped the test and train sets, we can dig into it to see what we can learn.
#
# At this point you are only to look at the training set (ignore the test set!). If the training set is large, you can sample it to make exploration more efficient. Ours is small though.

#make a copy of train set so we don't mangle it.
housing = strat_train_set.copy()

#rough plot
housing.plot(kind="scatter", x="longitude", y="latitude")

#Better plot with alpha, to visualize density
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)

# ## Keep improving the visualization, maybe patterns will emerge

#s is radius
#c is color
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
             s=housing["population"] / 100, label="population", figsize=(10,7),
             c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True)

# ## Looking For Correlations

corr_matrix = housing.corr() #Pearson's r, or standard correlation coeff
corr_matrix["median_house_value"].sort_values(ascending=False)

# ## Close to 1 means strongly correlated, (as opposed to -1)

# +
#using scatter matrix plot
from pandas.plotting import scatter_matrix

attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12,8))
# -

# Median income seems like the most promising correlation, so lets zoom in on that

housing.plot(kind="scatter", x="median_income", y="median_house_value", alpha=0.1)

# ## Attribute Combinations
# Room numbers may not be useful unless you compare it to number of rooms. You could combine the two attributes into a new one that is more useful.

# Derived ratio features; recomputed later inside CombinedAttributesAdder.
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"]/housing["households"]

#recompute correlation matrix
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)

# # Prepare the Data for ML Algorithms
# You should not do this manually, write functions instead.
# #Start with fresh data set again for this section #Separate the labels from the data (the label is the median house value, aka what we are trying to predict) housing = strat_train_set.drop("median_house_value", axis=1) housing_labels = strat_train_set["median_house_value"].copy() # ### Data Cleaning # ML hates missing features (aka incomplete data). # # A few options: # * get rid of rows with missing features. # * get rid of the attr that has missing data. # * set missing values to some value (zero, median, mean, something not harmful). #housing.dropna(subset=["total_bedrooms"]) #drop rows with the attr missing. #housing.drop("total_bedrooms", axis=1) #drop whole attr median = housing["total_bedrooms"].median() #be sure to save the median for future use. housing["total_bedrooms"].fillna(median, inplace=True) #option 3. #using Imputer from scikit from sklearn.preprocessing import Imputer imputer = Imputer(strategy="median") housing_num = housing.drop("ocean_proximity", axis=1) #imputer needs numeric only data to work. imputer.fit(housing_num) imputer.statistics_ housing_num.median().values #Use the imputer to transform training set to replace all missing values X = imputer.transform(housing_num) housing_tr = pd.DataFrame(X, columns=housing_num.columns) # ## Handling Text and categorical attributes housing_cat = housing["ocean_proximity"] housing_cat.head(10) #ML prefers numbers over text, so we can convert text into numbers. housing_cat_encoded, housing_categories = housing_cat.factorize() housing_cat_encoded[:10] housing_categories # Some ML algorithms assume that numbers that are close together have significance, so this could present problems for our numerical encoding here, we don't want to imply similarity where there is none. 
# # A fix is to use "one hot" binary encoding from sklearn.preprocessing import OneHotEncoder encoder = OneHotEncoder() housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1, 1)) housing_cat_1hot #efficient sparse matrix format. housing_cat_encoded.reshape(-1, 1) #fit_transform expected a 2d array, so we transposed the 1d array using reshape. # Newer versions of Scikit-learn have a `CategoricalEncoder` class, which could simplify this one hot encoding. # # If you find the number of categories you have is really large, this could be detrimental to training and performance. Look into "embeddings", chapter 14. # ## SKLearn custom transformers # You can write your own data transformers. Following example adds in the combined attributes that we computed manually earlier. # + from sklearn.base import BaseEstimator, TransformerMixin rooms_ix, bedrooms_ix, population_ix, household_ix = 3,4,5,6 class CombinedAttributesAdder(BaseEstimator, TransformerMixin): def __init__(self, add_bedrooms_per_room = True): #don't use *args or **kargs #This variable is a hyperparameter of the ML algorithm. self.add_bedrooms_per_room = add_bedrooms_per_room def fit(self, X, y=None): return self def transform(self, X, y=None): rooms_per_household = X[:, rooms_ix] / X[:, household_ix] population_per_household = X[:, population_ix] / X[:, household_ix] if self.add_bedrooms_per_room: bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix] return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room] else: return np.c_[X, rooms_per_household, population_per_household] # - attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) housing_extra_attribs = attr_adder.transform(housing.values) housing_extra_attribs = pd.DataFrame(housing_extra_attribs, columns=list(housing.columns)+["rooms_per_household", "population_per_household"]) housing_extra_attribs.head() # ## Feature Scaling and Transformation Pipelines. 
# ML Algorithms usually don't perform well when the attributes have vastly different scales on the numerical attributes. # # Use min-max scaling (normalization), or standardization. SK learn has builtins for these. # # Use Transformation Pipelines to do this processing. # + from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler num_pipeline = Pipeline([ ('imputer', Imputer(strategy="median")), ('attribs_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler()) ]) housing_num_tr = num_pipeline.fit_transform(housing_num) # - # It would be great to add a Pandas DataFrame directly into the pipeline. To do that we can create a custom transformer for the start of the pipe. # + from sklearn.base import BaseEstimator, TransformerMixin class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X, y=None): return X[self.attribute_names].values # - # Now we show using multiple pipelines and then (one for numeric and one for category data) and then how to join them together. num_attribs = list(housing_num) cat_attribs = ["ocean_proximity"] num_attribs #list of attributes (columns) from DataFrame # + num_pipeline = Pipeline([ ('selector', DataFrameSelector(num_attribs)), ('imputer', Imputer(strategy="median")), ('attribs_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler()) ]) from future_encoders import OneHotEncoder cat_pipeline = Pipeline([ ('selector', DataFrameSelector(cat_attribs)), ('onehot_encoder', OneHotEncoder()) ]) # - # Warning: earlier versions of the book used the LabelBinarizer or CategoricalEncoder classes to convert each categorical value to a one-hot vector. It is now preferable to use the OneHotEncoder class. Right now it can only handle integer categorical inputs, but in Scikit-Learn 0.20 it will also handle string categorical inputs (see PR #10521). 
# So for now we import it from future_encoders.py, but when Scikit-Learn 0.20 is released, you can import it from sklearn.preprocessing instead:

# +
#Join pipes using FeatureUnion
from sklearn.pipeline import FeatureUnion

full_pipeline = FeatureUnion(transformer_list=[
    ('num_pipeline', num_pipeline),
    ('cat_pipeline', cat_pipeline)
])

housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared.shape
# -

# # Select and Train a Model
# Finally! We've done the prep work to make the rest of this go as smoothly as possible.

# +
#Linear Regression model.
from sklearn.linear_model import LinearRegression

lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)

# +
#Try out linear model on a some data from the training set.
some_data = housing.iloc[:5]
some_labels= housing_labels.iloc[:5]

#yay pipeline reuse
some_data_prepared = full_pipeline.transform(some_data)

#predict
print("Predictions:", lin_reg.predict(some_data_prepared))

print("labels:", list(some_labels))
# -

#Measure RMSE to see how the model performed.
from sklearn.metrics import mean_squared_error
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse

# This means typical error is $68K. not great. Model might be underfit to training data. Perhaps the features are not good for predictions, or the model is not powerful enough.
#
# Lets try a more powerful model than Linear next, Decision Tree

# +
from sklearn.tree import DecisionTreeRegressor

tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)

# RMSE measured on the same data the tree was fit on — expect it to be
# misleadingly low (see markdown below).
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
# -

tree_rmse

# That can't be right. Perhaps we are badly overfit. But still *DON'T TOUCH TEST SET UNTIL LAUNCH*
#
# We can use part of the training data for Cross Validation
#
# ## Cross Validation for Better Evaluation

# +
#Randomly split the training set into 10 distinct subsets (called folds)
#Trains and evals the models 10 times and cross validates.
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)

#SK learn cross validation expects utility function (great is better) compared to cost function (lower is better)
#thats why we use -scores before calculating sqrt
tree_rmse_scores = np.sqrt(-scores)

# +
def display_scores(scores):
    """Print a fold-score array with its mean and standard deviation."""
    print('Scores:', scores)
    print('Mean:', scores.mean())
    print('Std Dev:', scores.std())

display_scores(tree_rmse_scores)
# -

# These metrics seem more realistic than 0 error after doing cross validation. Seems worse than Linear regression, but lets use cross validation on Linear to have apples to apples comparison.

lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)

# This shows that our decision tree is overfitting and is even worse than a simple linear regression. Lets try one more model, Random Forest.

from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor(random_state=42)
forest_reg.fit(housing_prepared, housing_labels)

#Full Set
housing_predictions = forest_reg.predict(housing_prepared)
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_rmse

#Cross Val for forest
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
display_scores(forest_rmse_scores)

# We see that the cross val scores for Forest show worse performance than the RMSE for the whole training set. This means we may still be overfitting the data using this model.

# ## Fine Tuning the Model
# Assume you have a short list of a few promising models. now you want to fine tune them.

# +
#Using Grid search to tune hyperparameters for Random Forest
from sklearn.model_selection import GridSearchCV

param_grid = [
    {'n_estimators': [3,10,30], 'max_features':[2,4,6,8]},
    {'bootstrap': [False], 'n_estimators':[3,10], 'max_features':[2,3,4]},
]

forest_reg = RandomForestRegressor()

grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error')

grid_search.fit(housing_prepared, housing_labels)
# -

# This grid search will check all combinations of parameters (in this case, 18 combos in all). Since CV is 5, it means there will be 90 rounds of training against the training set (for cross validation). Might take a while.

#Once grid search is done, see what it found.
grid_search.best_params_

#get best model directly
grid_search.best_estimator_

#get scores
# NOTE(review): sorting (score, params) tuples falls back to comparing the
# params dicts on tied scores, which raises TypeError on Python 3 — verify.
cvres = grid_search.cv_results_
for mean_score, params in sorted(zip(cvres["mean_test_score"], cvres["params"])):
    print(np.sqrt(-mean_score), params)

# ## Randomized Search
# Grid search works ok if the state space of hyperparameters is not too large. Otherwise using RandomizedSearchCV might be more tractable

# ## Analyze Best models and their Errors

#RandomForest can tell you which attributes ended up being most important.
feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances

#Display in context
extra_attribs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"]
cat_encoder = cat_pipeline.named_steps["onehot_encoder"]
cat_one_hot_attribs = list(cat_encoder.categories_[0])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances, attributes), reverse=True)

# Based on these results we could try dropping some of the less important attributes/categories, since they are not contributing much to the model.

# ## Evaluate System on the Test Set

# +
final_model = grid_search.best_estimator_

X_test = strat_test_set.drop("median_house_value", axis=1)
y_test = strat_test_set["median_house_value"].copy()

X_test_prepared= full_pipeline.transform(X_test) #just transform, not fit!
final_predictions = final_model.predict(X_test_prepared)

final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
# -

final_rmse

# ## Prelaunch presentation
# Present your solution, what you learned, what worked, didn't work, assumptions made, system limitations. Easy to remember statements "median income is number one predictor of housing prices"

# ## Launch, Monitor, and Maintain Your System
# Checklist for monitoring and maintenance
# * Alerts for prod system to let us know if performance drops or deviates strangely.
# * Plug production data input into system.
# * Evaluate system performance by sampling predictions and evaluating them. Usually human analysts (Mturk, Crowdflower/Figure-eight)
# * monitor data cleanliness in production.
# * Train model on regular basis using fresh data (automate this!)
# * For online learning systems, snapshot state from time to time so you can rollback and have a historic record.

# # Exercises
# 1: Try Support Vector Machine regressor with different hyperparameters (kernel="linear", various C values) or kernel=rbf with various C and Gamma hyperparams. How does best SVR predictor perform?
# 2: Try replacing GridSearchCV with RandomizedSearchCV
# 3: Try adding a transformer in the prep pipeline to select only most important attrs
# 4: Try creating single pipeline that does full data prep plus final prediction.
# 5: Automatically explore some preparation options using GridSearchCV

# ## Datasets to Explore
# https://www.kaggle.com/datasets
handsOn/ch2/Housing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Case 2. Diabetic Retinopathy Analysis
# <NAME><br>
# 26.2.2018<br>
# Cognitive Systems for Health Technology Applications, Spring 2018<br>
# Helsinki Metropolia University of Applied Sciences

# <h2>1. Objectives</h2><br>
# The aim of this assignment is to learn to use convolutional neural networks to classify medical
# images.<br>
# For a little help in this assignment I checked what <NAME> and his team had done in Kaggle Diabetic Retinopathy Detection competition report.

# <h2> 2. Required libraries </h2>

# +
# import libraries and functions
import numpy as np
import matplotlib.pyplot as plt
import pickle
from keras import layers
from keras import models
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
import time
# -

# Numpy is used for scientific computing and creating multidimensional arrays, matplotlib for plotting figures, pickle for saving the history of the model, keras for building the convolutional neural network and time for calculating time.

# <h2> 3. Data description and preprocessing </h2>

# This dataset is a large set of high-resolution retina images taken under a variety of imaging conditions.
# A clinician has rated the presence of diabetic retinopathy in each image on a scale of 0 to 4:
#
# <ul>
# <li>0 - No DR</li>
# <li>1 - Mild</li>
# <li>2 - Moderate</li>
# <li>3 - Severe</li>
# <li>4 - Proliferative DR</li>
# </ul>
#
# The images come from different models and types of cameras, which can affect the visual appearance. Some images are shown as one would see the retina anatomically meaning macula on the left, optic nerve on the right for the right eye. Others are shown as one would see through a microscope condensing lens in example inverted, as one sees in a typical live eye exam. There are two ways to tell if an image is inverted:
#
# It is inverted if the macula, the small dark central area, is slightly higher than the midline through the optic nerve. If the macula is lower than the midline of the optic nerve, it's not inverted.
# If there is a notch on the side of the image (square, triangle, or circle) then it's not inverted. If there is no notch, it's inverted.

# +
# dataset directories and labels files
train_dir = "../dataset2/train"
validation_dir = "../dataset2/validation"
test_dir = "../dataset2/test"

# create datagenerators; only the training generator augments
train_datagen = ImageDataGenerator(rescale=1./255,
                                   fill_mode='nearest',
                                   horizontal_flip=True,
                                   zoom_range=0.2)
validation_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# training parameters
batch_size = 50
epochs = 50
steps_per_epoch = 25
validation_steps = 10
image_height = 150
image_width = 200

# generator for train dataset
print('Training dataset.')
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = 'binary')

# generator for validation dataset
print('Validation dataset.')
validation_generator = validation_datagen.flow_from_directory(
    validation_dir,
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = 'binary')

# generator for test dataset
print('Test dataset.')
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = 'binary')
# -

# Dataset is split to train, validation and test datasets. All images will be rescaled by 1./255 and resized to 150x200. The training set is augmented. Its filling mode is chosen 'nearest', which means that if the processing generates empty pixels the generator fills them from the nearest pixel that has a value. It also accepts horizontal flips, and zoom range is at maximum 20%. For preprocessing I first tried the preprocessing function designed by <NAME> but I had some issues with that so I did some research and used a different approach.

# <h2> 4. Modeling and compilation </h2>

# This model is almost exactly alike the one in Sakaris GitHub repository. I wanted to try with this one also, since I had problems with the other one. For starters, there is two Conv2D layers followed by one MaxPool2D layer. After two sets of these, there is two Conv2D layers and then two sets of two Conv2D layers with a Dropout layer for weight regularization to avoid overfitting. In the end there is a Flatten layer to flatten the input, a couple of Dense layers and another Dropout layer.

# +
# build the model: stacked Conv2D blocks with occasional MaxPool/Dropout,
# ending in a sigmoid unit for the binary (DR / no DR) decision
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation = 'relu',
                        input_shape = (image_height, image_width, 3)))
model.add(layers.Conv2D(32, (3, 3), activation = 'relu'))
model.add(layers.MaxPool2D((3, 3), strides=2))
model.add(layers.Conv2D(64, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(64, (3, 3), activation = 'relu'))
model.add(layers.MaxPool2D((3, 3), strides=2))
model.add(layers.Conv2D(96, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(96, (3, 3), activation = 'relu'))
model.add(layers.MaxPool2D((3, 3), strides=2))
model.add(layers.Conv2D(128, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(128, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(160, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(160, (3, 3), activation = 'relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Conv2D(192, (3, 3), activation = 'relu'))
model.add(layers.Conv2D(192, (3, 3), activation = 'relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()

# compile the model
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(),
              metrics=['acc'])
# -

# <h2> 5. Training and validation </h2>

# +
# train the model
t1 = time.time()
h = model.fit_generator(
    train_generator,
    steps_per_epoch = steps_per_epoch,
    verbose = 1,
    epochs = epochs,
    validation_data = validation_generator,
    validation_steps = validation_steps)
t2 = time.time()

# store the elapsed time into history
h.history.update({'time_elapsed': t2 - t1})

# save the model and history
model.save('case_2_run_3.h5')
pickle.dump(h.history, open('case_2_history_3.p', 'wb'))
# -

# FIX: the first print reported the *total* elapsed time but was labelled
# "per epoch", and the second divided by a stale hard-coded 40 although the
# run above trains for `epochs` (= 50) epochs.
print('Total training time {:.2f} hours.'.format((t2 - t1) / 3600))
print('Time per epoch {:.2f} minutes.'.format((t2 - t1) / epochs / 60))

# <h2> 6. Evaluation </h2>

# Here the model created above is tested with the testing set.

# +
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size = (image_height, image_width),
    batch_size = batch_size,
    class_mode = 'binary')

r = model.evaluate_generator(test_generator, steps = 20)

# loss and accuracy
r
# -

# <h2> 7. Results and discussion </h2>

# Training accuracy is still under 0.75 the whole time. The final testing accuracy is 0.71. In the end of training the loss function is 4.77 which is really big. This means that the inconsistency between predicted value and actual label is large. The final testing loss function is 4.70.

# +
acc = h.history['acc']
val_acc = h.history['val_acc']
loss = h.history['loss']
val_loss = h.history['val_loss']
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training accuracy')
# FIX: legend label typo 'accracy' -> 'accuracy'
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.ylim([0, 1])
plt.xlabel('Epochs')
plt.grid()
plt.legend()
plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.ylim([0, 10])
plt.xlabel('Epochs')
plt.grid()
plt.legend()
plt.show()
# -

# <h2>8. Conclusions</h2>

# I had still the same problem, validation accuracy and validation loss stay the same and the results are even worse, I can not seem to understand what is going wrong.
Case 2 version 3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/hemanthsunny/machine_learning/blob/master/Neural_network_layers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Byl0R2jCw7zH" colab_type="text" # *Reference* # https://medium.com/fintechexplained/what-are-hidden-layers-4f54f7328263 # + id="5EThcxEHw3jN" colab_type="code" colab={} # + [markdown] id="YfYSMaU1xDLk" colab_type="text" # A layer is a collection of neurons. Different types - # 1. Input layer # 2. Hidden layer 1 # 3. Hidden layer 2 # 4. . . . # 5. Output layer # # Each layer has its purpose. They simply calculate the weighted sum of inputs and weights, add the bias and execute an activation function # + id="zkRd4YlK270l" colab_type="code" colab={} # + [markdown] id="66nhlGCZ28po" colab_type="text" # **Input Layer** # - This layer must exist in every neural network. # - This layer takes inputs from external sources, performs calculations via its neurons and then output transmitted to next layers # # ![alt text](https://miro.medium.com/max/191/1*M6x8sR7hOi46gc5tzYgMIQ.png) # - Number of neurons in input layer = number of training data + 1 (this node is to capture the bias term) # + [markdown] id="wY9qXNpF6zAJ" colab_type="text" # **Output layer** # - Must be one output layer in every neural network. # - This layer takes inputs from its previous layer, performs calculations via its neurons and then output is computed. # # ![alt text](https://miro.medium.com/max/169/1*Zhg5ypanLyDaL2CuD0GBvQ.png) # # - Number of neurons in this layer = Depends on problem type (classification / regression) # - If it is regression type (regressor) then this layer has single neuron / node. 
# - If it is binary classification type (classifier) then this layer has a single neuron / node.
# - Number of neurons = (Training data samples) / (Factor * (Input neurons + Output neurons))
Neural_network_layers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="4iTl-z4Pt8pv" outputId="138a1325-6a88-4adf-c0c0-32c4f8047fc3" # # !pip install q tensorflow==2.1 # # !pip install q keras==2.3.1 # # !pip install git+https://github.com/qubvel/segmentation_models # for str decode error ... run it and restart runtime # # !pip uninstall h5py -y # !pip install h5py==2.10.0 # - # i = !ls Dataset/images/ # l = !ls Dataset/masks/ len(i), len(l) l[-5:], i[-5:] # + id="GntQ2vuNt8gL" import cv2 from glob import glob import os import numpy as np # 100 = background # 101 = sidewalk(1) # 102 = obstacle(person(11), rider, car, truck, bus, train, motorcycle, bicycle(18)) # 103 = traffic (5,6,7) # 104 = road(0) l = glob('Dataset/masks/*') m = cv2.imread(l[2970],0) m.shape np.unique(m) # + colab={"base_uri": "https://localhost:8080/"} id="JLjUhiwat8Wu" outputId="8d64a866-f250-4843-961f-1eb71f81fe43" import os len(os.listdir('Dataset/images/')), len(os.listdir('Dataset/masks/')) # + import numpy as np import cv2 import os from glob import glob from sklearn.model_selection import train_test_split import tensorflow as tf def load_dataset(dataset_path): images = sorted(glob(os.path.join(dataset_path, "images/*"))) masks = sorted(glob(os.path.join(dataset_path, "masks/*"))) train_x, test_x, train_y, test_y = train_test_split(images,masks, test_size=0.168, random_state=168, shuffle=True) return train_x, train_y, test_x, test_y train_x, train_y, val_x, val_y = load_dataset('Dataset') print(len(train_x), len(train_y), len(val_x), len(val_y)) # - train_X_y_paths = list(zip(train_x, train_y)) val_X_y_paths = list(zip(val_x, val_y)) # + colab={"base_uri": "https://localhost:8080/"} id="VXDu8XQjt8N3" outputId="ded95e1c-2225-4078-8339-5e7d62c6cba5" def shuffle(samples): index = 
np.random.permutation(len(samples)) return np.array(samples)[index] def read_img(img): X_image = cv2.imread(img)[:,:,::-1] # convert bgr to rgb X_image = cv2.resize(X_image, (512, 512)) X_image = X_image / 255.0 X_image = X_image.astype(np.float32) return X_image def read_label(label_img): Y_image = cv2.imread(label_img, 0) Y_image = cv2.resize(Y_image, (512, 512),fx = 1, fy = 1,interpolation = cv2.INTER_NEAREST) Y_image = tf.one_hot(Y_image, 5) # Y_image.set_shape([512, 1024, 3]) Y_image = np.array(Y_image) Y_image = Y_image.astype(np.float32) return Y_image def get_data_generator(samples,batch_size): while True: for offset in range(0, len(samples), batch_size): samples = shuffle(samples) batch_samples = samples[offset:offset+batch_size] X_train = [] Y_train = [] for batch_sample in batch_samples: X_image = read_img(img=batch_sample[0]) Y_image = read_label(label_img=batch_sample[1]) X_train.append(X_image) Y_train.append(Y_image) yield np.array(X_train), np.array(Y_train) BATCH_SIZE = 4 train_generator = get_data_generator(train_X_y_paths,batch_size=BATCH_SIZE) val_generator = get_data_generator(val_X_y_paths,batch_size=BATCH_SIZE) # x_train, y_train = next(train_generator) # x_val, y_val = next(val_generator) # print(x_train.shape,y_train.shape) # print(x_val.shape, y_val.shape) # + colab={"base_uri": "https://localhost:8080/"} id="tdHn7tQkt8LA" outputId="a73aa33e-307d-4d0a-cce6-ef0ab902f504" # train_X_y_paths[-5:] # np.unique(y_train[0]), y_train.shape # + colab={"base_uri": "https://localhost:8080/"} id="W8lZJDRpwZHk" outputId="03103e3e-f0f3-4006-eba0-1e64d25ffcac" # %env SM_FRAMEWORK=tf.keras # + cellView="form" id="4_E6XiKQvAhO" # -*- coding: utf-8 -*- """ Deeplabv3+ model for Keras. 
This model is based on TF repo: https://github.com/tensorflow/models/tree/master/research/deeplab On Pascal VOC, original model gets to 84.56% mIOU MobileNetv2 backbone is based on this repo: https://github.com/JonathanCMitchell/mobilenet_v2_keras # Reference - [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf) - [Xception: Deep Learning with Depthwise Separable Convolutions] (https://arxiv.org/abs/1610.02357) - [Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation](https://arxiv.org/abs/1801.04381) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.python.keras.models import Model from tensorflow.python.keras import layers from tensorflow.python.keras.layers import Input from tensorflow.python.keras.layers import Reshape from tensorflow.python.keras.layers import Activation from tensorflow.python.keras.layers import Concatenate from tensorflow.python.keras.layers import Add from tensorflow.python.keras.layers import Dropout from tensorflow.python.keras.layers import BatchNormalization from tensorflow.python.keras.layers import Conv2D from tensorflow.python.keras.layers import DepthwiseConv2D from tensorflow.python.keras.layers import ZeroPadding2D from tensorflow.python.keras.layers import GlobalAveragePooling2D from tensorflow.python.keras.utils.layer_utils import get_source_inputs from tensorflow.python.keras.utils.data_utils import get_file from tensorflow.python.keras import backend as K from tensorflow.python.keras.applications.imagenet_utils import preprocess_input WEIGHTS_PATH_X = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_xception_tf_dim_ordering_tf_kernels.h5" WEIGHTS_PATH_MOBILE = 
"https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5" WEIGHTS_PATH_X_CS = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.2/deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5" WEIGHTS_PATH_MOBILE_CS = "pretrained_weights/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5" def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3): """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN Implements right "same" padding for even kernel sizes Args: x: input tensor filters: num of filters in pointwise convolution prefix: prefix before name stride: stride at depthwise conv kernel_size: kernel size for depthwise convolution rate: atrous rate for depthwise convolution depth_activation: flag to use activation between depthwise & poinwise convs epsilon: epsilon to use in BN layer """ if stride == 1: depth_padding = 'same' else: kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg x = ZeroPadding2D((pad_beg, pad_end))(x) depth_padding = 'valid' if not depth_activation: x = Activation(tf.nn.relu)(x) x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate), padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x) x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x) if depth_activation: x = Activation(tf.nn.relu)(x) x = Conv2D(filters, (1, 1), padding='same', use_bias=False, name=prefix + '_pointwise')(x) x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x) if depth_activation: x = Activation(tf.nn.relu)(x) return x def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1): """Implements right 'same' padding for even kernel sizes Without this there is a 1 pixel drift when 
stride = 2 Args: x: input tensor filters: num of filters in pointwise convolution prefix: prefix before name stride: stride at depthwise conv kernel_size: kernel size for depthwise convolution rate: atrous rate for depthwise convolution """ if stride == 1: return Conv2D(filters, (kernel_size, kernel_size), strides=(stride, stride), padding='same', use_bias=False, dilation_rate=(rate, rate), name=prefix)(x) else: kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg x = ZeroPadding2D((pad_beg, pad_end))(x) return Conv2D(filters, (kernel_size, kernel_size), strides=(stride, stride), padding='valid', use_bias=False, dilation_rate=(rate, rate), name=prefix)(x) def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride, rate=1, depth_activation=False, return_skip=False): """ Basic building block of modified Xception network Args: inputs: input tensor depth_list: number of filters in each SepConv layer. 
len(depth_list) == 3 prefix: prefix before name skip_connection_type: one of {'conv','sum','none'} stride: stride at last depthwise conv rate: atrous rate for depthwise convolution depth_activation: flag to use activation between depthwise & pointwise convs return_skip: flag to return additional tensor after 2 SepConvs for decoder """ residual = inputs for i in range(3): residual = SepConv_BN(residual, depth_list[i], prefix + '_separable_conv{}'.format(i + 1), stride=stride if i == 2 else 1, rate=rate, depth_activation=depth_activation) if i == 1: skip = residual if skip_connection_type == 'conv': shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut', kernel_size=1, stride=stride) shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut) outputs = layers.add([residual, shortcut]) elif skip_connection_type == 'sum': outputs = layers.add([residual, inputs]) elif skip_connection_type == 'none': outputs = residual if return_skip: return outputs, skip else: return outputs def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. 
if new_v < 0.9 * v: new_v += divisor return new_v def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1): in_channels = inputs.shape[-1]#.value # inputs._keras_shape[-1] pointwise_conv_filters = int(filters * alpha) pointwise_filters = _make_divisible(pointwise_conv_filters, 8) x = inputs prefix = 'expanded_conv_{}_'.format(block_id) if block_id: # Expand x = Conv2D(expansion * in_channels, kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'expand')(x) x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'expand_BN')(x) x = Activation(tf.nn.relu6, name=prefix + 'expand_relu')(x) else: prefix = 'expanded_conv_' # Depthwise x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, use_bias=False, padding='same', dilation_rate=(rate, rate), name=prefix + 'depthwise')(x) x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'depthwise_BN')(x) x = Activation(tf.nn.relu6, name=prefix + 'depthwise_relu')(x) # Project x = Conv2D(pointwise_filters, kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'project')(x) x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'project_BN')(x) if skip_connection: return Add(name=prefix + 'add')([inputs, x]) # if in_channels == pointwise_filters and stride == 1: # return Add(name='res_connect_' + str(block_id))([inputs, x]) return x def Deeplabv3(weights='pascal_voc', input_tensor=None, input_shape=(512, 512, 3), classes=21, backbone='mobilenetv2', OS=16, alpha=1., activation=None): """ Instantiates the Deeplabv3+ architecture Optionally loads weights pre-trained on PASCAL VOC or Cityscapes. This model is available for TensorFlow only. # Arguments weights: one of 'pascal_voc' (pre-trained on pascal voc), 'cityscapes' (pre-trained on cityscape) or None (random initialization) input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. 
input_shape: shape of input image. format HxWxC PASCAL VOC model was trained on (512,512,3) images. None is allowed as shape/width classes: number of desired classes. PASCAL VOC has 21 classes, Cityscapes has 19 classes. If number of classes not aligned with the weights used, last layer is initialized randomly backbone: backbone to use. one of {'xception','mobilenetv2'} activation: optional activation to add to the top of the network. One of 'softmax', 'sigmoid' or None OS: determines input_shape/feature_extractor_output ratio. One of {8,16}. Used only for xception backbone. alpha: controls the width of the MobileNetV2 network. This is known as the width multiplier in the MobileNetV2 paper. - If `alpha` < 1.0, proportionally decreases the number of filters in each layer. - If `alpha` > 1.0, proportionally increases the number of filters in each layer. - If `alpha` = 1, default number of filters from the paper are used at each layer. Used only for mobilenetv2 backbone. Pretrained is only available for alpha=1. # Returns A Keras model instance. # Raises RuntimeError: If attempting to run this model with a backend that does not support separable convolutions. ValueError: in case of invalid argument for `weights` or `backbone` """ if not (weights in {'pascal_voc', 'cityscapes', None}): raise ValueError('The `weights` argument should be either ' '`None` (random initialization), `pascal_voc`, or `cityscapes` ' '(pre-trained on PASCAL VOC)') if not (backbone in {'xception', 'mobilenetv2'}): raise ValueError('The `backbone` argument should be either ' '`xception` or `mobilenetv2` ') if input_tensor is None: img_input = Input(shape=input_shape) else: img_input = input_tensor if backbone == 'xception': if OS == 8: entry_block3_stride = 1 middle_block_rate = 2 # ! 
Not mentioned in paper, but required exit_block_rates = (2, 4) atrous_rates = (12, 24, 36) else: entry_block3_stride = 2 middle_block_rate = 1 exit_block_rates = (1, 2) atrous_rates = (6, 12, 18) x = Conv2D(32, (3, 3), strides=(2, 2), name='entry_flow_conv1_1', use_bias=False, padding='same')(img_input) x = BatchNormalization(name='entry_flow_conv1_1_BN')(x) x = Activation(tf.nn.relu)(x) x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1) x = BatchNormalization(name='entry_flow_conv1_2_BN')(x) x = Activation(tf.nn.relu)(x) x = _xception_block(x, [128, 128, 128], 'entry_flow_block1', skip_connection_type='conv', stride=2, depth_activation=False) x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2', skip_connection_type='conv', stride=2, depth_activation=False, return_skip=True) x = _xception_block(x, [728, 728, 728], 'entry_flow_block3', skip_connection_type='conv', stride=entry_block3_stride, depth_activation=False) for i in range(16): x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1), skip_connection_type='sum', stride=1, rate=middle_block_rate, depth_activation=False) x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1', skip_connection_type='conv', stride=1, rate=exit_block_rates[0], depth_activation=False) x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2', skip_connection_type='none', stride=1, rate=exit_block_rates[1], depth_activation=True) else: OS = 8 first_block_filters = _make_divisible(32 * alpha, 8) x = Conv2D(first_block_filters, kernel_size=3, strides=(2, 2), padding='same', use_bias=False, name='Conv' if input_shape[2] == 3 else 'Conv_')(img_input) x = BatchNormalization( epsilon=1e-3, momentum=0.999, name='Conv_BN')(x) x = Activation(tf.nn.relu6, name='Conv_Relu6')(x) x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0, skip_connection=False) x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2, expansion=6, block_id=1, 
skip_connection=False) x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2, skip_connection=True) x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3, skip_connection=False) x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4, skip_connection=True) x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5, skip_connection=True) # stride in block 6 changed from 2 -> 1, so we need to use rate = 2 x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, # 1! expansion=6, block_id=6, skip_connection=False) x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2, expansion=6, block_id=7, skip_connection=True) x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2, expansion=6, block_id=8, skip_connection=True) x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2, expansion=6, block_id=9, skip_connection=True) x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2, expansion=6, block_id=10, skip_connection=False) x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2, expansion=6, block_id=11, skip_connection=True) x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2, expansion=6, block_id=12, skip_connection=True) x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2, # 1! 
expansion=6, block_id=13, skip_connection=False) x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4, expansion=6, block_id=14, skip_connection=True) x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4, expansion=6, block_id=15, skip_connection=True) x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4, expansion=6, block_id=16, skip_connection=False) # end of feature extractor # branching for Atrous Spatial Pyramid Pooling # Image Feature branch shape_before = tf.shape(x) b4 = GlobalAveragePooling2D()(x) b4_shape = tf.keras.backend.int_shape(b4) # from (b_size, channels)->(b_size, 1, 1, channels) b4 = Reshape((1, 1, b4_shape[1]))(b4) b4 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='image_pooling')(b4) b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4) b4 = Activation(tf.nn.relu)(b4) # upsample. have to use compat because of the option align_corners size_before = tf.keras.backend.int_shape(x) b4 = tf.keras.layers.experimental.preprocessing.Resizing( *size_before[1:3], interpolation="bilinear" )(b4) # simple 1x1 b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x) b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0) b0 = Activation(tf.nn.relu, name='aspp0_activation')(b0) # there are only 2 branches in mobilenetV2. 
not sure why if backbone == 'xception': # rate = 6 (12) b1 = SepConv_BN(x, 256, 'aspp1', rate=atrous_rates[0], depth_activation=True, epsilon=1e-5) # rate = 12 (24) b2 = SepConv_BN(x, 256, 'aspp2', rate=atrous_rates[1], depth_activation=True, epsilon=1e-5) # rate = 18 (36) b3 = SepConv_BN(x, 256, 'aspp3', rate=atrous_rates[2], depth_activation=True, epsilon=1e-5) # concatenate ASPP branches & project x = Concatenate()([b4, b0, b1, b2, b3]) else: x = Concatenate()([b4, b0]) x = Conv2D(256, (1, 1), padding='same', use_bias=False, name='concat_projection')(x) x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x) x = Activation(tf.nn.relu)(x) x = Dropout(0.1)(x) # DeepLab v.3+ decoder if backbone == 'xception': # Feature projection # x4 (x2) block skip_size = tf.keras.backend.int_shape(skip1) x = tf.keras.layers.experimental.preprocessing.Resizing( *skip_size[1:3], interpolation="bilinear" )(x) dec_skip1 = Conv2D(48, (1, 1), padding='same', use_bias=False, name='feature_projection0')(skip1) dec_skip1 = BatchNormalization( name='feature_projection0_BN', epsilon=1e-5)(dec_skip1) dec_skip1 = Activation(tf.nn.relu)(dec_skip1) x = Concatenate()([x, dec_skip1]) x = SepConv_BN(x, 256, 'decoder_conv0', depth_activation=True, epsilon=1e-5) x = SepConv_BN(x, 256, 'decoder_conv1', depth_activation=True, epsilon=1e-5) # you can use it with arbitary number of classes if (weights == 'pascal_voc' and classes == 21) or (weights == 'cityscapes' and classes == 19): last_layer_name = 'logits_semantic' else: last_layer_name = 'custom_logits_semantic' x = Conv2D(classes, (1, 1), padding='same', name=last_layer_name)(x) size_before3 = tf.keras.backend.int_shape(img_input) x = tf.keras.layers.experimental.preprocessing.Resizing( *size_before3[1:3], interpolation="bilinear" )(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. 
if input_tensor is not None: inputs = get_source_inputs(input_tensor) else: inputs = img_input if activation in {'softmax', 'sigmoid'}: x = tf.keras.layers.Activation(activation)(x) model = Model(inputs, x, name='deeplabv3plus') # load weights if weights == 'pascal_voc': if backbone == 'xception': weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH_X, cache_subdir='models') else: weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH_MOBILE, cache_subdir='models') model.load_weights(weights_path, by_name=True) elif weights == 'cityscapes': if backbone == 'xception': # weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5', # WEIGHTS_PATH_X_CS, # cache_subdir='models') weights_path = "pretrained_weights/deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5" else: # weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5', # WEIGHTS_PATH_MOBILE_CS, # cache_subdir='models') weights_path = "pretrained_weights/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5" model.load_weights(weights_path, by_name=True) return model def preprocess_input(x): """Preprocesses a numpy array encoding a batch of images. # Arguments x: a 4D numpy array consists of RGB values within [0, 255]. # Returns Input array scaled to [-1.,1.] 
""" return preprocess_input(x, mode='tf') # - # !pwd model = Deeplabv3(weights='cityscapes', input_shape=(512,512,3), classes=5, backbone= 'mobilenetv2', #'xception' OS=16, alpha=1, activation='softmax') # model.load_weights('pretrained_weights/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5') # + colab={"base_uri": "https://localhost:8080/"} id="xmKNkiHYwKkU" outputId="8a4da256-3ab5-4a52-fd69-ece9c828bbc9" from segmentation_models.losses import cce_jaccard_loss, dice_loss, JaccardLoss from segmentation_models.metrics import iou_score, f1_score, precision, recall ls = dice_loss + cce_jaccard_loss metrics = [precision, recall, f1_score, iou_score] from tensorflow.keras.models import load_model model = load_model('mobilenetV2_bs4/ckpt_path/84.h5', custom_objects={'dice_loss_plus_categorical_crossentropy_plus_jaccard_loss':ls, 'precision':precision, 'recall':recall, 'f1-score':f1_score, 'iou_score':iou_score}) # + import os, time, keras # %env SM_FRAMEWORK=tf.keras import numpy as np import tensorflow as tf from segmentation_models.losses import bce_jaccard_loss, dice_loss, JaccardLoss from segmentation_models.metrics import iou_score, f1_score, precision, recall from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping """ Hyperparamaters """ batch_size = 4 epochs = 1000 base_dir = 'mobilenetV2_bs4' if not os.path.exists(base_dir): os.mkdir(base_dir) os.mkdir(f"{base_dir}/ckpt_path") csv_path = f"{base_dir}/history.csv" """ callbacks """ root_logdir = os.path.join(os.curdir, f"{base_dir}/logs","fit","") def get_run_logdir(): run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S") return os.path.join(root_logdir, run_id) run_logdir = get_run_logdir() tensorboard_cb = keras.callbacks.TensorBoard(run_logdir) checkpoint_filepath = f'{base_dir}/'+'ckpt_path/{epoch}.h5' model_checkpoint_callback = ModelCheckpoint( filepath=checkpoint_filepath, save_weights_only=False, # monitor='val_iou_score', # mode='max', verbose = 1, 
period = 25,  # write a full checkpoint every 25 epochs
    save_best_only=False
)

callbacks = [
    model_checkpoint_callback,
    # ReduceLROnPlateau(monitor="val_loss", patience=5, factor=0.1, verbose=1),
    CSVLogger(csv_path),
    # EarlyStopping(monitor="val_loss", patience=10),
    tensorboard_cb
]

""" steps per epochs """
# Batches per epoch, rounded up so the final partial batch is included.
train_steps = len(train_X_y_paths)//batch_size
if len(train_X_y_paths) % batch_size != 0:
    train_steps += 1
test_steps = len(val_X_y_paths)//batch_size
if len(val_X_y_paths) % batch_size != 0:
    test_steps += 1
print("train_steps", train_steps, "test_steps",test_steps)

""" Model training """
# for layer in model.layers:
#     if layer.name == "global_average_pooling2d":
#         break
#     else:
#         layer.trainable = False
# for layer in model.layers:
#     print(layer.name,layer.trainable)

# Compile with the combined dice+cce+jaccard loss and segmentation metrics
# defined in the previous cell.
model.compile(
    loss=ls,
    optimizer= "adam",
    metrics=metrics
)
# model.summary()

# + cellView="code" colab={"base_uri": "https://localhost:8080/"} id="3JwsSXcIt8Hn" outputId="07d990ff-4f18-40a8-a863-24a4721bb4ea"
# Resume training from epoch 84 — matches the 84.h5 checkpoint loaded earlier.
history = model.fit(
    train_generator,
    validation_data=val_generator,
    epochs=1000,
    initial_epoch = 84,
    steps_per_epoch=train_steps,
    validation_steps=test_steps,
    callbacks=callbacks
)

# + id="dpIcWrR70uRO"
# model.save('/content/drive/MyDrive/mobilenetv2.h5')

# +
# from segmentation_models.losses import cce_jaccard_loss, dice_loss, JaccardLoss
# from segmentation_models.metrics import iou_score, f1_score, precision, recall
# ls = dice_loss + cce_jaccard_loss
# metrics = [precision, recall, f1_score, iou_score]
# from tensorflow.keras.models import load_model
# model = load_model('mobilenetv2_bs4/ckpt_path/Deeplabv3p_mobilenetv2_200.h5',
#                    custom_objects={'dice_loss_plus_categorical_crossentropy_plus_jaccard_loss':ls,
#                                    'precision':precision, 'recall':recall, 'f1-score':f1_score, 'iou_score':iou_score})

# + id="6Kj98mf1y7bt"
# # !tensorboard --logdir logs --host 0.0.0.0
Scripts/TrainingScripts/CS_Dv3p_mobilenetV2_alpha1.0_bs4_training_customDataGenerator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os
import subprocess
from glob import glob

import earthpy.plot as ep
import gdal
import numpy as np
import matplotlib.pyplot as plt

# Folder holding the .kea rasters to convert (Windows-style path separators
# kept from the original script).
data = os.getcwd() + "\\rsgislib_clusters"
scenes = glob(data+"\\*.kea")
scenes

# +
# Convert each .kea scene to GeoTIFF with gdal_translate, skipping scenes that
# already have a converted .tif in the data folder.
for scene in scenes:
    name = scene.split('\\')[-1]
    print(name)
    dst = data + "\\" + name.split('.')[0] + ".tif"
    if not os.path.exists(dst):
        # BUG FIX / robustness: the original used
        #   os.system("gdal_translate " + file + " " + dst)
        # which breaks on paths containing spaces and is shell-injection prone.
        # subprocess.run with an argument list invokes gdal_translate without a
        # shell, so paths are passed through verbatim.
        subprocess.run(["gdal_translate", scene, dst], check=False)
    if os.path.exists(dst):
        print("Converted " + scene.split('\\')[-1].split('.')[0] + "...")
    else:
        print("Failed Converting " + scene.split('\\')[-1].split('.')[0] + "...")
# -
APIs/SentinelSat/kea-to-jp2-converrsion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Diagnostics for multi-plane with the pupil function PSF model # ### Configure environment # + import os os.chdir("/home/hbabcock/Data/storm_analysis/sa_diagnostics/mp_pfn") print(os.getcwd()) import numpy numpy.random.seed(1) # - import storm_analysis.diagnostics.multiplane.settings as settings import storm_analysis.diagnostics.multiplane.configure as configure import storm_analysis.diagnostics.multiplane.make_data as makeData import storm_analysis.diagnostics.multiplane.analyze_data as analyzeData import storm_analysis.diagnostics.multiplane.collate as collate # + settings.photons = [[10, 500], [10, 1000]] print(settings.photons) settings.iterations = 20 settings.n_frames = 10 settings.peak_locations = None settings.psf_size = 30 # - # ### Configure configure.configure("pupilfn", False) # ### Make Data makeData.makeData() # ### Analyze data # %time analyzeData.analyzeData() # ### Collate data collate.collate() # ### Reference results # + active="" # 2018-10-28 # commit a3eb9fb0815f71eb089fdc0e08e0f5dcaaeb7ffd # # Added 1260 # Added 1260 # Processing test_01 # Using max_distance 200.0 nm for error calcuations. # Processing test_02 # Using max_distance 200.0 nm for error calcuations. # # Analysis Summary: # Processed 2510 localizations in 18.30 seconds, 137.17/sec # Recall 0.58611 # Noise 0.41389 # XYZ Error Standard Deviation (nm): # test_01 31.64 31.48 60.77 # test_02 16.96 16.81 36.54 # # XYZ RMSE Accuracy (nm): # test_01 31.64 31.48 60.83 # test_02 16.97 16.86 36.55
storm_analysis/diagnostics/jpy_notebooks/mp_pfn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:py36]
#     language: python
#     name: conda-env-py36-py
# ---

# # 10.7 Text Sentiment Classification: Using Recurrent Neural Networks

# +
import collections
import os
import random
import tarfile
import torch
from torch import nn
import torchtext.vocab as Vocab
import torch.utils.data as Data

import sys
sys.path.append("..")
import d2lzh_pytorch as d2l

os.environ["CUDA_VISIBLE_DEVICES"] = "7"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

DATA_ROOT = "/S1/CSCL/tangss/Datasets"

print(torch.__version__, device)
# -

# ## 10.7.1 Sentiment Classification Data
# ### 10.7.1.1 Reading the data

# Extract the IMDB archive on first use only.
fname = os.path.join(DATA_ROOT, "aclImdb_v1.tar.gz")
if not os.path.exists(os.path.join(DATA_ROOT, "aclImdb")):
    print("从压缩包解压...")
    with tarfile.open(fname, 'r') as f:
        f.extractall(DATA_ROOT)

# +
from tqdm import tqdm

# This function is saved in the d2lzh_pytorch package for later use.
def read_imdb(folder='train', data_root="/S1/CSCL/tangss/Datasets/aclImdb"):
    """Read IMDB reviews from disk.

    Returns a shuffled list of [review_text, label] pairs (pos -> 1, neg -> 0).
    """
    data = []
    for label in ['pos', 'neg']:
        folder_name = os.path.join(data_root, folder, label)
        for file in tqdm(os.listdir(folder_name)):
            with open(os.path.join(folder_name, file), 'rb') as f:
                review = f.read().decode('utf-8').replace('\n', '').lower()
                data.append([review, 1 if label == 'pos' else 0])
    random.shuffle(data)
    return data

train_data, test_data = read_imdb('train'), read_imdb('test')
# -

# ### 10.7.1.2 Preprocessing the data

# This function is saved in the d2lzh_pytorch package for later use.
def get_tokenized_imdb(data):
    """Whitespace-tokenize. data: list of [string, label]."""
    def tokenizer(text):
        return [tok.lower() for tok in text.split(' ')]
    return [tokenizer(review) for review, _ in data]

# +
# This function is saved in the d2lzh_pytorch package for later use.
def get_vocab_imdb(data):
    """Build a vocabulary, keeping words that occur at least 5 times."""
    tokenized_data = get_tokenized_imdb(data)
    counter = collections.Counter([tk for st in tokenized_data for tk in st])
    return Vocab.Vocab(counter, min_freq=5)

vocab = get_vocab_imdb(train_data)
'# words in vocab:', len(vocab)
# -

# This function is saved in the d2lzh_pytorch package for later use.
def preprocess_imdb(data, vocab):
    """Map each review to a fixed-length (500) index sequence by truncating or zero-padding."""
    max_l = 500  # truncate or pad every review to length 500

    def pad(x):
        return x[:max_l] if len(x) > max_l else x + [0] * (max_l - len(x))

    tokenized_data = get_tokenized_imdb(data)
    features = torch.tensor([pad([vocab.stoi[word] for word in words]) for words in tokenized_data])
    labels = torch.tensor([score for _, score in data])
    return features, labels

# ### 10.7.1.3 Creating data iterators

batch_size = 64
train_set = Data.TensorDataset(*preprocess_imdb(train_data, vocab))
test_set = Data.TensorDataset(*preprocess_imdb(test_data, vocab))
train_iter = Data.DataLoader(train_set, batch_size, shuffle=True)
test_iter = Data.DataLoader(test_set, batch_size)

for X, y in train_iter:
    print('X', X.shape, 'y', y.shape)
    break
'#batches:', len(train_iter)

# ## 10.7.2 A Recurrent-Neural-Network Model

class BiRNN(nn.Module):
    def __init__(self, vocab, embed_size, num_hiddens, num_layers):
        super(BiRNN, self).__init__()
        self.embedding = nn.Embedding(len(vocab), embed_size)
        # bidirectional=True makes this a bidirectional LSTM.
        self.encoder = nn.LSTM(input_size=embed_size,
                               hidden_size=num_hiddens,
                               num_layers=num_layers,
                               bidirectional=True)
        # The initial and final time-step hidden states are concatenated as the
        # classifier input: 2 directions x 2 time steps = 4 * num_hiddens.
        self.decoder = nn.Linear(4*num_hiddens, 2)

    def forward(self, inputs):
        # inputs is (batch, seq_len); the LSTM wants seq_len first, so transpose
        # before embedding. embeddings: (seq_len, batch, embed_size).
        embeddings = self.embedding(inputs.permute(1, 0))
        # Only embeddings are passed in, so only the last layer's hidden states at
        # every time step come back. outputs: (seq_len, batch, 2 * num_hiddens).
        outputs, _ = self.encoder(embeddings)  # output, (h, c)
        # Concatenate the first and last time-step states: (batch, 4 * num_hiddens).
        encoding = torch.cat((outputs[0], outputs[-1]), -1)
        outs = self.decoder(encoding)
        return outs

embed_size, num_hiddens, num_layers = 100, 100, 2
net = BiRNN(vocab, embed_size, num_hiddens, num_layers)

# ### 10.7.2.1 Loading pre-trained word vectors

glove_vocab = Vocab.GloVe(name='6B', dim=100, cache=os.path.join(DATA_ROOT, "glove"))

# +
def load_pretrained_embedding(words, pretrained_vocab):
    """Extract the vectors for `words` from a pre-trained vocab (zeros for OOV words)."""
    embed = torch.zeros(len(words), pretrained_vocab.vectors[0].shape[0])  # initialized to 0
    oov_count = 0  # out of vocabulary
    for i, word in enumerate(words):
        try:
            idx = pretrained_vocab.stoi[word]
            embed[i, :] = pretrained_vocab.vectors[idx]
        except KeyError:
            oov_count += 1  # BUG FIX: was `+= 0`, so OOV words were never counted
    if oov_count > 0:
        print("There are %d oov words." % oov_count)  # BUG FIX: format argument was missing
    return embed

net.embedding.weight.data.copy_(load_pretrained_embedding(vocab.itos, glove_vocab))
net.embedding.weight.requires_grad = False  # embeddings are pre-trained; do not update them
# -

# ### 10.7.2.2 Training and evaluating the model

lr, num_epochs = 0.01, 5
# Only optimize parameters that require gradients (the frozen embedding is skipped).
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)
loss = nn.CrossEntropyLoss()
d2l.train(train_iter, test_iter, net, loss, optimizer, device, num_epochs)

# This function is saved in the d2lzh package for later use.
def predict_sentiment(net, vocab, sentence):
    """sentence: a list of words; returns 'positive' or 'negative'."""
    device = list(net.parameters())[0].device
    sentence = torch.tensor([vocab.stoi[word] for word in sentence], device=device)
    label = torch.argmax(net(sentence.view((1, -1))), dim=1)
    return 'positive' if label.item() == 1 else 'negative'

predict_sentiment(net, vocab, ['this', 'movie', 'is', 'so', 'great'])

predict_sentiment(net, vocab, ['this', 'movie', 'is', 'so', 'bad'])
code/chapter10_natural-language-processing/10.7_sentiment-analysis-rnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ######################################################################################################################## # Filename: FF_Models.ipynb # # Purpose: Multi-label Text-categorization via feed forward neural networks # Author(s): Bobby (Robert) Lumpkin # # Library Dependencies: numpy, pandas, scikit-learn, skmultilearn, joblib, os, sys, threshold_learning ######################################################################################################################## # - # # Multilabel Text Classification with Feed Forward Networks import numpy as np import pandas as pd import os import json import ast import random import tensorflow as tf import tensorflow_addons as tfa from bpmll import bp_mll_loss import sklearn_json as skljson from sklearn.model_selection import train_test_split from sklearn import metrics import matplotlib.pyplot as plt import sys os.chdir('C:\\Users\\rober\\OneDrive\\Documents\\Multilabel-Text-Classification\\Deep Learning Models\\FF Models') ## Set working directory ## to be 'ANN Results' sys.path.append('../../ThresholdFunctionLearning') ## Append path to the ThresholdFunctionLearning directory to the interpreters ## search path from threshold_learning import predict_test_labels_binary ## Import the 'predict_test_labels_binary()' function from the from threshold_learning import predict_labels_binary ## threshold_learning library # + ## Load the seperabe PCs training and test data npzfile = np.load("../../Data/tfidf_PC_separable.npz") X_sepPCs_train = npzfile['X_sepPCs_train'] X_sepPCs_test = npzfile['X_sepPCs_test'] Y_train = npzfile['Y_train'].astype('float64') Y_test = npzfile['Y_test'].astype('float64') ## Load the autoencoder train/test features npzfile = np.load('../../Data/tfidf_encoded_data.npz') 
encoded_train = npzfile['encoded_train']
encoded_test = npzfile['encoded_test']

## Load the tfidf training and test data
npzfile = np.load("../../Data/tfidf_trainTest_data.npz", allow_pickle = True)
X_tfidfTrain = npzfile['X_tfidfTrain']
X_tfidfTest = npzfile['X_tfidfTest']
# -

# # Cross Entropy Models -- Traditional ("Naive") Approach

# ## Training on Full (Sparse) TFIDF Vectors

# +
## Start by defining and compiling a cross-entropy loss network to train on the full tfidf data (bpmll used later)
tf.random.set_seed(123)  # reseed so model weight init is reproducible
num_labels = Y_train.shape[1]
model_ce_FF_tfidf = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation = 'relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(num_labels, activation = 'sigmoid')  # one independent sigmoid per label
])
optim_func = tf.keras.optimizers.Adam(lr=0.01)
metric = tfa.metrics.HammingLoss(mode = 'multilabel', threshold = 0.5)
model_ce_FF_tfidf.compile(optimizer = optim_func,
                          loss = 'binary_crossentropy',
                          metrics = metric
)
# -

tf.random.set_seed(123)
history_ce_FF_tfidf_lr001 = model_ce_FF_tfidf.fit(X_tfidfTrain, Y_train, epochs = 30,
                                                  validation_data = (X_tfidfTest, Y_test),
                                                  verbose = 0)

## (CAUTION: DO NOT OVERWRITE EXISTING FILES) -- Convert training history to dataframe and write to a .json file
history_ce_FF_tfidf_lr001_df = pd.DataFrame(history_ce_FF_tfidf_lr001.history)
#with open("Training Histories/history_ce_FF_tfidf_lr001.json", "w") as outfile:
#    history_ce_FF_tfidf_lr001_df.to_json(outfile)

# +
## Visualize the validation hamming and cross-entropy loss histories
fig = plt.figure(figsize=(14, 4))
ax1 = fig.add_subplot(121)
ax1.plot(history_ce_FF_tfidf_lr001_df['val_hamming_loss'], label = 'Validation HL')
ax1.plot(history_ce_FF_tfidf_lr001_df['hamming_loss'], label = 'Training HL')
ax1.set_xlabel('Epoch', fontsize = 12)
ax1.set_ylabel('Hamming Loss', fontsize = 12)
ax1.legend()
ax1.set_title('Hamming Loss History', fontsize = 15)

ax2 = fig.add_subplot(122)
ax2.plot(history_ce_FF_tfidf_lr001_df['val_loss'], label = 'Validation CE')
# Finish the CE/TFIDF loss-history figure started in the previous cell.
ax2.plot(history_ce_FF_tfidf_lr001_df['loss'], label = 'Training CE')
ax2.set_xlabel('Epoch', fontsize = 12)
ax2.set_ylabel('Cross-Entropy Loss', fontsize = 12)
ax2.legend()
ax2.set_title('Cross-Entropy Loss History', fontsize = 15)

#plt.savefig('Reduced_Dataset_Learning_Rate_01.png')
# -

# ## Training on Dimension-Reduced Separable PC Vectors

# +
## Define and compile a cross-entropy loss network to train on the separable PCs (bpmll used later)
tf.random.set_seed(123)
num_labels = Y_train.shape[1]
model_ce_FF_sepPCs = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation = 'relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(num_labels, activation = 'sigmoid')
])
optim_func = tf.keras.optimizers.Adam(lr=0.01)
metric = tfa.metrics.HammingLoss(mode = 'multilabel', threshold = 0.5)
model_ce_FF_sepPCs.compile(optimizer = optim_func,
                           loss = 'binary_crossentropy',
                           metrics = metric
)
# -

tf.random.set_seed(123)
history_ce_FF_sepPCs_lr001 = model_ce_FF_sepPCs.fit(X_sepPCs_train, Y_train, epochs = 30,
                                                    validation_data = (X_sepPCs_test, Y_test),
                                                    verbose = 0)

## (CAUTION: DO NOT OVERWRITE EXISTING FILES) -- Convert training history to dataframe and write to a .json file
history_ce_FF_sepPCs_lr001_df = pd.DataFrame(history_ce_FF_sepPCs_lr001.history)
#with open("Training Histories/history_ce_FF_sepPCs_lr001.json", "w") as outfile:
#    history_ce_FF_sepPCs_lr001_df.to_json(outfile)

# +
## Visualize the validation hamming and cross-entropy loss histories
fig = plt.figure(figsize=(14, 4))
ax1 = fig.add_subplot(121)
ax1.plot(history_ce_FF_sepPCs_lr001_df['val_hamming_loss'], label = 'Validation HL')
ax1.plot(history_ce_FF_sepPCs_lr001_df['hamming_loss'], label = 'Training HL')
ax1.set_xlabel('Epoch', fontsize = 12)
ax1.set_ylabel('Hamming Loss', fontsize = 12)
ax1.legend()
ax1.set_title('Hamming Loss History', fontsize = 15)

ax2 = fig.add_subplot(122)
ax2.plot(history_ce_FF_sepPCs_lr001_df['val_loss'], label = 'Validation CE')
# Finish the CE/separable-PCs loss-history figure started in the previous cell.
ax2.plot(history_ce_FF_sepPCs_lr001_df['loss'], label = 'Training CE')
ax2.set_xlabel('Epoch', fontsize = 12)
ax2.set_ylabel('Cross-Entropy Loss', fontsize = 12)
ax2.legend()
ax2.set_title('Cross-Entropy Loss History', fontsize = 15)

#plt.savefig('Reduced_Dataset_Learning_Rate_01.png')
# -

# ## Training on Dimension-Reduced Autoencoder Feature Vectors

# +
## Define and compile a cross-entropy loss network to train on the autoencoder features (bpmll used later)
tf.random.set_seed(123)
num_labels = Y_train.shape[1]
model_ce_FF_encoded = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation = 'relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(num_labels, activation = 'sigmoid')
])
optim_func = tf.keras.optimizers.Adam(lr=0.01)
metric = tfa.metrics.HammingLoss(mode = 'multilabel', threshold = 0.5)
model_ce_FF_encoded.compile(optimizer = optim_func,
                            loss = 'binary_crossentropy',
                            metrics = metric
)
# -

tf.random.set_seed(123)
history_ce_FF_encoded_lr001 = model_ce_FF_encoded.fit(encoded_train, Y_train, epochs = 30,
                                                      validation_data = (encoded_test, Y_test),
                                                      verbose = 0)

## (CAUTION: DO NOT OVERWRITE EXISTING FILES) -- Convert training history to dataframe and write to a .json file
history_ce_FF_encoded_lr001_df = pd.DataFrame(history_ce_FF_encoded_lr001.history)
#with open("Training Histories/history_ce_FF_encoded_lr001.json", "w") as outfile:
#    history_ce_FF_encoded_lr001_df.to_json(outfile)

# +
## Visualize the validation hamming and cross-entropy loss histories
fig = plt.figure(figsize=(14, 4))
ax1 = fig.add_subplot(121)
ax1.plot(history_ce_FF_encoded_lr001_df['val_hamming_loss'], label = 'Validation HL')
ax1.plot(history_ce_FF_encoded_lr001_df['hamming_loss'], label = 'Training HL')
ax1.set_xlabel('Epoch', fontsize = 12)
ax1.set_ylabel('Hamming Loss', fontsize = 12)
ax1.legend()
ax1.set_title('Hamming Loss History', fontsize = 15)

ax2 = fig.add_subplot(122)
ax2.plot(history_ce_FF_encoded_lr001_df['val_loss'], label = 'Validation CE')
# Finish the CE/autoencoder loss-history figure started in the previous cell.
ax2.plot(history_ce_FF_encoded_lr001_df['loss'], label = 'Training CE')
ax2.set_xlabel('Epoch', fontsize = 12)
ax2.set_ylabel('Cross-Entropy Loss', fontsize = 12)
ax2.legend()
ax2.set_title('Cross-Entropy Loss History', fontsize = 15)

#plt.savefig('Reduced_Dataset_Learning_Rate_01.png')
# -

# # BPMLL Models -- "Novel" Approach

# ## Training on Full (Sparse) TFIDF Vectors

# +
## Start by defining and compiling a bpmll loss network to train on the full tfidf data
tf.random.set_seed(123)
num_labels = Y_train.shape[1]
model_bpmll_FF_tfidf = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation = 'relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(num_labels, activation = 'sigmoid')
])
optim_func = tf.keras.optimizers.Adam(lr=0.01)
metric = tfa.metrics.HammingLoss(mode = 'multilabel', threshold = 0.5)
model_bpmll_FF_tfidf.compile(optimizer = optim_func,
                             loss = bp_mll_loss,  # pairwise ranking loss for multilabel learning
                             metrics = metric
)
# -

tf.random.set_seed(123)
history_bpmll_FF_tfidf_lr001 = model_bpmll_FF_tfidf.fit(X_tfidfTrain, Y_train, epochs = 30,
                                                        validation_data = (X_tfidfTest, Y_test),
                                                        verbose = 0)

## (CAUTION: DO NOT OVERWRITE EXISTING FILES) -- Convert training history to dataframe and write to a .json file
history_bpmll_FF_tfidf_lr001_df = pd.DataFrame(history_bpmll_FF_tfidf_lr001.history)
#with open("Training Histories/history_bpmll_FF_tfidf_lr001.json", "w") as outfile:
#    history_bpmll_FF_tfidf_lr001_df.to_json(outfile)

# +
## Visualize the validation hamming and cross-entropy loss histories
fig = plt.figure(figsize=(14, 4))
ax1 = fig.add_subplot(121)
ax1.plot(history_bpmll_FF_tfidf_lr001_df['val_hamming_loss'], label = 'Validation HL')
ax1.plot(history_bpmll_FF_tfidf_lr001_df['hamming_loss'], label = 'Training HL')
ax1.set_xlabel('Epoch', fontsize = 12)
ax1.set_ylabel('Hamming Loss', fontsize = 12)
ax1.legend()
ax1.set_title('Hamming Loss History', fontsize = 15)

ax2 = fig.add_subplot(122)
# NOTE(review): 'val_loss' for this model is the BPMLL loss, yet the legend says
# 'Validation CE' — consider relabelling to 'Validation BPMLL'.
ax2.plot(history_bpmll_FF_tfidf_lr001_df['val_loss'], label = 'Validation CE')
# Finish the BPMLL/TFIDF loss-history figure started in the previous cell.
ax2.plot(history_bpmll_FF_tfidf_lr001_df['loss'], label = 'Training CE')
ax2.set_xlabel('Epoch', fontsize = 12)
ax2.set_ylabel('BPMLL Loss', fontsize = 12)
ax2.legend()
ax2.set_title('BPMLL Loss History', fontsize = 15)

#plt.savefig('Reduced_Dataset_Learning_Rate_01.png')
# -

# ## Training on Dimension-Reduced Separable PC Vectors

# +
## Define and compile a bpmll loss network to train on the separable PCs
tf.random.set_seed(123)
num_labels = Y_train.shape[1]
model_bpmll_FF_sepPCs = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation = 'relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(num_labels, activation = 'sigmoid')
])
optim_func = tf.keras.optimizers.Adam(lr=0.01)
metric = tfa.metrics.HammingLoss(mode = 'multilabel', threshold = 0.5)
# BUG FIX: this BPMLL model was compiled with loss = 'binary_crossentropy',
# unlike its siblings model_bpmll_FF_tfidf / model_bpmll_FF_encoded.  Use
# bp_mll_loss so the model matches its name and the BPMLL-vs-CE comparison
# in the Results section is meaningful.
model_bpmll_FF_sepPCs.compile(optimizer = optim_func,
                              loss = bp_mll_loss,
                              metrics = metric
)
# -

tf.random.set_seed(123)
history_bpmll_FF_sepPCs_lr001 = model_bpmll_FF_sepPCs.fit(X_sepPCs_train, Y_train, epochs = 30,
                                                          validation_data = (X_sepPCs_test, Y_test),
                                                          verbose = 0)

## (CAUTION: DO NOT OVERWRITE EXISTING FILES) -- Convert training history to dataframe and write to a .json file
history_bpmll_FF_sepPCs_lr001_df = pd.DataFrame(history_bpmll_FF_sepPCs_lr001.history)
#with open("Training Histories/history_bpmll_FF_sepPCs_lr001.json", "w") as outfile:
#    history_bpmll_FF_sepPCs_lr001_df.to_json(outfile)

# +
## Visualize the validation hamming and cross-entropy loss histories
fig = plt.figure(figsize=(14, 4))
ax1 = fig.add_subplot(121)
ax1.plot(history_bpmll_FF_sepPCs_lr001_df['val_hamming_loss'], label = 'Validation HL')
ax1.plot(history_bpmll_FF_sepPCs_lr001_df['hamming_loss'], label = 'Training HL')
ax1.set_xlabel('Epoch', fontsize = 12)
ax1.set_ylabel('Hamming Loss', fontsize = 12)
ax1.legend()
ax1.set_title('Hamming Loss History', fontsize = 15)

ax2 = fig.add_subplot(122)
ax2.plot(history_bpmll_FF_sepPCs_lr001_df['val_loss'], label = 'Validation CE')
# Finish the BPMLL/separable-PCs loss-history figure started in the previous cell.
ax2.plot(history_bpmll_FF_sepPCs_lr001_df['loss'], label = 'Training CE')
ax2.set_xlabel('Epoch', fontsize = 12)
ax2.set_ylabel('BPMLL Loss', fontsize = 12)
ax2.legend()
ax2.set_title('BPMLL Loss History', fontsize = 15)

#plt.savefig('Reduced_Dataset_Learning_Rate_01.png')
# -

# ## Training on Dimension-Reduced Autoencoder Feature Vectors

# +
## Define and compile a bpmll loss network to train on the autoencoder features
tf.random.set_seed(123)
num_labels = Y_train.shape[1]
model_bpmll_FF_encoded = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation = 'relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(num_labels, activation = 'sigmoid')
])
optim_func = tf.keras.optimizers.Adam(lr=0.01)
metric = tfa.metrics.HammingLoss(mode = 'multilabel', threshold = 0.5)
model_bpmll_FF_encoded.compile(optimizer = optim_func,
                               loss = bp_mll_loss,
                               metrics = metric
)
# -

tf.random.set_seed(123)
history_bpmll_FF_encoded_lr001 = model_bpmll_FF_encoded.fit(encoded_train, Y_train, epochs = 30,
                                                            validation_data = (encoded_test, Y_test),
                                                            verbose = 0)

## (CAUTION: DO NOT OVERWRITE EXISTING FILES) -- Convert training history to dataframe and write to a .json file
history_bpmll_FF_encoded_lr001_df = pd.DataFrame(history_bpmll_FF_encoded_lr001.history)
#with open("Training Histories/history_bpmll_FF_encoded_lr001.json", "w") as outfile:
#    history_bpmll_FF_encoded_lr001_df.to_json(outfile)

# +
## Visualize the validation hamming and cross-entropy loss histories
fig = plt.figure(figsize=(14, 4))
ax1 = fig.add_subplot(121)
ax1.plot(history_bpmll_FF_encoded_lr001_df['val_hamming_loss'], label = 'Validation HL')
ax1.plot(history_bpmll_FF_encoded_lr001_df['hamming_loss'], label = 'Training HL')
ax1.set_xlabel('Epoch', fontsize = 12)
ax1.set_ylabel('Hamming Loss', fontsize = 12)
ax1.legend()
ax1.set_title('Hamming Loss History', fontsize = 15)

ax2 = fig.add_subplot(122)
ax2.plot(history_bpmll_FF_encoded_lr001_df['val_loss'], label = 'Validation CE')
# Finish the BPMLL/autoencoder loss-history figure started in the previous cell.
ax2.plot(history_bpmll_FF_encoded_lr001_df['loss'], label = 'Training CE')
ax2.set_xlabel('Epoch', fontsize = 12)
ax2.set_ylabel('BPMLL Loss', fontsize = 12)
ax2.legend()
ax2.set_title('BPMLL Loss History', fontsize = 15)

#plt.savefig('Reduced_Dataset_Learning_Rate_01.png')
# -

# # Results

# +
## Visualize the validation hamming loss histories for comparing dimension reduction techniques
fig = plt.figure(figsize=(14, 4))
ax1 = fig.add_subplot(121)
ax1.plot(history_ce_FF_tfidf_lr001_df['val_hamming_loss'], label = 'CE TFIDF')
ax1.plot(history_ce_FF_sepPCs_lr001_df['val_hamming_loss'], label = 'CE SepPCs')
ax1.plot(history_ce_FF_encoded_lr001_df['val_hamming_loss'], label = 'CE Encodings')
ax1.set_xlabel('Epoch', fontsize = 12)
ax1.set_ylabel('Hamming Loss', fontsize = 12)
ax1.legend()
ax1.set_title('Dimension Reduction Comparison (CE Loss)', fontsize = 15)

ax2 = fig.add_subplot(122)
# BUG FIX: these series come from the BPMLL-trained models but were labelled
# 'CE TFIDF' / 'CE SepPCs' / 'CE Encodings'; relabel to match the data.
ax2.plot(history_bpmll_FF_tfidf_lr001_df['val_hamming_loss'], label = 'BPMLL TFIDF')
ax2.plot(history_bpmll_FF_sepPCs_lr001_df['val_hamming_loss'], label = 'BPMLL SepPCs')
ax2.plot(history_bpmll_FF_encoded_lr001_df['val_hamming_loss'], label = 'BPMLL Encodings')
ax2.set_xlabel('Epoch', fontsize = 12)
# BUG FIX: the plotted quantity is val_hamming_loss, not the BPMLL loss —
# use the same y-axis label as ax1.
ax2.set_ylabel('Hamming Loss', fontsize = 12)
ax2.legend()
ax2.set_title('Dimension Reduction Comparison (BPMLL Loss)', fontsize = 15)

#plt.savefig('Reduced_Dataset_Learning_Rate_01.png')
# -

# +
## Visualize the validation hamming loss histories for comparing loss functions
fig = plt.figure(figsize=(14, 4))
ax1 = fig.add_subplot(131)
ax1.plot(history_ce_FF_tfidf_lr001_df['val_hamming_loss'], label = 'CE Loss')
ax1.plot(history_bpmll_FF_tfidf_lr001_df['val_hamming_loss'], label = 'BPMLL Loss')
ax1.set_xlabel('Epoch', fontsize = 12)
ax1.set_ylabel('Validation Hamming Loss', fontsize = 12)
ax1.legend()
ax1.set_title('BPMLL vs CE Loss', fontsize = 15)

ax2 = fig.add_subplot(132)
ax2.plot(history_ce_FF_sepPCs_lr001_df['val_hamming_loss'], label = 'CE Loss')
# Finish the BPMLL-vs-CE comparison figure started in the previous cell
# (middle panel: separable PCs; right panel: autoencoder encodings).
ax2.plot(history_bpmll_FF_sepPCs_lr001_df['val_hamming_loss'], label = 'BPMLL Loss')
ax2.set_xlabel('Epoch', fontsize = 12)
ax2.set_ylabel('Validation Hamming Loss', fontsize = 12)
ax2.legend()
ax2.set_title('BPMLL vs CE Loss', fontsize = 15)

ax3 = fig.add_subplot(133)
ax3.plot(history_ce_FF_encoded_lr001_df['val_hamming_loss'], label = 'CE Loss')
# CONSISTENCY FIX: legend read 'BPMLL loss' (lowercase) while every other panel
# in this figure uses 'BPMLL Loss'.
ax3.plot(history_bpmll_FF_encoded_lr001_df['val_hamming_loss'], label = 'BPMLL Loss')
ax3.set_xlabel('Epoch', fontsize = 12)
ax3.set_ylabel('Validation Hamming Loss', fontsize = 12)
ax3.legend()
ax3.set_title('BPMLL vs CE Loss', fontsize = 15)

fig.tight_layout()
#plt.savefig('Reduced_Dataset_Learning_Rate_01.png')
Deep Learning Models/FF Models/.ipynb_checkpoints/FF_Models-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.10 64-bit (''py37'': conda)'
#     name: python3
# ---

# ## Regression
#
# Depending on the distribution of the dependent variable, regression comes in several flavors:
# * continuous: multiple linear regression (note this differs from multivariate linear regression — e.g. multivariate predictors are continuous, while "multiple" regression allows several data types)
# * binomial: logistic regression
# * Poisson: Poisson regression
# * negative binomial: negative binomial regression
#
# ## Logistic Regression
#
# As with linear regression, we need to estimate $n$ parameters:
#
# $$
# z=\theta_0+\theta_1x_1+\theta_2x_2+\dots+\theta_nx_n=\theta^Tx
# $$
#
# Logistic regression introduces non-linearity through the sigmoid function, which makes binary classification straightforward:
#
# $$
# h_{\theta}(x)=g\left(\theta^{T} x\right), \quad g(z)=\frac{1}{1+e^{-z}}
# $$
#
# Unlike linear regression, logistic regression uses the cross-entropy loss:
#
# $$
# J(\theta)=-\frac{1}{m} \sum_{i=1}^{m}\left[y^{(i)} \log h_{\theta}\left(x^{(i)}\right)+\left(1-y^{(i)}\right) \log \left(1-h_{\theta}\left(x^{(i)}\right)\right)\right]
# $$
#
# Its gradient is:
#
# $$
# \frac{\partial J(\theta)}{\partial \theta_{j}} = \frac{1}{m} \sum_{i=1}^{m}\left(h_{\theta}\left(x^{(i)}\right)-y^{(i)}\right) x_{j}^{(i)}
# $$
#
# This has the same form as in linear regression, but the hypothesis function differs; for logistic regression it is:
# $$
# h_{\theta}(x)=\frac{1}{1+e^{-\theta^{T} x}}
# $$
#
# Derivation:
#
# $$
# \begin{aligned}
# \frac{\partial}{\partial \theta_{j}} J(\theta) &=\frac{\partial}{\partial \theta_{j}}\left[-\frac{1}{m} \sum_{i=1}^{m}\left[y^{(i)} \log \left(h_{\theta}\left(x^{(i)}\right)\right)+\left(1-y^{(i)}\right) \log \left(1-h_{\theta}\left(x^{(i)}\right)\right)\right]\right] \\
# &=-\frac{1}{m} \sum_{i=1}^{m}\left[y^{(i)} \frac{1}{h_{\theta}\left(x^{(i)}\right)} \frac{\partial}{\partial \theta_{j}} h_{\theta}\left(x^{(i)}\right)-\left(1-y^{(i)}\right) \frac{1}{1-h_{\theta}\left(x^{(i)}\right)} \frac{\partial}{\partial \theta_{j}} h_{\theta}\left(x^{(i)}\right)\right] \\
# &=-\frac{1}{m} \sum_{i=1}^{m}\left[y^{(i)} \frac{1}{h_{\theta}\left(x^{(i)}\right)}-\left(1-y^{(i)}\right) \frac{1}{1-h_{\theta}\left(x^{(i)}\right)}\right] \frac{\partial}{\partial \theta_{j}} h_{\theta}\left(x^{(i)}\right) \\
# &=-\frac{1}{m} \sum_{i=1}^{m}\left[y^{(i)} \frac{1}{h_{\theta}\left(x^{(i)}\right)}-\left(1-y^{(i)}\right) \frac{1}{1-h_{\theta}\left(x^{(i)}\right)}\right] \frac{\partial}{\partial \theta_{j}} g\left(\theta^{T} x^{(i)}\right)
# \end{aligned}
# $$
#
# Since:
# $$
# \begin{aligned}
# \frac{\partial}{\partial \theta_{j}} g\left(\theta^{T} x^{(i)}\right) &=\frac{\partial}{\partial \theta_{j}} \frac{1}{1+e^{-\theta^{T} x^{(i)}}} \\
# &=\frac{e^{-\theta^{T} x^{(i)}}}{\left(1+e^{-\theta^{T} x^{(i)}}\right)^{2}} \frac{\partial}{\partial \theta_{j}} \theta^{T} x^{(i)} \\
# &=g\left(\theta^{T} x^{(i)}\right)\left(1-g\left(\theta^{T} x^{(i)}\right)\right) x_{j}^{(i)}
# \end{aligned}
# $$
# it follows that:
# $$
# \begin{aligned}
# \frac{\partial}{\partial \theta_{j}} J(\theta) &=-\frac{1}{m} \sum_{i=1}^{m}\left[y^{(i)}\left(1-g\left(\theta^{T} x^{(i)}\right)\right)-\left(1-y^{(i)}\right) g\left(\theta^{T} x^{(i)}\right)\right] x_{j}^{(i)} \\
# &=-\frac{1}{m} \sum_{i=1}^{m}\left(y^{(i)}-g\left(\theta^{T} x^{(i)}\right)\right) x_{j}^{(i)} \\
# &=\frac{1}{m} \sum_{i=1}^{m}\left(h_{\theta}\left(x^{(i)}\right)-y^{(i)}\right) x_{j}^{(i)}
# \end{aligned}
# $$

# Add the project root (".../machine-learning-toy-code") to the module search
# path so sibling packages can be imported.
import sys
from pathlib import Path
curr_path = str(Path().absolute())
parent_path = str(Path().absolute().parent)
p_parent_path = str(Path().absolute().parent.parent)
sys.path.append(p_parent_path)
print(f"主目录为:{p_parent_path}")

from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

# +
train_dataset = datasets.MNIST(root = p_parent_path+'/datasets/', train = True,transform = transforms.ToTensor(), download = False)
test_dataset = datasets.MNIST(root = p_parent_path+'/datasets/', train = False, transform = transforms.ToTensor(), download = False)

# batch_size == len(train_dataset): read the whole dataset as a single batch.
batch_size = len(train_dataset)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
X_train, y_train = next(iter(train_loader))
X_test, y_test = next(iter(test_loader))

# Show the first 100 images.
images, labels = X_train[:100], y_train[:100]
# Arrange the images in a grid 10 images wide.
img = torchvision.utils.make_grid(images, nrow=10)
# make_grid returns (channels, height, width) while imshow expects
# (height, width, channels), so move the channel axis last with .transpose().
img = img.numpy().transpose(1, 2, 0)
print(images.shape)
print(labels.reshape(10, 10))
print(img.shape)
plt.imshow(img)
plt.show()
# -

X_train, y_train = X_train.cpu().numpy(), y_train.cpu().numpy()  # tensor -> ndarray
X_test, y_test = X_test.cpu().numpy(), y_test.cpu().numpy()  # tensor -> ndarray
print(f"数据格式:{type(X_train)},数据维度:{X_train.shape}")
print(f"数据格式:{type(y_train)},数据维度:{y_train.shape}")

X_train = X_train.reshape(X_train.shape[0], 784)  # flatten each 28x28 image into a 784-vector
print(f"数据格式:{type(X_train)},数据维度:{X_train.shape}")

# +
ones_col = [[1] for i in range(len(X_train))]  # column of ones, i.e. [[1],[1],...,[1]], for the bias term
X_train_modified = np.append(X_train, ones_col, axis=1)
x_train_modified_mat = np.mat(X_train_modified)
# MNIST has ten labels (0-9); for a binary task, treat digit 1 as the positive
# class (label 1) and every other digit as 0 — i.e. "is this a 1?".
# (The original comment claimed digit 0 was the positive class, contradicting the code.)
y_train_modified = np.array([1 if y_train[i] == 1 else 0 for i in range(len(y_train))])
theta = np.mat(np.zeros(len(X_train_modified[0])))
n_epochs = 10
lr = 0.01  # learning rate

def sigmoid(x):
    '''Numerically stable sigmoid.

    BUG FIX: np.exp(-x) overflows (RuntimeWarning) for large negative x;
    clipping the argument avoids that, and sigmoid is fully saturated
    beyond +-500 anyway, so results are unchanged to float precision.
    '''
    return 1.0/(1+np.exp(-np.clip(x, -500, 500)))
# -

train_dataset = datasets.MNIST(p_parent_path+'/datasets/', train=True, download=False,
                               transform=transforms.ToTensor())
train_loader = DataLoader(dataset=train_dataset, batch_size=100, shuffle=True)
images, labels = next(iter(train_loader))
print(images)
# Arrange the images in a grid 10 images wide.
img = torchvision.utils.make_grid(images, nrow=10)
# make_grid returns (channels, height, width); move the channel axis last for imshow.
img = img.numpy().transpose(1, 2, 0)
# print(images.shape)
# print(labels.reshape(10,10))
plt.imshow(img)
plt.show()

# +
# Stochastic gradient ascent on the log-likelihood, one sample at a time.
for i_epoch in range(n_epochs):
    loss_epoch = 0
    for i in range(len(X_train_modified)):
        hypothesis = sigmoid(np.dot(X_train_modified[i], theta.T))
        error = y_train_modified[i] - hypothesis
        grad = error * x_train_modified_mat[i]
        theta += lr * grad
        # NOTE(review): this accumulates the *signed* prediction error, not the
        # cross-entropy loss, although it is printed as "损失" (loss) below.
        loss_epoch += error.item()
    # loss_epoch /= len(X_train_modified)
    print(f"回合数:{i_epoch+1}/{n_epochs},损失:{loss_epoch:.4f}")
ml-with-numpy/LogisticRegression/LogisticRegression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_pytorch_p36 # language: python # name: conda_pytorch_p36 # --- # + [markdown] heading_collapsed=true # # Building, training and deploying fastai models on SageMaker example # # With Amazon SageMaker, you can package your own algorithms that can then be trained and deployed in the SageMaker environment. This notebook guides you through an example on how to build a custom container for SageMaker training and deployment using the popular [fast.ai](http://fast.ai) library. # # By packaging an algorithm in a container, you can bring almost any code to the Amazon SageMaker environment, regardless of programming language, environment, framework, or dependencies. # # 1. [Fast.ai on SageMaker example](#Fast.ai-on-SageMaker-example) # 1. [Permissions](#Permissions) # 1. [The example](#The-example) # 1. [The presentation](#The-presentation) # 1. [Part 1: Packaging and Uploading your Algorithm for use with Amazon SageMaker](#Part-1:-Packaging-and-Uploading-your-Algorithm-for-use-with-Amazon-SageMaker) # 1. [An overview of Docker](#An-overview-of-Docker) # 1. [How Amazon SageMaker runs your Docker container](#How-Amazon-SageMaker-runs-your-Docker-container) # 1. [Running your container during training](#Running-your-container-during-training) # 1. [The input](#The-input) # 1. [The output](#The-output) # 1. [Running your container during hosting](#Running-your-container-during-hosting) # 1. [The parts of the sample container](#The-parts-of-the-sample-container) # 1. [The Dockerfile](#The-Dockerfile) # 1. [Building and registering the container](#Building-and-registering-the-container) # 1. [Testing your algorithm on your local machine](#Testing-your-algorithm-on-your-local-machine) # 1. [Download the Shirts dataset](#Download-the-Shirts-dataset) # 1. 
[SageMaker Python SDK Local Training](#SageMaker-Python-SDK-Local-Training) # 1. [Fit, Deploy, Predict](#Fit,-Deploy,-Predict) # 1. [Making predictions using Python SDK](#Making-predictions-using-Python-SDK) # 1. [Part 2: Training and Hosting your Algorithm in Amazon SageMaker](#Part-2:-Training-and-Hosting-your-Algorithm-in-Amazon-SageMaker) # 1. [Set up the environment](#Set-up-the-environment) # 1. [Create the session](#Create-the-session) # 1. [Upload the data for training](#Upload-the-data-for-training) # 1. [Training On SageMaker](#Training-on-SageMaker) # 1. [Optional cleanup](#Optional-cleanup) # 1. [Reference](#Reference) # # _or_ I'm impatient, just [let me see the code](#The-Dockerfile)! # # # ## Permissions # # Running this notebook requires permissions in addition to the normal `SageMakerFullAccess` permissions. This is because it creates new repositories in Amazon ECR. The easiest way to add these permissions is simply to add the managed policy `AmazonEC2ContainerRegistryFullAccess` to the role that you used to start your notebook instance. There's no need to restart your notebook instance when you do this, the new permissions will be available immediately. # # ## The example # # In this example we show how to package a fastai container with an example application using a custom data set with a set of heavy metal t-shirts and sports shirts. By extending the SageMaker Fast.ai container we can utilize the existing training and hosting solution made to work on SageMaker found [here](https://github.com/mattmcclean/sagemaker-fastai-container). By comparison, if one were to build their own custom framework container from scratch, they would need to implement a training and hosting solution in order to use SageMaker. Here is an example showing [how to create a SageMaker TensorFlow container from scratch](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/tensorflow_bring_your_own/tensorflow_bring_your_own.ipynb). 
# # In this example, we use a GPU image to support training and a CPU image to support hosting. We require a GPU image for training as we will require access to the cuda libraries to accelerate training. For hosting we do not require a GPU which can be expensive so we will use the CPU only libraries of fastai and PyTorch. # # If you're only using Amazon SageMaker for training or hosting, but not both, only the functionality used needs to be built into your container. # # ## The presentation # # This presentation is divided into two parts: _building_ the container and _using_ the container. # - # # Part 1: Packaging and Uploading your Algorithm for use with Amazon SageMaker # # ### An overview of Docker # # If you're familiar with Docker already, you can skip ahead to the next section. # # For many data scientists, Docker containers are a new technology. But they are not difficult and can significantly simplify the deployment of your software packages. # # Docker provides a simple way to package arbitrary code into an _image_ that is totally self-contained. Once you have an image, you can use Docker to run a _container_ based on that image. Running a container is just like running a program on the machine except that the container creates a fully self-contained environment for the program to run. Containers are isolated from each other and from the host environment, so the way your program is set up is the way it runs, no matter where you run it. # # Docker is more powerful than environment managers like conda or virtualenv because (a) it is completely language independent and (b) it comprises your whole operating environment, including startup commands, and environment variable. # # A Docker container is like a virtual machine, but it is much lighter weight. For example, a program running in a container can start in less than a second and many containers can run simultaneously on the same physical or virtual machine instance. 
# # Docker uses a simple file called a `Dockerfile` to specify how the image is assembled. An example is provided below. You can build your Docker images based on Docker images built by yourself or by others, which can simplify things quite a bit. # # Docker has become very popular in programming and devops communities due to its flexibility and its well-defined specification of how code can be run in its containers. It is the underpinning of many services built in the past few years, such as [Amazon ECS]. # # Amazon SageMaker uses Docker to allow users to train and deploy arbitrary algorithms. # # In Amazon SageMaker, Docker containers are invoked in a one way for training and another, slightly different, way for hosting. The following sections outline how to build containers for the SageMaker environment. # # Some helpful links: # # * [Docker home page](http://www.docker.com) # * [Getting started with Docker](https://docs.docker.com/get-started/) # * [Dockerfile reference](https://docs.docker.com/engine/reference/builder/) # * [`docker run` reference](https://docs.docker.com/engine/reference/run/) # # [Amazon ECS]: https://aws.amazon.com/ecs/ # # ### How Amazon SageMaker runs your Docker container # # Because you can run the same image in training or hosting, Amazon SageMaker runs your container with the argument `train` or `serve`. How your container processes this argument depends on the container. All SageMaker deep learning framework containers already cover this requirement and will trigger your defined training algorithm and inference code. # # * If you specify a program as an `ENTRYPOINT` in the Dockerfile, that program will be run at startup and its first argument will be `train` or `serve`. The program can then look at that argument and decide what to do. The original `ENTRYPOINT` specified within the SageMaker PyTorch is [here](https://github.com/aws/sagemaker-pytorch-container/blob/master/docker/0.4.0/final/Dockerfile.cpu#L18). 
# # #### Running your container during training # # The base Docker image is based on the [SageMaker PyTorch Container](https://github.com/aws/sagemaker-pytorch-container) image as fast.ai is built on top of PyTorch. It can be found at this public [Github repo](https://github.com/mattmcclean/sagemaker-fastai-container). We have two images, one for GPU based training and a CPU based image for model hosting. # # Currently, our SageMaker fastai container utilizes [console_scripts](http://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point) to make use of the `train` command issued at training time. The line that gets invoked during `train` is defined within the setup.py file inside [SageMaker Containers](https://github.com/aws/sagemaker-containers/blob/master/setup.py#L48), our common SageMaker deep learning container framework. When this command is run, it will invoke the [trainer class](https://github.com/aws/sagemaker-containers/blob/master/src/sagemaker_containers/cli/train.py) to run, which will finally invoke our [fastai container code](https://github.com/mattmcclean/sagemaker-fastai-container/blob/master/src/sagemaker_pytorch_container/training.py) to run your Python file. # # A number of files are laid out for your use, under the `/opt/ml` directory: # # /opt/ml # ├── input # │   ├── config # │   │   ├── hyperparameters.json # │   │   └── resourceConfig.json # │   └── data # │   └── <channel_name> # │   └── <input data> # ├── model # │   └── <model files> # └── output # └── failure # # ##### The input # # * `/opt/ml/input/config` contains information to control how your program runs. `hyperparameters.json` is a JSON-formatted dictionary of hyperparameter names to values. These values are always strings, so you may need to convert them. `resourceConfig.json` is a JSON-formatted file that describes the network layout used for distributed training. 
# * `/opt/ml/input/data/<channel_name>/` (for File mode) contains the input data for that channel. The channels are created based on the call to CreateTrainingJob but it's generally important that channels match algorithm expectations. The files for each channel are copied from S3 to this directory, preserving the tree structure indicated by the S3 key structure.
# * `/opt/ml/input/data/<channel_name>_<epoch_number>` (for Pipe mode) is the pipe for a given epoch. Epochs start at zero and go up by one each time you read them. There is no limit to the number of epochs that you can run, but you must close each pipe before reading the next epoch.
#
# ##### The output
#
# * `/opt/ml/model/` is the directory where you write the model that your algorithm generates. Your model can be in any format that you want. It can be a single file or a whole directory tree. SageMaker packages any files in this directory into a compressed tar archive file. This file is made available at the S3 location returned in the `DescribeTrainingJob` result.
# * `/opt/ml/output` is a directory where the algorithm can write a file `failure` that describes why the job failed. The contents of this file are returned in the `FailureReason` field of the `DescribeTrainingJob` result. For jobs that succeed, there is no reason to write this file as it is ignored.
#
# #### Running your container during hosting
#
# Hosting has a very different model than training because hosting is responding to inference requests that come in via HTTP.
Currently, the SageMaker PyTorch containers [uses](https://github.com/aws/sagemaker-pytorch-container/blob/master/src/sagemaker_pytorch_container/serving.py#L103) our [recommended Python serving stack](https://github.com/aws/sagemaker-containers/blob/master/src/sagemaker_containers/_server.py#L44) to provide robust and scalable serving of inference requests: # # ![Request serving stack](img/stack.png) # # Amazon SageMaker uses two URLs in the container: # # * `/ping` receives `GET` requests from the infrastructure. Your program returns 200 if the container is up and accepting requests. # * `/invocations` is the endpoint that receives client inference `POST` requests. The format of the request and the response is up to the algorithm. If the client supplied `ContentType` and `Accept` headers, these are passed in as well. # # The container has the model files in the same place that they were written to during training: # # /opt/ml # └── model #    └── <model files> # # # ### The parts of the sample container # # The root directory has all the components you need to extend the SageMaker fast.ai container to use as an sample algorithm: # # . # ├── Dockerfile # ├── build_and_push.sh # └── src # ├── shirts.py # # Let's discuss each of these in turn: # # * __`Dockerfile`__ describes how to build your Docker container image. More details are provided below. # * __`build_and_push.sh`__ is a script that uses the Dockerfile to build your container GPU and CPU images and then push them to ECR. # * __`src`__ is the directory which contains our user code to be invoked. # # In this simple application, we install only one file in the container based on [Lesson 1](https://github.com/fastai/fastai/blob/master/courses/dl1/lesson1.ipynb) of the fast.ai MOOC course. You may only need that many, but if you have many supporting routines, you may wish to install more. 
# # The files that we put in the container are: # # * __`shirts.py`__ is the program based a custom image classification data set between photos of either heavy metal t-shirts or sports shirts that implements our training algorithm and handles loading our model for inferences. # ### The Dockerfile # # The Dockerfile describes the image that we want to build. You can think of it as describing the complete operating system installation of the system that you want to run. A Docker container running is quite a bit lighter than a full operating system, however, because it takes advantage of Linux on the host machine for the basic operations. # # We start from the [SageMaker fast.ai image](https://hub.docker.com/r/mattmcclean/sagemaker-fastai/) as the base image. This contains the PyTorch and fastai binaries as well as the SageMaker specific libraries explained earlier. The base image is an DockerHub image with the following pattern. # # `{account}/sagemaker-fastai:{framework_version}-{processor_type}-{python_version}` # # Here is an explanation of each field. # * __`account`__ - The DockerHub account to pull the base image from. Currently it is `mattmcclean`. # * __`framework_version`__ - The version of the fastai library. Currently is `1.0` # * __`processor_type`__ - CPU or GPU. We pass this as a Docker build arguement depending on the kind of image we want. Defaults to `gpu`. # * __`python_version`__ - The supported version of Python. Currently default is `py37` # # So the SageMaker fast.ai DockerHub image would be: # `mattmcclean/sagemaker-fastai:1.0-gpu-py37` # # You can also build your own base Docker image by performing the following steps: # 1. Clone the Github repo with the command: # # ` # git clone https://github.com/mattmcclean/sagemaker-fastai-container # ` # # 2. 
Build the GPU and CPU Docker base images and push to your own ECR respository with the commands: # # ``` # cd sagemaker-fastai-container # ./build_and_push.sh # ``` # # Next, we add the code that implements our specific algorithm to the container and set up the right environment for it to run under. # # Finally, we need to specify two environment variables. # 1. __`SAGEMAKER_SUBMIT_DIRECTORY`__ - the directory within the container containing our Python script for training and inference. # 2. __`SAGEMAKER_PROGRAM`__ - the Python script that should be invoked for training and inference. # # Let's look at the Dockerfile for this example. # !cat docker/shirts/Dockerfile # ### Building and registering the container # # The following bash script builds the Docker images (both GPU and CPU based) needed to train and deploy the fastai models and pushed them to ECR. The same code can be run from the script named `build_and_push.sh`. # # This script looks for an ECR repository in the account you're using and the current default region (if you're using a SageMaker notebook instance, this is the region where the notebook instance was created). If the repository doesn't exist, the script will create it. Then it will call Docker build and push for a CPU and GPU based image. # + language="bash" # # image="sagemaker-fastai-shirts" # FASTAI_VERSION="1.0" # PY_VERSION="py37" # # # Get the account number associated with the current IAM credentials # account=$(aws sts get-caller-identity --query Account --output text) # # # Get the region defined in the current configuration (default to us-west-2 if none defined) # region=$(aws configure get region) # region=${region:-us-west-2} # # # If the repository doesn't exist in ECR, create it. # # aws ecr describe-repositories --repository-names "${image}" > /dev/null 2>&1 # # if [ $? 
-ne 0 ] # then # aws ecr create-repository --repository-name "${image}" > /dev/null # fi # # # Get the login command from ECR and execute it directly # $(aws ecr get-login --region ${region} --no-include-email) # # # Build the docker image locally with the image name and then push it to ECR # # with the full name. # fullname="${account}.dkr.ecr.${region}.amazonaws.com/${image}:${FASTAI_VERSION}-gpu-${PY_VERSION}" # docker build -t ${image}:${FASTAI_VERSION}-gpu-${PY_VERSION} -f docker/shirts/Dockerfile --build-arg ARCH=gpu . # docker tag ${image}:${FASTAI_VERSION}-gpu-${PY_VERSION} ${fullname} # docker push ${fullname} # # fullname="${account}.dkr.ecr.${region}.amazonaws.com/${image}:${FASTAI_VERSION}-cpu-${PY_VERSION}" # docker build -t ${image}:${FASTAI_VERSION}-cpu-${PY_VERSION} -f docker/shirts/Dockerfile --build-arg ARCH=cpu . # docker tag ${image}:${FASTAI_VERSION}-cpu-${PY_VERSION} ${fullname} # docker push ${fullname} # # - # ## Testing your algorithm on your local machine # # When you're packaging your first algorithm to use with Amazon SageMaker, you probably want to test it yourself to make sure it's working correctly. We use the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk) to test both locally and on SageMaker. For more examples with the SageMaker Python SDK, see [Amazon SageMaker Examples](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk). In order to test our algorithm, we need our dataset. # ## Download the Shirts dataset # We will be utilizing a custom data set with a mixture of metal and sport shirts for training. 
# %mkdir -p data/shirts # !wget https://s3-eu-west-1.amazonaws.com/sagemaker-934676248949-eu-west-1/data/shirts_imgs.tar.gz # !tar zxf shirts_imgs.tar.gz -C data/shirts # !rm shirts_imgs.tar.gz import os DATA_PATH=f'{os.getcwd()}/data/shirts' # %ls {DATA_PATH} # ## SageMaker Python SDK Local Training # To represent our training, we use the Estimator class, which needs to be configured in five steps. # 1. IAM role - our AWS execution role # 2. train_instance_count - number of instances to use for training. # 3. train_instance_type - type of instance to use for training. For training locally, we specify `local` or `local_gpu`. # 4. image_name - our custom Fast.ai Docker image we created. # 5. hyperparameters - hyperparameters we want to pass. # # Let's start with setting up our IAM role. We make use of a helper function within the Python SDK. This function throw an exception if run outside of a SageMaker notebook instance, as it gets metadata from the notebook instance. If running outside, you must provide an IAM role with proper access stated above in [Permissions](#Permissions). # + from sagemaker import get_execution_role role = get_execution_role() # - # ## Fit, Deploy, Predict # # Now that the rest of our estimator is configured, we can call `fit()` with the path to our local Shirts dataset prefixed with `file://`. This invokes our fast.ai container with 'train' and passes in our hyperparameters and other metadata as json files in /opt/ml/input/config within the container to our program entry point defined in the Dockerfile. # # After our training has succeeded, our training algorithm outputs our trained model within the /opt/ml/model directory, which is used to handle predictions. # # We can then call `deploy()` with an instance_count and instance_type, which is 1 and `local`. 
This invokes our fast.ai container with 'serve', which setups our container to handle prediction requests as defined [here](https://github.com/aws/sagemaker-pytorch-container/blob/master/src/sagemaker_pytorch_container/serving.py#L103). What is returned is a predictor, which is used to make inferences against our trained model. # # After our prediction, we can delete our endpoint. # # We recommend testing and training your training algorithm locally first, as it provides quicker iterations and better debuggability. base_image_name = 'sagemaker-fastai-shirts' # + import os import subprocess instance_type = 'local' image_name = f'{base_image_name}:1.0-cpu-py37' if subprocess.call('nvidia-smi') == 0: ## Set type to GPU if one is present instance_type = 'local_gpu' image_name = f'{base_image_name}:1.0-gpu-py37' print("Instance type = " + instance_type) print("Image = " + image_name) # + from sagemaker.estimator import Estimator hyperparameters = {'epochs': 4, 'batch-size': 64} estimator = Estimator(role=role, train_instance_count=1, train_instance_type=instance_type, image_name=image_name, hyperparameters=hyperparameters) estimator.fit(f'file://{DATA_PATH}') # - # ## Making predictions using Python SDK # # To make predictions, we will use a few images, from the test loader, converted into a json format to send as an inference request. # # We will do predictions using a different Docker image that is CPU only rather than GPU based that was used for training. # # The reponse will be tensors containing the probabilities of each image belonging to one of the 2 classes. Based on the highest probability we will map that index to the corresponding class in our output. 
import os, random from PIL import Image # + from sagemaker.predictor import RealTimePredictor, json_deserializer class ImagePredictor(RealTimePredictor): def __init__(self, endpoint_name, sagemaker_session): super(ImagePredictor, self).__init__(endpoint_name, sagemaker_session=sagemaker_session, serializer=None, deserializer=json_deserializer, content_type='image/jpeg') # - predictor = estimator.deploy(1, 'local', image=f'{base_image_name}:1.0-cpu-py37', predictor_cls=ImagePredictor) from PIL import Image import requests from io import BytesIO # Motorhead T-Shirt #url = 'https://images.backstreetmerch.com/images/products/bands/clothing/mthd/bsi_mthd281.jpg' # Judas Priest T-Shirt #url = 'https://thumbs2.ebaystatic.com/d/l225/m/m7Lc1qRuFN3oFIlQla5V0IA.jpg' # Australia Rugby T-Shirt #url = 'https://www.lovell-rugby.co.uk/products/products_580x387/40378.jpg' # Chicago Bulls top #url = 'https://i.ebayimg.com/images/g/qc0AAOSwBahVN~qm/s-l300.jpg' # Masters Golf Shirt url = 'https://s-media-cache-ak0.pinimg.com/originals/29/6a/15/296a15200e7dd3ed08e12d9052ea4f97.jpg' img_bytes = requests.get(url).content img = Image.open(BytesIO(img_bytes)) img response = predictor.predict(img_bytes) response predictor.delete_endpoint() # # Part 2: Training and Hosting your Algorithm in Amazon SageMaker # Once you have your container packaged, you can use it to train and serve models. Let's do that with the algorithm we made above. # # ## Set up the environment # Here we specify the bucket to use and the role that is used for working with SageMaker. # S3 prefix prefix = 'DEMO-shirt-classifier' # ## Create the session # # The session remembers our connection parameters to SageMaker. We use it to perform all of our SageMaker operations. # + import sagemaker as sage sess = sage.Session() # - # ## Upload the data for training # # We will use the tools provided by the SageMaker Python SDK to upload the data to a default bucket. 
data_location = sess.upload_data(DATA_PATH, key_prefix=prefix) data_location # + # run if data already uploaded #data_location = 's3://' + sess.default_bucket() + '/' + prefix # - # ## Training on SageMaker # Training a model on SageMaker with the Python SDK is done in a way that is similar to the way we trained it locally. This is done by changing our train_instance_type from `local` to one of our [supported EC2 instance types](https://aws.amazon.com/sagemaker/pricing/instance-types/). # # In addition, we must now specify the ECR image URL, which we just pushed above. # # Finally, our local training dataset has to be in Amazon S3 and the S3 URL to our dataset is passed into the `fit()` call. # # Let's first fetch our ECR image url that corresponds to the image we just built and pushed. # + import boto3 client = boto3.client('sts') account = client.get_caller_identity()['Account'] my_session = boto3.session.Session() region = my_session.region_name algorithm_name = base_image_name arch = 'gpu' ecr_image = f'{account}.dkr.ecr.{region}.amazonaws.com/{algorithm_name}:1.0-{arch}-py37' print(f'Using ECR image for training: {ecr_image}') # + from sagemaker.estimator import Estimator hyperparameters = {'epochs': 4, 'batch-size': 64} instance_type_train = 'ml.p3.2xlarge' estimator = Estimator(role=role, train_instance_count=1, train_instance_type=instance_type_train, image_name=ecr_image, hyperparameters=hyperparameters) estimator.fit(data_location) # + instance_type_deploy = 'ml.m5.xlarge' arch = 'cpu' ecr_image = f'{account}.dkr.ecr.{region}.amazonaws.com/{algorithm_name}:1.0-{arch}-py37' print(f'Using ECR image for inference: {ecr_image}') predictor = estimator.deploy(1, instance_type_deploy, image=ecr_image, predictor_cls=ImagePredictor) # + # use on existing endpoint #endpoint_name = '' #predictor = ImagePredictor(endpoint=endpoint_name, sagemaker_session=my_session) # - from PIL import Image import requests from io import BytesIO # Motorhead T-Shirt url = 
'https://images.backstreetmerch.com/images/products/bands/clothing/mthd/bsi_mthd281.jpg' # Judas Priest T-Shirt #url = 'https://thumbs2.ebaystatic.com/d/l225/m/m7Lc1qRuFN3oFIlQla5V0IA.jpg' # Australia Rugby T-Shirt #url = 'https://www.lovell-rugby.co.uk/products/products_580x387/40378.jpg' # Chicago Bulls top #url = 'https://i.ebayimg.com/images/g/qc0AAOSwBahVN~qm/s-l300.jpg' # Masters Golf Shirt #url = 'https://s-media-cache-ak0.pinimg.com/originals/29/6a/15/296a15200e7dd3ed08e12d9052ea4f97.jpg' img_bytes = requests.get(url).content img = Image.open(BytesIO(img_bytes)) img response = predictor.predict(img_bytes) response # ## Optional cleanup # When you're done with the endpoint, you should clean it up. # # All of the training jobs, models and endpoints we created can be viewed through the SageMaker console of your AWS account. predictor.delete_endpoint() # # Reference # - [How Amazon SageMaker interacts with your Docker container for training](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html) # - [How Amazon SageMaker interacts with your Docker container for inference](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html) # - [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk) # - [Dockerfile](https://docs.docker.com/engine/reference/builder/) # - [PyTorch extending container example](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/pytorch_extending_our_containers/pytorch_extending_our_containers.ipynb) # - [scikit-bring-your-own example](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/scikit_bring_your_own/scikit_bring_your_own.ipynb) # - [SageMaker fast.ai container](https://github.com/mattmcclean/sagemaker-fastai-container) # - [SageMaker fast.ai example](https://github.com/mattmcclean/sagemaker-fastai-example) # - [SageMaker PyTorch container](https://github.com/aws/sagemaker-pytorch-container)
fastai_shirts_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lecture 5: String, List & 2D Lists a = "<NAME>" b = 'a' type(a) type(b) a = "<NAME>" print(a[0]) print(a[5]) print(a[10]) # reverse string a = "<NAME>" a[::-1] # string is immutable a = "<NAME>" a[3] = 'l' # string with double or single quote[ escaping charter] a = "'hello word'" print(a) a = '"hello world"' print(a) a = "hello world\'s" print(a) a = "Akash" b = "Singh" print(a+" "+b) print(a == b) print(a > b) # lexigraphical compare # Split function a = "<NAME>" a.split() # by default delimiter is spaces a = "akash,is,hardworking" a.split(",") # delimiter is , # replace function a = "<NAME>" a. replace("akash","hello") # lower, upper,islower,isuppper,isalpha(find only alphabet),startwith,endwith # ### String Slicing a = "<NAME>" a[2] a[2:5] a[2:10:2] a[:5] a[2:5] a[::1] a[:] a[-1::] a[::-1] a[10:2:-1] # replace 1st occursence s = input() pos = s.find('a') ans = s if pos != -1: ans = s[:pos]+'y'+s[pos+1:] ans # replace ALL occursence s = input() pos = s.find('a') ans = s for i in range(0,len(ans)): if ans[i] == 'x': ans = ans[:i]+'y'+ans[i+1:] ans # ### List Intro. a = [1,2,3] type(a) a = [] a a = [5,6,7,8] a[2] # mutable a = [5,6,7,8] a[3] = 10 a a = [True,a,False,1,2.5] a # list compression num = [i for i in range(10)] num # ### List Imp Functions. 
a = [1,2,3,4] b = [5,6,7,8] print(len(a)) print(a+b) a.append(11) # append at last print(a) a.insert(3,12) #particular index add elem print(a) a.extend(b) # merge two list print(a) a.remove(12) # remove particular elem print(a) a.pop(3) # remove at particular index print(a) a.sort() print(a) max(a) a.index(5) 5 in a # ### Taking Input from User in List a = [7,8,9,4,5,6,1,2,3] for i in a: print(i) # variant 1 aarr = int(input()) latest = [] for i in range(aarr): b = int(input()) latest.append(b) latest # variant 2 line= input() l = line.split() new_a = [] for i in l: new_a.append(int(i)) new_a # alterntive 2 line= input() l = line.split() a = [int(i) for i in l] a # or a = [int(i) for i in input().split()] a # ### List Slicing # ### Nested List or 2d List a = [[1,2,3],[4,5,6]] a[0][2] a = [[1,2,3],[4,5,6]] len(a) # + # user input in 2d list # Var 1 elem = input().split() m = int(elem[0]) n = int(elem[1]) l = [] for i in range(m): box_row = [int(i) for i in input().split()] l.append(box_row) l # - # var 2 elem = input().split() m = int(elem[0]) n = int(elem[1]) l = [] for i in range(m): next_row = [] for j in range(n): num = int(input()) next_row.append(num) l.append(next_row) l # + # var3 elem = input().split() m = int(elem[0]) n = int(elem[1]) l_id = [int(i) for i in input().split()] l = [] for i in range(m): nex_row = l_id[(n*i):(n*(i+1))] l.append(next_row) l # - # ### Muti deminesion Slicing a = [[1, 2, 4, 5], [3, 6, 7, 8], [9, 10, 11, 12]] a[1:3] a = [[] for i in range(10)] a a = [[0 for j in range(5)] for i in range(10)] a a = [[j for j in range(5)] for i in range(10)] a a = [[i for j in range(5)] for i in range(10)] a # ## Assignment Prog. 
# question1 : Palidrome String str = input() str == str[::-1] # question2: all Substring str = input() for i in range(0,len(str)): for j in range(i+1,len(str)+1): print(str[i:j]) num = [int(i) for i in input().split()] even = 0 odd = 0 for i in num: if i%2 ==0: even += 1 else: odd += 1 print(even,odd) # swap adject num num = [int(i) for i in input().split()] for i in range(0,len(num),2): num[i],num[i+1] = num[i+1],num[i] num # equilibrm index size = int(input()) num = [int(i) for i in input().split()] left = 0 right = 0 for i in range(size): left = 0 right = 0 for j in range(i): left += num[j] for j in range(i+1,size): right += num[j] if left == right: print(i) else: print("-1") # + # sum of col in 2d list def col_sum(a): m = len(a) if m == 0: return n = len(a[0]) for j in range(n): sum_j = 0 for i in range(m): sum_j += a[i][j] print(sum_j) a = [[j for j in range(5)] for i in range(10)] col_sum(a) # + # row wise sum def row_sum(a): row = len(a) if row == 0: return col = len(a[0]) for i in range(row): sum_i = 0 for j in range(col): sum_i += a[i][j] print(sum_i, end=" ") elem = input().split() m = int(elem[0]) n = int(elem[1]) l = [] for i in range(m): box_row = [int(i) for i in input().split()] l.append(box_row) row_sum(l) # + # wave like num def wave(a): row = len(a) if row == 0: return col = len(a[0]) for j in range(col): if j%2 == 0: for i in range(row): print(a[i][j]) else: for i in range(row): print(a[i][j]) elem = input().split() m = int(elem[0]) n = int(elem[1]) l = [] for i in range(m): box_row = [int(i) for i in input().split()] l.append(box_row) row_sum(l) # - # remove consecutive charater str = input() new_str = "" for i in str: if i in new_str: continue else: new_str += i new_str # + # reverse the string in its own place def rev(str): return str[::-1] str = input() new_str = "" splited_str = str.split() for i in splited_str: new_str = new_str + rev(i) +" " new_str # + # Highest freq str = input() new_dict = {} for i in str: if i in new_dict: new_dict[i] 
+= 1 else: new_dict[i] = 1 print(max(new_dict,key = new_dict.get)) # + # Array arrange number import random n = int(input()) lis = random.sample(range(1, n+1), n) for i in lis: print(i,end=" ") # + # leader array elemt num = int(input()) a = input() data = a.split() arr = [] for i in range(num): for d in data: arr.append(int(d)) arr for i in range(num): if arr[i] > arr[i+1]: print(arr[i],end = " ") print(arr[-1]) # + # spiral Pattern size = input().split() row,col = int(size[0]),int(size[1]) arr = [] for i in range(row): row_arr = [int(j) for j in input().split()] arr.append(row_arr) arr
5. Strings, List & 2D lists/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # ## Hitchhiker's Guide to Hyperspace - An Indexing Subsystem for Apache Spark™ # Hyperspace introduces the ability for Apache Spark™ users to create indexes on their datasets (e.g., CSV, JSON, Parquet etc.) and leverage them for potential query and workload acceleration. # # In this notebook, we highlight the basics of Hyperspace, emphasizing on its simplicity and shows how it can be used by just anyone. # # **Disclaimer**: Hyperspace helps accelerate your workloads/queries under two circumstances: # # 1. Queries contain filters on predicates with high selectivity (e.g., you want to select 100 matching rows from a million candidate rows) # 2. Queries contain a join that requires heavy-shuffles (e.g., you want to join a 100 GB dataset with a 10 GB dataset) # # You may want to carefully monitor your workloads and determine whether indexing is helping you on a case-by-case basis. # ## Setup # To begin with, let's start a new Spark™ session. Since this notebook is a tutorial merely to illustrate what Hyperspace can offer, we will make a configuration change that allow us to highlight what Hyperspace is doing on small datasets. By default, Spark™ uses *broadcast join* to optimize join queries when the data size for one side of join is small (which is the case for the sample data we use in this tutorial). Therefore, we disable broadcast joins so that later when we run join queries, Spark™ uses *sort-merge* join. This is mainly to show how Hyperspace indexes would be used at scale for accelerating join queries. # # The output of running the cell below shows a reference to the successfully created Spark™ session and prints out '-1' as the value for the modified join config which indicates that broadcast join is successfully disabled. 
# + # Start your Spark session spark # Disable BroadcastHashJoin, so Spark will use standard SortMergeJoin. Currently Hyperspace indexes utilize SortMergeJoin to speed up query. spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1) # Verify that BroadcastHashJoin is set correctly print(spark.conf.get("spark.sql.autoBroadcastJoinThreshold")) # - # ## Data Preparation # # To prepare our environment, we will create sample data records and save them as parquet data files. While we use Parquet for illustration, you can use other formats such as CSV. In the subsequent cells, we will also demonstrate how you can create several Hyperspace indexes on this sample dataset and how one can make Spark™ use them when running queries. # # Our example records correspond to two datasets: *department* and *employee*. You should configure "empLocation" and "deptLocation" paths so that on the storage account they point to your desired location to save generated data files. # # The output of running below cell shows contents of our datasets as lists of triplets followed by references to dataFrames created to save the content of each dataset in our preferred location. 
# + from pyspark.sql.types import StructField, StructType, StringType, IntegerType # Sample department records departments = [(10, "Accounting", "New York"), (20, "Research", "Dallas"), (30, "Sales", "Chicago"), (40, "Operations", "Boston")] # Sample employee records employees = [(7369, "SMITH", 20), (7499, "ALLEN", 30), (7521, "WARD", 30), (7566, "JONES", 20), (7698, "BLAKE", 30)] # Create a schema for the dataframe dept_schema = StructType([StructField('deptId', IntegerType(), True), StructField('deptName', StringType(), True), StructField('location', StringType(), True)]) emp_schema = StructType([StructField('empId', IntegerType(), True), StructField('empName', StringType(), True), StructField('deptId', IntegerType(), True)]) departments_df = spark.createDataFrame(departments, dept_schema) employees_df = spark.createDataFrame(employees, emp_schema) #TODO ** customize this location path ** emp_Location = "/<yourpath>/employees.parquet" dept_Location = "/<yourpath>/departments.parquet" employees_df.write.mode("overwrite").parquet(emp_Location) departments_df.write.mode("overwrite").parquet(dept_Location) # - # Let's verify the contents of parquet files we created above to make sure they contain expected records in correct format. We later use these data files to create Hyperspace indexes and run sample queries. # # Running below cell, the output displays the rows in employee and department dataFrames in a tabular form. There should be 14 employees and 4 departments, each matching with one of triplets we created in the previous cell. # + # emp_Location and dept_Location are the user defined locations above to save parquet files emp_DF = spark.read.parquet(emp_Location) dept_DF = spark.read.parquet(dept_Location) # Verify the data is available and correct emp_DF.show() dept_DF.show() # - # ## Hello Hyperspace Index! # Hyperspace lets users create indexes on records scanned from persisted data files. 
Once successfully created, an entry corresponding to the index is added to the Hyperspace's metadata. This metadata is later used by Apache Spark™'s optimizer (with our extensions) during query processing to find and use proper indexes. # # Once indexes are created, users can perform several actions: # - **Refresh** If the underlying data changes, users can refresh an existing index to capture that. # - **Delete** If the index is not needed, users can perform a soft-delete i.e., index is not physically deleted but is marked as 'deleted' so it is no longer used in your workloads. # - **Vacuum** If an index is no longer required, users can vacuum it which forces a physical deletion of the index contents and associated metadata completely from Hyperspace's metadata. # # Below sections show how such index management operations can be done in Hyperspace. # # First, we need to import the required libraries and create an instance of Hyperspace. We later use this instance to invoke different Hyperspace APIs to create indexes on our sample data and modify those indexes. # # Output of running below cell shows a reference to the created instance of Hyperspace. # + from hyperspace import * # Create an instance of Hyperspace hyperspace = Hyperspace(spark) # - # ### Create Indexes # To create a Hyperspace index, the user needs to provide 2 pieces of information: # * An Apache Spark™ DataFrame which references the data to be indexed. # * An index configuration object: IndexConfig, which specifies the *index name*, *indexed* and *included* columns of the index. # # We start by creating three Hyperspace indexes on our sample data: two indexes on the department dataset named "deptIndex1" and "deptIndex2", and one index on the employee dataset named 'empIndex'. # For each index, we need a corresponding IndexConfig to capture the name along with columns lists for the indexed and included columns. Running below cell creates these indexConfigs and its output lists them. 
# # **Note**: An *index column* is a column that appears in your filters or join conditions. An *included column* is a column that appears in your select/project. # # For instance, in the following query: # ```sql # SELECT X # FROM Table # WHERE Y = 2 # ``` # X can be an *index column* and Y can be an *included column*. # + # Create index configurations emp_IndexConfig = IndexConfig("empIndex1", ["deptId"], ["empName"]) dept_IndexConfig1 = IndexConfig("deptIndex1", ["deptId"], ["deptName"]) dept_IndexConfig2 = IndexConfig("deptIndex2", ["location"], ["deptName"]) # - # Now, we create three indexes using our index configurations. For this purpose, we invoke "createIndex" command on our Hyperspace instance. This command requires an index configuration and the dataFrame containing rows to be indexed. # Running below cell creates three indexes. # # + # Create indexes from configurations hyperspace.createIndex(emp_DF, emp_IndexConfig) hyperspace.createIndex(dept_DF, dept_IndexConfig1) hyperspace.createIndex(dept_DF, dept_IndexConfig2) # - # ### List Indexes # # Below code shows how a user can list all available indexes in a Hyperspace instance. It uses "indexes" API which returns information about existing indexes as a Spark™'s DataFrame so you can perform additional operations. For instance, you can invoke valid operations on this DataFrame for checking its content or analyzing it further (for example filtering specific indexes or grouping them according to some desired property). # # Below cell uses DataFrame's 'show' action to fully print the rows and show details of our indexes in a tabular form. For each index, we can see all information Hyperspace has stored about it in the metadata. You will immediately notice the following: # - "config.indexName", "config.indexedColumns", "config.includedColumns" and "status.status" are the fields that a user normally refers to. # - "dfSignature" is automatically generated by Hyperspace and is unique for each index. 
Hyperspace uses this signature internally to maintain the index and exploit it at query time. # # In the output below, all three indexes should have "ACTIVE" as status and their name, indexed columns, and included columns should match with what we defined in index configurations above. # hyperspace.indexes().show() # ### Delete Indexes # A user can drop an existing index by using the "deleteIndex" API and providing the index name. Index deletion does a soft delete: It mainly updates index's status in the Hyperspace metadata from "ACTIVE" to "DELETED". This will exclude the dropped index from any future query optimization and Hyperspace no longer picks that index for any query. However, index files for a deleted index still remain available (since it is a soft-delete), so that the index could be restored if user asks for. # # Below cell deletes index with name "deptIndex2" and lists Hyperspace metadata after that. The output should be similar to above cell for "List Indexes" except for "deptIndex2" which now should have its status changed into "DELETED". # + hyperspace.deleteIndex("deptIndex2") hyperspace.indexes().show() # - # ### Restore Indexes # A user can use the "restoreIndex" API to restore a deleted index. This will bring back the latest version of index into ACTIVE status and makes it usable again for queries. Below cell shows an example of "restoreIndex" usage. We delete "deptIndex1" and restore it. The output shows "deptIndex1" first went into the "DELETED" status after invoking "deleteIndex" command and came back to the "ACTIVE" status after calling "restoreIndex". # # + hyperspace.deleteIndex("deptIndex1") hyperspace.indexes().show() hyperspace.restoreIndex("deptIndex1") hyperspace.indexes().show() # - # ### Vacuum Indexes # The user can perform a hard-delete i.e., fully remove files and the metadata entry for a deleted index using "vacuumIndex" command. 
Once done, this action is irreversible as it physically deletes all the index files (which is why it is a hard-delete). # # The cell below vacuums the "deptIndex2" index and shows Hyperspace metadata after vaccuming. You should see metadata entries for two indexes "deptIndex1" and "empIndex" both with "ACTIVE" status and no entry for "deptIndex2". hyperspace.vacuumIndex("deptIndex2") hyperspace.indexes().show() # ## Enable/Disable Hyperspace # # Hyperspace provides APIs to enable or disable index usage with Spark™. # # - By using "enable" command, Hyperspace optimization rules become visible to the Apache Spark™ optimizer and they will exploit existing Hyperspace indexes to optimize user queries. # - By using "disable' command, Hyperspace rules no longer apply during query optimization. You should note that disabling Hyperspace has no impact on created indexes as they remain intact. # # Below cell shows how you can use these commands to enable or disable Hyperspace. The output simply shows a reference to the existing Spark™ session whose configuration is updated. # + # Enable Hyperspace Hyperspace.enable(spark) # Disable Hyperspace Hyperspace.disable(spark) # - # ## Index Usage # In order to make Spark use Hyperspace indexes during query processing, the user needs to make sure that Hyperspace is enabled. # # The cell below enables Hyperspace and creates two DataFrames containing our sample data records which we use for running example queries. For each DataFrame, a few sample rows are printed. # + # Enable Hyperspace Hyperspace.enable(spark) emp_DF = spark.read.parquet(emp_Location) dept_DF = spark.read.parquet(dept_Location) emp_DF.show(5) dept_DF.show(5) # - # # Hyperspace's Index Types # # Currently, Hyperspace has rules to exploit indexes for two groups of queries: # * Selection queries with lookup or range selection filtering predicates. # * Join queries with an equality join predicate (i.e. Equi-joins). 
# # ## Indexes for Accelerating Filters # # Our first example query does a lookup on department records (see below cell). In SQL, this query looks as follows: # # ```sql # SELECT deptName # FROM departments # WHERE deptId = 20 # ``` # # The output of running the cell below shows: # - query result, which is a single department name. # - query plan that Spark™ used to run the query. # # In the query plan, the "FileScan" operator at the bottom of the plan shows the datasource where the records were read from. The location of this file indicates the path to the latest version of the "deptIndex1" index. This shows that according to the query and using Hyperspace optimization rules, Spark™ decided to exploit the proper index at runtime. # # + # Filter with equality predicate eqFilter = dept_DF.filter("""deptId = 20""").select("""deptName""") eqFilter.show() eqFilter.explain(True) # - # Our second example is a range selection query on department records. In SQL, this query looks as follows: # # ```sql # SELECT deptName # FROM departments # WHERE deptId > 20" # ``` # Similar to our first example, the output of the cell below shows the query results (names of two departments) and the query plan. The location of data file in the FileScan operator shows that 'deptIndex1" was used to run the query. # # + # Filter with range selection predicate rangeFilter = dept_DF.filter("""deptId > 20""").select("deptName") rangeFilter.show() rangeFilter.explain(True) # - # Our third example is a query joining department and employee records on the department id. The equivalent SQL statement is shown below: # # ```sql # SELECT employees.deptId, empName, departments.deptId, deptName # FROM employees, departments # WHERE employees.deptId = departments.deptId" # ``` # # The output of running the cell below shows the query results which are the names of 14 employees and the name of department each employee works in. The query plan is also included in the output. 
Notice how the file locations for two FileScan operators shows that Spark used "empIndex" and "deptIndex1" indexes to run the query. # # + # Join eqJoin = emp_DF.join(dept_DF, emp_DF.deptId == dept_DF.deptId).select(emp_DF.empName, dept_DF.deptName) eqJoin.show() eqJoin.explain(True) # + # Join eqJoin = emp_DF.join(dept_DF, emp_DF.deptId == dept_DF.deptId).select(emp_DF.empName, dept_DF.deptName) eqJoin.show() eqJoin.explain(True) # - # ## Support for SQL Semantics # # The index usage is transparent to whether the user uses DataFrame API or Spark™ SQL. The following example shows the same join example as before, in sql form, showing the use of indexes if applicable. # + from pyspark.sql import SparkSession emp_DF.createOrReplaceTempView("EMP") dept_DF.createOrReplaceTempView("DEPT") joinQuery = spark.sql("SELECT EMP.empName, DEPT.deptName FROM EMP, DEPT WHERE EMP.deptId = DEPT.deptId") joinQuery.show() joinQuery.explain(True) # - # ## Explain API # Indexes are great but how do you know if they are being used? Hyperspace allows users to compare their original plan vs the updated index-dependent plan before running their query. You have an option to choose from html/plaintext/console mode to display the command output. # # The following cell shows an example with HTML. The highlighted section represents the difference between original and updated plans along with the indexes being used. # + eqJoin = emp_DF.join(dept_DF, emp_DF.deptId == dept_DF.deptId).select(emp_DF.empName, dept_DF.deptName) spark.conf.set("spark.hyperspace.explain.displayMode", "html") hyperspace.explain(eqJoin, True, displayHTML) # - # ## Refresh Indexes # If the original data on which an index was created changes, then the index will no longer capture the latest state of data. The user can refresh such a stale index using "refreshIndex" command. 
This causes the index to be fully rebuilt and updates it accroding to the latest data records (don't worry, we will show you how to *incrementally refresh* your index in other notebooks). # # The two cells below show an example for this scenario: # - First cell adds two more departments to the original departments data. It reads and prints list of departments to verify new departments are added correctly. The output shows 6 departments in total: four old ones and two new. Invoking "refreshIndex" updates "deptIndex1" so index captures new departments. # - Second cell runs our range selection query example. The results should now contain four departments: two are the ones, seen before when we ran the query above, and two are the new departments we just added. # + extra_Departments = [(50, "Inovation", "Seattle"), (60, "Human Resources", "San Francisco")] extra_departments_df = spark.createDataFrame(extra_Departments, dept_schema) extra_departments_df.write.mode("Append").parquet(dept_Location) dept_DFrame_Updated = spark.read.parquet(dept_Location) dept_DFrame_Updated.show(10) # + newRangeFilter = dept_DFrame_Updated.filter("deptId > 20").select("deptName") newRangeFilter.show() newRangeFilter.explain(True) # - hyperspace.indexes().show() # + # Clean-up the remaining indexes hyperspace.deleteIndex("empIndex1") hyperspace.deleteIndex("deptIndex1") hyperspace.vacuumIndex("empIndex1") hyperspace.vacuumIndex("deptIndex1")
notebooks/python/Hitchhikers Guide to Hyperspace.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Analítica Predictiva 2018-1 - Trabajo Final # # Presentado por: # # **<NAME>** # <EMAIL> # # El trabajo propuesto fue desarrollado de acuerdo a los requisitos establecidos con el dataset **Polish Companies Bankruptcy** disponible en el repositorio de datos para Machine Learning de la Universidad de Carolina, Irvine. Este conjunto de datos hace parte de la coleccion relacionada a las categorias *Clasificación* y *Business*. # # Para mayores detalles del dataset se puede referir a su [pagina][1] en el repositorio de UCI. # El trabajo realizado tambien se encuentra en este [repositorio][2]. # # [1]:https://archive.ics.uci.edu/ml/datasets/Polish+companies+bankruptcy+data # [2]:https://github.com/mfreyeso/mlcourse_challenge # ### Definición del Problema en Terminos de Negocio # La previsión de bancarota se considera un tópico importante en la toma de decisiones económicas no solo por ser un mecanismo que previene los impactos y consecuencias sobre los colaboradores, inversores o stakeholders de una empresa, sino tambien porque permite identificar diversos escenarios que son observados por distintos actores (legisladores, entidades de control, organizaciones, etc.) externos para inferir comportamientos económicos y tomar decisiones de alto nivel respecto a la economía de una región o país. # # Coherente a esto, resulta relevante analizar la dimensión operativa y financiera de un conjunto de empresas respecto a hechos historicos sobre lineas de tiempo, de tal manera que permitan identificar umbrales de riesgo sobre las operaciones económicas que pueden afectar su estabilidad (bancarota) y preever hechos que puedan generar repercusiones en la productividad de un sector. 
# ### Definición del Problema en Terminos de Datos # El conjunto de datos **Polish Companies Bankruptcy** esta compuesto por 43405 registros, cada registro representa el estado financiero de una empresa a traves de 64 variables numéricas relacionadas a distintos indicadores financieros y una variable categórica que define su bancarota dentro de un tiempo determinado. # # El tiempo de previsión de bancarota es definido dentro del rango 2007 y 2013 de acuerdo a la siguiente regla: # - Para los registros disponibles desde el primer año (2007) la previsión de bancarota dentro de 5 años # - Para los registros disponibles desde el primer año (2008) la previsión de bancarota dentro de 4 años # - Para los registros disponibles desde el primer año (2009) la previsión de bancarota dentro de 3 años # - Para los registros disponibles desde el primer año (2010) la previsión de bancarota dentro de 2 años # - Para los registros disponibles desde el primer año (2011) la previsión de bancarota dentro de 1 años # # De acuerdo a la anterior descripción los datos estan particionados en cinco diferentes archivos en formato **arff**. 
Las variables se describen a continuacion: # # - X1 Beneficio neto / activos totales # - X2 pasivos totales / activos totales # - X3 capital de trabajo / activos totales # - X4 activos corrientes / pasivos a corto plazo # - X5 [(efectivo + valores a corto plazo + cuentas por cobrar - pasivos a corto plazo) / (gastos de funcionamiento - depreciación)] * 365 # - X6 ganancias retenidas / activos totales # - X7 EBIT / activos totales # - X8 valor contable del capital / pasivo total # - X9 ventas / activos totales # - X10 equity / total assets # - X11 (ganancia bruta + partidas extraordinarias + gastos financieros) / activos totales # - X12 Ganancia bruta / pasivos a corto plazo # - X13 (ganancia bruta + depreciación) / ventas # - X14 (beneficio bruto + interés) / activos totales # - X15 (pasivo total * 365) / (ganancia bruta + depreciación) # - X16 (beneficio bruto + depreciación) / pasivo total # - X17 activos totales / pasivos totales # - X18 Ganancia bruta / activos totales # - X19 ganancia bruta / ventas # - X20 (inventario * 365) / ventas # - X21 ventas (n) / ventas (n-1) # - X22 Beneficio en actividades operativas / activos totales # - X23 Ganancia / ventas netas # - X24 Ganancia bruta (en 3 años) / activos totales # - X25 (capital social - capital social) / activos totales # - X26 (beneficio neto + depreciación) / pasivo total # - X27 Beneficio en actividades de operación / gastos financieros # - X28 capital de trabajo / activos fijos # - X29 logaritmo de los activos totales # - X30 (pasivo total - efectivo) / ventas # - X31 (beneficio bruto + interés) / ventas # - X32 (pasivo corriente * 365) / costo de los productos vendidos # - X33 gastos operativos / pasivos a corto plazo # - X34 gastos operativos / pasivos totales # - X35 Beneficio en ventas / activos totales # - X36 ventas totales / activos totales # - X37 (activo circulante - inventarios) / pasivo a largo plazo # - X38 capital constante / activos totales # - X39 Beneficio en ventas / ventas # - X40 
(activo circulante - inventario - cuentas por cobrar) / pasivos a corto plazo # - X41 pasivo total / ((ganancia en actividades operativas + depreciación) * (12/365)) # - X42 Beneficio en actividades de operación / ventas # - X43 giro por cobrar + rotación de inventario en días # - X44 (cuentas por cobrar * 365) / ventas # - X45 Beneficio neto / inventario # - X46 (activo circulante - inventario) / pasivo a corto plazo # - X47 (inventario * 365) / costo de los productos vendidos # - X48 EBITDA (ganancia en actividades operativas - depreciación) / activos totales # - X49 EBITDA (ganancia en actividades operativas - depreciación) / ventas # - X50 activos corrientes / pasivos totales # - X51 pasivos a corto plazo / activos totales # - X52 (pasivo a corto plazo * 365) / costo de los productos vendidos) # - X53 capital / activos fijos # - X54 capital constante / activos fijos # - X55 Capital de trabajo # - X56 (ventas - costo de los productos vendidos) / ventas # - X57 (activo circulante - inventario - pasivo a corto plazo) / (ventas - ganancia bruta - depreciación) # - X58 costos totales / ventas totales # - X59 pasivo a largo plazo / patrimonio # - X60 ventas / inventario # - X61 ventas / cuentas por cobrar # - X62 (pasivos a corto plazo * 365) / ventas # - X63 Ventas / pasivos a corto plazo # - X64 Ventas / activos fijos # - X65 **Bancarota** # # El problema respecto a los datos es establecer si una empresa entrará en bancarota (1, 0) a partir de sus indicadores financieros en la prevision de tiempo de acuerdo al año al que pertenece el registro (estado financiero) de la empresa. 
# #### Exploración de los Datos

# +
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.model_selection import *
from sklearn.metrics import *
# -

# Los siguientes comandos facilitan la descarga de los datos.

# !mkdir 'data'
# !wget -q 'https://archive.ics.uci.edu/ml/machine-learning-databases/00365/data.zip'
# !unzip -q 'data.zip' -d './data'
# !rm 'data.zip'

# Como anteriormente se describió el tipo de archivo de los datos es **.arff**, por tanto es necesario poder convertirlos a **csv** para facilitar su manejo con la libreria pandas. La siguiente función es un pequeño parser entre este tipo de formatos.

# +
def to_csv(content):
    """Convert the lines of an ARFF file into CSV lines.

    Parameters
    ----------
    content : list of str
        Raw lines of an ARFF file (each line keeping its trailing newline).

    Returns
    -------
    list of str
        A CSV header line built from the ``@attribute`` names, followed by
        the data rows copied verbatim from the ``@data`` section.
    """
    columns = []
    csv_lines = []
    in_data_section = False
    for line in content:
        if in_data_section:
            # ARFF data rows are already comma separated: copy them as-is.
            csv_lines.append(line)
        elif line.strip().startswith("@attribute"):
            # Second token of "@attribute <name> <type>" is the column name.
            # startswith (vs the old substring test) avoids matching lines
            # that merely contain the token somewhere in the middle.
            columns.append(line.split()[1])
        elif line.strip().startswith("@data"):
            # Emit the header exactly once, right before the data rows start.
            # join() replaces the old concat-then-slice ("header[:-1]") logic.
            in_data_section = True
            csv_lines.append(",".join(columns) + "\n")
    return csv_lines


# Convert every downloaded .arff file into a .csv sibling with the same base name.
files = [arff for arff in os.listdir('./data') if arff.endswith(".arff")]
for file in files:
    with open(("data/" + file), "r") as in_file:
        content = in_file.readlines()
        name, ext = os.path.splitext(in_file.name)
        new = to_csv(content)
        with open(name + ".csv", "w") as out_file:
            out_file.writelines(new)
# -

# Como los archivos **arff** no contienen el nombre de cada variable en el header como propuesta personal se creó un archivo de texto con las etiquetas para facilitar y automatizar la operación de las etiquetas con la colección de datos. El siguiente comando descarga el archivo desde un almacenamiento público propio.
Adicionalmente se realiza una transformación sobre la cadena para evitar espacios innecesarios. # !wget -q 'https://storage.googleapis.com/mareyesobu/data/polishfeatures_dataset.txt' -O 'variables.txt' variables = list() with open('variables.txt', 'r') as var_file: content = var_file.readlines() variables = [(variable.strip().replace(" + ", "+").replace(" / ", "/").replace(" * ","*") .replace(" - ","-").replace(" ", "_")) for variable in content] # Se utilizará los DataFrames de Pandas libreria de Python para almacenar, limpiar y transformar los datos. Los datos son leidos desde los archivos **csv**. # # Previamente se habia descrito que los datos han sido particionados de acuerdo al año de previsión de bancarota en cinco distintos archivos. Como parte de este trabajo se formula la hipotesis de utilizar el año de previsión como una variable categórica que se añada a cada registro de acuerdo a la colección a la cual pertenece. Se intenta que los modelos puedan dentro de su aprendizaje utilizar esta variable para determinar respecto a los otros valores de esta variable *time_to_bankruptcy* la clase. # # Finalmente los datos se concatenan. # + dataframes = list() files = sorted([file for file in os.listdir('./data') if file.endswith(".csv")]) years = ['60', '48', '36', '24', '12'] for index, file in enumerate(files): filepath = 'data/%s' %(file, ) dataframe = pd.read_csv(filepath, low_memory=False) dataframe.columns = variables year_feature = pd.Series(np.repeat([years[index]], [len(dataframe)])) dataframe['time_to_bankruptcy'] = year_feature dataframes.append(dataframe) variables.append('time_to_bankruptcy') data = pd.concat(dataframes, ignore_index=True) # - size_data = data.shape print(size_data) # #### Limpieza de los Datos # # Para realizar el proceso de exploración forma efectiva se realiza un proceso de limpieza de datos coherente a la descripción del repositorio de UCI donde se detalla que el dataset contiene datos perdidos. 
Previamente observando los datos se identifica que el caracter **?** representa los datos perdidos. variables_change = variables[:-2] for v in data: data[v] = data[v].replace('?', np.nan) # + size_start = len(data) data = data.dropna() size_clean = len(data) lostdata_flag = False if size_start != size_clean: lostdata_flag = True print("Incompletos ", (size_start - size_clean)) size_start = size_clean data = data.drop_duplicates() size_clean = len(data) duplicated_flag = False if size_start != size_clean: duplicated_flag = True print("Duplicados ", (size_start - size_clean)) message = "Los datos se encontraron completos" if lostdata_flag or duplicated_flag: message = "Se realizo limpieza sobre la colección de datos, el nuevo conjunto es de %s observaciones" %(size_clean, ) print(message) # - # #### Ajustando los tipos de datos sobre el DataFrame # # Como parte de la lectura de los archivos **csv** Pandas establece el tipo de las variables como objetos, de acuerdo a eso se establece los tipos de datos numericos para las 64 variables iniciales y categórico para *time_to_bankruptcy* y la clase. # + data['time_to_bankruptcy'] = data['time_to_bankruptcy'].astype('category') data['bankruptcy'] = data['bankruptcy'].astype('category') for v in variables_change: data[v] = pd.to_numeric(data[v]) # - # Validando los nuevos tipos de variables data.dtypes # #### Análisis de los Datos # # A continuación se realiza un analisis de las variables que conforman el conjunto de datos a fin de poder conocer su escala, distribución y balance. data.describe() # #### Boxplots de Variables # # A fin de poder observar graficamente las variables a traves un diagrama de caja y bigotes se realiza un filtro previo sobre los datos para no observar los outliers que a través de los valores maximos descritos a través del anterior tabla son evidentes. 
# + filt_df = data.loc[:, data.columns != 'bankruptcy'] filt_df = filt_df.loc[:, filt_df.columns != 'time_to_bankruptcy'] low = .05 high = .95 quant_df = filt_df.quantile([low, high]) filt_df = filt_df.apply(lambda x: x[(x>=quant_df.loc[low,x.name]) & (x<=quant_df.loc[high,x.name])], axis=0) filt_df = pd.concat([filt_df, data.loc[:,'time_to_bankruptcy']], axis=1) filt_df = pd.concat([filt_df, data.loc[:,'bankruptcy']], axis=1) data_df = filt_df # - # #### Boxplot de Features # + # %matplotlib inline a4_dims = (11.7, 30.27) fig, ax = plt.subplots(figsize=a4_dims) medium_variables = list() for f in data_df.loc[:, :'sales/fixed_assets']: if data_df[f].max() >= 10: medium_variables.append(f) small_df = data_df.loc[:, data_df.columns != medium_variables[0]] for i in range(1,len(medium_variables)): small_df = small_df.loc[:, small_df.columns != medium_variables[i]] small_df = small_df.loc[:, small_df.columns != 'time_to_bankruptcy'] small_df = small_df.loc[:, small_df.columns != 'bankruptcy'] sns.boxplot(data=small_df, orient='h') # + big_variables = list() medium_df = data_df[medium_variables] for j in medium_df: if medium_df[j].max() > 1000: big_variables.append(j) medium_df.pop(j) a4_dims = (11.7, 18.27) fig, ax = plt.subplots(figsize=a4_dims) sns.boxplot(data=medium_df, orient='h') # - big_df = data_df[big_variables] a4_dims = (11.7, 4.27) fig, ax = plt.subplots(figsize=a4_dims) sns.boxplot(data=big_df, orient='h') # #### Variables Categoricas # + f = data_df['bankruptcy'].value_counts() proportion_bankruptcy = f / data_df.shape[0] a4_dims = (7.7, 7.27) fig, ax = plt.subplots(figsize=a4_dims) labels = ['No Bancarota - %.3f' %(proportion_bankruptcy[0]), 'Bancarota - %.2f' %(proportion_bankruptcy[1])] explode = (0, 0.1) cs = plt.cm.Set2(np.arange(2)/2.) 
patches, texts = plt.pie(f, explode=explode, labels=labels, radius=1, colors=cs) plt.axis('equal') plt.title("Proporción de clase Bancarota para los datos") plt.show() # - proportion_bankruptcy # En el gráfico se puede evidenciar que el conjunto de datos respecto a la clase bancarota se encuentra desbalanceado, esto sugiere tener absoluto cuidado en el proceso de selección de muestras para el proceso de entrenamiento, validacion y prueba. # + labels = ['36 meses', '24 meses', '48 meses', '60 meses', '12 meses'] labels.reverse() feq_years = data_df['time_to_bankruptcy'].value_counts() proportion_years = (feq_years / data_df.shape[0]).tolist() proportion_years.reverse() for i in range(0, len(proportion_years)): labels[i] = labels[i] + " - %.2f" %(proportion_years[i], ) + "%" a4_dims = (7.7, 7.27) fig, ax = plt.subplots(figsize=a4_dims) explode = (0.1, 0, 0, 0.05, 0) cs = plt.cm.tab20c(np.arange(5)/5.) patches, texts = plt.pie(proportion_years, explode=explode, labels=labels, radius=1, colors=cs) plt.title('Proporcion de datos de acuerdo a los tiempos de previsión de año de Bancarota \n') plt.gca().axis("equal") plt.show() # + years = ('12', '24', '36', '48', '60') bankruptcy = list() no_bankruptcy = list() grouped = data_df.groupby(['time_to_bankruptcy', 'bankruptcy'])['bankruptcy'].count() for i in range(0, len(grouped), 2): bankruptcy.append(grouped[i]) no_bankruptcy.append(grouped[i+1]) a4_dims = (7.7, 7.27) fig, ax = plt.subplots(figsize=a4_dims) width = 0.5 ind = np.arange(len(years)) p1 = plt.bar(ind, no_bankruptcy, width, color='green') p2 = plt.bar(ind, bankruptcy, width, bottom=no_bankruptcy, color='lightblue') plt.ylabel('Scores') plt.title('Agrupación Bancarota por Prevision en Meses \n') plt.xticks(ind, years) plt.legend((p1[0], p2[0]), ('Bancarota', 'No Bancarota')) plt.show() # - # #### Controlando Outliers # # Con base en el anterior análisis de las variables que conforman el conjunto de datos se establece realizar una transformacion sobre los valores 
de cada columna que supera los limites adecuados de la desviación estandar. A traves del método clip de pandas se ejecuta esta tranformacion sobre los datos con excepción de las variables categóricas. # + data_df = pd.DataFrame() for f in data.loc[:,:'sales/fixed_assets']: std = data[f].std() data_df[f] = data[f].clip(-3*std, 3*std) data_df = pd.concat([data_df, data.loc[:,'time_to_bankruptcy']], axis=1) data_df = pd.concat([data_df, data.loc[:,'bankruptcy']], axis=1) # - # ## Preparación de Datos # A traves de las siguientes cuatro tecnicas se intenta establecer un modelo de de aprendizaje para la previsión de bancarota para empresas de polonia. Las tecnicas seleccionadas se caracterizan por ser algoritmos de aprendizaje supervisado que pueden determinar una clasificación a traves de un proceso de entrenamiento previo. # # De acuerdo a lo anteriormente descrito los metodos utilizados requieren conjuntos de datos disimiles durante su # fase de entrenamiento, validación y prueba. Para facilitar el proceso se plantea construir una función que genere # los conjuntos de entrenamiento y prueba cada vez que se requiera. La función se basa en el metodo kfolds estratificado debido a que los datos se encuentran desbalanceados con relación a la etiqueta de bancarota como en la sección de exploración de datos fue descrito. La funcion es parametrizable, se puede determinar el numero de folds y la columna que representa la clase. 
def get_sets(data, label_class, num_folds=10):
    """Build stratified train/test folds for a labelled dataframe.

    Parameters
    ----------
    data : pandas.DataFrame
        Full dataset; one of its columns holds the class label.
    label_class : str
        Name of the label column used for stratification.
    num_folds : int, optional
        Number of stratified folds (default 10).

    Returns
    -------
    dict
        Maps the fold number (as a string, "1", "2", ...) to
        {"train": [x_train, y_train], "test": [x_test, y_test]}.
    """
    folds = dict()
    skf = StratifiedKFold(n_splits=num_folds)
    x = data.loc[:, data.columns != label_class]
    y = data[label_class]
    fold = 1
    for train_index, test_index in skf.split(x, y):
        # BUG FIX: StratifiedKFold yields *positional* indices, so rows must
        # be selected with .iloc.  The original used .reindex(), which is
        # label-based and silently inserts all-NaN rows whenever the
        # dataframe index is not a clean 0..n-1 range.
        x_train, x_test = x.iloc[train_index], x.iloc[test_index]
        y_train, y_test = y.iloc[train_index], y.iloc[test_index]
        folds[str(fold)] = {"train": [x_train, y_train], "test": [x_test, y_test]}
        fold += 1
    return folds


# Para la construcción de los modelos se debe indicar sobre el dataframe que estamos utilizando que la variable que se añadió **time_to_bankruptcy** debe ser considerada como un factor. De forma complementaria para evitar errores de indexación se reinician los indices sobre el dataframe despues del proceso de limpieza y transformación realizado previamente.

enc = LabelEncoder()
data_df['time_to_bankruptcy'] = enc.fit_transform(data_df['time_to_bankruptcy'])
data_df = data_df.reset_index()


def draw_precision_recall(precision, recall, average_precision=None):
    """Plot a filled 2-class precision-recall curve.

    Parameters
    ----------
    precision, recall : array-like
        Arrays as returned by sklearn.metrics.precision_recall_curve.
    average_precision : float, optional
        Average-precision value shown in the plot title.  BUG FIX: the
        original read a global ``average_precision`` that is never defined
        anywhere in the notebook, so calling the function raised NameError.
        It is now an explicit, backward-compatible keyword argument; when it
        is omitted the AP annotation is simply skipped.
    """
    plt.step(recall, precision, color='b', alpha=0.2, where='post')
    plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    if average_precision is not None:
        plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(
            average_precision))
    plt.show()

# ## Modelado

# ### Método 1: Regresión Logistica

# La Regresión Logística es una técnica estadística que permite estimar la relación existente entre una variable dependiente no métrica, en particular dicotómica y un conjunto de variables independientes métricas o no métricas.
#
# La variable dependiente o respuesta no es continua, sino discreta (generalmente toma valores 1,0).
Las variables explicativas pueden ser cuantitativas o cualitativas; y la ecuación del modelo no es una función lineal de partida, sino exponencial, este tipo de modelos son utiles en frecuentes situaciones en que la respuesta puede tomar únicamente dos valores: 1, presencia (con probabilidad p); y 0, ausencia (con probabilidad 1-p). # + results = list() penalties = ['l1','l2'] for p in penalties: data_ml = get_sets(data_df, 'bankruptcy', 10) print("Penalty %s \n" %(p, )) mse = list() pr = list() score = list() for f in data_ml: print("Procesando fold %s" % f) train = data_ml[f]['train'] test = data_ml[f]['test'] # Validando con diferentes hiperparametros para la penalidad # Utilizando el solver liblinear recomendado para clasificación binaria logreg = LogisticRegression(penalty=p, solver='liblinear') logreg.fit(train[0], train[1]) pred = logreg.predict(test[0]) mse.append(mean_squared_error(test[1], pred)) score.append(logreg.score(test[0], test[1])) y_score = logreg.decision_function(test[0]) precision, recall, _ = precision_recall_curve(test[1], y_score) pr.append([precision, recall]) result = {'mse': mse, 'score': score, 'curve': pr} results.append(result) print("\n") # - # **Evaluación del Modelo** # + print("Resultados del Modelo: \n") for i in range(0, len(penalties)): print("Con penalidad %s \n" %(penalties[i])) result = results[i] mean_mse = pd.DataFrame(result['mse']).mean() mean_score = pd.DataFrame(result['score']).mean() print("Score Promedio del Modelo: %s \n" %(mean_score,)) print("Error Promedio del Modelo: %s \n" %(mean_mse,)) # - # ### Método 2: Árboles de Decisión # Los árboles de decisión son modelos que tienen estructuras para representar conjuntos de decisiones. Estas decisiones generan reglas para la clasificación de un conjunto de datos. # # Existen métodos específicos de árboles de decisión que incluyen clasificación y regresión. 
De manera fundamental los árboles de decisión son representaciones gráficas de la lógica de las probabilidades aplicada a las alternativas de decisión. La base (trunk) del árbol es el punto de partida de la decisión. Las ramas de éste comienzan con la probabilidad del primer acontecimiento. # # La probabilidad de cada situacion produce dos o más efectos posibles, algunos de los cuales conducen a otros eventos de probabilidad y a puntos de decisión anidados. Los valores en los que se fijan las ramas del árbol provienen de un análisis detallado para establecer un criterio para la toma de decisión. # + results = list() depth_list = [7, 10, 15] for d in depth_list: data_ml = get_sets(data_df, 'bankruptcy', 10) print("Arbol con profundidad max %s \n" %(d, )) mse = list() score = list() for f in data_ml: print("Procesando fold %s" % f) train = data_ml[f]['train'] test = data_ml[f]['test'] #Utilizando diferentes valores de profundidad para el arbol clf = DecisionTreeClassifier(max_depth=d) clf.fit(train[0], train[1]) pred = clf.predict(test[0]) mse.append(mean_squared_error(test[1], pred)) score.append(clf.score(test[0], test[1])) result = {'mse': mse, 'score': score} results.append(result) print("\n") # - # **Resultados del Modelo** # + print("Resultados del Modelo: \n") for i in range(0, len(depth_list)): print("Con profundidad %s \n" %(depth_list[i])) result = results[i] mean_mse = pd.DataFrame(result['mse']).mean() mean_score = pd.DataFrame(result['score']).mean() print("Score Promedio del Modelo: %s \n" %(mean_score,)) print("Error Promedio del Modelo: %s \n" %(mean_mse,)) # - # ### Método 3: Redes Neuronales Artificiales # La técnica basada en redes neuronales artificiales se basa en la neurona como unidad inteligente capaz de procesar y aprender de forma análoga a como sucede en el cerebro humano. Una red neuronal artificial se basa sobre el trabajo conjunto de numerosas neuornas que interactuan para efectuar una acción. 
# # En una red neuronal artificial las unidades de procesamiento se organizan en capas. Comunmente una red esta compuesta por tres partes: una capa con unidades que representan los campos de entrada, una o varias capas ocultas y una capa de salida con una unidad o unidades que representa el campo o los campos de destino. Las unidades se conectan con fuerzas de conexión variables consideradas pesos que son inicializados de forma aleatoria. Los datos de entrada ingresan a traves de la primera capa y los valores de procesamiento y cálculo se propagan desde cada neurona hasta cada neurona de la capa siguiente. al final, se envía un resultado desde la capa de salida. # # La red aprende examinando los registros individuales, generando una predicción para cada registro y realizando ajustes a las ponderaciones cuando realiza una predicción incorrecta. Este proceso se repite muchas veces y la red sigue mejorando sus predicciones hasta haber alcanzado uno o varios criterios de parada. # + mse = list() score = list() data_ml = get_sets(data_df, 'bankruptcy', 10) for f in data_ml: print("Procesando fold %s" % f) train = data_ml[f]['train'] test = data_ml[f]['test'] nna = MLPClassifier(alpha=0.01) nna.fit(train[0], train[1]) pred = nna.predict(test[0]) mse.append(mean_squared_error(test[1], pred)) score.append(nna.score(test[0], test[1])) print("Resultados del Modelo: \n") print("Error obtenido: %s" % (pd.DataFrame(mse).mean(), )) print("Score obtenido: %s" % (pd.DataFrame(score).mean(), )) # - # ### Método 4: Maquina de Soporte Vectorial # Las maquinas de soporte vectorial hacen parte de las técnicas de aprendizaje supervisado desarrollada en 1995 por # Vapnik y Cortés en AT&T durante el año 1995. Las maquinas de soporte vectorial se pueden utilizar tanto para problemas de clasificación como tambien para regresión. 
# # La naturaleza del método se basa en la idea de transformar o proyectar un conjunto de datos pertenecientes a una dimensión n dada hacia un espacio de dimensión superior aplicando una función kernel. A partir del nuevo espacio creado los datos se operaran como si se tratase de un problema de tipo lineal resolviendo el problema sin considerar la dimensionalidad de los datos. # # La idea detrás de las SVM es que a partir de unos inputs de entrada al modelo se etiquetan las clases y se entrena una SVM construyendo un modelo que sea capaz de predecir la clase de los nuevos datos que se introduzcan al modelo. La SVM representa en un eje de coordenadas los vectores de entrenamiento, separando las clases por un espacio lo más grande posible. Cuando nuevos datos son introducidos al modelo, estos se colocan sobre el mismo eje y en función de la cercanía de los grupos antes separados, los cuáles serán clasificados en una u otra clase. # + mse = list() score = list() data_ml = get_sets(data_df, 'bankruptcy', 10) for f in data_ml: print("Procesando fold %s" % f) train = data_ml[f]['train'] test = data_ml[f]['test'] svcm = SVC() svcm.fit(train[0], train[1]) pred = svcm.predict(test[0]) mse.append(mean_squared_error(test[1], pred)) score.append(svcm.score(test[0], test[1])) print("Resultados del Modelo: \n") print("Error obtenido: %s" % (pd.DataFrame(mse).mean(), )) print("Score obtenido: %s" % (pd.DataFrame(score).mean(), )) # - # ### Evaluacion y Comparación de Modelos # De acuerdo a los métodos utilizados el más adecuado de acuerdo al score y nivel de errores es la maquina de soporte vectorial. De forma complementaria se puede observar que todos los modelos generados tiene una clasificacion semejante, se prodia afirmar que los modelos prodian estar fallando por el desbalance de la muestra, no obstante se esta utilizando validacion cruzada estratificada para evitar escenarios de overfitting.
ML_Final_Exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:Py2]
#     language: python
#     name: conda-env-Py2-py
# ---

# # Bitfinex API Data Pull

# +
import csv
import urllib
import os
import ast
import pandas as pd
from datetime import datetime, date


def writecsvfile(fname, data):
    """Write an iterable of rows to *fname* as a CSV file."""
    mycsv = csv.writer(open(fname, 'w'))
    for row in data:
        mycsv.writerow(row)


def get_data(symbol, freq):
    """Download candle data for *symbol* from Bitfinex and store it as CSV.

    Parameters
    ----------
    symbol : str
        Trading pair, e.g. 'OMGETH'.
    freq : str
        Candle time frame, e.g. '1D'.

    Side effects: writes 'Bitfinex-<symbol>.csv' in the current working
    directory, with one row per candle indexed by date (oldest first).
    """
    fpath = os.getcwd()
    # BUG FIX: the original concatenated directory and file name without a
    # path separator for the first write and the read
    # (fpath + 'Bitfinex-...'), but used a back-slash (fpath + '\Bitfinex-...')
    # for the final write, so the data was read from one location and the
    # cleaned file written to another (and the raw form of '\B' is fragile).
    # os.path.join is now used consistently for a single, portable path.
    csv_path = os.path.join(fpath, 'Bitfinex-{}.csv'.format(symbol))
    link = ('https://api.bitfinex.com/v2/candles/trade:' + freq
            + ':t' + '%s' % symbol + '/hist?limit=1000')
    # NOTE: urllib.urlopen is the Python 2 API (the kernel is a Py2 conda env).
    f = urllib.urlopen(link)
    myfile = f.read()
    # The API answers with a JSON-like list literal; literal_eval parses it
    # without executing arbitrary code.
    evaluated = ast.literal_eval(myfile)
    writecsvfile(csv_path, evaluated)
    df = pd.read_csv(csv_path, sep=',')
    df.columns = ['date', 'open', 'close', 'high', 'low', 'volume']
    df['date'] = pd.to_datetime(df['date'], unit='ms')
    # The API returns newest candles first; reverse to chronological order.
    df = df.iloc[::-1]
    df = df.set_index(['date'])
    df.to_csv(csv_path, sep=',')
# -

get_data('OMGETH', '1D')
quantitativefinance/Bitfinex API Data Pull.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt ### Just some matplotlib tweaks import matplotlib as mpl mpl.rcParams["xtick.direction"] = "in" mpl.rcParams["ytick.direction"] = "in" mpl.rcParams["lines.markeredgecolor"] = "k" mpl.rcParams["lines.markeredgewidth"] = 1.5 mpl.rcParams["figure.dpi"] = 200 from matplotlib import rc rc('font', family='serif') rc('text', usetex=True) rc('xtick', labelsize='medium') rc('ytick', labelsize='medium') rc("axes", labelsize = "large") def cm2inch(value): return value/2.54 # - z = np.linspace(10e-9, 5e-6, 10000) a = 1.5e-6 D0 = 4e-21 / (6*np.pi * 0.001 * a) #taking alpha = 1 v_noise = 2*D0 * a * (2*a**2 + 12 *a * z + 21 * z** 2) / (2*a**2 + 9*a*z + z**2)**2 plt.figure(figsize=( cm2inch(16),cm2inch(8))) plt.plot(z*1e6, v_noise*1e6) # + def eta_z(z): return 0.001 * (6*z**2 + 9 * a * z + 2 * a**2)/(6*z**2 + 2*a*z) def gamma(z): return 6 * np.pi * eta_z(z) * a lb = 500e-9 ld = 50e-9 B = 4 #kt unit def F_z(z): return - 4e-21 * (-1/ld * B *np.exp(-z/ld) + 1/lb) # - v_deterministic = 1/gamma(z) * F_z(z) plt.figure(figsize=( cm2inch(16),cm2inch(8))) plt.plot(z*1e6, v_deterministic*1e6, label = "$v_\mathrm{d}$") plt.plot(z*1e6, v_noise*1e6, label="$v_\mathrm{noise}$") plt.plot(z*1e6, v_noise*1e6 + v_deterministic*1e6, color = "black", label = "$\\bar{v}_\mathrm{d}$") plt.ylabel("$v$ ($\\mathrm{\\mu m.s^{-1}}$)") plt.xlabel("$z$ ($\\mathrm{\\mu m}$)") plt.legend(frameon=False) plt.tight_layout() vtot = v_noise*1e6 + v_deterministic*1e6 vtot_gradient =np.abs(1/vtot* np.gradient(vtot, np.mean(np.diff(z)))) np.mean(np.diff(z)) D_z = 4e-21 / gamma(z) D_z_gradient = np.abs( 1/ D_z * np.gradient(D_z, np.mean(np.diff(z)))) plt.semilogx(z*1e6, vtot_gradient, label = "Drifts gradient") plt.semilogx(z*1e6, D_z_gradient, 
label = "Diffusion gradient") plt.ylabel("gradients") plt.xlabel("$z$ ($\mu$m)") plt.legend() # + D0 = 4e-21 / (6 * np.pi * 0.001 * 1.5e-6) a = 1.5e-6 def tau_max(ld, B, z): lb = 500e-9 return a / (2*D0) * np.power((1/(B/ld + 1/lb) + z),2) / z def min_tau_max(ld, B): lb = 500e-9 return 2* a / D0 / (B/ld - 1/lb) # - # + lb = 500e-9 B = 10 z = np.linspace(1e-9, 100e-9, 10000) for i in [20e-9, 30e-9,40e-9, 50e-9]: plt.semilogx(z*1e6, tau_max(i, B, z), label= "$\ell_\mathrm{D} =$ " + str(np.round(i*1e9))[:-2] + " nm") plt.plot(1/(B/i - 1/lb)* 1e6, min_tau_max(i, B), "o", markersize = 2, color = "b") lds = np.linspace(20e-9, 300e-9) plt.plot(1/(B/lds - 1/lb)* 1e6, min_tau_max(lds, B), color = "black") plt.ylabel("$\\tau_\mathrm{max}$") plt.xlabel("$z$ ($\mu$m)") plt.legend(frameon = False) # - min_tau_max(i, B) # + fig = plt.figure(figsize = (cm2inch(16),cm2inch(9))) gs = fig.add_gridspec(1, 2) fig.add_subplot(gs[0, 0]) plt.loglog(z*1e6, vtot_gradient, label = "$\\frac{1}{\\bar{v}_\\mathrm{d}} \\frac{\\partial \\bar{v}_\\mathrm{d} }{\partial z}$") plt.plot(z*1e6, D_z_gradient, label = "$\\frac{1}{D_\\bot} \\frac{\\partial D_\\bot }{\partial z}$") plt.ylabel("relative variations (m$^{-1}$)") plt.xlabel("$z$ ($\mu$m)") plt.legend(frameon = False) #plt.text(0.05, -1e6, "a)", fontsize=20) fig.add_subplot(gs[0, 1]) ld = 500e-9 B = 10 z = np.linspace(1e-9, 100e-9, 10000) for i in [20e-9, 30e-9, 40e-9, 50e-9]: plt.semilogx(z*1e6, tau_max(i, B, z), label= "$\ell_\mathrm{D} =$ " + str(np.round(i*1e9))[:-2] + " nm") plt.plot(1/(B/i - 1/lb)* 1e6, min_tau_max(i, B), "o", markersize = 2, color = "b") lds = np.linspace(20e-9, 50e-9) plt.plot(1/(B/lds- 1/lb)* 1e6, min_tau_max(lds, B), color = "black") plt.ylabel("$\\tau_\mathrm{max}$") plt.xlabel("$z$ ($\mu$m)") plt.text(0.05, 0.06, "b)", fontsize=20) plt.legend(frameon = False) plt.tight_layout() plt.savefig("maximal_tau.pdf") # - min_tau_max(20e-9, B)
02_body/chapter3/images/simulation_confined_Brownian_motion/maximal_tau.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Mean and median # import numpy as np import pandas as pd # + # get path to the dataset food_data = 'C:\\Users\\ADMIN\\Desktop\\Data Science Projects\\Stutern Projects\\DataCamp\\Introduction to statistics\\Datasets\\food_consumption.csv' # load the dataset as csv food_consumption = pd.read_csv(food_data) # inspect the dataset food_consumption.head() # + # Filter for Belgium be_consumption = food_consumption[food_consumption['country'] == 'Belgium'] # Filter for USA usa_consumption = food_consumption[food_consumption['country'] == 'USA'] # Calculate mean and median consumption in Belgium print(be_consumption[['consumption', 'co2_emission']].mean()) print(be_consumption[['consumption', 'co2_emission']].median(), '\n') # Calculate mean and median consumption in USA print(usa_consumption[['consumption', 'co2_emission']].mean()) print(usa_consumption[['consumption', 'co2_emission']].median()) # + # Subset for Belgium and USA only be_and_usa = food_consumption[(food_consumption['country'] == "Belgium") | (food_consumption['country'] == 'USA')] # Group by country, select consumption column, and compute mean and median be_and_usa.groupby('country')['consumption'].agg([np.mean, np.median]) # + # Import matplotlib.pyplot with alias plt import matplotlib.pyplot as plt # Subset for food_category equals rice rice_consumption = food_consumption[food_consumption['food_category'] == 'rice'] # Histogram of co2_emission for rice and show plot rice_consumption['co2_emission'].hist() plt.show() # - # Calculate mean and median of co2_emission in rice consumption with .agg() rice_consumption['co2_emission'].agg([np.mean, np.median]) # # Quartiles, quantiles, and quintiles # # Calculate the quartiles of co2_emission np.quantile(food_consumption['co2_emission'], [0, 
0.25, 0.5, 0.75, 1]) # Calculate the quintiles of co2_emission print(np.quantile(food_consumption['co2_emission'], [0, 0.2, 0.4, 0.6, 0.8, 1])) # Calculate the deciles of co2_emission print(np.quantile(food_consumption['co2_emission'], np.linspace(0, 1, 10))) # # Variance and standard deviation # # Calculate the variance and standard deviation of co2_emission for each food_category by grouping and aggregating. food_consumption.groupby('food_category')['co2_emission'].agg([np.var, np.std]) # + # Create histogram of co2_emission for food_category 'beef' food_consumption[food_consumption['food_category'] == 'beef']['co2_emission'].hist() # Show plot plt.show() # + # Create histogram of co2_emission for food_category 'egg' food_consumption[food_consumption['food_category'] == 'eggs']['co2_emission'].hist() # Show plot plt.show() # - # # Finding outliers using IQR # # + # Calculate total co2_emission per country: emissions_by_country emissions_by_country = food_consumption.groupby('country')['co2_emission'].agg(np.sum) # check emissions_by_country # - # Compute the first and third quartiles and IQR of emissions_by_country q1 = emissions_by_country.quantile(0.25) q3 = emissions_by_country.quantile(0.75) iqr = q3 - q1 # Calculate the lower and upper cutoffs for outliers lower = q1 - 1.5 * iqr upper = q3 + 1.5 * iqr # + """ Subset emissions_by_country to get countries with a total emission greater than the upper cutoff or a total emission less than the lower cutoff. """ outliers = emissions_by_country[(emissions_by_country < lower) | (emissions_by_country > upper)] # check outliers
Introduction_to_statistics/Summary Statistics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Name # Data preparation by using a template to submit a job to Cloud Dataflow # # # Labels # GCP, Cloud Dataflow, Kubeflow, Pipeline # # # Summary # A Kubeflow Pipeline component to prepare data by using a template to submit a job to Cloud Dataflow. # # # Details # # ## Intended use # Use this component when you have a pre-built Cloud Dataflow template and want to launch it as a step in a Kubeflow Pipeline. # # ## Runtime arguments # Argument | Description | Optional | Data type | Accepted values | Default | # :--- | :---------- | :----------| :----------| :---------- | :----------| # project_id | The ID of the Google Cloud Platform (GCP) project to which the job belongs. | No | GCPProjectID | | | # gcs_path | The path to a Cloud Storage bucket containing the job creation template. It must be a valid Cloud Storage URL beginning with 'gs://'. | No | GCSPath | | | # launch_parameters | The parameters that are required to launch the template. The schema is defined in [LaunchTemplateParameters](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters). The parameter `jobName` is replaced by a generated name. | Yes | Dict | A JSON object which has the same structure as [LaunchTemplateParameters](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters) | None | # location | The regional endpoint to which the job request is directed.| Yes | GCPRegion | | None | # staging_dir | The path to the Cloud Storage directory where the staging files are stored. A random subdirectory will be created under the staging directory to keep the job information. 
This is done so that you can resume the job in case of failure.| Yes | GCSPath | | None | # validate_only | If True, the request is validated but not executed. | Yes | Boolean | | False | # wait_interval | The number of seconds to wait between calls to get the status of the job. | Yes | Integer | | 30 | # # ## Input data schema # # The input `gcs_path` must contain a valid Cloud Dataflow template. The template can be created by following the instructions in [Creating Templates](https://cloud.google.com/dataflow/docs/guides/templates/creating-templates). You can also use [Google-provided templates](https://cloud.google.com/dataflow/docs/guides/templates/provided-templates). # # ## Output # Name | Description # :--- | :---------- # job_id | The id of the Cloud Dataflow job that is created. # # ## Caution & requirements # # To use the component, the following requirements must be met: # - Cloud Dataflow API is enabled. # - The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details. # - The Kubeflow user service account is a member of: # - `roles/dataflow.developer` role of the project. # - `roles/storage.objectViewer` role of the Cloud Storage Object `gcs_path.` # - `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir.` # # ## Detailed description # You can execute the template locally by following the instructions in [Executing Templates](https://cloud.google.com/dataflow/docs/guides/templates/executing-templates). See the sample code below to learn how to execute the template. # Follow these steps to use the component in a pipeline: # 1. Install the Kubeflow Pipeline SDK: # # + # %%capture --no-stderr # !pip3 install kfp --upgrade # - # 2. 
Load the component using KFP SDK # + import kfp.components as comp dataflow_template_op = comp.load_component_from_url( 'https://raw.githubusercontent.com/kubeflow/pipelines/38771da09094640cd2786a4b5130b26ea140f864/components/gcp/dataflow/launch_template/component.yaml') help(dataflow_template_op) # - # ### Sample # # Note: The following sample code works in an IPython notebook or directly in Python code. # In this sample, we run a Google-provided word count template from `gs://dataflow-templates/latest/Word_Count`. The template takes a text file as input and outputs word counts to a Cloud Storage bucket. Here is the sample input: # !gsutil cat gs://dataflow-samples/shakespeare/kinglear.txt # #### Set sample parameters # + tags=["parameters"] # Required Parameters PROJECT_ID = '<Please put your project ID here>' GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash # - # Optional Parameters EXPERIMENT_NAME = 'Dataflow - Launch Template' OUTPUT_PATH = '{}/out/wc'.format(GCS_WORKING_DIR) # #### Example pipeline that uses the component import kfp.dsl as dsl import json @dsl.pipeline( name='Dataflow launch template pipeline', description='Dataflow launch template pipeline' ) def pipeline( project_id = PROJECT_ID, gcs_path = 'gs://dataflow-templates/latest/Word_Count', launch_parameters = json.dumps({ 'parameters': { 'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt', 'output': OUTPUT_PATH } }), location = '', validate_only = 'False', staging_dir = GCS_WORKING_DIR, wait_interval = 30): dataflow_template_op( project_id = project_id, gcs_path = gcs_path, launch_parameters = launch_parameters, location = location, validate_only = validate_only, staging_dir = staging_dir, wait_interval = wait_interval) # #### Compile the pipeline pipeline_func = pipeline pipeline_filename = pipeline_func.__name__ + '.zip' import kfp.compiler as compiler compiler.Compiler().compile(pipeline_func, pipeline_filename) # #### Submit the pipeline for execution # + 
#Specify pipeline argument values arguments = {} #Get or create an experiment and submit a pipeline run import kfp client = kfp.Client() experiment = client.create_experiment(EXPERIMENT_NAME) #Submit a pipeline run run_name = pipeline_func.__name__ + ' run' run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) # - # #### Inspect the output # !gsutil cat $OUTPUT_PATH* # ## References # # * [Component python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataflow/_launch_template.py) # * [Component docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile) # * [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataflow/launch_template/sample.ipynb) # * [Cloud Dataflow Templates overview](https://cloud.google.com/dataflow/docs/guides/templates/overview) # # ## License # By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control. #
components/gcp/dataflow/launch_template/sample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt from PIL import Image import os from matplotlib.pyplot import imshow import pandas as pd import seaborn as sb import random random.seed(1) numcat = 6 #number of categories categories = ['AbdomenCT', 'BreastMRI', 'ChestCT', 'CXR', 'Hand', 'HeadCT'] # directory = r'./Resized_MNIST/' # train, test = [], [] # for i in range(numcat): # imagearray = [] # for image_raw in os.listdir(directory + categories[i]): # image_np = (np.array(Image.open(os.path.join(directory + categories[i], image_raw))).flatten()) # image = np.append(image_np, i) # imagearray.append(image.astype('uint8')) # train += imagearray[0:int(0.8*len(imagearray))] # test += imagearray[int(0.8*len(imagearray)):] # data = np.array(imagearray) # train = np.array(train) # test = np.array(test) # + # np.save('Medical_train_resized.npy', train) # np.save('Medical_test_resized.npy', test) # - train = np.load('Medical_train_resized.npy') test = np.load('Medical_test_resized.npy') #shuffle train and test sets # + np.random.shuffle(train) np.random.shuffle(test) #splitting into x and y - for both train and test sets x_train, y_train = train[:, :-1], train[:, -1] x_test, y_test = test[:, :-1], test[:, -1] cols = x_train.shape[1] for i in range(cols): train_col = x_train[:,i] train_mean = train_col.mean() train_std = train_col.std() x_train[:,i] = (x_train[:,i] - train_mean)/train_std test_col = x_test[:,i] test_mean = test_col.mean() test_std = test_col.std() x_test[:,i] = (x_test[:,i] - test_mean)/test_std # - train_ones = np.ones((x_train.shape[0],1), dtype=x_train.dtype) X = np.append(x_train,train_ones,axis=1) print(X.shape) test_ones = np.ones((x_test.shape[0],1), dtype=x_test.dtype) X_test = np.append(x_test,test_ones,axis=1) 
print(X_test.shape) Y = np.reshape(y_train,(y_train.shape[0],1)) print(Y.shape) Y_test = np.reshape(y_test,(y_test.shape[0],1)) print(Y_test.shape) # ## Conversion of output values into one-hot vectors # + classes = 6 Y_oh = np.zeros((Y.shape[0],classes)) for i in range(Y.shape[0]): Y_oh[i,Y[i]] = 1 Y_test_oh = np.zeros((Y_test.shape[0],classes)) for i in range(Y_test.shape[0]): Y_test_oh[i,Y_test[i]] = 1 print(Y_oh) print(Y) # - num_iter = 40 rate = 0.01 # + #subtract the maximum of each array to stabilise exponential calculation using the identity softmax(x - c) = softmax(x) def softmax(Z): prob = np.zeros(Z.shape) for i in range(Z.shape[0]): Z_stable = Z[i] - np.amax(Z[i]) exp = np.exp(Z_stable) total = np.sum(exp) prob[i] = exp/total return prob # - # ## Cross Entropy Loss # + #add a small positive value to make sure log does not overflow def loss(W,X,Y): Z = np.matmul(X,W) p = softmax(Z) loss = 0 epsilon = 1e-5 for i in range(Y.shape[0]): for j in range(Y.shape[1]): if(Y[i,j] > 0): loss = loss - np.log(p[i,j] + epsilon) return loss/Y.shape[0] def gradient(W,X,Y): Z = np.matmul(X,W) p = softmax(Z) grad = np.matmul(X.T,(p-Y))/X.shape[0] return grad def sgd(W,X,Y,X_test,Y_test,rate,num_iter): train_loss, test_loss = [], [] for _ in range(0, num_iter): grad = gradient(W,X,Y) W = W - rate*grad train_loss.append(loss(W,X,Y)) test_loss.append(loss(W,X_test,Y_test)) if _ == num_iter - 20: rate/=10 return W, train_loss, test_loss # + W = np.random.rand(1025,6)**0.01 print(W) W, train_loss, test_loss = sgd(W,X,Y_oh,X_test,Y_test_oh,rate,num_iter) print(W) # - plt.plot(train_loss, 'r') plt.plot(test_loss, 'b') plt.show() print(train_loss[-1]) print(test_loss[-1]) # ## Cross Entropy Loss with L2 Regularisation # + # W = np.random.rand(4097,6) #add a small positive value to make sure log does not overflow def loss_L2(W,X,Y,lam2): Z = np.matmul(X,W) # dim Z = (47163, 6) p = softmax(Z) #loss = -y(i)logp(i) or y product with p loss = 0 epsilon = 1e-5 for i in 
range(Y.shape[0]): for j in range(Y.shape[1]): if(Y[i,j] != 0): loss = loss - np.log(p[i,j] + epsilon) return loss/X.shape[0] + lam2*np.sum(np.multiply(W,W)) def gradient_L2(W,X,Y, lam2): # W_grad = np.zeros(W.shape) Z = np.matmul(X,W) p = softmax(Z) #dim p/Y = (47163,6) #dim X = (47163,4097) grad = np.matmul(X.T,(p-Y))/X.shape[0] + 2*lam2*W return grad def sgd_L2(W,X,Y,X_test,Y_test,rate,num_iter,lam2): train_loss, test_loss = [], [] for _ in range(0, num_iter): grad = gradient_L2(W,X,Y,lam2) W = W - rate*grad train_loss.append(loss_L2(W,X,Y,lam2)) test_loss.append(loss_L2(W,X_test,Y_test,lam2)) if _ == num_iter - 20: rate = rate/10 return W, train_loss, test_loss # + W_sq = np.random.rand(1025,6)**0.01 lam2 = 1e-15 W_sq, train_loss_L2, test_loss_L2 = sgd_L2(W_sq,X,Y_oh,X_test,Y_test_oh,rate,num_iter,lam2) print(W_sq) plt.plot(train_loss_L2, 'r') plt.plot(test_loss_L2, 'b') plt.show() # - print(train_loss_L2[-1]) print(test_loss_L2[-1]) # + Y_pred = np.zeros((Y_test.shape[0],1)) Y_test = Y_test.reshape((Y_test.shape[0],1)) one_hot_pred = np.matmul(X_test,W_sq) correct = 0 for i in range(Y_test.shape[0]): maximum = -1 for j in range(6): if(maximum < one_hot_pred[i,j]): Y_pred[i,0] = j maximum = one_hot_pred[i,j] if(Y_pred[i,0] == Y_test[i,0]): correct = correct + 1 print(Y_pred) print(Y_test) plt.plot(range(11791),Y_pred - Y_test) plt.show() print(correct/Y_test.shape[0])
.ipynb_checkpoints/Multinomial_Logistic_Regression_FOR Q3-checkpoint.ipynb
# # Linear regression without scikit-learn # # In this notebook, we introduce linear regression. Before presenting the # available scikit-learn classes, we will provide some insights with a simple # example. We will use a dataset that contains information about penguins. # <div class="admonition note alert alert-info"> # <p class="first admonition-title" style="font-weight: bold;">Note</p> # <p class="last">If you want a deeper overview regarding this dataset, you can refer to the # Appendix - Datasets description section at the end of this MOOC.</p> # </div> # + import pandas as pd penguins = pd.read_csv("../datasets/penguins_regression.csv") penguins.head() # - # This dataset contains measurements taken on penguins. We will formulate the # following problem: using the flipper length of a penguin, we would like # to infer its mass. # + import seaborn as sns feature_names = "Flipper Length (mm)" target_name = "Body Mass (g)" data, target = penguins[[feature_names]], penguins[target_name] ax = sns.scatterplot(data=penguins, x=feature_names, y=target_name) ax.set_title("Flipper length in function of the body mass") # - # <div class="admonition tip alert alert-warning"> # <p class="first admonition-title" style="font-weight: bold;">Tip</p> # <p class="last">The function <tt class="docutils literal">scatterplot</tt> from searborn take as input the full dataframe # and the parameter <tt class="docutils literal">x</tt> and <tt class="docutils literal">y</tt> allows to specify the name of the columns to # be plotted. 
Note that this function returns a matplotlib axis # (named <tt class="docutils literal">ax</tt> in the example above) that can be further used to add element on # the same matplotlib axis (such as a title).</p> # </div> # <div class="admonition caution alert alert-warning"> # <p class="first admonition-title" style="font-weight: bold;">Caution!</p> # <p class="last">Here and later, we use the name <tt class="docutils literal">data</tt> and <tt class="docutils literal">target</tt> to be explicit. In # scikit-learn documentation, <tt class="docutils literal">data</tt> is commonly named <tt class="docutils literal">X</tt> and <tt class="docutils literal">target</tt> is # commonly called <tt class="docutils literal">y</tt>.</p> # </div> # In this problem, penguin mass is our target. It is a continuous # variable that roughly varies between 2700 g and 6300 g. Thus, this is a # regression problem (in contrast to classification). We also see that there is # almost a linear relationship between the body mass of the penguin and its # flipper length. The longer the flipper, the heavier the penguin. # # Thus, we could come up with a simple formula, where given a flipper length # we could compute the body mass of a penguin using a linear relationship # of the form `y = a * x + b` where `a` and `b` are the 2 parameters of our # model. def linear_model_flipper_mass(flipper_length, weight_flipper_length, intercept_body_mass): """Linear model of the form y = a * x + b""" body_mass = weight_flipper_length * flipper_length + intercept_body_mass return body_mass # Using the model we defined above, we can check the body mass values # predicted for a range of flipper lengths. We will set `weight_flipper_length` # to be 45 and `intercept_body_mass` to be -5000. 
# + import numpy as np weight_flipper_length = 45 intercept_body_mass = -5000 flipper_length_range = np.linspace(data.min(), data.max(), num=300) predicted_body_mass = linear_model_flipper_mass( flipper_length_range, weight_flipper_length, intercept_body_mass) # - # We can now plot all samples and the linear model prediction. # + label = "{0:.2f} (g / mm) * flipper length + {1:.2f} (g)" ax = sns.scatterplot(data=penguins, x=feature_names, y=target_name) ax.plot(flipper_length_range, predicted_body_mass, color="tab:orange") _ = ax.set_title(label.format(weight_flipper_length, intercept_body_mass)) # - # The variable `weight_flipper_length` is a weight applied to the feature # `flipper_length` in order to make the inference. When this coefficient is # positive, it means that penguins with longer flipper lengths will have larger # body masses. If the coefficient is negative, it means that penguins with # shorter flipper lengths have larger body masses. Graphically, this # coefficient is represented by the slope of the curve in the plot. Below we # show what the curve would look like when the `weight_flipper_length` # coefficient is negative. # + weight_flipper_length = -40 intercept_body_mass = 13000 predicted_body_mass = linear_model_flipper_mass( flipper_length_range, weight_flipper_length, intercept_body_mass) # - # We can now plot all samples and the linear model prediction. ax = sns.scatterplot(data=penguins, x=feature_names, y=target_name) ax.plot(flipper_length_range, predicted_body_mass, color="tab:orange") _ = ax.set_title(label.format(weight_flipper_length, intercept_body_mass)) # In our case, this coefficient has a meaningful unit: g/mm. # For instance, a coefficient of 40 g/mm, means that for each # additional millimeter in flipper length, the body weight predicted will # increase by 40 g. 
# + body_mass_180 = linear_model_flipper_mass( flipper_length=180, weight_flipper_length=40, intercept_body_mass=0) body_mass_181 = linear_model_flipper_mass( flipper_length=181, weight_flipper_length=40, intercept_body_mass=0) print(f"The body mass for a flipper length of 180 mm " f"is {body_mass_180} g and {body_mass_181} g " f"for a flipper length of 181 mm") # - # We can also see that we have a parameter `intercept_body_mass` in our model. # This parameter corresponds to the value on the y-axis if `flipper_length=0` # (which in our case is only a mathematical consideration, as in our data, # the value of `flipper_length` only goes from 170mm to 230mm). This y-value # when x=0 is called the y-intercept. If `intercept_body_mass` is 0, the curve # will pass through the origin: # + weight_flipper_length = 25 intercept_body_mass = 0 # redefined the flipper length to start at 0 to plot the intercept value flipper_length_range = np.linspace(0, data.max(), num=300) predicted_body_mass = linear_model_flipper_mass( flipper_length_range, weight_flipper_length, intercept_body_mass) # - ax = sns.scatterplot(data=penguins, x=feature_names, y=target_name) ax.plot(flipper_length_range, predicted_body_mass, color="tab:orange") _ = ax.set_title(label.format(weight_flipper_length, intercept_body_mass)) # Otherwise, it will pass through the `intercept_body_mass` value: # + weight_flipper_length = 45 intercept_body_mass = -5000 predicted_body_mass = linear_model_flipper_mass( flipper_length_range, weight_flipper_length, intercept_body_mass) # - ax = sns.scatterplot(data=penguins, x=feature_names, y=target_name) ax.plot(flipper_length_range, predicted_body_mass, color="tab:orange") _ = ax.set_title(label.format(weight_flipper_length, intercept_body_mass)) # In this notebook, we have seen the parametrization of a linear regression # model and more precisely meaning of the terms weights and intercepts.
notebooks/linear_regression_without_sklearn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Este trabajo se propone analizar los arcos emocionales de reportes de experiencias con sustancias psicoactivas.
#
# ### Arcos emocionales
#
# Se llama arco emocional a la evolución de determinada magnitud emocional conforme al curso de una historia, comúnmente la valencia, que es el sentimiento de positividad/negatividad del lenguaje, y no distingue entre emociones.
#
# ### Reportes
#
# Los reportes se obtuvieron de erowid.org, el sitio web de una organización que provee información de plantas y químicos psicoactivos así como actividades y tecnologías que producen estados alterados de consciencia.
#
# Erowid permite a sus visitantes enviar descripciones de sus experiencias personales con psicoactivos para su revisión y publicación. Su colección consiste en más de 30.000 reportes.
#
# ### Análisis
#
# El análisis sobre los reportes intenta encontrar arcos emocionales típicos según grupos de sustancias.
#
# ----
#
#
#
#
#
#
#
# # Backlog
# * Agrupamiento de emociones
# * Magnitudes emocionales
# * Proceso de obtención de reportes
_drafts/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Detecting near duplicate keywords using dedupe # # The dedupe Python library is a machine learning library that uses a combination of blocking, hierarchical clustering and logistic regression to generate clusters out of similar records. The use case it focuses on are structured records - a good overview of its use can be found in the [Basics of Entity Resolution with Python and Dedupe](https://medium.com/district-data-labs/basics-of-entity-resolution-with-python-and-dedupe-bc87440b64d4) article. # # Since our keywords are likely to be of different sizes, we will use 3-char shingles and feature hashing to reduce each keyword to an integer array of 25 features. # + import os import nltk import numpy as np from sklearn.feature_extraction import FeatureHasher from sklearn.metrics import jaccard_similarity_score from sklearn.metrics import accuracy_score, confusion_matrix, classification_report # + DATA_DIR = "../data" CURATED_KEYWORDS = os.path.join(DATA_DIR, "raw_keywords.txt") # dedupe wants (or suggests) a CSV file CURATED_KEYWORD_HASHES = os.path.join(DATA_DIR, "curated_keywords_hash.csv") # output of dedupe KEYWORD_DEDUPE_MAPPINGS = os.path.join(DATA_DIR, "keyword_dedupe_mappings.tsv") # - # ### Encode keywords # # For each keyword, we create 3-char shingles, then use the sklearn FeatureHasher to hash the array of shingles to a fixed length integer array. We show below that these arrays can be sent through a similarity measure such as Jaccard and return intuitively good values. 
# +
# Sanity check: hash two near-duplicate keywords and verify that their hashed
# representations look similar under a Jaccard-style comparison.
hasher = FeatureHasher(input_type="string", n_features=25, dtype=np.int32)
keywords = ["absolute value", "absolute values"]
hashes = []
for keyword in keywords:
    # character 3-grams ("shingles") of the keyword
    shingles = ["".join(trigram) for trigram in nltk.trigrams([c for c in keyword])]
    keyword_hash = hasher.transform([shingles]).toarray()
    hashes.append(keyword_hash[0])
    print(keyword, keyword_hash[0])

# NOTE(review): on these dense integer vectors jaccard_similarity_score
# reduces to the fraction of matching components; the function was removed
# from newer scikit-learn (see import cell above).
print("jaccard:", jaccard_similarity_score(hashes[0], hashes[1]))

# +
# Write one CSV row per curated keyword: id, the 25 hashed features, and the
# keyword itself -- the input format suggested by dedupe.
# Context managers guarantee both handles are closed even if hashing raises
# (the original open()/close() pairs leaked the handles on error).
num_keywords = 0
with open(CURATED_KEYWORDS, "r") as fcurated, \
     open(CURATED_KEYWORD_HASHES, "w") as fhashed:
    # header
    cols = ["id"]
    cols.extend(["col_{:d}".format(i+1) for i in range(25)])
    cols.append("keyword")
    fhashed.write("{:s}\n".format(",".join(cols)))

    # shingle each word into 3-char trigrams, then hash to 25 features
    hasher = FeatureHasher(input_type="string", n_features=25, dtype=np.int32)
    for rowid, keyword in enumerate(fcurated):
        keyword = keyword.strip()
        shingles = ["".join(trigram) for trigram in nltk.trigrams([c for c in keyword])]
        keyword_hash = hasher.transform([shingles]).todense().tolist()[0]
        cols = [str(rowid)]
        cols.append(",".join([str(h) for h in keyword_hash]))
        cols.append(keyword)
        fhashed.write("{:s}\n".format(",".join(cols)))
        num_keywords = rowid + 1

# Bug fix: the original printed the last 0-based row id, which is one less
# than the number of keywords actually written.
print("num keywords: {:d}".format(num_keywords))
# -

# ### Cluster encoded keywords using dedupe
#
# The pipeline is closely modeled after the [CSV example in the dedup-examples repository](https://github.com/dedupeio/dedupe-examples). Code is in a script [../scripts/dedupe_keyword_train.py](https://github.com/sujitpal/content-engineering-tutorial/blob/master/scripts/dedupe_keyword_train.py). Input is the `../data/curated_keywords_hash.csv` file we generated in this notebook. Output is a settings and a labels file which is generated as a result of the active learning step the first time the model trains so you don't have to repeat the labeling exercise every time. Final output is a set of pairs similar to the one we generated using `simhash` in the previous notebook.
# +
# Evaluate dedupe's pairwise scores against a cheap edit-distance "truth":
# a pair is predicted duplicate when dedupe's confidence exceeds 0.75, and
# labelled duplicate when the edit distance between the keywords is <= 2.
labels, preds = [], []
with open(KEYWORD_DEDUPE_MAPPINGS, "r") as f:
    for i, line in enumerate(f):
        keyword_left, keyword_right, score = line.strip().split("\t")
        score = float(score)
        preds.append(1 if score > 0.75 else 0)
        edit_dist = nltk.edit_distance(keyword_left, keyword_right)
        labels.append(1 if edit_dist <= 2 else 0)
        if i <= 10:
            # echo the first few pairs for eyeballing
            print("{:25s}\t{:25s}\t{:.3f}\t{:.3f}".format(
                keyword_left, keyword_right, score, edit_dist))

acc = accuracy_score(labels, preds)
cm = confusion_matrix(labels, preds)
cr = classification_report(labels, preds)
print("---")
print("accuracy: {:.3f}".format(acc))
print("---")
print("confusion matrix")
print(cm)
print("---")
print("classification report")
print(cr)
# -

# ### What about clustering?
#
# I also tried encoding the keywords as described and recursively splitting it up with different clustering algorithms (KMeans and Spectral) until the size of the output cluster is less than some preset threshold. Unfortunately, the clustering algorithm operated by splitting off one row at a time, finally ending with N single row clusters where N is the size of the original dataset. So naive clustering is probably not the way to go with this.
notebooks/09-keyword-dedupe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt import os import pandas as pd import datetime from IPython.display import SVG from sklearn.model_selection import KFold from sklearn import metrics from sklearn.linear_model import LogisticRegression from itertools import cycle from sklearn.linear_model import lasso_path, enet_path # + from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.metrics import auc from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score from sklearn.metrics import cohen_kappa_score from sklearn.metrics import roc_auc_score from sklearn.metrics import confusion_matrix # + SMALL_SIZE = 10 MEDIUM_SIZE = 15 BIGGER_SIZE = 18 # font = {'family' : 'monospace', # 'weight' : 'bold', # 'size' : 'larger'} #plt.rc('font', **font) # pass in the font dict as kwargs plt.rc('font', size=MEDIUM_SIZE,family='normal',weight='normal') # controls default text sizes plt.rc('axes', titlesize=MEDIUM_SIZE,) # fontsize of the axes title plt.rc('axes', labelsize=MEDIUM_SIZE,) # fontsize of the x and y labels plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize plt.rc('figure', titlesize=BIGGER_SIZE,titleweight='bold') # fontsize of the figure title #plt.rc('xtick', labelsize=15) #plt.rc('ytick', labelsize=15) # - np.random.seed(42) print(str(datetime.datetime.now())) # + NBname='_F-elNet' N_folds=4 np.random.seed(100) kf = KFold(n_splits=N_folds, shuffle=False) rm='res_x.npy' rl='res_y.npy' rdata=np.load(os.path.abspath(rm)) rlabels=np.load(os.path.abspath(rl)) 
sm='sen_x.npy' sl='sen_y.npy' sdata=np.load(os.path.abspath(sm)) slabels=np.load(os.path.abspath(sl)) fmtim='testim_x.npy' fltim='testim_y.npy' testim=np.load(os.path.abspath(fmtim)) tlabelsim=np.load(os.path.abspath(fltim)) fmtb='testb_x.npy' fltb='testb_y.npy' testb=np.load(os.path.abspath(fmtb)) tlabelsb=np.load(os.path.abspath(fltb)) # ================= # Do once! # ================= sen_batch = np.random.RandomState(seed=45).permutation(sdata.shape[0]) bins = np.linspace(0, 200, 41) digitized = np.digitize(sen_batch, bins,right=False) # ================ # =============================== # # FINAL TRAIN # =============================== train_idx_k=np.random.permutation(rdata.shape[0]) s_x=sdata[np.isin(digitized,train_idx_k+1)] s_y=slabels[np.isin(digitized,train_idx_k+1)] r_x=np.concatenate((rdata[train_idx_k],rdata[train_idx_k],rdata[train_idx_k],rdata[train_idx_k],rdata[train_idx_k])) r_y=np.concatenate((rlabels[train_idx_k],rlabels[train_idx_k],rlabels[train_idx_k],rlabels[train_idx_k],rlabels[train_idx_k])) f_train_x, f_train_y = np.concatenate((s_x,r_x)), np.concatenate((s_y,r_y)) train_shuf_idx = np.random.permutation(f_train_x.shape[0]) x_train, y_train = f_train_x[train_shuf_idx], f_train_y[train_shuf_idx] # x_better_test=x_test.reshape(x_test.shape[0],x_test.shape[1]) # y_better_test=y_test.reshape(y_test.shape[0],y_test.shape[1]) # y_better_test=y_better_test[:,1] x_better_train=x_train.reshape(x_train.shape[0],x_train.shape[1]) y_better_train=y_train.reshape(y_train.shape[0],y_train.shape[1]) y_better_train=y_better_train[:,1] xb_better_test=testb.reshape(testb.shape[0],testb.shape[1]) yb_better_test=tlabelsb.reshape(tlabelsb.shape[0],tlabelsb.shape[1]) yb_better_test=yb_better_test[:,1] xim_better_test=testim.reshape(testim.shape[0],testim.shape[1]) yim_better_test=tlabelsim.reshape(tlabelsim.shape[0],tlabelsim.shape[1]) yim_better_test=yim_better_test[:,1] # l1rat=0.4 # C=0.61 l1rat=0.6 C=0.81 LR = LogisticRegression(C=C, tol=0.01, 
penalty='elasticnet', solver='saga', n_jobs=-1, l1_ratio=l1rat) LR.fit(x_better_train,y_better_train) # y_pred = regE.predict(xb_better_test) # mname1='f1'+NBname+'.h5' # mname2='f2'+NBname+'.h5' # regE1.save(mname1) # regE2.save(mname2) # # ======================= # # ONLY FOR CROSS-VAL # # ======================= # i=0 # # logistic=[] # l1rat=0.5 # acc1=[] # acc2=[] # acc3=[] # # callbacks = [EarlyStopping(monitor='val_loss', patience=10), # # ModelCheckpoint(filepath='best_model'+NBname+'.h5', monitor='val_loss', save_best_only=True)] # for train_idx_k, val_idx_k in kf.split(rdata): # print ("Running Fold", i+1, "/", N_folds) # f= open('perform_'+str(i+1)+NBname+'.txt', "a") # # =============================== # # select train # # =============================== # s_train_x=sdata[np.isin(digitized,train_idx_k+1)] # s_train_y=slabels[np.isin(digitized,train_idx_k+1)] # r_train_x=np.concatenate((rdata[train_idx_k],rdata[train_idx_k],rdata[train_idx_k],rdata[train_idx_k],rdata[train_idx_k])) # r_train_y=np.concatenate((rlabels[train_idx_k],rlabels[train_idx_k],rlabels[train_idx_k],rlabels[train_idx_k],rlabels[train_idx_k])) # # =============================== # # select val # # =============================== # s_val_x=sdata[np.isin(digitized,val_idx_k+1)] # s_val_y=slabels[np.isin(digitized,val_idx_k+1)] # r_val_x=np.concatenate((rdata[val_idx_k],rdata[val_idx_k],rdata[val_idx_k],rdata[val_idx_k],rdata[val_idx_k])) # r_val_y=np.concatenate((rlabels[val_idx_k],rlabels[val_idx_k],rlabels[val_idx_k],rlabels[val_idx_k],rlabels[val_idx_k])) # # =============================== # # concatenate F_train/val_x/y # # =============================== # f_train_x, f_train_y = np.concatenate((s_train_x,r_train_x)), np.concatenate((s_train_y,r_train_y)) # # train_shuf_idx = np.random.permutation(f_train_x.shape[0]) # # F_train_x, F_train_y = f_train_x[train_shuf_idx], f_train_y[train_shuf_idx] # f_val_x, f_val_y = np.concatenate((s_val_x,r_val_x)), 
np.concatenate((s_val_y,r_val_y)) # # val_shuf_idx = np.random.permutation(f_val_x.shape[0]) # # F_val_x, F_val_y = f_val_x[val_shuf_idx], f_val_y[val_shuf_idx] # # =============================== # # shuffle just because we can? # # =============================== # train_shuf_idx = np.random.permutation(f_train_x.shape[0]) # x_train_CV, y_train_CV = f_train_x[train_shuf_idx], f_train_y[train_shuf_idx] # val_shuf_idx = np.random.permutation(f_val_x.shape[0]) # x_val_CV, y_val_CV = f_val_x[val_shuf_idx], f_val_y[val_shuf_idx] # x_better_val=x_val_CV.reshape(x_val_CV.shape[0],x_val_CV.shape[1]) # y_better_val=y_val_CV.reshape(y_val_CV.shape[0],y_val_CV.shape[1]) # y_better_val=y_better_val[:,1] # x_better_train=x_train_CV.reshape(x_train_CV.shape[0],x_train_CV.shape[1]) # y_better_train=y_train_CV.reshape(y_train_CV.shape[0],y_train_CV.shape[1]) # y_better_train=y_better_train[:,1] # f.write('start of ' + str(i+1) + ' fold\n') # l1rat=0.4 # C=0.61 # clf_en_LR = LogisticRegression(C=C, tol=0.01, penalty='elasticnet', solver='saga', n_jobs=-1, l1_ratio=l1rat) # regE=clf_en_LR.fit(x_better_train,y_better_train) # y_pred = regE.predict(x_better_val) # acc1.append(metrics.accuracy_score(y_pred,y_better_val)) # l1rat=0.4 # C=0.61 # clf_en_LR = LogisticRegression(C=C, tol=0.01, penalty='elasticnet', solver='saga', n_jobs=-1, l1_ratio=l1rat) # regE=clf_en_LR.fit(x_better_train,y_better_train) # y_pred = regE.predict(x_better_val) # acc2.append(metrics.accuracy_score(y_pred,y_better_val)) # # for x in range(5): # # l1rat=x*0.2 # # f.write('l1_ratio is set to ' + str(l1rat) + '\n') # # for y in range(5): # # C=1 +y*1.5 # # clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01, solver='saga',n_jobs=-1) # # clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01, solver='saga',n_jobs=-1) # # clf_en_LR = LogisticRegression(C=C, tol=0.01, penalty='elasticnet', solver='saga', n_jobs=-1, l1_ratio=l1rat) # # reg1=clf_l1_LR.fit(x_better_train, y_better_train) # # 
reg2=clf_l2_LR.fit(x_better_train, y_better_train) # # regE=clf_en_LR.fit(x_better_train,y_better_train) # # y_pred = reg1.predict(x_better_val) # # acc1.append(metrics.accuracy_score(y_pred,y_better_val)) # # y_pred = reg2.predict(x_better_val) # # acc2.append(metrics.accuracy_score(y_pred,y_better_val)) # # y_pred = regE.predict(x_better_val) # # acc3.append(metrics.accuracy_score(y_pred,y_better_val)) # # f.writelines(map('{}\t{}\t{}\n'.format, acc1, acc2, acc3)) # # acc1=[] # # acc2=[] # # acc3=[] # i=i+1 # f.write('done for ' + str(i) + ' fold\n') # f.write(str(datetime.datetime.now())) # f.close() # - # + # ========================================================================== # # DO NOT UNCOMMENT UNTIL THE END; DECLARES FUNCTION FOR AN UNBIASED TEST # ========================================================================== def plot_auc(aucies,fprs,tprs, last): #plt.figure(figsize=(13,13)) plt.figure(figsize=(11,11)) plt.plot([0, 1], [0, 1], 'k--') for i in range(len(aucies)): st='model_'+str(i+1)+' ' if i==0: st='Balanced' else: st='Imbalanced' plt.plot(fprs[i], tprs[i], label='{} (AUC= {:.3f})'.format(st,aucies[i]),linewidth=1.5) plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve: LR') plt.legend(loc='best') figname='ROC'+last+'.png' plt.savefig(figname,dpi=500) # + # ========================================================================== # # THIS IS THE UNBIASED TEST; DO NOT UNCOMMENT UNTIL THE END # ========================================================================== fpr_x=[] tpr_x=[] thresholds_x=[] auc_x=[] pre_S=[] rec_S=[] f1_S=[] kap_S=[] acc_S=[] mat_S=[] # - # # BALANCED TESTING # + NBname='_F-elNetb' # xb_better_test=testb.reshape(testb.shape[0],testb.shape[1]) # yb_better_test=tlabelsb.reshape(tlabelsb.shape[0],tlabelsb.shape[1]) # yb_better_test=yb_better_test[:,1] y_pred = LR.predict_proba(xb_better_test) fpr_0, tpr_0, thresholds_0 = roc_curve(yb_better_test, y_pred[:,1]) fpr_x.append(fpr_0) 
tpr_x.append(tpr_0) thresholds_x.append(thresholds_0) auc_x.append(auc(fpr_0, tpr_0)) testby=yb_better_test # predict probabilities for testb set yhat_probs = LR.predict_proba(xb_better_test) # predict crisp classes for testb set yhat_classes = LR.predict(xb_better_test) # # reduce to 1d array #testby1=tlabels[:,1] #yhat_probs = yhat_probs[:, 0] #yhat_classes = yhat_classes[:, 0] # accuracy: (tp + tn) / (p + n) acc_S.append(accuracy_score(testby, yhat_classes)) #print('Accuracy: %f' % accuracy_score(testby, yhat_classes)) #precision tp / (tp + fp) pre_S.append(precision_score(testby, yhat_classes)) #print('Precision: %f' % precision_score(testby, yhat_classes)) #recall: tp / (tp + fn) rec_S.append(recall_score(testby, yhat_classes)) #print('Recall: %f' % recall_score(testby, yhat_classes)) # f1: 2 tp / (2 tp + fp + fn) f1_S.append(f1_score(testby, yhat_classes)) #print('F1 score: %f' % f1_score(testby, yhat_classes)) # kappa kap_S.append(cohen_kappa_score(testby, yhat_classes)) #print('Cohens kappa: %f' % cohen_kappa_score(testby, yhat_classes)) # confusion matrix mat_S.append(confusion_matrix(testby, yhat_classes)) #print(confusion_matrix(testby, yhat_classes)) with open('perform'+NBname+'.txt', "w") as f: f.writelines("AUC \t Accuracy \t Precision \t Recall \t F1 \t Kappa\n") f.writelines(map("{}\t{}\t{}\t{}\t{}\t{}\n".format, auc_x, acc_S, pre_S, rec_S, f1_S, kap_S)) for x in range(len(fpr_x)): f.writelines(map("{}\n".format, mat_S[x])) f.writelines(map("{}\t{}\t{}\n".format, fpr_x[x], tpr_x[x], thresholds_x[x])) # ========================================================================== # # THIS IS THE UNBIASED testb; DO NOT UNCOMMENT UNTIL THE END # ========================================================================== plot_auc(auc_x,fpr_x,tpr_x,NBname) # - # ## to see which samples were correctly classified ... 
yhat_probs[yhat_probs[:,1]>=0.5,1] yhat_probs[:,1]>=0.5 yhat_classes testby # # IMBALANCED TESTING # + NBname='_F-elNetim' # xim_better_test=testim.reshape(testim.shape[0],testim.shape[1]) # yim_better_test=tlabelsim.reshape(tlabelsim.shape[0],tlabelsim.shape[1]) # yim_better_test=yim_better_test[:,1] y_pred = LR.predict_proba(xim_better_test)#.ravel() fpr_0, tpr_0, thresholds_0 = roc_curve(yim_better_test, y_pred[:,1]) fpr_x.append(fpr_0) tpr_x.append(tpr_0) thresholds_x.append(thresholds_0) auc_x.append(auc(fpr_0, tpr_0)) testim=xim_better_test # predict probabilities for testim set yhat_probs = LR.predict_proba(testim) # predict crisp classes for testim set yhat_classes = LR.predict(testim) # reduce to 1d array testimy=tlabelsim[:,1] #testimy1=tlabels[:,1] #yhat_probs = yhat_probs[:, 0] #yhat_classes = yhat_classes[:, 0] # accuracy: (tp + tn) / (p + n) acc_S.append(accuracy_score(testimy, yhat_classes)) #print('Accuracy: %f' % accuracy_score(testimy, yhat_classes)) #precision tp / (tp + fp) pre_S.append(precision_score(testimy, yhat_classes)) #print('Precision: %f' % precision_score(testimy, yhat_classes)) #recall: tp / (tp + fn) rec_S.append(recall_score(testimy, yhat_classes)) #print('Recall: %f' % recall_score(testimy, yhat_classes)) # f1: 2 tp / (2 tp + fp + fn) f1_S.append(f1_score(testimy, yhat_classes)) #print('F1 score: %f' % f1_score(testimy, yhat_classes)) # kappa kap_S.append(cohen_kappa_score(testimy, yhat_classes)) #print('Cohens kappa: %f' % cohen_kappa_score(testimy, yhat_classes)) # confusion matrix mat_S.append(confusion_matrix(testimy, yhat_classes)) #print(confusion_matrix(testimy, yhat_classes)) with open('perform'+NBname+'.txt', "w") as f: f.writelines("##THE TWO LINES ARE FOR BALANCED AND IMBALALANCED TEST\n") f.writelines("#AUC \t Accuracy \t Precision \t Recall \t F1 \t Kappa\n") f.writelines(map("{}\t{}\t{}\t{}\t{}\t{}\n".format, auc_x, acc_S, pre_S, rec_S, f1_S, kap_S)) f.writelines("#TRUE_SENSITIVE \t TRUE_RESISTANT\n") for x in 
range(len(fpr_x)): f.writelines(map("{}\n".format, mat_S[x])) #f.writelines(map("{}\t{}\t{}\n".format, fpr_x[x], tpr_x[x], thresholds_x[x])) f.writelines("#FPR \t TPR \t THRESHOLDs\n") for x in range(len(fpr_x)): #f.writelines(map("{}\n".format, mat_S[x])) f.writelines(map("{}\t{}\t{}\n".format, fpr_x[x], tpr_x[x], thresholds_x[x])) f.writelines("#NEXT\n") # ========================================================================== # # THIS IS THE UNBIASED testim; DO NOT UNCOMMENT UNTIL THE END # ========================================================================== plot_auc(auc_x,fpr_x,tpr_x,NBname) # - # ## to see which samples were correctly classified ... yhat_probs[yhat_probs[:,1]>=0.5,1] yhat_probs[:,1]>=0.5 yhat_classes testimy # # MISCELLANEOUS mat_S auc_x # # END OF TESTING print(str(datetime.datetime.now()))
11Oct/F-elNet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from fastcore.utils import * from fastcore.foundation import * from bs4 import BeautifulSoup # - hooks_url = 'https://docs.github.com/en/free-pro-team@latest/developers/webhooks-and-events/webhook-events-and-payloads' soup = BeautifulSoup(urlread(hooks_url)) events = L(soup.select('li.ml-0')).attrgot('text')[1:] # As at 2020/01/12 the 'sponsor' event has two examples sponsor_evt = events.argwhere(lambda o: o.startswith('sponsor'))[0] elems = L(soup.select('div.height-constrained-code-block > pre:nth-child(1) > code:nth-child(1)')).attrgot('text') # Delete the 2nd sponsor example del(elems[sponsor_evt+1]) exs = Path('.') for ev,el in zip(events,elems): (exs/f'{ev}.json').write_text(el)
examples/_create_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Tool for simulating 1D Conservative Transport (Laboratory Column) ## # ### Info on the tool ### # # The worksheet addresses **1D conservative transport** of a solute through a porous medium, e.g. in a laboratory column. <br> # Water flow through the porous medium is assumed to be steady-state. <br> # **Advection** and **dispersion** are considered as transport processes. <br> # **Dirac injection**, **finite pulse injection** or **continuous injection** may be used as upgradient boundary condition. <br> # # The worksheet calculates solute breakthrough at the column outlet (sheet "model") and allows for comparison with measured data (to be provided in sheet "data"). <br> # Computations are based on analytical solutions involving the complementary error function. # # input parameters | dimension | remarks # :-----------------------|:--------------|:-------------------------------------------- # column length | [L] | enter positive number # column radius | [L] | enter positive number # effective porosity | [-] | enter positive number not bigger than 1 # dispersivity | [L] | enter non-negative number # flow rate | [L³/T] | enter positive number # initial concentration | [M/L³] | enter non-negative number # input mass | [M] | enter positive number or "inf" # input concentration | [M/L³] | enter non-negative number or "inf" # bulk density | [M/L³] | leave empty or enter positive number # starting time | [T] | enter non-negative number # # # # **_Contributed by Ms. <NAME> and <NAME>. The original concept from Prof. R. 
Liedl's spreadsheet code._**
#
# The codes are licensed under CC by 4.0 [(use anyways, but acknowledge the original work)](https://creativecommons.org/licenses/by/4.0/deed.en)

# #### Python `Libraries` Cell ####
#
# `numpy` for computation, `matplotlib.pyplot` for plotting and `pandas` for tabulation, are the most general libraries for our works.
#
# `ipywidget` - for interactive activities, are **special** functions used in this tool.
#
# **Please execute the cell before moving to the next step.**

# + tags=[]
import math
import numpy as np
from ipywidgets import *
import matplotlib.pyplot as plt
# -

# #### `Input Data` Cell ####
#
# In the cell below you can change input values. Please read the **info** above and the table before changing the input. Also check the boundary type after executing the cell.
#
# **Make sure to execute the cell if you change any input value**

# + tags=[]
# input data
L = 50        # cm, column length
R = 3         # cm, column radius
ne = 0.25     # [-], effective porosity
alpha = 0.1   # cm, dispersivity
Q = 0.167     # cm³/h, flow rate
c0 = 0        # mg/cm³, initial concentration
mi = 2000     # mg, input mass
ci = 1.25e+1  # mg/cm³, input concentration
delta_t = 70  # h, time increment

# derived quantities
A = math.pi * R * R  # cm², cross-sectional area of the column
vf = Q/A             # cm/h, Darcy velocity
va = vf/ne           # cm/h, average linear (seepage) velocity
D = alpha * va       # cm²/h, dispersion coefficient
# NOTE(review): L/va has units of time (h) -- it is the advective travel
# time through the column, although the original labels it "pore volume".
Vp = L/va

# intermediate results
## boundary condition: infinite input mass => continuous injection;
## infinite input concentration => Dirac pulse; otherwise a finite pulse.
if mi == math.inf:
    print("The type of boundary condition is a continuous injection.")
elif ci == math.inf:
    print("The type of boundary condition is a dirac pulse injection.")
else:
    print("The type of boundary condition is a finite pulse injection.")
# -

# #### The **main** ``function`` cell ####
#
# You do not have to change the two cells below; they only have to be executed once.
# + tags=[]
# Definition of the main transport function.


def _erfc_capped(x):
    """erfc(x) with the argument magnitude capped at 27.

    Mirrors the spreadsheet's MIN(ABS(arg), 27) guard against erfc underflow,
    using the identity erfc(-x) = 2 - erfc(x).  erfc(0) == 1.
    """
    if x > 0:
        return math.erfc(min(x, 27))
    if x < 0:
        return 2 - math.erfc(min(-x, 27))
    # FIX: two of the original inlined copies of this pattern evaluated to 2
    # (instead of erfc(0) == 1) when the argument was exactly 0, disagreeing
    # with the initial-condition term; erfc(0) is 1.
    return 1.0


def transport(L, R, ne, alpha, Q, c0, mi, ci, delta_t, t):
    """Concentration at the outlet of a 1-D column at time ``t``.

    Analytical advection-dispersion breakthrough solution (erfc-based, with
    the exp(Pe) finite-column correction term) for a continuous, finite-pulse
    or Dirac-pulse injection, selected implicitly by the values of ``mi`` and
    ``ci`` — presumably following the Liedl spreadsheet this notebook ports
    (TODO confirm against the original spreadsheet).

    Parameters
    ----------
    L, R : float      column length and radius [cm]
    ne : float        effective porosity [-]
    alpha : float     dispersivity [cm]; 0 means purely advective transport
    Q : float         flow rate [cm³/h]
    c0 : float        initial (background) concentration [mg/cm³]
    mi : float        injected mass [mg]; np.inf selects continuous injection
    ci : float        input concentration [mg/cm³]; np.inf selects a Dirac pulse
    delta_t : float   time increment [h]; unused in this point evaluation,
                      kept for interface compatibility with the callers
    t : float         evaluation time [h]

    Returns
    -------
    float or None     concentration [mg/cm³]; None where the solution is
                      undefined (e.g. at t == 0).
    """
    # Derived column / flow quantities.
    A = np.pi * R * R   # cm², cross-sectional area
    vf = Q/A            # cm/h, Darcy velocity
    va = vf/ne          # cm/h, average linear velocity
    Vp = L/va           # h, time to flush one pore volume
    # (The original also computed D = alpha*va and delta_t_rel; both unused.)

    ## Peclet number (np.inf marks the dispersion-free, purely advective case).
    if alpha == 0:
        Pe = np.inf
    else:
        Pe = L/alpha

    ## Input duration ti [h]: np.inf => continuous injection, 0 => Dirac pulse.
    if mi == np.inf or ci == c0:
        ti = np.inf
    else:
        if ci == np.inf:
            ti = 0
        else:
            ti = mi/Q/abs(ci-c0)

    ## Relative (pore-volume-normalised) input duration and time.
    if ti is None or Vp is None:
        ti_rel = None
    else:
        if ti == np.inf:
            ti_rel = np.inf
        else:
            ti_rel = ti/Vp
    if Vp is None:
        t_rel = None
    else:
        t_rel = t/Vp

    ## Initial-condition term: flushing of the resident concentration c0.
    ### arg-
    if Pe is None or t_rel == 0 or Pe == np.inf or t_rel is None:
        arg_n1_IC = None
    else:
        arg_n1_IC = np.sqrt(0.25*Pe/t_rel)*(1-t_rel)
    if Pe is None or arg_n1_IC is None or Pe == np.inf or t_rel == 0:
        arg_n2_IC = None
    else:
        arg_n2_IC = _erfc_capped(arg_n1_IC)
    ### arg+
    if Pe is None or t_rel is None or Pe == np.inf or t_rel == 0:
        arg_p1_IC = None
    else:
        arg_p1_IC = np.sqrt(0.25*Pe/t_rel)*(1+t_rel)
    if Pe is None or arg_p1_IC is None or Pe == np.inf or t_rel == 0:
        arg_p2_IC = None
    else:
        arg_p2_IC = _erfc_capped(arg_p1_IC)
    ### Combined IC argument; the exp(Pe) factor is the second-term correction.
    if Pe is None or arg_n2_IC is None or arg_p2_IC is None or Pe == np.inf or t_rel == 0:
        arg_IC = None
    else:
        if arg_p2_IC == 0:
            arg_IC = arg_n2_IC
        else:
            arg_IC = arg_n2_IC + np.exp(Pe) * arg_p2_IC

    ## Boundary-condition term, positive pulse (injection switched ON at t=0).
    ### arg-
    if Pe == np.inf or t_rel == 0 or Pe is None or ti_rel is None or t_rel is None or ti_rel == 0:
        arg_n1_BC_pp = None
    else:
        arg_n1_BC_pp = np.sqrt(0.25*Pe/t_rel)*(1-t_rel)
    if Pe is None or Pe == np.inf or ti_rel is None or ti_rel == 0 or arg_n1_BC_pp is None or t_rel == 0:
        arg_n2_BC_pp = None
    else:
        # FIX: original inline expansion returned 2 instead of erfc(0)=1 at 0.
        arg_n2_BC_pp = _erfc_capped(arg_n1_BC_pp)
    ### arg+
    if Pe is None or Pe == np.inf or ti_rel is None or ti_rel == 0 or t_rel is None or t_rel == 0:
        arg_p1_BC_pp = None
    else:
        arg_p1_BC_pp = np.sqrt(0.25*Pe/t_rel)*(1+t_rel)
    if Pe is None or Pe == np.inf or ti_rel is None or ti_rel == 0 or arg_p1_BC_pp is None or t_rel == 0:
        arg_p2_BC_pp = None
    else:
        arg_p2_BC_pp = math.erfc(min(arg_p1_BC_pp, 27))  # arg+ > 0 whenever defined
    ### Combined
    if (Pe == np.inf or t_rel == 0 or Pe is None or ti_rel is None or t_rel is None
            or ti_rel == 0 or arg_n2_BC_pp is None or arg_p2_BC_pp is None):
        arg_BC_pp = None
    else:
        if arg_p2_BC_pp == 0:
            arg_BC_pp = arg_n2_BC_pp
        else:
            arg_BC_pp = arg_n2_BC_pp + (np.exp(Pe) * arg_p2_BC_pp)

    ## Boundary-condition term, negative pulse (injection switched OFF at ti),
    ## only active once t_rel exceeds the relative input duration ti_rel.
    ### arg-
    if (Pe is None or ti_rel is None or Pe == np.inf or ti_rel == np.inf
            or ti_rel == 0 or t_rel == 0 or t_rel is None):
        arg_n1_BC_np = None
    else:
        if t_rel > ti_rel:
            arg_n1_BC_np = np.sqrt(0.25*Pe/(t_rel-ti_rel))*(1-(t_rel-ti_rel))
        else:
            arg_n1_BC_np = None
    if (Pe is None or ti_rel is None or arg_n1_BC_np is None or Pe == np.inf
            or ti_rel == 0 or ti_rel == np.inf or t_rel == 0):
        arg_n2_BC_np = None
    else:
        # FIX: original inline expansion returned 2 instead of erfc(0)=1 at 0.
        arg_n2_BC_np = _erfc_capped(arg_n1_BC_np)
    ### arg+
    if (Pe == np.inf or Pe is None or ti_rel == 0 or ti_rel == np.inf
            or t_rel == 0 or ti_rel is None or t_rel is None):
        arg_p1_BC_np = None
    else:
        if t_rel > ti_rel:
            arg_p1_BC_np = np.sqrt(0.25*Pe/(t_rel-ti_rel))*(1+(t_rel-ti_rel))
        else:
            arg_p1_BC_np = None
    if (Pe is None or ti_rel is None or arg_p1_BC_np is None or Pe == np.inf
            or ti_rel == np.inf or ti_rel == 0 or t_rel == 0):
        arg_p2_BC_np = None
    else:
        if t_rel > ti_rel:
            arg_p2_BC_np = math.erfc(min((arg_p1_BC_np), 27))
        else:
            arg_p2_BC_np = None
    ### Combined
    if (Pe is None or ti_rel is None or arg_n2_BC_np is None or arg_p2_BC_np is None
            or Pe == np.inf or ti_rel == 0 or ti_rel == np.inf or t_rel == 0):
        arg_BC_np = None
    else:
        if t_rel > ti_rel:
            if arg_p2_BC_np == 0:
                arg_BC_np = arg_n2_BC_np
            else:
                arg_BC_np = arg_n2_BC_np + np.exp(Pe) * arg_p2_BC_np
        else:
            arg_BC_np = None

    ## Relative concentration due to the initial condition.
    if Pe is None or t_rel is None:
        c_rel_IC = None
    else:
        if t_rel > 0:
            if Pe == np.inf:
                # Purely advective: sharp front arrives at one pore volume.
                if t_rel < 1:
                    c_rel_IC = 1
                else:
                    c_rel_IC = 0
            else:
                c_rel_IC = 1 - 0.5*arg_IC
        else:
            c_rel_IC = None

    ## Relative concentration due to the boundary condition.
    if Pe is None or ti_rel is None or t_rel is None:
        c_rel_BC = None
    else:
        if t_rel > 0:
            if Pe == np.inf:
                # Purely advective cases: rectangular pulse between 1 and 1+ti_rel.
                if t_rel > 1:
                    if ti_rel == np.inf:
                        if t_rel > 1:
                            c_rel_BC = 1
                        else:
                            c_rel_BC = 0
                    elif t_rel <= 1 + ti_rel:
                        c_rel_BC = 1
                    else:
                        c_rel_BC = 0
                else:
                    c_rel_BC = 0
            else:
                if ti_rel == 0:
                    # Dirac pulse.
                    # FIX: the original used `^` (bitwise XOR, a TypeError on
                    # floats) where exponentiation `**` was intended.
                    c_rel_BC = np.sqrt(0.25/np.pi*Pe/t_rel**3)*np.exp(-0.25*Pe/t_rel*(1-t_rel)**2)
                else:
                    if ti_rel == np.inf or t_rel <= ti_rel:
                        c_rel_BC = 0.5 * arg_BC_pp
                    else:
                        c_rel_BC = 0.5*(arg_BC_pp - arg_BC_np)
        else:
            c_rel_BC = None

    ## Superposition of both contributions.
    if ti_rel is None or c_rel_IC is None or c_rel_BC is None:
        c = None
    else:
        if t_rel == 0:
            # NOTE(review): unreachable as written — c_rel_IC is None at
            # t_rel == 0, so the guard above already yields None; kept for
            # parity with the original control flow.
            c = c0
        else:
            if ti_rel == 0:
                # Dirac pulse: scale by injected mass per pore volume.
                c = c0*c_rel_IC + mi/(ne*A*L)*c_rel_BC
            else:
                c = c0*c_rel_IC + ci*c_rel_BC
    return c
# -

# #### The ``Interactive`` cell ####
#
# Just execute it.

# +
def curve_data(L, R, ne, alpha, Q, c0, mi, ci, delta_t, t_max):
    """Sample transport() on the regular time grid [0, t_max) with step delta_t."""
    plot_c = []
    plot_t = np.arange(0, t_max, delta_t)
    for t in np.arange(0, t_max, delta_t):
        plot_c.append(transport(L, R, ne, alpha, Q, c0, mi, ci, delta_t, t))
    return plot_t, plot_c


def plot(L, R, ne, alpha, Q, c0, mi, ci, delta_t, t_max):
    """Plot the breakthrough curve c(t) at the column outlet."""
    plot_t, plot_c = curve_data(L, R, ne, alpha, Q, c0, mi, ci, delta_t, t_max)
    plt.plot(plot_t, plot_c)
    plt.ylabel('concentration [mg/cm³]')
    plt.ylim(-10, 30)
    plt.xlabel('Time [h]')
    plt.xlim(-1, t_max)
    plt.show()  # FIX: was `plt.show` — attribute access, the call never happened


# Guarding the widget wiring keeps notebook behaviour (__name__ is "__main__"
# inside Jupyter) while making the module importable without ipywidgets running.
if __name__ == "__main__":
    interact(plot,
             L=widgets.FloatSlider(value=50, min=0, max=500, step=1, description='column lenght [cm]:', disabled=False),
             R=widgets.FloatSlider(value=3, min=0, max=250, step=0.1, description='column radius [cm]:', disabled=False),
             ne=widgets.FloatSlider(value=0.25, min=0, max=1, step=0.05, description='eff. porosity [-]:', disabled=False),
             alpha=widgets.FloatSlider(value=0.1, min=0, max=100, step=0.01, description='dispersivity [cm]:', disabled=False),
             Q=widgets.FloatSlider(value=0.167, min=0, max=10, step=0.05, description='flow rate [cm³/h]:', disabled=False),
             c0=widgets.FloatSlider(value=0, min=0, max=1000, step=0.5, description='initital concentration [mg/cm³]:', disabled=True),
             mi=widgets.FloatSlider(value=2000, min=0, max=10000, step=10, description='input mass [mg]:', disabled=False),
             ci=widgets.FloatSlider(value=12.5, min=0, max=1000, step=0.5, description='input concentration [mg/cm³]:', disabled=False),
             delta_t=widgets.FloatSlider(value=70, min=0, max=100, step=0.5, description='time increment [h]:', disabled=False),
             t_max=widgets.FloatSlider(value=8400, min=0, max=10000, step=24, description='time [h]:', disabled=False),
             )
# -
_build/html/_sources/contents/tools/1D_advection_dispersion.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tensorflow_gpuenv
#     language: python
#     name: tensorflow_gpuenv
# ---

# +
# Data-free knowledge-distillation experiment (legacy Keras 1.x/2.x + TF1 API):
# a generator is trained GAN-style to produce images on which a frozen
# "teacher" wide-ResNet and a "student" network disagree, while the student
# is trained to match the teacher's outputs on those generated images.
from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.datasets import cifar10
from keras.callbacks import LearningRateScheduler
from keras.models import model_from_json
from keras.models import load_model
import numpy as np
import keras
import math
import time
import matplotlib.pyplot as plt
import sys
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore')

# Project-local helpers: wide-ResNet factory and CIFAR-10 loading utilities.
from ourwrnet import create_wide_residual_network
from cifar10utils import getCIFAR10, getCIFAR10InputShape


''' Function that loads from a file the teacher '''
def getTeacher(file_name):
    # Model reconstruction from JSON file (architecture only).
    with open(file_name + '.json', 'r') as f:
        model = model_from_json(f.read())
    # Load weights into the new model from the matching .h5 file.
    model.load_weights(file_name + '.h5')
    # NOTE(review): message is missing a space after "from".
    print('Teacher loaded from' + file_name + '.h5')
    return model


''' Function that loads from a file the teacher and test it on the CIRAF10 dataset '''
def testTeacher(file_name):
    # Evaluate the (frozen) teacher on the CIFAR-10 test split.
    x_train,y_train,x_test,y_test = getCIFAR10()
    model = getTeacher(file_name)
    # Legacy lowercase optimizer constructor (keras < 2.3 API).
    opt_rms = keras.optimizers.rmsprop(lr=0.001,decay=1e-6)
    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=opt_rms, metrics=['accuracy'])
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Teacher test loss:', score[0])
    print('Teacher test accuracy:', score[1])


''' Function that returns a simple student done by 2 convolutions, a maxpool and a final two fully connected layers '''
def getStudent(input_shape):
    # NOTE(review): the docstring above describes an older architecture; the
    # code actually builds a small wide-ResNet (N=2, k=1).  The intermediate
    # layer models m1/m2/m3 are discarded here — see the NameError note in
    # main(), which later expects them as t_layer1/t_layer2/t_layer3.
    num_classes = 10
    model_train,model_test,m1,m2,m3 = create_wide_residual_network(input_shape, nb_classes=10, N=2,k=1)
    print('Simple student loaded')
    return model_train, model_test


''' Function that returns a simple generator '''
def getGenerator():
    # Maps a 100-d noise vector to a 32x32x3 image via two 2x upsampling stages.
    noise_shape = (100,)
    model = Sequential()
    img_shape = getCIFAR10InputShape()
    model.add(Dense(128*8**2, input_shape=noise_shape))
    model.add(Reshape((8, 8, 128)))
    model.add(BatchNormalization())
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=(3,3), strides=1, padding="same"))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(3, kernel_size=(3,3), strides=1, padding="same"))
    model.add(BatchNormalization())
    #model.summary()
    print('Generator loaded')
    return model


def getGenerator_gan(teacher, student, generator):
    pass  # placeholder removed


def getGAN(teacher,student,generator):
    # Chain generator -> (teacher, student); both classifiers are frozen so
    # only the generator's weights are trainable inside this combined model.
    z = Input(shape=(100,))
    img = generator(z)
    student.trainable = False # it works if it is not true
    teacher.trainable = False
    out_t = teacher(img)
    out_s = student(img)
    # Concatenate the two 10-way outputs into one 20-dim tensor so a single
    # custom loss can compare them.
    joinedOutput = Concatenate()([out_t,out_s])
    gan = Model(z,joinedOutput)
    return gan


def gan_loss(y_true, y_pred):
    # Split the concatenated (teacher | student) output back apart.
    t_out = y_pred[:,0:10]
    # NOTE(review): slice end 21 exceeds the 20-dim concatenation; slicing is
    # clipped so this equals [:,10:20], but 20 would be the clearer bound.
    s_out = y_pred[:,10:21]
    loss = keras.losses.kullback_leibler_divergence(t_out,s_out)
    # Negated: the generator is trained to MAXIMISE teacher/student divergence.
    min_loss = (-(loss))
    return min_loss


def main():
    x_train,y_train,x_test,y_test = getCIFAR10()
    input_shape = getCIFAR10InputShape()
    teacher = getTeacher('./pretrained_models/wrn_16_2')
    teacher.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
    optim_stud = Adam(lr=2e-3, clipnorm=5.0)
    optim_gen = Adam(lr=1e-3, clipnorm=5.0)
    student_train, student_test = getStudent(input_shape)
    student_test.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
    # Student is trained to match the teacher's soft targets (KL divergence).
    student_train.compile(loss=keras.losses.kullback_leibler_divergence, optimizer=optim_stud)
    generator = getGenerator()
    gan = getGAN(teacher,student_train,generator)
    gan.summary()
    gan.compile(loss=gan_loss, optimizer=optim_gen)
    n_batches = 1000   # generator/student alternation steps (unused while the loop below is commented out)
    batch_size = 128
    log_freq = 10      # evaluate the student every log_freq batches
    ns = 5             # student updates per generator update
    print('TEACHER SUMMARY')
    teacher.summary()
    # NOTE(review): t_layer1/t_layer2/t_layer3 and `student` are never defined
    # in this scope (getStudent discards the intermediate models m1/m2/m3), so
    # everything from here on raises NameError as written.  This notebook is an
    # in-progress checkpoint (path says "Keras(not working)").
    print('TEACHER L1 SUMMARY')
    t_layer1.summary()
    print('TEACHER L2 SUMMARY')
    t_layer2.summary()
    print('TEACHER L3 SUMMARY')
    t_layer3.summary()
    print('STUDENT SUMMARY')
    student.summary()
    noise = np.random.normal(0, 1, (batch_size, 100))
    gen_imgs = generator.predict(noise)
    t_final_out = teacher.predict(gen_imgs)
    # NOTE(review): these messages say "shape" but print the full arrays.
    print('Teacher output shape: ' + str(t_final_out))
    t_l1_out = t_layer1.predict(gen_imgs)
    print('Teacher LAYER1 output shape: ' + str(t_l1_out))
    t_l2_out = t_layer2.predict(gen_imgs)
    print('Teacher LAYER2 output shape: ' + str(t_l2_out))
    t_l3_out = t_layer3.predict(gen_imgs)
    print('Teacher LAYER3 output shape: ' + str(t_l3_out))
    student_all_outputs = student_train.predict(gen_imgs)
    for i in range (len(student_all_outputs)):
        # NOTE(review): str + tuple — needs str(...) around .shape to run.
        print('Student output number ' + str(i) + ' shape: ' + student_all_outputs[i].shape)
    # The intended alternating training loop, currently disabled:
    '''
    for i in range(n_batches):
        noise = np.random.normal(0, 1, (batch_size, 100))
        gen_imgs = generator.predict(noise)
        t_predictions = teacher.predict(gen_imgs)
        fake_lbl = K.zeros((batch_size,20))
        g_loss = gan.train_on_batch(noise,fake_lbl)
        s_loss = 0
        for j in range(ns):
            s_loss += student_train.train_on_batch(gen_imgs,t_predictions)
        print('batch ' + str(i) + '/' + str(n_batches) + ' G loss: ' + str(g_loss) + ' S loss: ' + str(s_loss/ns))
        if (i % log_freq) == 0:
            score = student_test.evaluate(x_test, y_test, verbose=0)
            print('Student test loss: ' + str(score))
    score = student_test.evaluate(x_test, y_test, verbose=0)
    print('Student test loss: ' + str(score))
    '''


main()

# +
# Sanity check: KL divergence of a distribution with itself should be ~0
# (TF1 session-style evaluation).
import keras
from keras import backend as K
#a = K.zeros((2,4))
a = K.random_uniform((2,4))
loss = keras.losses.kullback_leibler_divergence(a,a)
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    print('tensor: ' + str(a.eval()))
    print('loss: ' + str(loss.eval()))
# -
Our_code/Keras(not working)/.ipynb_checkpoints/new_architecture-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="0539Pwt1Ygwz" colab_type="text"
# # Project objective
# In this project, we build a neural network model for predicting tissue of origin of cancer cell lines using their gene expression provided in the cancer cell line encyclopedia dataset. Although by definition the built model is a deep learning model, I try to avoid it for now.
#
# Information about the dataset, some technical details about the used machine learning method(s) and mathematical details of the quantification approaches are provided in the code.

# + [markdown] id="VjtJFxdsNh05" colab_type="text"
# # Packages we work with in this notebook
# We are going to use the following libraries and packages:
#
# * **numpy**: NumPy is the fundamental package for scientific computing with Python. (http://www.numpy.org/)
# * **sklearn**: Scikit-learn is a machine learning library for Python programming language. (https://scikit-learn.org/stable/)
# * **pandas**: Pandas provides easy-to-use data structures and data analysis tools for Python. (https://pandas.pydata.org/)
# * **keras**: keras is a widely-used neural network framework in python.

# + id="57oB2idEgr-g" colab_type="code" colab={}
import numpy as np
import pandas as pd
import keras

# + [markdown] id="Bb1Zm7ARN5D5" colab_type="text"
# # Introduction to the dataset
#
# **Name**: Cancer Cell Line Encyclopedia dataset
#
# **Summary**: Identifying tissue of origin of cancer cell lines using their gene expression. Cell lines from 6 tissues were chosen for this code including: breast, central_nervous_system, haematopoietic_and_lymphoid_tissue, large_intestine, lung, skin
#
# **number of features**: 500 (real, positive)
# The top 500 genes based on variance of their expression in the dataset are chosen. The right way to select the features is to do it only on the training set, to eliminate information leak from the test set. But to simplify the process for the sake of this teaching code, we use all the dataset.
#
# **Number of data points (instances)**: 550
#
# **dataset accessibility**: Dataset is available as part of PharmacoGx R package (https://www.bioconductor.org/packages/release/bioc/html/PharmacoGx.html)
#
# **Link to the dataset**: https://portals.broadinstitute.org/ccle

# + [markdown] id="QjBnejgpP0Gr" colab_type="text"
# ## Importing the dataset
# We can import the dataset in multiple ways
#
# **Colab Notebook**: You can download the dataset file (or files) from the link (if provided) and upload it to your google drive, then import the file (or files) as follows:
#
# **Note.** When you run the following cell, it tries to connect colab with google drive. Follow steps 1 to 5 in this link (https://www.marktechpost.com/2019/06/07/how-to-connect-google-colab-with-google-drive/) to complete the mounting process.

# + id="RILQWrhjQUtF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="071c5e82-32e8-4690-88e1-eddf33d143d3"
from google.colab import drive
drive.mount('/content/gdrive')
# This path is common for everybody
# This is the path to your google drive
input_path = '/content/gdrive/My Drive/'
# reading the data (features and target phenotype tables)
target_dataset_features = pd.read_csv(input_path + 'CCLE_ExpMat_Top500Genes.csv', index_col=0)
target_dataset_output = pd.read_csv(input_path + 'CCLE_ExpMat_Phenotype.csv', index_col=0)
# Transposing the dataframe to put features in the dataframe columns
# (rows become cell lines, columns become genes)
target_dataset_features = target_dataset_features.transpose()

# + [markdown] id="MJgoRANkcTHs" colab_type="text"
# **Local directory**: In case you save the data in your local directory, you need to change "input_path" to the local directory you saved the file (or files) in.
#
# **GitHub**: If you use my GitHub (or your own GitHub) repo, you need to change the "input_path" to where the file (or files) exist in the repo. For example, when I clone ***ml_in_practice*** from my GitHub, I need to change "input_path" to 'data/' as the file (or files) is saved in the data directory in this repository.
#
# **Note.**: You can also clone my ***ml_in_practice*** repository (here: https://github.com/alimadani/ml_in_practice) and follow the same process.

# + [markdown] id="qW4bVMDCdPVW" colab_type="text"
# ## Making sure about the dataset characteristics (number of data points and features)

# + id="TpSupKvgdS3e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="23f163ec-fda2-445d-bb6e-1371788e129a"
print('number of features: {}'.format(target_dataset_features.shape[1]))
print('number of data points: {}'.format(target_dataset_features.shape[0]))

# + [markdown] id="UIX-LbyLeEc6" colab_type="text"
# ## Data preparation
# We need to prepare the dataset for machine learning modeling. Here we prepare the data in 3 steps:
#
# 1) Selecting target columns from the output dataframe (target_dataset_output)
#
# 2) Converting tissue names to integers (one for each tissue)
#
# 3) Converting the integer array of labels to one-hot encodings to be used in neural network modeling

# + id="8GI52MUkePCR" colab_type="code" colab={}
# tissueid is the column that contains tissue type information
output_var_names = target_dataset_output['tissueid']

# converting tissue names to integer labels
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(output_var_names)
output_var = le.transform(output_var_names)

class_number = len(np.unique(output_var))

# transforming the output array to an array of one hot vectors
# it means that we have a vector for each datapoint
# with length equal to the number of classes
# Depending on the class of each datapoint,
# one of the values (for that class) will be one
# and the rest of them will be zero for each data point
# .reshape(-1,1) has to be used to transform a 1d array of class
# numbers to a 2d array ready to be encoded by OneHotEncoder
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder()
output_var = ohe.fit_transform(output_var.reshape(-1,1)).toarray()

# we would like to use all the features as input features of the model
input_features = target_dataset_features

# + [markdown] id="qgejl_XWhWqN" colab_type="text"
# ## Splitting data to training and testing sets
#
# We need to split the data to train and test, if we do not have a separate dataset for validation and/or testing, to make sure about generalizability of the model we train.
#
# **test_size**: Traditionally, 30%-40% of the dataset can be used for the test set. If you split the data to train, validation and test, you can use 60%, 20% and 20% of the dataset, respectively.
#
# **Note.**: We need the validation and test sets to be big enough for checking generalizability of our model. At the same time we would like to have as much data as possible in the training set to train a better model.
#
# **random_state**, as the name suggests, is used for initializing the internal random number generator, which will decide the splitting of data into train and test indices in your case.

# + id="V3L9BbkSg2vp" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(input_features, output_var, test_size=0.30, random_state=5)

# + [markdown] id="d0Fi1jpVkbhC" colab_type="text"
# ## Building the supervised learning model
# We want to build a multi-class classification model as the output variable includes multiple classes. Here we build a neural network model with 2 hidden layers. A neural network with 2 or more hidden layers is called a deep neural network, so technically this is a deep learning code. As you can see, the implementation of a deep learning model is not difficult. But knowing how to interpret it, how to fine-tune the model and avoid overfitting are the parts that need experience and more knowledge.
#
# ### Fully connected neural network

# + id="fj3SSteMkxb2" colab_type="code" colab={}
from keras.models import Sequential
from keras.layers import Dense

# building a neural network model
model = Sequential()
# adding 1st hidden layer with 128 neurons and relu as its activation function
# input_dim should be specified as the number of input features
model.add(Dense(128, input_dim=target_dataset_features.shape[1], activation='relu'))
# adding 2nd hidden layer with 64 neurons and relu as its activation function
model.add(Dense(64, activation='relu'))
# adding the output layer (softmax is used to generate probabilities for each predicted class)
# Size of the last layer should be equal to the total number of classes in the dataset
model.add(Dense(class_number, activation='softmax'))

# compiling the model using cross-entropy for categorical variables,
# as we are dealing with multi-class classification
# Adam optimization algorithm is also used
# Accuracy is used as the metric to assess performance of our model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# + [markdown] id="fgszolas0q7X" colab_type="text"
# Now we fit our neural network model using the training set:

# + id="Od9xWVWhzp28" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="9bbcc8d9-bfde-4e03-c76f-1c5ee3f7c3c6"
# Train the model using the training set
model.fit(X_train, y_train, epochs=300, batch_size=64)

# + [markdown] id="vWb1aK68rGfT" colab_type="text"
# The model is trained now and can be used to predict the labels of datapoints in the test set.
# Note. To be able to assess the performance of the predictions in the test set using the metrics class in sklearn, we need to transform the true labels and the predictions from one-hot encodings to lists.
# + id="xyBVyd9QqS4e" colab_type="code" colab={}
# Predict class probabilities on the held-out test set and convert both the
# predictions and the one-hot test labels back to integer class labels so
# sklearn.metrics can compare them.
y_pred = model.predict(X_test)

# Converting predictions to labels (index of the max probability per row)
pred = [int(np.argmax(row)) for row in y_pred]

# Converting one-hot encoded test labels back to integer labels
test = [int(np.argmax(row)) for row in y_test]

# + [markdown] id="C-Uby3AV1OID" colab_type="text"
# ## Evaluating performance of the model
# We need to assess performance of the model using the predictions of the test set. We use accuracy and balanced accuracy. Here are their definitions:
#
# * **recall** in this context is also referred to as the true positive rate or sensitivity
#
# How many relevant items are selected
#
# $${\displaystyle {\text{recall}}={\frac {tp}{tp+fn}}\,}$$
#
# * **specificity**: true negative rate
#
# $${\displaystyle {\text{true negative rate}}={\frac {tn}{tn+fp}}\,}$$
#
# * **accuracy**: This measure gives you a sense of performance for all the classes together as follows:
#
# $${\displaystyle {\text{accuracy}}={\frac {tp+tn}{tp+tn+fp+fn}}\,}$$
#
# \begin{equation*} accuracy=\frac{number\:of\:correct\:predictions}{(total\:number\:of\:data\:points (samples))} \end{equation*}
#
# * **balanced accuracy**: This measure averages per-class performance, which is more informative for imbalanced classes:
#
# $${\displaystyle {\text{balanced accuracy}}={\frac {recall+specificity}{2}}\,}$$

# + id="kdDOtXow1CKT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="3b1745a1-f054-458c-8ba0-01e9b897bab9"
from sklearn import metrics

# FIX: sklearn metrics take (y_true, y_pred) in that order.  accuracy_score is
# symmetric, so the old (pred, test) call happened to give the same number,
# but balanced_accuracy_score is NOT symmetric (it averages recall over the
# true classes), so passing (pred, test) computed the wrong quantity.
# Also fixed the "Blanced" typo in the printed message.
print('Accuracy of the neural network model is:', metrics.accuracy_score(test, pred)*100)
print("Balanced accuracy of the neural network model is:", metrics.balanced_accuracy_score(test, pred))
code/project16_neuralnet_tissuetype_ccle.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Basic Tensors
# In this ungraded lab, you will try some of the basic operations you can perform on tensors.

# ## Imports

# +
try:
  # # %tensorflow_version only exists in Colab.
  # %tensorflow_version 2.x
except Exception:
  pass

import tensorflow as tf
import numpy as np
# -

# ## Exercise on basic Tensor operations
#
# Let's create a single-dimension numpy array on which you can perform some operations. You'll make an array of size 25, holding values from 0 to 24.

# Create a 1D uint8 NumPy array comprising of the first 25 natural numbers
x = np.arange(0, 25)
x

# Now that you have your 1-D array, next you'll change that array into a `tensor`. After running the code block below, take a moment to inspect the information of your tensor.

# Convert the NumPy array to a Tensor using `tf.constant`
x = tf.constant(x)
x

# As the first operation to be performed, you'll square (element-wise) all the values in the tensor `x`

# Square the input tensor x
x = tf.square(x)
x

# One feature of tensors is that they can be reshaped. When reshaping, make sure you consider dimensions that will include all of the values of the tensor.

# Reshape tensor x into a 5 x 5 matrix.
x = tf.reshape(x, (5, 5))
x

# Notice that you'll get an error message if you choose a shape that cannot be exactly filled with the values of the given tensor.
# * Run the cell below and look at the error message
# * Try to change the tuple that is passed to `shape` to avoid an error.

# Try this and look at the error
# Try to change the input to `shape` to avoid an error
tmp = tf.constant([1,2,3,4])
tf.reshape(tmp, shape=(2,3))

# Like reshaping, you can also change the data type of the values within the tensor. Run the cell below to change the data type from `int` to `float`

# Cast tensor x into float32. Notice the change in the dtype.
x = tf.cast(x, tf.float32)
x

# Next, you'll create a single value float tensor by the help of which you'll see `broadcasting` in action

# Let's define a constant and see how broadcasting works in the following cell.
y = tf.constant(2, dtype=tf.float32)
y

# Multiply the tensors `x` and `y` together, and notice how multiplication was done and its result.

# Multiply tensor `x` and `y`. `y` is multiplied to each element of x.
result = tf.multiply(x, y)
result

# Re-initialize `y` to a tensor having more values.

# Now let's define an array that matches the number of row elements in the `x` array.
y = tf.constant([1, 2, 3, 4, 5], dtype=tf.float32)
y

# Let's see first the contents of `x` again.
x

# Add the tensors `x` and `y` together, and notice how addition was done and its result.

# Add tensor `x` and `y`. `y` is added element wise to each row of `x`.
result = x + y
result

# ### The shape parameter for tf.constant
#
# When using `tf.constant()`, you can pass in a 1D array (a vector) and set the `shape` parameter to turn this vector into a multi-dimensional array.

tf.constant([1,2,3,4], shape=(2,2))

# ### The shape parameter for tf.Variable
#
# Note, however, that for `tf.Variable()`, the shape of the tensor is derived from the shape given by the input array. Setting `shape` to something other than `None` will not reshape a 1D array into a multi-dimensional array, and will give a `ValueError`.

try:
  # This will produce a ValueError
  tf.Variable([1,2,3,4], shape=(2,2))
except ValueError as v:
  # See what the ValueError says
  print(v)
Custom and Distributed Training with TensorFlow/Week1/C2_W1_Lab_1_basic-tensors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="l8Syssvjisg-"
# ### LOAD DIVERSITY CSV
# most updated csv

# + id="34aAImFOmiDw"
import pandas as pd
pd.set_option('display.max_columns', None)

# + id="5jcNDdJaANTr"
merged = pd.read_csv('diversity.csv')

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="8kYRziq8imZb" outputId="f18b7004-d999-4715-d5d7-fd4412842637"
merged.head()

# + [markdown] id="AVhGV4UxmiD5"
# ### NEAREST NEIGHBORS

# + id="xkwNAm0imiDM"
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline

# + id="J28Z3TVqmiD7"
# Numeric feature matrix used for the neighbor search: demographics, income,
# employment, commute, crime, rent, and air-quality columns of the merged CSV.
X = merged[[ 'TotalPop', 'Men', 'Women', 'Hispanic', 'White', 'Black', 'Native', 'Asian', 'Pacific', 'Diversity Index', 'Income', 'IncomeErr', 'IncomePerCap', 'IncomePerCapErr', 'Poverty', 'ChildPoverty', 'Employed', 'Unemployment', 'PrivateWork', 'PublicWork', 'SelfEmployed','FamilyWork', 'Professional', 'Service', 'Office', 'Construction','Production', 'Drive', 'Carpool', 'Transit', 'Walk', 'OtherTransp', 'WorkAtHome', 'MeanCommute', 'Violent crime', 'Murder and nonnegligent manslaughter', 'Rape', 'Robbery', 'Aggravated assault', 'Property crime', 'Burglary', 'Larceny- theft', 'Motor vehicle theft', 'Arson', 'Crime Rate per 1000', 'Rent', 'Days with AQI','Good Days', 'Moderate Days', 'Unhealthy for Sensitive Groups Days', 'Unhealthy Days', 'Very Unhealthy Days', 'Hazardous Days', 'Max AQI', '90th Percentile AQI', 'Median AQI', 'Days CO', 'Days NO2','Days Ozone', 'Days SO2', 'Days PM2.5', 'Days PM10' ]]

# + [markdown] id="EBm_FTyBmiEJ"
# ## Nearest Neighbors

# + id="dBxvFMQmmiEM"
# scaling data since the values are not on the same scale, i.e. 1000s for rent
# and population versus numbers shown as percentages
scaler = StandardScaler()
standard_df = scaler.fit_transform(X)
standard_df = pd.DataFrame(standard_df, columns = X.columns)

# + colab={"base_uri": "https://localhost:8080/"} id="PTonh9czmiEN" outputId="645893aa-a45c-42d5-a023-166205b18aae"
# 6 neighbors so that, after dropping the query row itself, 5 remain.
nn = NearestNeighbors(n_neighbors=6, algorithm='kd_tree', n_jobs=8)
nn.fit(standard_df)

# + id="gL6gJdPHmiEO"
def nearest(idx):
    # Returns the 5 nearest row indices (excluding idx itself, hence [1:])
    # as a comma-separated string, e.g. "12,45,7,301,9".
    return ','.join(map(str, nn.kneighbors([standard_df.iloc[idx]])[1][0][1:].tolist()))

# + id="IpmEYhcemiET"
merged['Index'] = merged.index

# + id="iwGAdyF3miEV"
merged['Nearest'] = merged['Index'].apply(nearest)

# + [markdown] id="DOpmE3DAmiEa"
# ### TEST

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="z3fHHgmemiEb" outputId="55304463-9f5a-45a0-add1-7cfa62f276a8"
merged.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 170} id="6uMRSdAUmiEc" outputId="106dd717-832d-4a05-969e-f3830f2db42c"
merged[0:1]

# + colab={"base_uri": "https://localhost:8080/", "height": 187} id="FCN0UY0qmiEd" outputId="1ac4a38a-602e-4786-e7ff-1546da43c93a"
merged[126:127]

# + colab={"base_uri": "https://localhost:8080/", "height": 170} id="d_hZ10bMmiEf" outputId="3790830f-494f-46c3-bfb7-4eaa668d23c1"
merged[56:57]

# + colab={"base_uri": "https://localhost:8080/", "height": 187} id="iers9syXmiEf" outputId="69aa6ba6-6472-4b3a-e894-335f7c5711bb"
merged[332:333]

# + colab={"base_uri": "https://localhost:8080/", "height": 170} id="nobStoRimiEj" outputId="04dce572-0780-4a45-b517-d5fe8ed1d05b"
merged[355:356]

# + colab={"base_uri": "https://localhost:8080/", "height": 170} id="SvGVGIg0miEl" outputId="cd60a8cb-e5d0-45ec-c654-049e317a86cf"
merged[295:296]

# + [markdown] id="Q0ReIfYClhyZ"
# ### DROP COLUMNS

# + id="MvnqNufgllmV"
# 'Index' was only needed to drive apply(nearest); drop it before export.
merged = merged.drop(columns=['Index'], axis=1)

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="BysYJ-RToDkv" outputId="877a2019-5e43-4afd-c1d5-04e9642ed86f"
merged.head()

# + [markdown] id="MrcN3BDOmiEm"
# ### SAVE

# + id="otLtz7lEmiEn"
merged.to_csv('nn_model.csv', index=False)

# + [markdown] id="0ag49Ousngti"
# ### UPDATE DATABASE

# + colab={"base_uri": "https://localhost:8080/"} id="Lh3W9Ts3O7yq" outputId="5e96bd42-76de-419c-c2c9-b2aa8a59a057"
# !pip install sqlalchemy psycopg2-binary

# + id="u3fwVeIPPCOc"
import sqlalchemy

# NOTE(review): DATABASE_URL is not defined anywhere in this notebook — it
# must be supplied (e.g. from an environment variable or a secrets cell)
# before this cell runs, otherwise this raises NameError.
engine = sqlalchemy.create_engine(DATABASE_URL)
connection = engine.connect()

# + id="qsCQpghbojjq"
df = pd.read_csv('nn_model.csv')

# + id="5hBV3dAOomOM"
# Replaces the "data" table wholesale with the freshly computed model output.
df.to_sql("data", con=engine, method='multi', if_exists='replace')

# + [markdown] id="gI-Hri9Wi-P6"
# ### CHECK

# + colab={"base_uri": "https://localhost:8080/", "height": 513} id="VBYuxetoomIE" outputId="8f9b9a88-e94b-401c-97ac-4864fbadc9ac"
pd.read_sql('data', con=engine)
notebooks/model/nearest_neighbor/nn_updated.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown]
# <a href="https://colab.research.google.com/github/ManojKesani/100-Days-Of-ML-Code/blob/master/Language_Models_and_Ecco_PyData_Khobar.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown]
# ## Intro to Ecco -- Making Language Models More Transparent
# This notebook is an intro to [Ecco](https://www.eccox.io) and is the companion
# to the video: [Take A Look Inside Language Models With Ecco | PyData Khobar](https://www.youtube.com/watch?v=rHrItfNeuh0).

# +
# Install Ecco. This assumes you have pytorch installed.
# !pip install ecco

import warnings

warnings.filterwarnings('ignore')

# +
import ecco

# Load a pre-trained language model. Setting 'activations' to True tells Ecco
# to capture neuron activations. 'distilgpt2' is a distilled GPT-2 model; you
# can also try 'gpt2'.
lm = ecco.from_pretrained('distilgpt2', activations=True)

# + [markdown]
# ## Overview
#
# Complete the sentence:
# `It was a matter of ____`

# +
text = " it was a matter of"

# Generate one token
output_1 = lm.generate(text, generate=1, do_sample=False)

# +
# Show the top 10 candidate output tokens for position #5.
# Layer 5 is the last layer in the model.
output_1.layer_predictions(position=5, layer=5, topk=10)

# + [markdown]
# This view shows the top 10 candidate tokens, their probability, and their rankings.
#
# ### Comparing two token candidates for a single position

# +
# What are the token IDs of the two words?
lm.tokenizer(" principle principal")

# +
# Compare the rankings of "Principle" and "Principal" across layers
output_1.rankings_watch(watch=[7989, 10033], position=5)

# + [markdown]
# ## Exploring World Knowledge And Layer Analysis
# Does the model "know" where Heathrow airport is located? To probe the model,
# let's try the input sentence:
#
# `Heathrow airport is located in ____`

# +
text = " Heathrow airport is located in"
output_2 = lm.generate(text, generate=5, do_sample=False)

# + [markdown]
# While the output is not incorrect, it doesn't really answer the question we
# are after. Let's slightly change the input sentence:

# +
text = " Heathrow airport is located in the city of"
output_2 = lm.generate(text, generate=1, do_sample=False)

# +
# What other tokens were possible to output in place of "London"?
output_2.layer_predictions(position=9, layer=5, topk=30)

# +
# Now that the model has selected the tokens "London . \n"
# How did each layer rank these tokens during processing?
output_2.rankings()

# + [markdown]
# This visualization is based on the great visual treatment by nostalgebraist
# in [Interpreting GPT: the logit lens](https://www.lesswrong.com/posts/AcKRB8wDpdaN6v6ru/interpreting-gpt-the-logit-lens).

# + [markdown]
# ### Probing the model's world knowledge
#
# What happens if we present the following input sentence to the model:
#
# `The countries of the European Union are:\n1. Austria\n2. Belgium\n3. Bulgaria\n4. ___________`
#
# Namely, we have these questions:
# * Q. Will the model continue the numbering correctly?
# * Q. Will it succeed in following the formatting?
# * Q. Will it succeed in naming countries? European ones?
# * Q. Will the model "notice" the alphabetical order of the list? Will it follow it?

# +
text = "The countries of the European Union are:\n1. Austria\n2. Belgium\n3. Bulgaria\n4."
output_3 = lm.generate(text, generate=20, do_sample=True)

# + [markdown]
# The `rankings()` visualization view shows us at which layers the model
# resolved the output token for each position.

# +
output_3.rankings()

# + [markdown]
# The `saliency()` visualization shows which tokens contributed the most
# towards generating each output token (using the gradient X Input method):

# +
output_3.saliency()

# + [markdown]
# ### Detailed saliency view
# We can see a more detailed view of the saliency values using the detailed view:

# +
output_3.saliency(style="detailed")
Language_Models_and_Ecco_PyData_Khobar.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Occlusion Analysis
#
# Measures the importance of each area in an image for making a match between
# a query-key pair.

# %load_ext autoreload
# %autoreload 2
# %matplotlib inline

# +
import torch
import numpy as np
import torchvision.transforms as tf
from matplotlib import pyplot as plt

# we're using the captum library to compute the occlusion analysis
from captum.attr import Occlusion
# -

# local import of a few functions/classes
from _helpers import *

# +
# pick the best available device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# download the CellEMNet weights and load them into the MoCo resnet50 model
model, norms = load_full_moco_cellemnet()

# we only need the query encoder (encoder_q); encoder_k is the key encoder
model_q = model.encoder_q.to(device)
model_q = model_q.eval()

# +
# set normalization parameters
normalize = tf.Normalize(mean=[norms[0]], std=[norms[1]])

# create the transform pipeline
aug = tf.Compose([
    tf.Grayscale(1),
    tf.RandomResizedCrop(224, scale=(0.5, 1.)),
    tf.RandomHorizontalFlip(),
    tf.RandomVerticalFlip(),
    tf.ToTensor(),
    normalize
])

# create the dataset
dataset = DataFolder('example_data/occlusion/', aug)

# +
# randomly generate the query and key images;
# index picks an image in the dataset -- any number from 0-9 is OK
index = 4
image1, image2 = dataset[index]


# override the default decoder to hardcode image2, such that the only argument
# to forward is image1 (the Occlusion class cannot handle multiple inputs
# correctly)
class Decoder(DefaultDecoder):
    def __init__(self, encoder):
        super(Decoder, self).__init__(encoder)

    def forward(self, image1):
        # occlusion attribution is perturbation-based, so no gradients are
        # needed anywhere in the forward pass
        with torch.no_grad():
            key = self.encoder(image2.unsqueeze(0).to(device))
            query = self.encoder(image1)
            compare = self.compare_encodings(query, key)
            return compare


# create the decoder on device
decoder = Decoder(model_q).to(device)
decoder = decoder.eval()

# perform the occlusion analysis; adjust the stride and sliding window shapes
# to pick out larger or smaller regions that are important
occlusion = Occlusion(decoder)
attributions_occ = occlusion.attribute(
    image1.unsqueeze(0).to(device),
    strides=(1, 15, 15),
    sliding_window_shapes=(1, 31, 31),
    baselines=0
)

# rescale to a numpy array in range [0-1]
# (1, 1, H, W) --> (H, W)
attributions_occ = rescale(attributions_occ.squeeze().detach().cpu())
# -

f, ax = plt.subplots(1, 2, figsize=(8, 4))
ax[0].imshow(image1[0], cmap='gray')
ax[0].imshow(attributions_occ, alpha=0.3, cmap='plasma')
ax[0].set_title('Occluded Image')
ax[1].imshow(image2[0], cmap='gray')
ax[1].set_title('Reference Image')
notebooks/occlusion_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# This notebook revisits some of the literary historical trends found by <NAME>
# in his article "Style, Inc." (<i>Critical Inquiry</i>, 36.1 (2009), 134-158).
# See especially his Figures 1 (p 135) and 18 (p 155).
#
# Note that the dataset used in this notebook is not Moretti's bibliography of
# novels, but Hathi Trust's catalog of fiction texts
# (https://sharc.hathitrust.org/genre).

# ## Metadata
# <li>Inspecting & Cleaning</li>
# <li>Trends</li>
#
# ## Detecting Word Patterns
# <li>Intro to Regex</li>
# <li>A Fortunate Formula</li>

# # Inspecting & Cleaning

# %pylab inline
from datascience import *

metadata_tb = Table.read_table("fiction_metadata.csv")
metadata_tb

# Remove rows that contain duplicate titles.
# Sets are specially designed to handle unique elements and check for
# duplicates efficiently.
titles = set()
indexes = []
for i in range(len(metadata_tb['title'])):
    if metadata_tb['title'][i] not in titles:
        indexes.append(i)
        titles.add(metadata_tb['title'][i])
singlevol_tb = metadata_tb.take(indexes)

# Inspect annual distribution of books
singlevol_tb.hist('date')

# +
# Limit to Moretti's date range
date_mask = (singlevol_tb['date'] > 1750) & (singlevol_tb['date'] < 1850)
singlevol_tb = singlevol_tb.where(date_mask)

# +
# EX. Plot the distribution of page counts ('totalpages').
# Should we remove any entries from our metadata? Why or why not?

# EX. Plot the distribution of confidence values that given texts are fiction ('prob80precise').
# Should we remove any entries from our metadata? Why or why not?
# -

# # Trends

import numpy as np

singlevol_tb = singlevol_tb.select(['title', 'date'])

# Determine length of each title
title_tokens = [x.split() for x in singlevol_tb['title']]
title_length = [len(x) for x in title_tokens]
singlevol_tb['title_len'] = title_length
singlevol_tb

# Determine average title length per year
mean_table = singlevol_tb.group('date', collect=np.mean)
mean_table

mean_table.scatter('date', 'title_len mean')

# Does the pattern hold when we treat individual titles as data points?
singlevol_tb.scatter('date', 'title_len')

singlevol_tb.scatter('date', 'title_len', fit_line=True)

# +
# EX. Moretti also produces graphs for the median and standard deviation
# of title lengths by year. Create graphs that represent these data.
# -

# # Intro to Regex (Regular Expressions)

import re

# Example from previous lesson
for line in open('lecture notes 09-22-15.txt'):
    for word in line.split():
        if word.endswith('ing'):
            print(word)

# Reproduced using regex
for line in open('lecture notes 09-22-15.txt'):
    for word in line.split():
        if re.search(r'ing$', word):  # only change from above
            print(word)

# +
# EX. Remove the "$" from the code above. How does it change the output? Why?
# -

word = 'Having'
re.search(r'ing$', word)

word = 'Ideas'
re.search(r'ing$', word)

# assign list of words to variable, so we don't have to read in the file each time
with open('lecture notes 09-22-15.txt') as file_in:
    lec_notes = file_in.read()
word_list = lec_notes.split()

[word for word in word_list if re.search(r'^..t..$', word)]

# +
# EX. What do you think the "^" and "." metacharacters do in the code?
# -

[word for word in word_list if re.search(r'^a.*t', word)]

# +
# EX. What do you think the "*" metacharacter does in the code?
# -

poe = "While I nodded, nearly napping, suddenly there came a tapping,\
As of someone gently rapping, rapping at my chamber door."

re.findall(r'.apping', poe)

re.findall(r'.(?=apping)', poe)

re.findall(r"(?<=ly ).apping", poe)

re.findall(r"(?<=ly ).(?=apping)", poe)

# +
# EX. Find a list of "-apping" words that are followed by a comma in the line from Poe
# -- but make sure the comma doesn't appear in your list entries!
# -

# # A Fortunate Formula

def istheXofY(text):
    # A title matches the formula when it contains "the ... of ..." and is at
    # most four words long.  (`is not None` replaces the original `!= None`.)
    return re.search(r'the .* of .*', text.lower()) is not None and len(text.split()) <= 4

print(istheXofY('The Castle of Otronto'))
print(istheXofY('The Castle in which there are some people of Otronto and other places'))

# Graph the frequency of "The X of Y" titles per decade
singlevol_tb['theXofY'] = singlevol_tb.apply(istheXofY, 'title')
singlevol_tb['decade'] = singlevol_tb['date'] // 10 * 10
singlevol_tb.group('decade', collect=np.mean).scatter('decade', 'theXofY mean')

# Create table containing only "The X of Y" titles
theXofY_tb = singlevol_tb.where('theXofY').drop('theXofY')

# +
def gettheX(text):
    # The text between a leading "the " and " of "
    X = re.findall(r'(?<=the ).*(?= of )', text.lower())[0]
    return X

def gettheY(text):
    # The text after " of "
    Y = re.findall(r'(?<= of ).*', text.lower())[0]
    return Y
# -

print(gettheX('The Castle of Otronto'))
print(gettheY('The Castle of Otronto'))
print()
print(gettheX('The castle in which there are some people of Otronto and other places'))
print(gettheY('The castle in which there are some people of Otronto and other places'))

# Create new columns containing only the X and the Y from each title
theXofY_tb['theX'] = theXofY_tb.apply(gettheX, 'title')
theXofY_tb['ofY'] = theXofY_tb.apply(gettheY, 'title')
theXofY_tb

# +
from collections import Counter

Xs = Counter(theXofY_tb['theX'])
Ys = Counter(theXofY_tb['ofY'])
# -

Xs.most_common(10)

Ys.most_common(10)

# +
# EX. In Moretti's study, he gives examples of titles using the formula "The X of Y"
# with lengths of up to seven words. If we tweak our function istheXofY() to allow
# for longer titles, how does this change our findings? Why?
10-Metadata/03-Bonus-Moretti/03-Metadata.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
from qiskit import *
import re
import subprocess
import os

"""
define a class to hold HODL oracle code alongside parameters
this allows many instances of an oracle to be defined
--> not limited to input-dependent compilation
"""


class HODLOracle:
    def __init__(self, func_name, init_params, program):
        # accepts body of the oracle, parameters to the oracle in order,
        # and the oracle name
        self.program = program
        self.params = init_params
        self.fname = func_name

    def to_circ(self, params):
        """Compilation function: takes an oracle instance, generates a HODL
        program, invokes the compiler binary, converts the output assembly to
        a qiskit circuit, and returns it.
        """
        main = "function main() {"
        # Iterate over each parameter; a quantum register is declared as a
        # |0>^n register.  This initialization is such that the circuit can be
        # concatenated with any desired input register.
        # The classical index serves as a counter for classically passed
        # values, i.e. phase angles.
        cindex = 0
        for q in params:
            if isinstance(q, QuantumRegister):
                main += f"\nsuper {q.name} = {2**(q.size)};\nH({q.name});"
            else:
                main += f"\nint c{cindex} = {q.name};"
                cindex += 1
        # call the oracle function
        main += f"\n{self.fname}({','.join([p.name for p in params])});"
        result = self.program + main + "\n}"
        # write the program (context manager guarantees the file is flushed
        # and closed before the compiler reads it), invoke the ./qc compiler,
        # and load the emitted QASM back as a qiskit circuit
        with open("program.hodl", "w") as f:
            f.write(result)
        subprocess.call(["./qc", "program.hodl"])
        circ = QuantumCircuit.from_qasm_file("out.qasm")
        os.remove("program.hodl")
        os.remove("out.qasm")
        return circ


def init_hodl(code):
    """Take an oracle's source code and output a HODLOracle object."""
    # raw strings so the regex escapes \( and \) are not treated as
    # (invalid) Python string escape sequences
    fname = re.search(r"function (.*)\(", code).group(1)
    params = re.search(r"\((.*)\)", code).group(1)
    params = params.split(",")
    # parameter map --> {type: name}
    # NOTE(review): keyed by type, so two parameters sharing a type would
    # overwrite each other -- confirm against HODL's intended usage
    param_dict = {}
    for p in params:
        p = p.split()
        param_dict[p[0]] = p[1]
    oracle = HODLOracle(fname, param_dict, code)
    return oracle
# -

qr = QuantumRegister(name="reg1", size=3)
qr1 = QuantumRegister(name="reg2", size=2)

oracle = init_hodl("""
function cool(super a) {
    if(a > 5) {
        mark(a,pi);
    }
}
""")

oracle.to_circ([qr]).draw(output="mpl")
modules/QISKit HODL module.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# <a href="https://colab.research.google.com/github/AuFeld/Project2/blob/master/notebooks/Project_2_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
DATA_PATH = 'Project_2_Data.xlsx'

# +
# !pip install category_encoders==2.*
# !pip install pdpbox
# !pip install shap
# !pip install eli5
# !pip install pandas-profiling==2.*

# +
# import libraries
import pandas as pd
import pandas_profiling
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from xgboost import XGBRegressor
from sklearn.metrics import mean_absolute_error
import seaborn as sns
import matplotlib.pyplot as plt
import category_encoders as ce
import eli5
from eli5.sklearn import PermutationImportance

# +
df = pd.read_excel(DATA_PATH)

# +
df.head()

# +
# choose your target. which column will you predict?
# target: 2019 Champion

# +
# regression or classification?
# classification

# +
# evaluation metric: accuracy

# +
columns = ['Club', 'Season', 'GW_Played', 'Goals_Scored', 'Goals_Conceded',
           'Goal_Difference', 'Expenditure', 'Income', 'Net', 'Points',
           'Points_per_GW', 'Expenditure_per_Point_per_GW', 'Manager', 'Owner',
           'Champion', 'Relegated']

# +
# split data by season
train = df[df['Season'] < 2018]
val = df[df['Season'] == 2018]
test = df[df['Season'] == 2019]

# +
train.shape, val.shape, test.shape

# +
train.describe(exclude='number').T.sort_values(by='unique')

# +
# null values?
df.isna().sum()

# +
# assign variables
target = 'Champion'
features = df.columns.drop([target])
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]

# +
# target distribution

# +
y = df['Champion']

# +
y.nunique()

# +
y.value_counts(normalize=True).max()

# +
# Feature Importance

# +
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)

# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))

# +
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, X_train.columns)

# Plot feature importances
# %matplotlib inline
import matplotlib.pyplot as plt

n = 10
plt.figure(figsize=(10, n / 2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');

# +
# observation: Goals Scored has a higher importance re Champion

# +
# The $125m question - RELEGATION

# +
target = 'Relegated'
features = df.columns.drop([target])
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]

# +
# target distribution
y = df['Relegated']
y.nunique()

# +
y.value_counts(normalize=True).max()

# +
# Feature Importance

# +
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)

# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))

# +
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, X_train.columns)

# Plot feature importances
# %matplotlib inline
import matplotlib.pyplot as plt

n = 10
plt.figure(figsize=(10, n / 2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');

# +
# observation: Goals Conceded (defense) has a higher importance re Relegation

# + [markdown]
# "<NAME>, senior manager in the sports group at Deloitte, stated that the
# main reason that playoff finals are worth so much is because of the rising
# TV broadcast deals in the Premier League. Furthermore, most of the rising
# payments has to also do with the rising financial gulf in the Premier
# League, with a team finishing in the bottom half of the table receiving
# £95m to £100m from TV money alone."

# +
# Regression Model re Points

# +
target = 'Points'
features = df.columns.drop([target])
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]

# +
# %matplotlib inline
sns.distplot(y_train);

# +
lr = make_pipeline(
    ce.TargetEncoder(),
    LinearRegression()
)
lr.fit(X_train, y_train)
print('Linear Regression R^2', lr.score(X_val, y_val))

# +
coefficients = lr.named_steps['linearregression'].coef_
pd.Series(coefficients, features)

# +
gb = make_pipeline(
    ce.OrdinalEncoder(),
    XGBRegressor(n_estimators=200, objective='reg:squarederror', n_jobs=-1)
)
gb.fit(X_train, y_train)
y_pred = gb.predict(X_val)
print('Gradient Boosting R^2', r2_score(y_val, y_pred))

# +
# Shapley Values

# +
# Assign to X, y
features = ['Points_per_GW', 'Goal_Difference', 'Goals_Scored', 'Goals_Conceded']
target = 'Points'
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]

# +
from scipy.stats import randint, uniform
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import RandomizedSearchCV

param_distributions = {
    'n_estimators': randint(50, 500),
    'max_depth': [5, 10, 15, 20, None],
    'max_features': uniform(0, 1),
}

search = RandomizedSearchCV(
    RandomForestRegressor(random_state=42),
    param_distributions=param_distributions,
    n_iter=5,
    cv=2,
    scoring='neg_mean_absolute_error',
    verbose=10,
    return_train_score=True,
    n_jobs=-1,
    random_state=42
)
search.fit(X_train, y_train);

# +
print('Best hyperparameters', search.best_params_)
print('Cross-validation MAE', -search.best_score_)
model = search.best_estimator_

# +
row = X_test.iloc[[0]]

# +
y_test.iloc[[0]]

# +
model.predict(row)

# +
import shap

explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(row)

shap.initjs()
shap.force_plot(
    base_value=explainer.expected_value,
    shap_values=shap_values,
    features=row
)

# +
# Feature Importance for Regression Model

# +
df = pd.read_excel(DATA_PATH)

# +
columns = ['Club', 'Season', 'GW_Played', 'Goals_Scored', 'Goals_Conceded',
           'Goal_Difference', 'Expenditure', 'Income', 'Net', 'Points',
           'Points_per_GW', 'Expenditure_per_Point_per_GW', 'Manager', 'Owner',
           'Champion', 'Relegated']

# +
target = 'Points'
features = df.columns.drop([target])
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]

# +
# NOTE(review): this fits a *classifier* on the continuous 'Points' target,
# treating each distinct point total as a class. Kept as in the original;
# confirm whether a regressor was intended.
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)

# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))

# +
# Get feature importances
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, X_train.columns)

# Plot feature importances
# %matplotlib inline
import matplotlib.pyplot as plt

n = 20
plt.figure(figsize=(10, n / 2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');

# +
# Eli5
transformers = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median')
)

# +
X_train_transformed = transformers.fit_transform(X_train)
X_val_transformed = transformers.transform(X_val)

model = RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
model.fit(X_train_transformed, y_train)

# +
# 1. Calculate permutation importances
permuter = PermutationImportance(
    model,
    scoring='accuracy',
    n_iter=5,
    random_state=42
)
permuter.fit(X_val_transformed, y_val)

# +
feature_names = X_val.columns.tolist()
pd.Series(permuter.feature_importances_, feature_names).sort_values()

# +
# 2. Display permutation importances
eli5.show_weights(
    permuter,
    top=None,                    # show permutation importances for all features
    feature_names=feature_names  # must be a list
)

# +
# Use importances for feature selection
print('Shape before removing features:', X_train.shape)

# +
minimum_importance = 0
mask = permuter.feature_importances_ > minimum_importance
features = X_train.columns[mask]
X_train = X_train[features]

# +
print('Shape after removing features:', X_train.shape)

# +
X_val = X_val[features]

pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)

# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))

# +
# Gradient Boosting

# +
# columns = ['Club', 'Season', 'GW_Played', 'Goals_Scored', 'Goals_Conceded',
#            'Goal_Difference', 'Expenditure', 'Income', 'Net', 'Points',
#            'Points_per_GW', 'Expenditure_per_Point_per_GW', 'Manager', 'Owner',
#            'Champion', 'Relegated']

# +
# target = 'Points'
# features = df.columns.drop([target])
# X_train = train[features]
# y_train = train[target]
# X_val = val[features]
# y_val = val[target]
# X_test = test[features]
# y_test = test[target]

# +

# +
from xgboost import XGBClassifier

pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    XGBClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
pipeline.fit(X_train, y_train)

# +
from sklearn.metrics import accuracy_score

y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))

# +
# XGBoost Parameters
encoder = ce.OrdinalEncoder()
X_train_encoded = encoder.fit_transform(X_train)
X_val_encoded = encoder.transform(X_val)

model = RandomForestClassifier(
    n_estimators=1000,    # <= 1000 trees, depends on early stopping
    max_depth=10,         # try deeper trees because of high cardinality categoricals
    # learning_rate=0.5,  # try higher learning rate (XGBoost-only parameter)
    n_jobs=-1
)

eval_set = [(X_train_encoded, y_train), (X_val_encoded, y_val)]

# BUG FIX: the original call commented out its keyword arguments but the
# closing parenthesis was swallowed into a comment, leaving a SyntaxError.
# The eval_set / eval_metric / early_stopping_rounds kwargs belong to
# XGBClassifier.fit, not RandomForestClassifier.fit, so they stay commented:
# model.fit(X_train_encoded, y_train,
#           eval_set=eval_set,
#           eval_metric='merror',
#           early_stopping_rounds=50)  # stop if the score hasn't improved in 50 rounds
model.fit(X_train_encoded, y_train)

# +
notebooks/Project_2_4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/cocoisland/DS-Unit-2-Sprint-4-Model-Validation/blob/master/module-4-select-important-features/LS_DS_24_Cross_Validation_AND_Feature_Selection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="3ctjQBseh0Cw" colab_type="text" # _Lambda School Data Science - Model Validation_ # # ## Example solution to the Cross-Validation assignment — plus Feature Selection! # # See also <NAME>'s example, [Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb). # + id="aHUVU_AMBKs0" colab_type="code" colab={} # We'll modify a project from Python Data Science Handbook by <NAME> # https://jakevdp.github.io/PythonDataScienceHandbook/05.06-linear-regression.html#Example:-Predicting-Bicycle-Traffic # Predicting Bicycle Traffic # As an example, let's take a look at whether we can predict the number of # bicycle trips across Seattle's Fremont Bridge based on weather, season, # and other factors. # We will join the bike data with another dataset, and try to determine the # extent to which weather and seasonal factors—temperature, precipitation, # and daylight hours—affect the volume of bicycle traffic through this corridor. # Fortunately, the NOAA makes available their daily weather station data # (I used station ID USW00024233) and we can easily use Pandas to join # the two data sources. 
import numpy as np
import pandas as pd
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler


def load():
    """Download and join the Fremont Bridge bicycle counts with NOAA weather.

    Returns a daily DataFrame indexed by date with the total crossings
    ('Total'), selected weather columns, and yesterday's total.
    """
    fremont_bridge = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
    bicycle_weather = 'https://raw.githubusercontent.com/jakevdp/PythonDataScienceHandbook/master/notebooks/data/BicycleWeather.csv'

    # infer_datetime_format is deprecated (a no-op since pandas 2.0),
    # so it is no longer passed here.
    counts = pd.read_csv(fremont_bridge, index_col='Date', parse_dates=True)
    weather = pd.read_csv(bicycle_weather, index_col='DATE', parse_dates=True)

    daily = counts.resample('d').sum()
    daily['Total'] = daily.sum(axis=1)
    daily = daily[['Total']]  # remove other columns

    weather_columns = ['PRCP', 'SNOW', 'SNWD', 'TMAX', 'TMIN', 'AWND']
    daily = daily.join(weather[weather_columns], how='inner')

    # Make a feature for yesterday's total; the first row has no
    # "yesterday", so it is dropped rather than kept as NaN.
    daily['Total_yesterday'] = daily.Total.shift(1)
    daily = daily.drop(index=daily.index[0])

    return daily


def split(daily):
    """Hold out an "out-of-time" test set: the last 100 days of data."""
    train = daily[:-100]
    test = daily[-100:]

    X_train = train.drop(columns='Total')
    y_train = train.Total
    X_test = test.drop(columns='Total')
    y_test = test.Total

    return X_train, X_test, y_train, y_test


def jake_wrangle(X):
    """Feature engineering from the Python Data Science Handbook example.

    Expects a DataFrame with a DatetimeIndex and the NOAA columns
    TMIN/TMAX/PRCP; returns a copy with the engineered columns added.
    """
    X = X.copy()

    # Patterns of use generally vary from day to day; add binary
    # columns that indicate the day of the week.
    days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    for i, day in enumerate(days):
        X[day] = (X.index.dayofweek == i).astype(float)

    # We might expect riders to behave differently on holidays;
    # add an indicator of this as well.
    from pandas.tseries.holiday import USFederalHolidayCalendar
    cal = USFederalHolidayCalendar()
    holidays = cal.holidays('2012', '2016')
    X = X.join(pd.Series(1, index=holidays, name='holiday'))
    # Plain assignment instead of fillna(..., inplace=True): in-place
    # mutation of a selected column is deprecated chained assignment
    # in modern pandas.
    X['holiday'] = X['holiday'].fillna(0)

    # Hours of daylight likely affect ridership; use the standard
    # astronomical calculation.
    def hours_of_daylight(date, axis=23.44, latitude=47.61):
        """Compute the hours of daylight for the given date"""
        # pd.datetime was removed in pandas 2.0; pd.Timestamp is the
        # supported way to construct the fixed reference date.
        days = (date - pd.Timestamp(2000, 12, 21)).days
        m = (1. - np.tan(np.radians(latitude))
             * np.tan(np.radians(axis) * np.cos(days * 2 * np.pi / 365.25)))
        return 24. * np.degrees(np.arccos(1 - np.clip(m, 0, 2))) / 180.

    X['daylight_hrs'] = list(map(hours_of_daylight, X.index))

    # Temperatures are in 1/10 deg C; convert to C, and compute the mean.
    X['TMIN'] /= 10
    X['TMAX'] /= 10
    X['Temp (C)'] = 0.5 * (X['TMIN'] + X['TMAX'])

    # Precip is in 1/10 mm; convert to inches, and flag dry days
    # (zero precipitation).
    X['PRCP'] /= 254
    X['dry day'] = (X['PRCP'] == 0).astype(int)

    # Counter of years elapsed since the first observation, to capture
    # any observed annual increase or decrease in daily crossings.
    X['annual'] = (X.index - X.index[0]).days / 365.

    return X


def wrangle(X):
    """Additional engineered features on top of jake_wrangle (from DS1 KotH)."""
    X = X.copy()
    X = X.replace(-9999, 0)  # sentinel used by NOAA for missing values
    X = jake_wrangle(X)

    # Yesterday's precipitation, with the column mean filling the first row.
    X['PRCP_yest'] = X.PRCP.shift(1).fillna(X.PRCP.mean())

    # Wind-chill-style features derived from temperature and wind speed.
    X['Windchill'] = ((((X['Temp (C)'] * (9 / 5) + 32) * .6215) + 34.74)
                      - (35.75 * (X['AWND'] ** .16))
                      + (.4275 * (X['Temp (C)'])) * (X['AWND'] ** .16))
    X['Rl_Cold'] = (((X['Temp (C)'] * (9 / 5) + 32) - X['Windchill']) - 32) * (5 / 9)
    X['TMIN_ln'] = X['TMIN'] ** 2

    # One-hot month indicators.
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    for i, month in enumerate(months):
        X[month] = (X.index.month == i + 1).astype(float)

    return X


# Download and join data into a dataframe
data = load()

# %%time
# Split data into train and test
X_train, X_test, y_train, y_test = split(data)

# Do the same wrangling to X_train and X_test
X_train = wrangle(X_train)
X_test = wrangle(X_test)

# Define an estimator and param_grid
pipe = make_pipeline(
    RobustScaler(),
    SelectKBest(f_regression),
    Ridge())

param_grid = {
    'selectkbest__k': range(1, len(X_train.columns) + 1),
    'ridge__alpha': [0.1, 1.0, 10.]
}

# Fit on the train set, with grid search cross-validation
gs = GridSearchCV(pipe, param_grid=param_grid, cv=3,
                  scoring='neg_mean_absolute_error',
                  verbose=1)
gs.fit(X_train, y_train)
validation_score = gs.best_score_
print()
print('Cross-Validation Score:', -validation_score)
print()
print('Best estimator:', gs.best_estimator_)
print()

# Predict with X_test features
y_pred = gs.predict(X_test)

# Compare predictions to y_test labels
test_score = mean_absolute_error(y_test, y_pred)
print('Test Score:', test_score)

# Or use the grid search's score method, which combines these steps
test_score = gs.score(X_test, y_test)
print('Test Score:', -test_score)

# Which features were selected?
selector = gs.best_estimator_.named_steps['selectkbest']
all_names = X_train.columns
selected_mask = selector.get_support()
selected_names = all_names[selected_mask]
unselected_names = all_names[~selected_mask]

print('Features selected:')
for name in selected_names:
    print(name)
print()
print('Features not selected:')
for name in unselected_names:
    print(name)

# ## BONUS: Recursive Feature Elimination!
# https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html

from sklearn.feature_selection import RFECV

# Recursively eliminate features, scoring each candidate subset with
# 3-fold cross-validation on a Ridge model.
X_train_scaled = RobustScaler().fit_transform(X_train)
rfe = RFECV(Ridge(alpha=1.0), scoring='neg_mean_absolute_error', cv=3)
X_train_subset = rfe.fit_transform(X_train_scaled, y_train)

all_names = X_train.columns
selected_mask = rfe.support_
selected_names = all_names[selected_mask]
unselected_names = all_names[~selected_mask]

print('Features selected:')
for name in selected_names:
    print(name)
print()
print('Features not selected:')
for name in unselected_names:
    print(name)

X_train_subset = pd.DataFrame(X_train_subset, columns=selected_names)

# NOTE(review): X_test is passed to rfe.transform without the RobustScaler
# applied, unlike X_train above. Column *selection* is unaffected, but
# confirm this is intentional before modeling on X_test_subset.
X_test_subset = rfe.transform(X_test)
X_test_subset = pd.DataFrame(X_test_subset, columns=selected_names)

print(X_train.shape, X_train_subset.shape, X_test.shape, X_test_subset.shape)

# # RFE again, but with polynomial features and interaction terms!

from sklearn.preprocessing import PolynomialFeatures

# Expand the feature space with degree-2 terms and interactions.
poly = PolynomialFeatures(degree=2)
X_train_polynomial = poly.fit_transform(X_train)
print(X_train.shape, X_train_polynomial.shape)

from sklearn.feature_selection import RFECV

scaler = RobustScaler()
X_train_scaled = scaler.fit_transform(X_train_polynomial)
# step=10 removes ten features per iteration to keep the search tractable
# over the much larger polynomial feature space.
rfe = RFECV(Ridge(alpha=1.0), scoring='neg_mean_absolute_error',
            step=10, cv=3, verbose=1)
X_train_subset = rfe.fit_transform(X_train_scaled, y_train)

# get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() is its replacement.
all_names = poly.get_feature_names_out(X_train.columns)
selected_mask = rfe.support_
selected_names = [name for name, selected in zip(all_names, selected_mask) if selected]
print(f'{rfe.n_features_} Features selected:')
for name in selected_names:
    print(name)

# Define an estimator and param_grid to tune alpha on the selected subset.
ridge = Ridge()
param_grid = {
    'alpha': [0.1, 1.0, 10.]
}

# Fit on the train set, with grid search cross-validation
gs = GridSearchCV(ridge, param_grid=param_grid, cv=3,
                  scoring='neg_mean_absolute_error', verbose=1)
gs.fit(X_train_subset, y_train)
validation_score = gs.best_score_
print()
print('Cross-Validation Score:', -validation_score)
print()
print('Best estimator:', gs.best_estimator_)
print()

# Do the same transformations to X_test (expand, scale, select),
# then use the grid search's score method with X_test_subset.
X_test_polynomial = poly.transform(X_test)
X_test_scaled = scaler.transform(X_test_polynomial)
X_test_subset = rfe.transform(X_test_scaled)

test_score = gs.score(X_test_subset, y_test)
print('Test Score:', -test_score)
module-4-select-important-features/LS_DS_24_Cross_Validation_AND_Feature_Selection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Using RNA read counts
# ### Using EUKulele to assign protistan taxonomy and generate relative community composition based on RNA read counts

import pandas as pd

# When working with an environmental transcriptomic data set, we often want
# to assign taxonomy using reference databases populated with cultured
# isolates. It can be useful to have a single assembly which all individual
# transcripts within samples can relate (map) back to in order to perform
# gene expression comparisons across samples. In this example we ran EUKulele
# with a single metatranscriptomic assembly that was created by combining 40
# individual samples using the metatranscriptomic assembly pipeline
# "eukrhythmic" (https://github.com/AlexanderLabWHOI/eukrhythmic). We use the
# EUKulele output, combined with a separately compiled read counts table
# produced with salmon (https://salmon.readthedocs.io/en/latest/salmon.html),
# to visualize the breakdown of taxonomic groups within each sample.

# EUKulele was performed using the MMETSP database and diamond as the
# alignment choice (default), as below:
#
# `EUKulele -s /output/transdecoder_mega_merge --protein_extension .pep -m mets`

# First, we load in the annotated contig table located in the EUKulele
# output directory:
#
# `output/taxonomy_estimated/sample-estimated-taxonomy.out`

# Per-contig taxonomy assignments produced by EUKulele (tab-delimited).
taxa=pd.read_table('merged_merged-estimated-taxonomy.out')
taxa.head()

# This file shows the result of the alignment, with each contig matching an
# annotation in the database listed alongside the level of classification
# achieved (classification_level & classification), full classification
# description as presented in the database (full_classification), the maximum
# percentage identity as calculated by the aligner (max_pid), and whether
# there were discrepancies assigning the taxonomic cutoff (ambiguous).
# Descriptions of the EUKulele output are provided here:
# https://eukulele.readthedocs.io/en/latest/

# Next we want to separate out the classification levels in the
# "full_classification" column so that we can collapse counts based on the
# taxonomic level of interest. Be aware that the classification levels are
# specific to the taxonomic database used in the alignment, and the original
# references should be consulted to determine the appropriate levels:

# Split the semicolon-delimited lineage string into one column per rank,
# keeping the contig name alongside.
df = pd.concat([taxa['transcript_name'], taxa['full_classification'].str.split('; ', expand=True)], axis=1)
#Label columns in data frame
df.columns = ['Name', 'Supergroup','Division','Class','Order','Family','Genus','Species']
df.head()

# Next we create and read in a counts table created using standard salmon
# output, although a similar table could be generated using any read aligner.
# Counts from individual samples aligning to the fasta assembly are joined
# into one data frame. There are many ways to achieve this, and the approach
# below was adapted based on solutions posted on StackOverflow
# (https://stackoverflow.com/questions/44428429/replace-column-name-with-file-name-shell-script)
# and StackExchange
# (https://unix.stackexchange.com/questions/467523/awk-for-merging-multiple-files-with-common-column
# ).

#Move over to the directory containing salmon output
# %cd /vortexfs1/omics/alexander/ncohen/BATS2019-clio-metaT/EUKulele_NB/output/taxonomy_estimation/salmon_indiv_to_mega

# ! for i in *_quant/quant.sf; do awk -F, -v OFS=, 'NR==1{split(FILENAME,a,"_quant");$2= a[1] ""}1' ${i} | awk '{gsub(/\NumReads\,/,"",$5)}1'> ${i}_cleaned; done

# ! awk '{samples[$1] = samples[$1] OFS $NF}; END {print "Name", samples["Name"]; delete samples["Name"]; for (name in samples) print name, samples[name]}' */quant.sf_cleaned > table.tab

# Merged per-sample counts table produced by the awk commands above
# (space-separated, one row per contig).
counts=pd.read_table('table.tab', sep = " ")

# %cd /vortexfs1/omics/alexander/ncohen/BATS2019-clio-metaT/EUKulele_NB/output/taxonomy_estimation

# Join the taxonomy table (df) with the counts on the contig name.
combined = df.join(counts.set_index('Name'), on='Name')
combined.head()

# As a side note, it is also helpful during these environmental
# metatranscriptomic analyses to combine functional annotations alongside
# taxonomic identifications and read counts into one dataframe for
# downstream visualization, sharing, and exploration of the data. To do
# this, we can read in kegg annotations obtained by aligning our assembly
# against the KEGG database. [This is also performed within the eukrhythmic
# metatranscriptomic assembly pipeline using arKEGGio
# (https://github.com/AlexanderLabWHOI/eukrhythmic)]:

# %cd /vortexfs1/omics/alexander/data/NB_subsampled_11Sept/kegg
kegg = pd.read_csv('cat.kegg.csv', sep ='\t')
# %cd /vortexfs1/omics/alexander/ncohen/BATS2019-clio-metaT/EUKulele_NB/output/taxonomy_estimation

#Match columns containing contig IDs. In the counts dataframe, "Name" contains contig IDs. In the KEGG output, it is "query_id".
merged = kegg.join(combined.set_index('Name'), on='query_id')
#Check this returns the correct counts/annotations
merged.head()
#merged.to_csv('counts_taxa_kegg.csv') #Optional export of table to .csv file

# Next we subset the counts/taxonomy dataframe to retain only the sample
# counts columns and the classification level of interest (in this case,
# Phylum):

#Subset out taxonomic level of interest and sample ID columns
# NOTE(review): the SRR... bounds assume the sample columns are contiguous
# in this label range -- verify against the counts table layout.
subset = combined.loc[:, 'SRR1810204':'SRR1945046']
subset["Supergroup"]=combined["Supergroup"]
subset.head()

# We import visualization libraries and a color palette
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.set_palette(sns.color_palette("husl", 10))

# Next we collapse (sum) rows with the same taxonomic annotation. This table
# will be valuable for downstream applications such as reporting relative
# community abundance percentages:

#Group counts by Phylum in each sample
x = subset.groupby(['Supergroup']).sum()
#Save grouped dataframe to .csv file (optional)
#x.to_csv('counts_phylum.csv')

# Lastly, we plot the results as a stacked barplot. This shows directly
# comparable community composition across samples using read counts. We
# conclude that we have a high relative abundance of Stramenopiles in these
# samples based on the transcript pool.

plot = x.T #Transpose dataframe
plot.head()
plot.plot.bar(stacked=True, legend=True).legend(loc=(1, 0))
color
docs/source/EUKulele_read_breakdown.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Challenge 5
#
# In this challenge we practice dimensionality reduction with PCA and
# feature selection with RFE, using the Fifa 2019 dataset
# (https://www.kaggle.com/karangadiya/fifa19), originally containing 89
# variables for more than 18 thousand players from the game FIFA 2019.
#
# > Note: please do not change the names of the answer functions.

# ## General setup

# +
from math import sqrt

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
import statsmodels.api as sm
import statsmodels.stats as st
from sklearn.decomposition import PCA

# for question 4
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression

from loguru import logger
# +
# Some matplotlib configuration.

# #%matplotlib inline

from IPython.core.pylabtools import figsize

figsize(12, 8)

sns.set()
# -

fifa = pd.read_csv("fifa.csv")

# +
# Identifier, formatting and position-rating columns that are not useful
# for the PCA / RFE exercises below.
columns_to_drop = ["Unnamed: 0", "ID", "Name", "Photo", "Nationality", "Flag",
                   "Club", "Club Logo", "Value", "Wage", "Special", "Preferred Foot",
                   "International Reputation", "Weak Foot", "Skill Moves", "Work Rate",
                   "Body Type", "Real Face", "Position", "Jersey Number", "Joined",
                   "Loaned From", "Contract Valid Until", "Height", "Weight", "LS",
                   "ST", "RS", "LW", "LF", "CF", "RF", "RW", "LAM", "CAM", "RAM", "LM",
                   "LCM", "CM", "RCM", "RM", "LWB", "LDM", "CDM", "RDM", "RWB", "LB",
                   "LCB", "CB", "RCB", "RB", "Release Clause"]

try:
    fifa.drop(columns_to_drop, axis=1, inplace=True)
except KeyError:
    # Plain string: an f-string prefix with no placeholders is pointless
    # (flake8 F541) and was removed.
    logger.warning("Columns already dropped")
# -

# ## Start your analysis from here

# Your analysis begins here.
fifa.head()

fifa.shape

fifa.describe()


# ## Question 1
#
# What fraction of the variance is explained by the first principal
# component of `fifa`? Answer with a single float (between 0 and 1)
# rounded to three decimal places.

def q1():
    """Fraction of variance explained by the first principal component."""
    fitted = PCA(n_components=2).fit(fifa.dropna())
    first_component_ratio = fitted.explained_variance_ratio_[0]
    return float(first_component_ratio.round(3))


q1()


# ## Question 2
#
# How many principal components do we need to explain 95% of the total
# variance? Answer with a single integer scalar.

def q2():
    """Smallest number of components reaching 95% cumulative variance."""
    fitted = PCA().fit(fifa.dropna())
    cumulative = np.cumsum(fitted.explained_variance_ratio_)
    # argmax returns the first index where the threshold is met;
    # +1 turns the 0-based index into a component count.
    return int(np.argmax(cumulative >= 0.95) + 1)


q2()


# ## Question 3
#
# What are the coordinates (first and second principal components) of the
# point `x` below? The vector is already centered -- be careful NOT to
# center it again (e.g. by calling `PCA.transform()` on it). Answer as a
# tuple of floats rounded to three decimal places.

x = [0.87747123, -1.24990363, -1.3191255, -36.7341814,
     -35.55091139, -37.29814417, -28.68671182, -30.90902583,
     -42.37100061, -32.17082438, -28.86315326, -22.71193348,
     -38.36945867, -20.61407566, -22.72696734, -25.50360703,
     2.16339005, -27.96657305, -33.46004736, -5.08943224,
     -30.21994603, 3.68803348, -36.10997302, -30.86899058,
     -22.69827634, -37.95847789, -22.40090313, -30.54859849,
     -26.64827358, -19.28162344, -34.69783578, -34.6614351,
     48.38377664, 47.60840355, 45.76793876, 44.61110193, 49.28911284]


def q3():
    """Project the pre-centered vector `x` onto the first two components."""
    fitted = PCA(n_components=2).fit(fifa.dropna())
    projection = fitted.components_.dot(x)
    return tuple(projection.round(3))


q3()


# ## Question 4
#
# Run RFE with a linear-regression estimator to select five variables,
# eliminating them one at a time. Which variables are selected? Answer as
# a list of variable names.
y = fifa.dropna()['Overall']
y.shape

X = fifa.drop(columns='Overall').dropna()
X.shape


def q4():
    """Names of the five variables kept by RFE with a LinearRegression estimator."""
    features = fifa.drop(columns='Overall').dropna()
    target = fifa.dropna()['Overall']
    selector = RFE(LinearRegression(), n_features_to_select=5).fit(features, target)
    # support_ is a boolean mask over the feature columns, True for the
    # columns RFE kept; preserve the original column order in the answer.
    return [name for name, kept in zip(features.columns, selector.support_) if kept]


q4()
main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Question 1
#
# Create a function that takes three arguments a, b, c and returns the sum
# of the numbers in the range a..b (inclusive) that are evenly divisible
# by c. Examples:
#
#   evenly_divisible(1, 10, 20) -> 0   (no number in 1..10 divides by 20)
#   evenly_divisible(1, 10, 2)  -> 30  (2 + 4 + 6 + 8 + 10)
#   evenly_divisible(1, 10, 3)  -> 18  (3 + 6 + 9)

# +
def evenly_divisible(a, b, c):
    """Return the sum of integers in [a, b] that are evenly divisible by c."""
    # `total` instead of `sum`: never shadow the builtin sum().
    total = 0
    for i in range(a, b + 1):
        if i % c == 0:
            total += i
    return total

evenly_divisible(1, 10, 2)
# -

# ### Question 2
#
# Create a function that returns True if a given inequality expression is
# correct and False otherwise. Examples:
#
#   correct_signs("3 < 7 < 11")       -> True
#   correct_signs("13 > 44 > 33 > 1") -> False
#   correct_signs("1 < 2 < 6 < 9 > 3") -> True

# +
def correct_signs(expression):
    """Evaluate a chained comparison such as "3 < 7 < 11" and return its truth.

    NOTE(security): eval() executes arbitrary Python code -- only ever call
    this with trusted input.
    """
    # Parameter renamed from `str` to avoid shadowing the builtin.
    return eval(expression)

print(correct_signs("3 < 7 < 11"))
print(correct_signs("13 > 44 > 33 > 1"))
print(correct_signs("1 < 2 < 6 < 9 > 3"))
# -

# ### Question 3
#
# Create a function that replaces all the vowels in a string with a
# specified character. Examples:
#
#   replace_vowels("the aardvark", "#") -> "th# ##rdv#rk"
#   replace_vowels("minnie mouse", "?") -> "m?nn?? m??s?"
#   replace_vowels("shakespeare", "*")  -> "sh*k*sp**r*"

# +
# Defined before the function that uses it (the original relied on this
# global being defined *after* the def, which only worked because lookup
# happens at call time).
vowels = ['a', 'e', 'i', 'o', 'u']

def replace_vowels(text, char):
    """Return `text` with every lowercase vowel replaced by `char`."""
    # Iterate the fixed vowel list rather than every character of the
    # input; parameter renamed from `str` to avoid shadowing the builtin.
    for vowel in vowels:
        text = text.replace(vowel, char)
    return text

print(replace_vowels("the aardvark", "#"))
print(replace_vowels("minnie mouse", "?"))
print(replace_vowels("shakespeare", "*"))
# -

# ### Question 4
#
# Write a function that calculates the factorial of a number recursively.
# Examples:
#
#   factorial(5) -> 120
#   factorial(3) -> 6
#   factorial(1) -> 1
#   factorial(0) -> 1

# +
def factorial(n):
    """Return n! computed recursively; both 0! and 1! are defined as 1."""
    if n in (0, 1):
        return 1
    return n * factorial(n - 1)

print(factorial(5))
print(factorial(3))
print(factorial(1))
print(factorial(0))
# -

# ### Question 5
#
# Hamming distance is the number of characters that differ between two
# strings. To illustrate:
#
#   String1: "abcbba"
#   String2: "abcbda"
#
# Hamming Distance: 1 -- "b" vs. "d" is the only difference. Create a
# function that computes the hamming distance between two strings.
# Examples:
#
#   hamming_distance("abcde", "bcdef")   -> 5
#   hamming_distance("abcde", "abcde")   -> 0
#   hamming_distance("strong", "strung") -> 1

# +
def hamming_distance(str1, str2):
    """Count the positions at which str1 and str2 hold different characters.

    Iterates over the indices of str1, like the original implementation,
    so equal-length inputs are expected.
    """
    return sum(1 for pos in range(len(str1)) if str1[pos] != str2[pos])

print(hamming_distance("abcde", "bcdef"))
print(hamming_distance("abcde", "abcde"))
print(hamming_distance("strong", "strung"))
# -
Programming_Assingment17.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# YouTube exaggerated Bangla title categorization: TF-IDF text features
# evaluated with eight scikit-learn classifiers on the same 80/20 split.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import warnings
import unicodedata

from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn import metrics
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB

# pandas removed `error_bad_lines` in 2.0; on_bad_lines='skip' is the
# supported way to drop malformed rows.
df = pd.read_csv('/content/drive/MyDrive/youtube project/youtubeRD-csv.csv',
                 on_bad_lines='skip')
df

# Quick dataset sanity checks: missing values and label distribution.
df.isnull().sum()
df['Type'].value_counts()
df['Type'].unique()

# 80/20 train/test split on the raw title text.
X = df['Title']
y = df['Type']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20,
                                                    random_state=42)

# Bag-of-words counts, then TF-IDF weighting, shown step by step.
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(X_train)
X_train_counts.shape

tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
X_train_tfidf.shape

# TfidfVectorizer combines both steps above into one estimator.
vectorizer = TfidfVectorizer()
X_train_tfidf = vectorizer.fit_transform(X_train)
X_train_tfidf.shape

y_test.shape, X_test.shape, X_train.shape, y_train.shape


def fit_and_report(estimator):
    """Fit a TF-IDF + `estimator` pipeline on the training split, print its
    confusion matrix, classification report and accuracy on the test split,
    and return the fitted pipeline."""
    text_clf = Pipeline([('tfidf', TfidfVectorizer()),
                         ('clf', estimator),
                         ])
    text_clf.fit(X_train, y_train)
    predictions = text_clf.predict(X_test)
    cf_matrix = metrics.confusion_matrix(y_test, predictions)
    print(cf_matrix)
    plt.figure(figsize=(10, 6))
    sns.heatmap(cf_matrix, annot=True)
    print(metrics.classification_report(y_test, predictions))
    print(metrics.accuracy_score(y_test, predictions))
    return text_clf


# The original notebook repeated the fit/predict/report cell once per model,
# and additionally fit each bare classifier on X_train_tfidf without ever
# using that fitted object -- the redundant second training is dropped here.
for estimator in (LinearSVC(),              # linear SVM
                  KNeighborsClassifier(),   # KNN
                  SVC(),                    # kernel SVM
                  LogisticRegression(),     # logistic regression
                  RandomForestClassifier(), # random forest
                  DecisionTreeClassifier(), # decision tree
                  SGDClassifier(),          # SGD
                  MultinomialNB()):         # naive Bayes
    fit_and_report(estimator)
YouTube-Exaggerated-Bangla-Titles-Categorization(8 ML Models).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
import time

# Record wall-clock start so the total notebook runtime can be reported at the end.
notebook_start = time.time()

# + [markdown]
# # CIFAR-10 Dataset
#
# It is one of the most popular datasets for machine learning research. It
# contains 60,000, 32x32 colour images in 10 different classes:
#
# - 0: airplane
# - 1: automobile
# - 2: bird
# - 3: cat
# - 4: deer
# - 5: dog
# - 6: frog
# - 7: horse
# - 8: ship
# - 9: truck

# +
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.datasets import cifar10
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras import layers
from keras.models import Model
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.models import load_model
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions

# +
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# +
x_train.shape

# +
x_test.shape

# + [markdown]
# Okay so we have 50,000 training images and 10,000 test images.

# +
# Preview the first nine training images in a 3x3 grid.
for idx in range(9):
    plt.subplot(330 + 1 + idx)   # 3 rows x 3 cols, 1-based position
    plt.imshow(x_train[idx])     # raw pixel data
plt.show()

# + [markdown]
# Okay, so very small, pixelated images. But enough for the computer to learn
# the classes! Start small with a simple convolutional neural net, then later
# try augmentation and transfer learning.

# +
y_train

# + [markdown]
# We will need to perform one-hot encoding on this array using to_categorical().

# + [markdown]
# ## Building a Test Harness ##
#
# Modular code to repeat this process and test various features: loading,
# preprocessing, defining the model, evaluating the model, visualizing results.
# + id="OcSjyR5McuWB" # Let's create a function to do load this all in one def load_dataset(): (x_train, y_train), (x_test, y_test) = cifar10.load_data() # keras utility function y_train = tf.keras.utils.to_categorical(y_train) y_test = tf.keras.utils.to_categorical(y_test) return x_train, y_train, x_test, y_test # + id="mY7wm4-ydSEg" # Preprocessing function def data_preprocessing(x_train, x_test): # normalize pixel range to 0-1 x_train = x_train / 255.0 x_test = x_test / 255.0 return x_train, x_test # + id="U5Gq2118grYg" # Function to define the CNN - leaving this blank as a template to be edited throughout def define_model(): model = tf.keras.Sequential() # Convolutional layers return model # + id="EIjwQaM7hh9E" # Visualization function def visualize_model(history): # plot loss plt.subplot(211) plt.title('Cross Entropy Loss') plt.plot(history.history['loss'], color='blue', label='train') plt.plot(history.history['val_loss'], color='orange', label='test') #plot accuracy plt.subplot(212) plt.title('Classification Accuracy') plt.plot(history.history['accuracy'], color='blue', label='train') plt.plot(history.history['val_accuracy'], color='orange', label='test') # display plot plt.show() # save plot to file filename = sys.argv[0].split('/')[-1] plt.savefig(filename + '_plot.png') plt.close() # + id="cIpCW700i2cW" # Put it all together into a test harness # set default epochs and batches so this can be adjusted later def run_test_harness(epochs=100,batches=64): # Time the run start = time.time() # Load dataset x_train, y_train, x_test, y_test = load_dataset() # preprocess data x_train, x_test = data_preprocessing(x_train, x_test) # define model model = define_model() # fit model history = model.fit(x_train, y_train, epochs=epochs, batch_size=batches, validation_data=(x_test,y_test),verbose=0, workers=-1, use_multiprocessing=True) # evaluate model _, acc = model.evaluate(x_test,y_test,verbose=0) # stop the timer end = time.time() seconds = end - start # Output 
print('> Test Accuracy: %.3f' % (acc * 100)) print(f'> Test Harness Run Time: {seconds} seconds ({seconds / 60} minutes)') # learning curves visualize_model(history) # + [markdown] id="CJO3OFhcjnc8" # Okay perfect, we now have everything set up in a fashion that can be easily ran and modified to compare various approaches. # # Starting with the general principals of the VGG models stacking convolutional layers using 3x3 filters and padding to ensure feature maps match inputs. Let's try a 3-block VGG style # + id="4T02vkZ2lkBW" loss = 'categorical_crossentropy' # + id="6-3Y4d-GkFX8" # Function to define the CNN def define_model(): model = tf.keras.Sequential() # Convolutional layers with relu activation and he_uniform initialization # block 1 model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3))) model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) # block 2 model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) # block 3 model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) # Flatten model.add(Flatten()) # Hidden layers model.add(Dense(128, activation='relu', kernel_initializer='he_uniform')) model.add(Dense(10, activation='softmax')) #compile model using Stochiastic Gradient Descent opt = SGD(lr=0.001, momentum=0.9) model.compile(optimizer=opt, loss=loss, metrics=['accuracy']) return model # + id="fuY50AcgXhMs" colab={"base_uri": "https://localhost:8080/", "height": 315} executionInfo={"status": "ok", "timestamp": 1620833075489, "user_tz": 300, "elapsed": 375179, "user": 
{"displayName": "<NAME>", "photoUrl": "", "userId": "04684120858633082846"}} outputId="6c73da58-5eb5-4a50-980a-09a870327e8f" # let's see how this one goes! run_test_harness() # + [markdown] id="nEApOjxKsRHP" # Okay, ~75% for a baseline is okay, but looking at our learning curves we can tell there is room to improve. The Loss functions diverging and the overfitting can be improved. Let's see how we can improve this with some basic techniques. # # ## Regularization ## # # We will start with dropout regularization. Briefly, what this does is randomly drops individual neurons within layers to prevent the model from overfitting. # + id="ntgsXs0el1FL" # Let's add some dropout regularization to the model # 0.2 which means drop 20% of the neurons at random def define_model(): model = tf.keras.Sequential() # Convolutional layers with relu activation and he_uniform initialization # block 1 model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3))) model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) # Add dropout model.add(Dropout(0.2)) # block 2 model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) # Dropout model.add(Dropout(0.2)) # block 3 model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) # Dropout model.add(Dropout(0.2)) # Flatten model.add(Flatten()) # Hidden layers model.add(Dense(128, activation='relu', kernel_initializer='he_uniform')) # Dropout model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax')) #compile model using Stochiastic Gradient Descent opt = SGD(lr=0.001, 
momentum=0.9) model.compile(optimizer=opt, loss=loss, metrics=['accuracy']) return model # + id="2RfTEQwItgF6" colab={"base_uri": "https://localhost:8080/", "height": 315} executionInfo={"status": "ok", "timestamp": 1620833458829, "user_tz": 300, "elapsed": 758510, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04684120858633082846"}} outputId="b6b4c601-af24-4be1-d123-ee940ecda2bd" # Run the test again with updated model run_test_harness() # + [markdown] id="ikFrCbIu1iQu" # Excellent! The curves look much improved, and the accuracy jumped by nearly 10% points! Let's take a look at some other methods of optimizing this model. # # ## Weight Decay ## # # This is a common regularization technique which will effectively penalize the model in relation to the weights. # + id="oNanQq91tjUq" # Update baseline model with L2 weight regularization using the kernel regularizer def define_model(): model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', kernel_regularizer=l2(0.001), input_shape=(32, 32, 3))) model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', kernel_regularizer=l2(0.001))) model.add(MaxPooling2D((2, 2))) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', kernel_regularizer=l2(0.001))) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', kernel_regularizer=l2(0.001))) model.add(MaxPooling2D((2, 2))) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', kernel_regularizer=l2(0.001))) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', kernel_regularizer=l2(0.001))) model.add(MaxPooling2D((2, 2))) model.add(Flatten()) model.add(Dense(128, activation='relu', kernel_initializer='he_uniform', kernel_regularizer=l2(0.001))) model.add(Dense(10, activation='softmax')) # 
compile model opt = SGD(lr=0.001, momentum=0.9) model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy']) return model # + id="2GRzkfJP2v8G" colab={"base_uri": "https://localhost:8080/", "height": 315} executionInfo={"status": "ok", "timestamp": 1620833838703, "user_tz": 300, "elapsed": 1138375, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04684120858633082846"}} outputId="fc3c7e5a-0938-47cc-9f6b-4310b1c98d0b" run_test_harness() # + [markdown] id="gv0SBptl2zRr" # Not great...curves don't look much better than the baseline model either. Let's toss this idea. # # ## Data Augmentation ## # # One of the challenges of computer vision is acquiring a robust training dataset for you problem space. One way this can be addressed is by augmenting our dataset with a variety of artificial images generated from the training dataset by performing rotations, flipping along axis, slightly shifting an image, zooming or cropping, etc. to take a small dataset of a class and artificially expand it. 
# Let's try it now.

# +
def run_augmented_test_harness(epochs=100, batches=64, save_name='default'):
    """Train with on-the-fly data augmentation, save the model, report results.

    Args:
        epochs: number of training epochs.
        batches: mini-batch size used by the augmentation generator.
        save_name: prefix for the saved '<save_name>_final_model.h5' file.

    Bug fix: steps_per_epoch was computed as int(x_train.shape[0] / 64)
    regardless of the `batches` argument; it now honours the requested
    batch size (identical behavior at the default batches=64).
    """
    # Time the run
    start = time.time()
    # Load and preprocess the dataset
    x_train, y_train, x_test, y_test = load_dataset()
    x_train, x_test = data_preprocessing(x_train, x_test)
    # Build whichever define_model() is currently in scope
    model = define_model()
    # Augmentation: small shifts, horizontal flips, rotations and shears.
    datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,
                                 horizontal_flip=True, rotation_range=20,
                                 shear_range=0.1)
    # Prepare iterator
    train_iter = datagen.flow(x_train, y_train, batch_size=batches)
    # One pass over the training set per epoch (was hard-coded to /64).
    steps = int(x_train.shape[0] / batches)
    # fit model
    history = model.fit(train_iter, steps_per_epoch=steps, epochs=epochs,
                        validation_data=(x_test, y_test), verbose=0,
                        workers=-1, use_multiprocessing=True)
    # save model so it can be reused without retraining
    model.save(f'{save_name}_final_model.h5')
    # evaluate model
    _, acc = model.evaluate(x_test, y_test, verbose=0)
    # stop the timer
    end = time.time()
    seconds = end - start
    # Output
    print('> Test Accuracy: %.3f' % (acc * 100))
    print(f'> Test Harness Run Time: {seconds} seconds ({seconds / 60} minutes)')
    # learning curves
    visualize_model(history)


# +
run_augmented_test_harness(save_name='augmentation')

# + [markdown]
# Great! The curves look much improved and the score jumped up to 82.4%. It
# did come at a significant increase in runtime, however, due to the
# significantly increased training data from the augmentation.
#
# Although weight decay did not help, it looks like a combination of dropout
# and data augmentation should improve the model further.
# # ## Continued Improvements - Batch Normalization ##
#
# With curves looking this good, accuracy can improve by training longer
# (more epochs) and letting <b>Batch Normalization</b> stabilize the
# learning. Additionally, increase dropout gradually through the layers.
#
# Put it all together: increased dropout, batch normalization, data
# augmentation, and longer training.
#
# **Performance recap -**
# - Baseline - 74%
# - Dropout - 83%
# - Weight Decay - 74%
# - Augmentation - 83%


# +
def define_model():
    """3-block VGG-style CNN with batch normalization and increasing dropout."""
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.3))
    model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    # compile model ('lr' kwarg is deprecated; use learning_rate)
    opt = SGD(learning_rate=0.001, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model


# +
run_augmented_test_harness(epochs=400, save_name='sgd')

# + [markdown]
# The model has really improved, at a big runtime cost which always needs to
# be weighed. Performance recap (results may vary on rerun due to SGD):
#
# - Baseline: 74%
# - Dropout: 83%
# - ~~Weight Decay: 74%~~
# - Data Augmentation: 83%
# - Dropout + Batch Normalization + Data Augmentation + Longer Training: 88%
#
# It may be worth exploring the Adam optimizer, which uses an adaptive
# learning rate, but for now the model is saved (inside the test harness) so
# it can be used in other notebooks without retraining.


# +
def run_example(opt, filename):
    """Classify one image file with a saved model and print the class index.

    Args:
        opt: prefix of the saved model file ('<opt>_final_model.h5').
        filename: path to the image to classify.
    """
    # load image and convert to a single normalized sample with 3 channels
    img = image.load_img(filename, target_size=(32, 32))
    img = image.img_to_array(img)
    img = img.reshape(1, 32, 32, 3)
    img = img / 255.
    # Load model
    model = load_model(f'{opt}_final_model.h5')
    # Sequential.predict_classes() was removed in TensorFlow 2.6; take the
    # argmax of the softmax output instead -- same result.
    result = np.argmax(model.predict(img, batch_size=64), axis=-1)
    print(result)


# +
run_example('sgd','/content/drive/MyDrive/cifar10/test_photos/airplane1.jpeg')

# + [markdown]
# 0 - Airplane
#
# We can now import a new color photo of any size and predict the class with
# the saved model. The output is the class index and needs to be mapped back.
# Recall:
#
# - 0: airplane
# - 1: automobile
# - 2: bird
# - 3: cat
# - 4: deer
# - 5: dog
# - 6: frog
# - 7: horse
# - 8: ship
# - 9: truck
#
# We successfully identified the airplane. Next, a Tesla Model Y.

# +
run_example('sgd','/content/drive/MyDrive/cifar10/test_photos/car1.jpg')

# + [markdown]
# 1: Automobile - perfect!
#
# Let's see how the Adam optimizer performs.
# + id="jHeZ8xr323UV" # Add batch normalization and dropout to the model def define_model(): model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3))) model.add(BatchNormalization()) model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D((2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(BatchNormalization()) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D((2, 2))) model.add(Dropout(0.3)) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(BatchNormalization()) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D((2, 2))) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(128, activation='relu', kernel_initializer='he_uniform')) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) # compile model model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) return model # + id="KHDDur4t28-b" colab={"base_uri": "https://localhost:8080/", "height": 315} executionInfo={"status": "ok", "timestamp": 1620854753023, "user_tz": 300, "elapsed": 22052629, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04684120858633082846"}} outputId="f12fbe06-5c7a-4ebb-a175-8d6c53b29c8b" run_augmented_test_harness(epochs=400, save_name='adam') # + [markdown] id="DJCyPJttcsNN" # Great! The Adam optimizer improved even further! The Adam optimizer has become very popular today due to it's adaptive learning rate and typically higher performance. 
# # Review Performance Evolution: # - 75% - Baseline # - 84% - Dropout # - ~~76% - Weight Decay~~ # - 82% - Data Augmentation # - 88% - SGD: Dropout + Batch Normalization + Data Augmentation + Longer Training # - 90% - Adam: Dropout + Batch Normalization + Data Augmentation + Longer Training # # All told the notebook takes about 6 hours to run. # # Improving a baseline model from 75% to 90%, not bad for a days work! # + id="-4QH4CIA4Z-N" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620854753026, "user_tz": 300, "elapsed": 22052628, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04684120858633082846"}} outputId="c3da7738-1aa5-4a8e-fd6a-fc4052ad6cdc" notebook_end = time.time() notebook_runtime_seconds = notebook_end - notebook_start notebook_runtime_minutes = notebook_runtime_seconds / 60 notebook_runtime_hours = notebook_runtime_minutes / 60 print(f'> Notebook Total Run Time: {notebook_runtime_minutes} minutes ({notebook_runtime_hours} hours')
cifar10/colab/cifar10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp data.preprocessing
# -

# # Data preprocessing
#
# > Functions used to preprocess time series (both X and y).

#export
from tsai.imports import *
from tsai.utils import *
from tsai.data.external import *
from tsai.data.core import *

# Small UCR dataset used as a fixture for the tests below.
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, return_split=False)
tfms = [None, Categorize()]
dsets = TSDatasets(X, y, tfms=tfms, splits=splits)


#export
class ToNumpyCategory(Transform):
    "Categorize a numpy batch"
    order = 90

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def encodes(self, o: np.ndarray):
        # Fit a Categorize on the incoming batch, remember its vocab, and
        # return the integer-encoded batch as a numpy array.
        self.type = type(o)
        self.cat = Categorize()
        self.cat.setup(o)
        self.vocab = self.cat.vocab
        return np.asarray(stack([self.cat(oi) for oi in o]))

    def decodes(self, o: (np.ndarray, torch.Tensor)):
        # Map integer codes back to the original category labels.
        return stack([self.cat.decode(oi) for oi in o])


t = ToNumpyCategory()
y_cat = t(y)
y_cat[:10]

# Round-trip: decoding (from tensor or array) recovers the original labels.
test_eq(t.decode(tensor(y_cat)), y)
test_eq(t.decode(np.array(y_cat)), y)


#export
class OneHot(Transform):
    "One-hot encode/ decode a batch"
    order = 90

    def __init__(self, n_classes=None, **kwargs):
        # n_classes may be left None, in which case it is inferred from the
        # first batch that is encoded.
        self.n_classes = n_classes
        super().__init__(**kwargs)

    def encodes(self, o: torch.Tensor):
        if not self.n_classes:
            self.n_classes = len(np.unique(o))
        # Row-select from an identity matrix: classic one-hot trick.
        return torch.eye(self.n_classes)[o]

    def encodes(self, o: np.ndarray):
        # Non-integer labels are first categorized to integer codes.
        o = ToNumpyCategory()(o)
        if not self.n_classes:
            self.n_classes = len(np.unique(o))
        return np.eye(self.n_classes)[o]

    def decodes(self, o: torch.Tensor):
        return torch.argmax(o, dim=-1)

    def decodes(self, o: np.ndarray):
        return np.argmax(o, axis=-1)


oh_encoder = OneHot()
y_cat = ToNumpyCategory()(y)
oht = oh_encoder(y_cat)
oht[:10]

# +
n_classes = 10
n_samples = 100

t = torch.randint(0, n_classes, (n_samples,))
oh_encoder = OneHot()
oht = oh_encoder(t)
test_eq(oht.shape, (n_samples, n_classes))
test_eq(torch.argmax(oht, dim=-1), t)
test_eq(oh_encoder.decode(oht), t)

# +
n_classes = 10
n_samples = 100

a = np.random.randint(0, n_classes, (n_samples,))
oh_encoder = OneHot()
oha = oh_encoder(a)
test_eq(oha.shape, (n_samples, n_classes))
test_eq(np.argmax(oha, axis=-1), a)
test_eq(oh_encoder.decode(oha), a)
# -


#export
class Nan2Value(Transform):
    "Replaces any nan values by a predefined value or median"
    order = 90

    def __init__(self, value=0, median=False, by_sample_and_var=True):
        store_attr()

    def encodes(self, o:TSTensor):
        mask = torch.isnan(o)
        if mask.any():
            if self.median:
                if self.by_sample_and_var:
                    # Per-sample, per-variable median along the time axis,
                    # broadcast back to the full sequence length.
                    median = torch.nanmedian(o, dim=2, keepdim=True)[0].repeat(1, 1, o.shape[-1])
                    o[mask] = median[mask]
                else:
                    o = torch.nan_to_num(o, torch.nanmedian(o))
            # Any remaining NaNs fall back to the fixed value.
            o = torch.nan_to_num(o, self.value)
        return o


# Exercise all three Nan2Value configurations on a batch seeded with NaNs.
o = TSTensor(torch.randn(16, 10, 100))
o[0,0] = float('nan')
o[o > .9] = float('nan')
o[[0,1,5,8,14,15], :, -20:] = float('nan')

nan_vals1 = torch.isnan(o).sum()
o2 = Pipeline(Nan2Value(), split_idx=0)(o.clone())
o3 = Pipeline(Nan2Value(median=True, by_sample_and_var=True), split_idx=0)(o.clone())
o4 = Pipeline(Nan2Value(median=True, by_sample_and_var=False), split_idx=0)(o.clone())
nan_vals2 = torch.isnan(o2).sum()
nan_vals3 = torch.isnan(o3).sum()
nan_vals4 = torch.isnan(o4).sum()
test_ne(nan_vals1, 0)
test_eq(nan_vals2, 0)
test_eq(nan_vals3, 0)
test_eq(nan_vals4, 0)


# +
# export
class TSStandardize(Transform):
    """Standardizes batches of type `TSTensor`.

    Args:
        mean, std: optional precomputed statistics (torch tensors). If both
            are given they are used as-is and the remaining arguments are
            ignored; if either is None, statistics are estimated from data.
        by_sample: if True, mean/std are computed per individual sample
            (recomputed for every batch in `encodes`); otherwise they are
            computed once over the training data in `setups`.
        by_var: False for a single mean/std across variables; True for one
            per variable; a list of ints (e.g. [0,1,3]) for separate stats
            per listed variable (unlisted variables are left unstandardized);
            a list containing sublists (e.g. [0,[1,3]]) shares one mean/std
            across each sublist's variables.
        by_step: if True, compute separate statistics per time step.
        eps: lower bound for std, to avoid division by zero.
        use_single_batch: if True, estimate mean/std from a single training
            batch; otherwise from the entire training set.
        verbose: print the chosen statistics via `pv`.
    """
    parameters, order = L('mean', 'std'), 90

    def __init__(self, mean=None, std=None, by_sample=False, by_var=False, by_step=False, eps=1e-8,
                 use_single_batch=True, verbose=False):
        self.mean = tensor(mean) if mean is not None else None
        self.std = tensor(std) if std is not None else None
        self.eps = eps
        self.by_sample, self.by_var, self.by_step = by_sample, by_var, by_step
        # Axes to reduce over: start from all of (batch, var, step) and drop
        # whichever dimensions stats should be kept separate for.
        drop_axes = []
        if by_sample: drop_axes.append(0)
        if by_var: drop_axes.append(1)
        if by_step: drop_axes.append(2)
        self.axes = tuple([ax for ax in (0, 1, 2) if ax not in drop_axes])
        if by_var and is_listy(by_var):
            # Grouped variables additionally reduce over the var axis.
            self.list_axes = tuple([ax for ax in (0, 1, 2) if ax not in drop_axes]) + (1,)
        self.use_single_batch = use_single_batch
        self.verbose = verbose
        if self.mean is not None or self.std is not None:
            pv(f'{self.__class__.__name__} mean={self.mean}, std={self.std}, by_sample={self.by_sample}, by_var={self.by_var}, by_step={self.by_step}\n', self.verbose)

    @classmethod
    def from_stats(cls, mean, std):
        return cls(mean, std)

    def setups(self, dl: DataLoader):
        if self.mean is None or self.std is None:
            if not self.by_sample:
                # Estimate statistics from training data (one batch or all).
                if not self.use_single_batch:
                    o = dl.dataset.__getitem__([slice(None)])[0]
                else:
                    o, *_ = dl.one_batch()
                if self.by_var and is_listy(self.by_var):
                    shape = torch.mean(o, dim=self.axes, keepdim=self.axes!=()).shape
                    mean = torch.zeros(*shape, device=o.device)
                    std = torch.ones(*shape, device=o.device)
                    for v in self.by_var:
                        if not is_listy(v): v = [v]
                        mean[:, v] = torch_nanmean(o[:, v], dim=self.axes if len(v) == 1 else self.list_axes, keepdim=True)
                        std[:, v] = torch.clamp_min(torch_nanstd(o[:, v], dim=self.axes if len(v) == 1 else self.list_axes, keepdim=True), self.eps)
                else:
                    mean = torch_nanmean(o, dim=self.axes, keepdim=self.axes!=())
                    std = torch.clamp_min(torch_nanstd(o, dim=self.axes, keepdim=self.axes!=()), self.eps)
                self.mean, self.std = mean, std
                if len(self.mean.shape) == 0:
                    pv(f'{self.__class__.__name__} mean={self.mean}, std={self.std}, by_sample={self.by_sample}, by_var={self.by_var}, by_step={self.by_step}\n', self.verbose)
                else:
                    pv(f'{self.__class__.__name__} mean shape={self.mean.shape}, std shape={self.std.shape}, by_sample={self.by_sample}, by_var={self.by_var}, by_step={self.by_step}\n', self.verbose)
            else:
                # Placeholder stats; the real ones are computed per batch in
                # `encodes` when by_sample is True.
                self.mean, self.std = torch.zeros(1), torch.ones(1)

    def encodes(self, o:TSTensor):
        if self.by_sample:
            # Recompute statistics for this batch only.
            if self.by_var and is_listy(self.by_var):
                shape = torch.mean(o, dim=self.axes, keepdim=self.axes!=()).shape
                mean = torch.zeros(*shape, device=o.device)
                std = torch.ones(*shape, device=o.device)
                for v in self.by_var:
                    if not is_listy(v): v = [v]
                    mean[:, v] = torch_nanmean(o[:, v], dim=self.axes if len(v) == 1 else self.list_axes, keepdim=True)
                    std[:, v] = torch.clamp_min(torch_nanstd(o[:, v], dim=self.axes if len(v) == 1 else self.list_axes, keepdim=True), self.eps)
            else:
                mean = torch_nanmean(o, dim=self.axes, keepdim=self.axes!=())
                std = torch.clamp_min(torch_nanstd(o, dim=self.axes, keepdim=self.axes!=()), self.eps)
            self.mean, self.std = mean, std
        return (o - self.mean) / self.std

    def decodes(self, o:TSTensor):
        if self.mean is None or self.std is None:
            return o
        return o * self.std + self.mean

    def __repr__(self):
        return f'{self.__class__.__name__}(by_sample={self.by_sample}, by_var={self.by_var}, by_step={self.by_step})'
# -


# Per-sample standardization: batches should be ~N(0, 1).
batch_tfms=[TSStandardize(by_sample=True, by_var=False, verbose=True)]
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
xb, yb = next(iter(dls.train))
test_close(xb.mean(), 0, eps=1e-1)
test_close(xb.std(), 1, eps=1e-1)

from tsai.data.validation import TimeSplitter

# Build a dataset seeded with NaNs to confirm that setups ignores them.
X_nan = np.random.rand(100, 5, 10)
idxs = np.random.choice(len(X_nan), int(len(X_nan)*.5), False)
X_nan[idxs, 0] = float('nan')
idxs = np.random.choice(len(X_nan), int(len(X_nan)*.5), False)
X_nan[idxs, 1, -10:] = float('nan')

batch_tfms = TSStandardize(by_var=True)
dls = get_ts_dls(X_nan, batch_tfms=batch_tfms, splits=TimeSplitter(show_plot=False)(range_of(X_nan)))
# Checks: per-variable stats computed on NaN-containing data must themselves be finite,
# and the standardized batch still carries the original NaNs (they are not imputed here).
test_eq(torch.isnan(dls.after_batch[0].mean).sum(), 0)
test_eq(torch.isnan(dls.after_batch[0].std).sum(), 0)
xb = first(dls.train)[0]
test_ne(torch.isnan(xb).sum(), 0)
test_ne(torch.isnan(xb).sum(), torch.isnan(xb).numel())

# Adding Nan2Value after TSStandardize removes the remaining NaNs from the batch.
batch_tfms = [TSStandardize(by_var=True), Nan2Value()]
dls = get_ts_dls(X_nan, batch_tfms=batch_tfms, splits=TimeSplitter(show_plot=False)(range_of(X_nan)))
xb = first(dls.train)[0]
test_eq(torch.isnan(xb).sum(), 0)

# Per-sample standardization: every batch should be ~N(0, 1).
batch_tfms = [TSStandardize(by_sample=True, by_var=False, verbose=False)]
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
xb, yb = next(iter(dls.train))
test_close(xb.mean(), 0, eps=1e-1)
test_close(xb.std(), 1, eps=1e-1)
xb, yb = next(iter(dls.valid))
test_close(xb.mean(), 0, eps=1e-1)
test_close(xb.std(), 1, eps=1e-1)

# Same check through the get_ts_dls API, with inplace=True ...
tfms = [None, TSClassification()]
batch_tfms = TSStandardize(by_sample=True)
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=[64, 128], inplace=True)
xb, yb = dls.train.one_batch()
test_close(xb.mean(), 0, eps=1e-1)
test_close(xb.std(), 1, eps=1e-1)
xb, yb = dls.valid.one_batch()
test_close(xb.mean(), 0, eps=1e-1)
test_close(xb.std(), 1, eps=1e-1)

# ... and with inplace=False.
tfms = [None, TSClassification()]
batch_tfms = TSStandardize(by_sample=True, by_var=False, verbose=False)
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, bs=[64, 128], inplace=False)
xb, yb = dls.train.one_batch()
test_close(xb.mean(), 0, eps=1e-1)
test_close(xb.std(), 1, eps=1e-1)
xb, yb = dls.valid.one_batch()
test_close(xb.mean(), 0, eps=1e-1)
test_close(xb.std(), 1, eps=1e-1)

# +
#export
@patch
def mul_min(x:(torch.Tensor, TSTensor, NumpyTensor), axes=(), keepdim=False):
    "Minimum of `x` reduced along (possibly multiple) `axes`; `axes=()` reduces over all dims."
    if axes == (): return retain_type(x.min(), x)
    # Reduce the highest axes first so the remaining axis indices stay valid.
    axes = reversed(sorted(axes if is_listy(axes) else [axes]))
    min_x = x
    for ax in axes: min_x, _ = min_x.min(ax, keepdim)
    return retain_type(min_x, x)

@patch
def mul_max(x:(torch.Tensor, TSTensor, NumpyTensor), axes=(), keepdim=False):
    "Maximum of `x` reduced along (possibly multiple) `axes`; `axes=()` reduces over all dims."
    if axes == (): return retain_type(x.max(), x)
    # Reduce the highest axes first so the remaining axis indices stay valid.
    axes = reversed(sorted(axes if is_listy(axes) else [axes]))
    max_x = x
    for ax in axes: max_x, _ = max_x.max(ax, keepdim)
    return retain_type(max_x, x)


class TSNormalize(Transform):
    "Normalizes batch of type `TSTensor`"
    parameters, order = L('min', 'max'), 90

    def __init__(self, min=None, max=None, range=(-1, 1), by_sample=False, by_var=False, by_step=False, clip_values=True,
                 use_single_batch=True, verbose=False):
        # min/max may be pre-computed stats; otherwise they are estimated in `setups`.
        self.min = tensor(min) if min is not None else None
        self.max = tensor(max) if max is not None else None
        self.range_min, self.range_max = range
        self.by_sample, self.by_var, self.by_step = by_sample, by_var, by_step
        # `axes` are the dims that are reduced when computing stats; by_* flags exclude a dim.
        drop_axes = []
        if by_sample: drop_axes.append(0)
        if by_var: drop_axes.append(1)
        if by_step: drop_axes.append(2)
        self.axes = tuple([ax for ax in (0, 1, 2) if ax not in drop_axes])
        if by_var and is_listy(by_var):
            # Used when by_var lists groups of variables that share stats (e.g. [0, [1, 2]]).
            self.list_axes = tuple([ax for ax in (0, 1, 2) if ax not in drop_axes]) + (1,)
        self.clip_values = clip_values
        self.use_single_batch = use_single_batch
        self.verbose = verbose
        if self.min is not None or self.max is not None:
            pv(f'{self.__class__.__name__} min={self.min}, max={self.max}, by_sample={self.by_sample}, by_var={self.by_var}, by_step={self.by_step}\n', self.verbose)

    @classmethod
    def from_stats(cls, min, max, range_min=0, range_max=1):
        # Fixed: previous version referenced `self` inside a classmethod (NameError) and
        # passed the range bounds as two positionals that landed on `by_sample`/`by_var`.
        return cls(min, max, range=(range_min, range_max))

    def setups(self, dl: DataLoader):
        "Estimate min/max from the data when they were not provided."
        if self.min is None or self.max is None:
            if not self.use_single_batch:
                o = dl.dataset.__getitem__([slice(None)])[0]  # full dataset
            else:
                o, *_ = dl.one_batch()
            if self.by_var and is_listy(self.by_var):
                shape = torch.mean(o, dim=self.axes, keepdim=self.axes!=()).shape
                _min = torch.zeros(*shape, device=o.device) + self.range_min
                _max = torch.zeros(*shape, device=o.device) + self.range_max
                for v in self.by_var:
                    if not is_listy(v): v = [v]
                    _min[:, v] = o[:, v].mul_min(self.axes if len(v) == 1 else self.list_axes, keepdim=self.axes!=())
                    _max[:, v] = o[:, v].mul_max(self.axes if len(v) == 1 else self.list_axes, keepdim=self.axes!=())
            else:
                _min, _max = o.mul_min(self.axes, keepdim=self.axes!=()), o.mul_max(self.axes, keepdim=self.axes!=())
            self.min, self.max = _min, _max
            if len(self.min.shape) == 0:
                pv(f'{self.__class__.__name__} min={self.min}, max={self.max}, by_sample={self.by_sample}, by_var={self.by_var}, by_step={self.by_step}\n', self.verbose)
            else:
                pv(f'{self.__class__.__name__} min shape={self.min.shape}, max shape={self.max.shape}, by_sample={self.by_sample}, by_var={self.by_var}, by_step={self.by_step}\n', self.verbose)

    def encodes(self, o:TSTensor):
        if self.by_sample:
            # Per-sample stats are recomputed for every batch.
            if self.by_var and is_listy(self.by_var):
                shape = torch.mean(o, dim=self.axes, keepdim=self.axes!=()).shape
                _min = torch.zeros(*shape, device=o.device) + self.range_min
                # Fixed: was `torch.ones(*shape) + self.range_max`, which left variables not
                # listed in by_var with max = range_max + 1, inconsistent with `setups`.
                _max = torch.zeros(*shape, device=o.device) + self.range_max
                for v in self.by_var:
                    if not is_listy(v): v = [v]
                    _min[:, v] = o[:, v].mul_min(self.axes, keepdim=self.axes!=())
                    _max[:, v] = o[:, v].mul_max(self.axes, keepdim=self.axes!=())
            else:
                _min, _max = o.mul_min(self.axes, keepdim=self.axes!=()), o.mul_max(self.axes, keepdim=self.axes!=())
            self.min, self.max = _min, _max
        # Linear map from [min, max] to [range_min, range_max].
        output = ((o - self.min) / (self.max - self.min)) * (self.range_max - self.range_min) + self.range_min
        if self.clip_values:
            if self.by_var and is_listy(self.by_var):
                for v in self.by_var:
                    if not is_listy(v): v = [v]
                    output[:, v] = torch.clamp(output[:, v], self.range_min, self.range_max)
            else:
                output = torch.clamp(output, self.range_min, self.range_max)
        return output

    def __repr__(self): return f'{self.__class__.__name__}(by_sample={self.by_sample}, by_var={self.by_var}, by_step={self.by_step})'
# -

batch_tfms = [TSNormalize()]
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
xb, yb = next(iter(dls.train))
assert xb.max() <= 1
assert xb.min() >= -1

batch_tfms = [TSNormalize(by_sample=True, by_var=False, verbose=False)]
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
xb, yb = next(iter(dls.train))
assert xb.max() <= 1
assert xb.min() >= -1

# Grouped-variable normalization: vars 0 and [1, 2] are normalized with separate stats.
batch_tfms = [TSNormalize(by_var=[0, [1, 2]], use_single_batch=False, clip_values=False, verbose=False)]
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
xb, yb = next(iter(dls.train))
assert xb[:, [0, 1, 2]].max() <= 1
assert xb[:, [0, 1, 2]].min() >= -1

#export
class TSClipOutliers(Transform):
    "Clip outliers batch of type `TSTensor` based on the IQR"
    parameters, order = L('min', 'max'), 90

    def __init__(self, min=None, max=None, by_sample=False, by_var=False, verbose=False):
        # `su` (setup needed): bounds must be estimated in `setups` unless given or per-sample.
        self.su = (min is None or max is None) and not by_sample
        self.min = tensor(min) if min is not None else tensor(-np.inf)
        self.max = tensor(max) if max is not None else tensor(np.inf)
        self.by_sample, self.by_var = by_sample, by_var
        if by_sample and by_var: self.axis = (2)
        elif by_sample: self.axis = (1, 2)
        elif by_var: self.axis = (0, 2)
        else: self.axis = None
        self.verbose = verbose
        if min is not None or max is not None:
            pv(f'{self.__class__.__name__} min={min}, max={max}\n', self.verbose)

    def setups(self, dl: DataLoader):
        "Estimate clipping bounds from one batch via the IQR rule."
        if self.su:
            o, *_ = dl.one_batch()
            min, max = get_outliers_IQR(o, self.axis)
            self.min, self.max = tensor(min), tensor(max)
            if self.axis is None:
                pv(f'{self.__class__.__name__} min={self.min}, max={self.max}, by_sample={self.by_sample}, by_var={self.by_var}\n', self.verbose)
            else:
                pv(f'{self.__class__.__name__} min={self.min.shape}, max={self.max.shape}, by_sample={self.by_sample}, by_var={self.by_var}\n', self.verbose)
            self.su = False

    def encodes(self, o:TSTensor):
        if self.axis is None: return torch.clamp(o, self.min, self.max)
        if self.by_sample:
            min, max = get_outliers_IQR(o, axis=self.axis)
            self.min, self.max = o.new(min), o.new(max)
        # Fixed: the original returned None on the by_var-only path (the return statement
        # was inside the by_sample branch); now the stored bounds are always applied.
        return torch_clamp(o, self.min, self.max)

    def __repr__(self): return f'{self.__class__.__name__}(by_sample={self.by_sample}, by_var={self.by_var})'

batch_tfms = [TSClipOutliers(-1, 1, verbose=True)]
dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, bs=128, num_workers=0, after_batch=batch_tfms)
xb, yb = next(iter(dls.train))
assert xb.max() <= 1
assert xb.min() >= -1
test_close(xb.min(), -1, eps=1e-1)
test_close(xb.max(), 1, eps=1e-1)
xb, yb = next(iter(dls.valid))
test_close(xb.min(), -1, eps=1e-1)
test_close(xb.max(), 1, eps=1e-1)

# export
class TSClip(Transform):
    "Clip batch of type `TSTensor`"
    parameters, order = L('min', 'max'), 90

    def __init__(self, min=-6, max=6):
        self.min = torch.tensor(min)
        self.max = torch.tensor(max)

    def encodes(self, o:TSTensor):
        return torch.clamp(o, self.min, self.max)

    def __repr__(self): return f'{self.__class__.__name__}(min={self.min}, max={self.max})'

t = TSTensor(torch.randn(10, 20, 100)*10)
test_le(TSClip()(t).max().item(), 6)
test_ge(TSClip()(t).min().item(), -6)

#export
class TSRobustScale(Transform):
    r"""This Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range)"""
    parameters, order = L('median', 'min', 'max'), 90

    def __init__(self, median=None, min=None, max=None, by_sample=False, by_var=False, verbose=False):
        self.su = (median is None or min is None or max is None) and not by_sample
        self.median = tensor(median) if median is not None else tensor(0)
        self.min = tensor(min) if min is not None else tensor(-np.inf)
        self.max = tensor(max) if max is not None else tensor(np.inf)
        self.by_sample, self.by_var = by_sample, by_var
        if by_sample and by_var: self.axis = (2)
        elif by_sample: self.axis = (1, 2)
        elif by_var: self.axis = (0, 2)
        else: self.axis = None
        self.verbose = verbose
        if median is not None or min is not None or max is not None:
            pv(f'{self.__class__.__name__} median={median} min={min}, max={max}\n', self.verbose)

    def setups(self, dl: DataLoader):
        "Estimate median and IQR bounds from one batch when not provided."
        if self.su:
            o, *_ = dl.one_batch()
            median = get_percentile(o, 50, self.axis)
            min, max = get_outliers_IQR(o, self.axis)
            self.median, self.min, self.max = tensor(median), tensor(min), tensor(max)
            if self.axis is None:
                pv(f'{self.__class__.__name__} median={self.median} min={self.min}, max={self.max}, by_sample={self.by_sample}, by_var={self.by_var}\n', self.verbose)
            else:
                pv(f'{self.__class__.__name__} median={self.median.shape} min={self.min.shape}, max={self.max.shape}, by_sample={self.by_sample}, by_var={self.by_var}\n', self.verbose)
            self.su = False

    def encodes(self, o:TSTensor):
        if self.by_sample:
            median = get_percentile(o, 50, self.axis)
            min, max = get_outliers_IQR(o, axis=self.axis)
            self.median, self.min, self.max = o.new(median), o.new(min), o.new(max)
        return (o - self.median) / (self.max - self.min)

    def __repr__(self): return f'{self.__class__.__name__}(by_sample={self.by_sample}, by_var={self.by_var})'

dls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, num_workers=0)
xb, yb = next(iter(dls.train))
# Fixed: `by_sample=true` (lowercase) raised NameError.
clipped_xb = TSRobustScale(by_sample=True)(xb)
test_ne(clipped_xb, xb)
clipped_xb.min(), clipped_xb.max(), xb.min(), xb.max()

#export
class TSDiff(Transform):
    "Differences batch of type `TSTensor`"
    order = 90

    def __init__(self, lag=1, pad=True):
        self.lag, self.pad = lag, pad

    def encodes(self, o:TSTensor):
        return torch_diff(o, lag=self.lag, pad=self.pad)

    def __repr__(self): return f'{self.__class__.__name__}(lag={self.lag}, pad={self.pad})'

t = TSTensor(torch.arange(24).reshape(2,3,4))
test_eq(TSDiff()(t)[..., 1:].float().mean(), 1)
test_eq(TSDiff(lag=2, pad=False)(t).float().mean(), 2)

#export
class TSLog(Transform):
    "Log transforms batch of type `TSTensor`. For positive values only"
    order = 90

    def encodes(self, o:TSTensor): return torch.log(o)
    def decodes(self, o:TSTensor): return torch.exp(o)
    def __repr__(self): return f'{self.__class__.__name__}()'

t = TSTensor(torch.rand(2,3,4))
enc_t = TSLog()(t)
test_ne(enc_t, t)
test_close(TSLog().decodes(enc_t).data, t.data)

# +
#export
class TSCyclicalPosition(Transform):
    """Concatenates the position along the sequence as 2 additional variables (sine and cosine)

        Args:
            magnitude: added for compatibility. It's not used.
    """
    order = 90

    def __init__(self, magnitude=None, **kwargs):
        super().__init__(**kwargs)

    def encodes(self, o: TSTensor):
        bs,_,seq_len = o.shape
        sin, cos = sincos_encoding(seq_len, device=o.device)
        # Broadcast the (seq_len,) encodings to (bs, 1, seq_len) and append as 2 new vars.
        output = torch.cat([o, sin.reshape(1,1,-1).repeat(bs,1,1), cos.reshape(1,1,-1).repeat(bs,1,1)], 1)
        return output
# -

bs, c_in, seq_len = 1,3,100
t = TSTensor(torch.rand(bs, c_in, seq_len))
enc_t = TSCyclicalPosition()(t)
test_ne(enc_t, t)
assert t.shape[1] == enc_t.shape[1] - 2
plt.plot(enc_t[0, -2:].cpu().numpy().T)
plt.show()

# +
#export
class TSLinearPosition(Transform):
    """Concatenates the position along the sequence as 1 additional variable

        Args:
            magnitude: added for compatibility. It's not used.
    """
    order = 90

    def __init__(self, magnitude=None, lin_range=(-1,1), **kwargs):
        self.lin_range = lin_range
        super().__init__(**kwargs)

    def encodes(self, o: TSTensor):
        bs,_,seq_len = o.shape
        lin = linear_encoding(seq_len, device=o.device, lin_range=self.lin_range)
        output = torch.cat([o, lin.reshape(1,1,-1).repeat(bs,1,1)], 1)
        return output
# -

bs, c_in, seq_len = 1,3,100
t = TSTensor(torch.rand(bs, c_in, seq_len))
enc_t = TSLinearPosition()(t)
test_ne(enc_t, t)
assert t.shape[1] == enc_t.shape[1] - 1
plt.plot(enc_t[0, -1].cpu().numpy().T)
plt.show()

#export
class TSLogReturn(Transform):
    "Calculates log-return of batch of type `TSTensor`. For positive values only"
    order = 90

    def __init__(self, lag=1, pad=True):
        self.lag, self.pad = lag, pad

    def encodes(self, o:TSTensor):
        return torch_diff(torch.log(o), lag=self.lag, pad=self.pad)

    def __repr__(self): return f'{self.__class__.__name__}(lag={self.lag}, pad={self.pad})'

t = TSTensor([1,2,4,8,16,32,64,128,256]).float()
test_eq(TSLogReturn(pad=False)(t).std(), 0)

#export
class TSAdd(Transform):
    "Add a defined amount to each batch of type `TSTensor`."
    order = 90

    def __init__(self, add):
        self.add = add

    def encodes(self, o:TSTensor):
        return torch.add(o, self.add)

    # Fixed: __repr__ referenced nonexistent self.lag/self.pad (copy-paste from TSDiff).
    def __repr__(self): return f'{self.__class__.__name__}(add={self.add})'

t = TSTensor([1,2,3]).float()
test_eq(TSAdd(1)(t), TSTensor([2,3,4]).float())

# ## y transforms

# +
# export
class Preprocessor():
    "Thin wrapper that applies an sklearn preprocessor to 1d targets of various container types."

    def __init__(self, preprocessor, **kwargs):
        self.preprocessor = preprocessor(**kwargs)

    def fit(self, o):
        # sklearn expects a 2d (n_samples, 1) array.
        if isinstance(o, pd.Series): o = o.values.reshape(-1,1)
        else: o = o.reshape(-1,1)
        self.fit_preprocessor = self.preprocessor.fit(o)
        return self.fit_preprocessor

    def transform(self, o, copy=True):
        if type(o) in [float, int]: o = array([o]).reshape(-1,1)
        o_shape = o.shape
        if isinstance(o, pd.Series): o = o.values.reshape(-1,1)
        else: o = o.reshape(-1,1)
        output = self.fit_preprocessor.transform(o).reshape(*o_shape)
        if isinstance(o, torch.Tensor): return o.new(output)
        return output

    def inverse_transform(self, o, copy=True):
        o_shape = o.shape
        if isinstance(o, pd.Series): o = o.values.reshape(-1,1)
        else: o = o.reshape(-1,1)
        output = self.fit_preprocessor.inverse_transform(o).reshape(*o_shape)
        if isinstance(o, torch.Tensor): return o.new(output)
        return output

# Named partials so Preprocessor(<scaler>) prints a meaningful name.
StandardScaler = partial(sklearn.preprocessing.StandardScaler)
setattr(StandardScaler, '__name__', 'StandardScaler')
RobustScaler = partial(sklearn.preprocessing.RobustScaler)
setattr(RobustScaler, '__name__', 'RobustScaler')
Normalizer = partial(sklearn.preprocessing.MinMaxScaler, feature_range=(-1, 1))
setattr(Normalizer, '__name__', 'Normalizer')
BoxCox = partial(sklearn.preprocessing.PowerTransformer, method='box-cox')
setattr(BoxCox, '__name__', 'BoxCox')
YeoJohnshon = partial(sklearn.preprocessing.PowerTransformer, method='yeo-johnson')
setattr(YeoJohnshon, '__name__', 'YeoJohnshon')
Quantile = partial(sklearn.preprocessing.QuantileTransformer, n_quantiles=1_000, output_distribution='normal', random_state=0)
setattr(Quantile, '__name__', 'Quantile')
# -

# Standardize
from tsai.data.validation import TimeSplitter
y = random_shuffle(np.random.randn(1000) * 10 + 5)
splits = TimeSplitter()(y)
preprocessor = Preprocessor(StandardScaler)
preprocessor.fit(y[splits[0]])
y_tfm = preprocessor.transform(y)
test_close(preprocessor.inverse_transform(y_tfm), y)
plt.hist(y, 50, label='ori',)
plt.hist(y_tfm, 50, label='tfm')
plt.legend(loc='best')
plt.show()

# RobustScaler
y = random_shuffle(np.random.randn(1000) * 10 + 5)
splits = TimeSplitter()(y)
preprocessor = Preprocessor(RobustScaler)
preprocessor.fit(y[splits[0]])
y_tfm = preprocessor.transform(y)
test_close(preprocessor.inverse_transform(y_tfm), y)
plt.hist(y, 50, label='ori',)
plt.hist(y_tfm, 50, label='tfm')
plt.legend(loc='best')
plt.show()

# Normalize
y = random_shuffle(np.random.rand(1000) * 3 + .5)
splits = TimeSplitter()(y)
preprocessor = Preprocessor(Normalizer)
preprocessor.fit(y[splits[0]])
y_tfm = preprocessor.transform(y)
test_close(preprocessor.inverse_transform(y_tfm), y)
plt.hist(y, 50, label='ori',)
plt.hist(y_tfm, 50, label='tfm')
plt.legend(loc='best')
plt.show()

# BoxCox
y = random_shuffle(np.random.rand(1000) * 10 + 5)
splits = TimeSplitter()(y)
preprocessor = Preprocessor(BoxCox)
preprocessor.fit(y[splits[0]])
y_tfm = preprocessor.transform(y)
test_close(preprocessor.inverse_transform(y_tfm), y)
plt.hist(y, 50, label='ori',)
plt.hist(y_tfm, 50, label='tfm')
plt.legend(loc='best')
plt.show()

# YeoJohnshon
y = random_shuffle(np.random.randn(1000) * 10 + 5)
y = np.random.beta(.5, .5, size=1000)
splits = TimeSplitter()(y)
preprocessor = Preprocessor(YeoJohnshon)
preprocessor.fit(y[splits[0]])
y_tfm = preprocessor.transform(y)
test_close(preprocessor.inverse_transform(y_tfm), y)
plt.hist(y, 50, label='ori',)
plt.hist(y_tfm, 50, label='tfm')
plt.legend(loc='best')
plt.show()

# QuantileTransformer
y = - np.random.beta(1, .5, 10000) * 10
splits = TimeSplitter()(y)
preprocessor = Preprocessor(Quantile)
preprocessor.fit(y[splits[0]])
plt.hist(y, 50, label='ori',)
y_tfm = preprocessor.transform(y)
plt.legend(loc='best')
plt.show()
plt.hist(y_tfm, 50, label='tfm')
plt.legend(loc='best')
plt.show()
# Quantile mapping is approximate, hence the looser tolerance.
test_close(preprocessor.inverse_transform(y_tfm), y, 1e-1)

#export
def ReLabeler(cm):
    r"""Changes the labels in a dataset based on a dictionary (class mapping)

        Args:
            cm = class mapping dictionary
    """
    def _relabel(y):
        # If mapped values have differing lengths the result must be an object array.
        obj = len(set([len(listify(v)) for v in cm.values()])) > 1
        keys = cm.keys()
        if obj:
            new_cm = {k:v for k,v in zip(keys, [listify(v) for v in cm.values()])}
            return np.array([new_cm[yi] if yi in keys else listify(yi) for yi in y], dtype=object).reshape(*y.shape)
        else:
            new_cm = {k:v for k,v in zip(keys, [listify(v) for v in cm.values()])}
            return np.array([new_cm[yi] if yi in keys else listify(yi) for yi in y]).reshape(*y.shape)
    return _relabel

vals = {0:'a', 1:'b', 2:'c', 3:'d', 4:'e'}
y = np.array([vals[i] for i in np.random.randint(0, 5, 20)])
labeler = ReLabeler(dict(a='x', b='x', c='y', d='z', e='z'))
y_new = labeler(y)
test_eq(y.shape, y_new.shape)
y, y_new

#hide
out = create_scripts(); beep(out)
nbs/003_data.preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 5. Preparación de Datos # # [Playlist de Ciencia de Datos en castellano](https://www.youtube.com/playlist?list=PLjyvn6Y1kpbEmRY4-ELeRA80ZywV7Xd67) # [![Ciencia de Datos en Python](https://img1.wsimg.com/isteam/ip/aab852a2-7b1f-49c0-92af-9206f2ec6a75/6-0001.png/:/rs=w:1160,h:653)](https://www.youtube.com/watch?v=otKULhEP-ik&list=PLjyvn6Y1kpbEmRY4-ELeRA80ZywV7Xd67&index=6) # # Gran parte del trabajo en Ciencia de Datos y Aprendizaje Automático (Machine Learning) consiste en obtener datos limpios y en la forma correcta. Esto puede incluir limpieza de datos para eliminar valores atípicos o mala información, escalado para algoritmos de aprendizaje automático o machine learning, división en grupos de entrenamiento y prueba, y enumeración de datos tipo "string". Todo esto debe suceder antes de la regresión, clasificación u otro entrenamiento aplicado al modelo. Afortunadamente, existen funciones que nos ayudan a automatizar la preparación de los datos. # # ![idea](https://apmonitor.com/che263/uploads/Begin_Python/idea.png) # # ### Generar Datos de Muestra # # Ejecuta la siguiente celda para generar datos de muestra dañados con NaN (not a number) y valores atípicos que son puntos de datos erróneos que están muy por fuera de la tendencia esperada. 
import numpy as np
import pandas as pd

# Reproducible demo data: two noisy series plus deliberate corruption.
np.random.seed(1)
n = 100
tt = np.linspace(0,n-1,n)
x = np.random.rand(n)+10+np.sqrt(tt)
y = np.random.normal(10,x*0.01,n)
x[1] = np.nan; y[2] = np.nan # 2 NaN (not a number)
for i in range(3): # add 3 outliers (bad data points)
    ri = np.random.randint(0,n)
    x[ri] += np.random.rand()*100
data = pd.DataFrame(np.vstack((tt,x,y)).T,\
                    columns=['time','x','y'])
data.head()

# ![analyze](https://apmonitor.com/che263/uploads/Begin_Python/analyze.png)
#
# ### Data Visualization
#
# The outliers are visible on a semi-log plot. The `NaN` values are not shown on the plot and appear as missing points.

import matplotlib.pyplot as plt
# %matplotlib inline
plt.semilogy(tt,x,'r.',label='x')
plt.semilogy(tt,y,'b.',label='y')
plt.legend(); plt.xlabel('tiempo')
plt.text(50,60,'Valores atípicos')
plt.show()

# ![idea](https://apmonitor.com/che263/uploads/Begin_Python/idea.png)
#
# ### Removing Outliers and Bad Data
#
# NaN values are removed with `numpy` by identifying rows `ix` that contain `NaN`. The rows are then dropped with `z=z[~iz]`, where `~` is a `not` operator.

z = np.array([[ 1, 2],
              [ np.nan, 3],
              [ 4, np.nan],
              [ 5, 6]])
iz = np.any(np.isnan(z), axis=1)
print(~iz)
z = z[~iz]
print(z)

# The `dropna` method removes `NaN` rows from a `pandas` `DataFrame`. Rows 1 and 2 are removed.

# drop any row with bad (NaN) values
data = data.dropna()
data.head()

# Several graphical techniques help detect outliers. A box plot or histogram shows the 3 outlying points.

plt.boxplot(data['x'])
plt.show()

# A Grubbs test or [another statistical measure](https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba) can detect outliers. The Grubbs test assumes univariate, normally distributed data and is intended to detect only a single outlier. In practice, many outliers are removed by dropping points that violate a rate-of-change limit or upper/lower bounds. The statement `data[data['x']<30]` keeps the rows where x is less than 30.

data = data[data['x']<30]
plt.boxplot(data['x'])
plt.show()

# ![expert](https://apmonitor.com/che263/uploads/Begin_Python/expert.png)
#
# ### Timed Activity
#
# Without looking at a clock, run the next cell to record 1-second intervals for 10 seconds. When you run the cell, press `Enter` every time you think 1 second has passed. After collecting the data, use a box plot to identify any data points in `tsec` that are outliers.

import time
from IPython.display import clear_output
tsec = []
input('Presiona "Enter" para grabar intervalos de 1 segundo'); t = time.time()
for i in range(10):
    clear_output(); input('Presiona "Enter": ' + str(i+1))
    tsec.append(time.time()-t); t = time.time()
clear_output(); print('Completo. Agrega un diagrama de caja para identificar valores atípicos')

# +
# Add a box plot to identify outliers
# -

# ![idea](https://apmonitor.com/che263/uploads/Begin_Python/idea.png)
#
# ### Data Scaling
#
# The `sklearn` library has a `preprocessing` module that implements standard scaling methods. The `StandardScalar` is shown below. Each column is normalized to zero mean and a standard deviation of one. The common scaling methods are `fit_transform(X)` to fit and transform, `transform(X)` to transform based on another fit, and `inverse_transform(Xs)` to scale back to the original representation.

from sklearn.preprocessing import StandardScaler
s = StandardScaler()
ds = s.fit_transform(data)
print(ds[0:5]) # print 5 rows

# The value `ds` is returned as a `numpy` array, so we convert it back to a `pandas` `DataFrame`, reusing the column names from `data`.

ds = pd.DataFrame(ds,columns=data.columns)
ds.head()

# ![idea](https://apmonitor.com/che263/uploads/Begin_Python/idea.png)
#
# ### Splitting Data
#
# Data is divided into train and test sets to hold back a fraction of the rows for evaluating classification or regression models. A typical split is 80% for training and 20% for testing, although the ratio depends on how much data is available and on the goal of the study.

divide = int(len(ds)*0.8)
train = ds[0:divide]
test = ds[divide:]
print(len(train),len(test))

# `train_test_split` is an `sklearn` function built specifically to split data into train and test groups. Options such as `shuffle=True` randomize the selection in each set.

from sklearn.model_selection import train_test_split
train,test = train_test_split(ds, test_size=0.2, shuffle=True)
print(len(train),len(test))

# ### TCLab Activity
#
# ![expert](https://apmonitor.com/che263/uploads/Begin_Python/expert.png)
#
# ### Data with Bad Values and Outliers
#
# Generate a new data file with some bad data randomly inserted (3 minutes), or read the data file from [a web link](https://apmonitor.com/do/uploads/Main/tclab_bad_data.txt) with the following code.
# +
import tclab, time, csv
import numpy as np

# Collect 180 s of TCLab temperature data, deliberately corrupting it with NaNs and
# outliers; fall back to a pre-recorded online dataset if no device is connected.
try:
    with tclab.TCLab() as lab:
        with open('05-tclab.csv',mode='w',newline='') as f:
            cw = csv.writer(f)
            cw.writerow(['Time','Q1','Q2','T1','T2'])
            print('t Q1 Q2 T1 T2')
            for t in range(180):
                T1 = lab.T1; T2 = lab.T2
                # insert bad values (NaN)
                bad = np.random.randint(0,30)
                T1=np.nan if bad==10 else T1
                T2=np.nan if bad==15 else T2
                # insert random number (potential outlier)
                outlier = np.random.randint(-40,150)
                T1=outlier if bad==20 else T1
                T2=outlier if bad==25 else T2
                # change heater setting every 30 s
                if t%30==0:
                    Q1 = np.random.randint(0,81)
                    Q2 = np.random.randint(0,81)
                    lab.Q1(Q1); lab.Q2(Q2)
                cw.writerow([t,Q1,Q2,T1,T2])
                if t%10==0:
                    print(t,Q1,Q2,T1,T2)
                time.sleep(1)
    data5=pd.read_csv('05-tclab.csv')
except Exception:
    # Fixed: was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    # during the 3-minute acquisition loop.
    print('Conectar el TCLab para generar nuevos datos')
    print('Importar datos de un repositorio en línea')
    url = 'http://apmonitor.com/do/uploads/Main/tclab_bad_data.txt'
    data5=pd.read_csv(url)
# -

# ### Clean, Scale, and Split Data
#
# After generating and importing `data5`, remove rows with `NaN` values or outliers in the `T1` or `T2` columns. Scale the data with `StandardScalar` from `scikit`. Divide the data into train (80%) and test (20%) sets.
05. Preparar_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Materials Project Workshop – July 31–August 2 2019, Berkeley, California # #### Link to notebook: [http://workshop.materialsproject.org/pymatgen/core/pymatgen_advanced.ipynb](http://workshop.materialsproject.org/pymatgen/core/pymatgen_advanced.ipynb) # # Lesson 3, Advanced use of pymatgen # *Remember to download your notebook if you want to keep a copy of your code.* # # 0. Introducing Transformations # Transformations in *pymatgen* can be used to change one structure to another such that a later calculation can be performed. # # A Transformation *object* exists because it is re-usable: it allows you to apply the same transformation to the multiple different structures, which is useful in a high-throughput context. # # Typically, in pymatgen, code might live in two places, for example: # # * `pymatgen.analysis.bond_valence` contains the `BVAnalyzer`, a code to help estimate likely oxidation states in your crystal # * `pymatgen.transformations.standard_transformations` contains ` AutoOxiStateDecorationTransformation`, the corresponding transformation that applies `BVAnalyzer` to your structure # # Transformations often wrap this other code to give it a standardized interface. # # # ## 0.1 Conventional Cell Transformation # # To start with, let's create a primitive lattice for silicon. from pymatgen import Structure, Lattice from crystal_toolkit.helpers.pythreejs_renderer import view si_lattice = Lattice.from_parameters(3.85, 3.85, 3.85, 60, 60, 60) si = Structure(si_lattice, ["Si", "Si"], [[0.75, 0.75, 0.75], [0, 0, 0]]) view(si) # In its primitive setting, this does not look much like the textbook picture of silicon. 
It can be useful to convert to its conventional setting, which can be necessary for certain tasks which require a crystal to be in a standard setting (for example, to report tensor properties). # # All transformations live in the `pymatgen.transformations` submodule, and wrap up operations that map one Structure to one or more transformed Structures. # Each transformation has a standard format. You create the transformation along with any options for that transformation like so: # You can also inspect to see if the transformation is one-to-one or one-to-many: # If it's one-to-one the output is a single Structure, if it's one to many the output is a list of dictionaries: `[{"structure": first transformed structure, ...}, {"structure": second transformed structure, ...}` # # Let's test this transformation out: view(si_conv, draw_image_atoms=True) # + [markdown] slideshow={"slide_type": "-"} # ### How Finding the Conventional Cell Works and When It Might Fail # # This transformation is very robust. Behind the scenes, it uses the [`spglib`](https://atztogo.github.io/spglib/) library, which is a powerful and robust code for symmetry analysis. However, note that due to limits of numerical precision sites may not be exactly on the symmetrically-equivalent positions so we introduce tolerance factors (`symprec`, a length tolerance, and `angle_tolerance`). These can be modified when constructing the transformation as appropriate: # # `trans = ConventionalCellTransformation(symprec=0.1, angle_tolerance=5)` # # Also using `spglib` is a `PrimitiveCellTransformation` to transform a crystal into its primitive setting. # - # # 1. 
Case Study for Structure Prediction barium_titanate = Structure.from_spacegroup("Pm-3m", Lattice.cubic(3.9), ["Ba", "Ti", "O"], [[0, 0, 0], [0.5, 0.5, 0.5], [0.5, 0.5, 0]]) view(barium_titanate, draw_image_atoms=True) # ## 1.1 Transformation to Decorate Structure with Oxidation States # Import the transformation which applies oxidation states: # Initialize the transformation: # If we apply it to our structure, we can see oxidation states are added: # ### How Oxidation State Decoration Works and When It Might Fail # # The Bond Valence analyzer implements a maximum a posteriori (MAP) estimation method to # determine oxidation states in a structure. The algorithm is as follows: # # 1. The bond valence sum of all symmetrically distinct sites in a structure # is calculated using the element-based parameters in [O'Keefe, Michael, and <NAME>. "Atom sizes and bond lengths in molecules and crystals." Journal of the American Chemical Society 113.9 (1991): 3226-3229](http://doi.org/10.1021/ja00009a002). # # 2. The posterior probabilities of all oxidation states is then calculated using: P(oxi_state|BV) = K * P(BV|oxi_state) * P(oxi_state), where K is # a constant factor for each element. P(BV/oxi_state) is calculated as a # Gaussian with mean and std deviation determined from an analysis of # the ICSD. The posterior P(oxi_state) is determined from a frequency # analysis of the ICSD. # # 3. The oxidation states are then ranked in order of decreasing probability # and the oxidation state combination that result in a charge neutral cell # is selected. # # Therefore, the bond valence analysis will fail if either parameters for that element are missing from the pre-tabulated data, or if the oxidation state is unusual and not well-represented in the ICSD. # # As a fallback, we have "oxidation state guesses" which are *composition-only* guesses. 
# This is a composition *object* which has many useful properties including `oxi_state_guesses`:

# # 1.2 Transformation to Predict Similar Structures

# This is our first one-to-many transformation. We indicate we're interested in multiple results by setting `return_ranked_list`. The ranking of this list varies between different transformations.

# Let's see our first predicted structure:

# ### How Structure Prediction Works and When It Might Fail
#
# This is a probabilistic model based on substitution probabilities data-mined from the ICSD. A full description of the algorithm is available in: [<NAME>, et al. "Data mined ionic substitutions for the discovery of new compounds." Inorganic chemistry 50.2 (2010): 656-663.](https://doi.org/10.1021/ic102031h)
#
# Without subsequent calculation, we cannot say whether the predicted structures are stable or not. It can be useful to check The Materials Project to see if the predicted structure has in fact been calculated and whether it is predicted to be stable. Additionally, like the bond valence analyzer, if a given element or oxidation state is rare, predictions might be inaccurate.

# # Example for Adsorbate Calculation
#
# Imagine a simple use case for studying the adsorption of CO on a catalyst surface.
#
# You might start with a disordered structure representing your catalyst. Many crystal structures obtained via experimental methods are only given in a disordered form, that is with partial occupancies on the site.
*On average* a site might contain 50% Pt and 50% Au as in the following example:

ptau = Structure.from_spacegroup('Fm-3m', Lattice.cubic(4), [{"Pt": 0.5, "Au": 0.5}], [[0, 0, 0]])

view(ptau, draw_image_atoms=True)

# # 2.1 Transformation to Enumerate Ordered Approximations for Disordered Structures
#
# Most computational methods require only ordered structures (integer occupancy), and therefore the first step when starting from a disordered structure is to create an *ordered approximation.*

# ### How Creating Ordered Approximations Works and When It Might Fail
#
# There are two ways of creating ordered approximations in *pymatgen*: `EnumerateStructureTransformation` (using the [`enumlib`](https://github.com/msg-byu/enumlib) code), and `OrderDisorderTransformation`, which is implemented purely in *pymatgen* but requires your structure to be decorated with oxidation states.
#
# Creating ordered approximations might fail if your cell contains a large number of species or is otherwise very complex, such that performing an enumeration creates a combinatorial explosion of different possible orderings.
#
# There are also physical concerns: a symmetric ordered approximation might not be most appropriate, and instead a "random" like cell might be more physical. This would require a different transformation.

# # 2.2 Transformation to Create a Surface

# This is an example of a transformation that requires mandatory arguments set:

# Note that in this case the transformation returns a `Slab` and not a `Structure`. In object-orientated fashion, the `Slab` is a sub-class of `Structure` meaning that it has all the same functionality of `Structure` but with added information such as the miller indices used to generate the surface.
# # We can show that this is a `Slab` using the `type` command: # ### How Creating Surfaces Works and How It Might Fail # # This is a fairly robust transformation but care must be taken when performing # actual calculations to ensure that there is sufficient vacuum present such # that periodic images do not interact with one another, and that the surfaces # are appropriately charge balanced. # # 2.3 Transformation to Add an Adsorbate # # Finally, we want to add an adsorbate to our surface. # ### How Adding Adsorbates Works and When It Might Fail # # The AdsorbateSiteFinder finds adsorbate sites on slabs and generates # adsorbate structures according to user-defined criteria. # # The algorithm for finding sites proceeds as follows: # # 1. Determine "surface sites" by finding those within # a height threshold along the miller index of the # highest site # 2. Create a network of surface sites using the Delaunay # triangulation of the surface sites # 3. Assign on-top, bridge, and hollow adsorption sites # at the nodes, edges, and face centers of the Del. # Triangulation # 4. Generate structures from a molecule positioned at # these sites # # This algorithm is fairly robust but was developed primarily for metal surfaces, # with less testing performed for oxide surfaces. Full details can be found in # the associated publication: # # [Montoya, <NAME>., and <NAME>. "A high-throughput framework for determining adsorption energies on solid surfaces." npj Computational Materials 3.1 (2017): 14.](https://doi.org/10.1038/s41524-017-0017-z) # # Summary # # Transformations are powerful because they can be applied repeatedly to different materials, and chained together to create complex results. This can be useful when setting up your own calculations, or for educational use to demonstrate a particular system in a particular configuration. Using Transformations, *pymatgen* also makes it easy to glue together codes from other parts of the Materials Science software ecosystem. 
# # Transformations also form the foundations of the high-throughput workflows that power The Materials Project. Tomorrow, we will show *atomate* workflows has built upon some of these transformations to automate calculation of complex materials properties.
lessons/pymatgen/2 - Advanced Pymatgen - fill in the blanks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lesson - Machine Learning IV: Hyperparameters Optimization # # Previously, we focused on increasing the number of attributes the model uses. We saw how, in general, adding more attributes generally lowered the error of the model. This is because the model is able to do a better job identifying the living spaces from the training set that are the most similar to the ones from the test set. However, we also observed how using all of the available features didn't actually improve the model's accuracy automatically and that some of the features were probably not relevant for similarity ranking. We saw that selecting relevant features was the right lever when improving a model's accuracy, not just increasing the features used in the absolute. # # Now, we'll focus on the impact of increasing `k`, the number of nearby neighbors the model uses to make predictions. To keep results consistent, we have exported both the training (`train_df`) and test sets (`test_df`) from last exercise to CSV files, `dc_airbnb_train.csv` and `dc_airbnb_test.csv` respectively. import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # Setting pandas display options for large data pd.options.display.max_rows = 200 pd.options.display.max_columns = 150 # Read the dataset and clean the price columns train_df = pd.read_csv('dc_airbnb_train.csv') test_df = pd.read_csv('dc_airbnb_test.csv') # ### Hyperparameter Optimization # # When we vary the features that are used in the model, we're affecting the data that the model uses. On the other hand, varying the k value affects the behavior of the model independently of the actual data that's used when making predictions. 
In other words, we're impacting how the model performs without trying to change the data that's used. # # Values that affect the behavior and performance of a model that are unrelated to the data that's used are referred to as **hyperparameters**. The process of finding the optimal hyperparameter value is known as **hyperparameter optimization** (or tuning). A simple but common hyperparameter optimization technique is known as **grid search**, which involves: # # - selecting a subset of the possible hyperparameter values, # - training a model using each of these hyperparameter values, # - evaluating each model's performance, # - selecting the hyperparameter value that resulted in the lowest error value. # # Below, we will first check results for `k` varying from 1 to 5 for a model with four features: # - accommodates # - bedrooms # - bathrooms # - number_of_reviews # **Exercise** # # - Create a list containing the integer values 1, 2, 3, 4, and 5, in that order, and assign to `hyper_params`. # - Create an empty list and assign to `mse_values`. # - Use a for loop to iterate over `hyper_params` and in each iteration: # - Instantiate a `KNeighborsRegressor` object with the following parameters: # - `n_neighbors`: the current value for the iterator variable, # - `algorithm`: brute # - Fit the instantiated k-nearest neighbors model to the following columns from train_df: # - `accommodates` # - `bedrooms` # - `bathrooms` # - `number_of_reviews` # - Use the trained model to make predictions on the same columns from `test_df` and assign to `predictions`. # - Use the mean_squared_error function to calculate the MSE value between `predictions` and the `price column` from `test_df`. # - Append the MSE value to `mse_values`. 
# - Display `mse_values` using the `print()` function

hyper_params = [1, 2, 3, 4, 5]
mse_values = []
rmse_values = []
features = ['accommodates', 'bedrooms', 'bathrooms', 'number_of_reviews']

from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error

# The training data does not change between candidate k values, so select
# it once outside the loop.
train_features = train_df[features]
train_target = train_df['price']

for param in hyper_params:
    knn = KNeighborsRegressor(n_neighbors=param, algorithm='brute')
    knn.fit(train_features, train_target)
    predictions = knn.predict(test_df[features])
    mse = mean_squared_error(test_df['price'], predictions)
    mse_values.append(mse)
    rmse_values.append(mse ** 0.5)

print(mse_values)
print(rmse_values)

# ### Expanding Grid Search
# We will now expand grid search to k = 50 and plot the results. We will write a function to avoid repeating calculations.

# +
features = ['accommodates', 'bedrooms', 'bathrooms', 'number_of_reviews']

def knn_values(k, features=features):
    """Return the list of test-set MSE values for k-nearest-neighbors models
    with n_neighbors = 1 .. k.

    Each model is trained on the `features` columns of the global `train_df`
    and scored against the `price` column of the global `test_df`.
    NOTE: the default `features` list is bound at definition time.
    """
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.metrics import mean_squared_error

    # Loop-invariant training data is selected once; the previous version
    # also computed RMSE values that were never returned (dead code removed).
    train_features = train_df[features]
    train_target = train_df['price']
    mse_values = []
    for param in range(1, k + 1):
        knn = KNeighborsRegressor(n_neighbors=param, algorithm='brute')
        knn.fit(train_features, train_target)
        predictions = knn.predict(test_df[features])
        mse_values.append(mean_squared_error(test_df['price'], predictions))
    return mse_values

fig = plt.figure(figsize=(8, 6))
plt.style.use('fivethirtyeight')
plt.plot(knn_values(51), color='red')
plt.xlabel("k")
plt.ylabel('mse')
plt.show()
# -

# Scatter Plot
k = 51
fig = plt.figure(figsize=(8, 6))
plt.style.use('fivethirtyeight')
# Use a distinct loop variable so the outer `k` is not shadowed.
plt.scatter(x=[i for i in range(1, k)], y=knn_values(k - 1), color='red')
plt.xlabel("k")
plt.ylabel('mse')
plt.xticks(np.arange(0, k, 2))
plt.show()

# ### Varying Hyperparameters
#
# From the scatter plot, we can tell that the lowest MSE value was achieved at the k value of 6. As we increased k past 6, the MSE actually increased and hovered but never decreased below 13657 (the approximate MSE value when k was 6).
#
# We will now repeat the grid search process for the model with all features which earlier performed poorly when we fixed k to 5 and see if it would result in a lower MSE value.

columns = list(train_df.columns.drop('price'))
k = 51
fig = plt.figure(figsize=(8, 6))
plt.style.use('fivethirtyeight')
plt.scatter(x=[i for i in range(1, k)], y=knn_values(k - 1, features=columns), color='red')
plt.xlabel("k")
plt.ylabel('mse')
plt.show()

# ### Workflow
# The general workflow for finding the best model is:
#
# - select relevant features to use for predicting the target column.
# - use grid search to find the optimal hyperparameter value for the selected features.
# - evaluate the model's accuracy and repeat the process.
#
# We will practice this workflow below starting from 2 features up to 3 features.
#

# +
# Slightly modified the function knn_values to return a dictionary of
# the minimum MSE value and the k that produced it.
def knn_values_min(k, features):
    """Grid-search n_neighbors = 1 .. k and return {best_k: lowest_mse}.

    Models are trained on the `features` columns of the global `train_df`
    and scored against the `price` column of the global `test_df`.
    """
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.metrics import mean_squared_error

    train_features = train_df[features]
    train_target = train_df['price']
    mse_by_k = {}
    for param in range(1, k + 1):
        knn = KNeighborsRegressor(n_neighbors=param, algorithm='brute')
        knn.fit(train_features, train_target)
        predictions = knn.predict(test_df[features])
        mse_by_k[param] = mean_squared_error(test_df['price'], predictions)
    # Find the argmin once, after the loop: computing it inside the loop can
    # leave stale (k, mse) pairs in the returned dictionary. The unused RMSE
    # computation has also been dropped.
    best_k = min(mse_by_k, key=mse_by_k.get)
    return {best_k: mse_by_k[best_k]}
# -

# +
two_features = ['accommodates', 'bathrooms']
three_features = ['accommodates', 'bathrooms', 'bedrooms']
hyper_params = [x for x in range(1, 21)]

# Append the first model's MSE values to this list.
two_mse_values = list()
# Append the second model's MSE values to this list.
three_mse_values = list()
two_hyp_mse = dict()
three_hyp_mse = dict()

two_mse_values = knn_values(20, two_features)
two_hyp_mse = knn_values_min(20, two_features)
three_mse_values = knn_values(20, three_features)
three_hyp_mse = knn_values_min(20, three_features)

print(two_hyp_mse)
print(three_hyp_mse)
# -

# ### Conclusion
#
# The first model, which used the `accommodates` and `bathrooms` columns, was able to achieve an MSE value of approximately 14790. The second model, which added the `bedrooms` column, was able to achieve an MSE value of approximately 13519, which is even lower than the lowest MSE value we achieved using the model with four features above (which used the `accommodates`, `bedrooms`, `bathrooms`, and `number_of_reviews` columns).
This demonstrates that using just one lever to find the best model isn't enough and we really have to use both levers in conjunction. #
Lesson-ML IV-Hyperparameter Optimization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # SQuAD Exploratory Data Analysis

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import os
import random

data_dir = "./data/"
train_context_path = os.path.join(data_dir, "train.context")
train_qn_path = os.path.join(data_dir, "train.question")
train_ans_path = os.path.join(data_dir, "train.span")
dev_context_path = os.path.join(data_dir, "dev.context")
dev_qn_path = os.path.join(data_dir, "dev.question")
dev_ans_path = os.path.join(data_dir, "dev.span")
# -

# ## Import Data

# +
# NOTE(review): `sep="\n"` and `error_bad_lines` are deprecated (and removed
# in pandas >= 2.0) -- confirm the pinned pandas version before upgrading.
train_context = pd.read_csv(train_context_path, sep="\n", header=None, error_bad_lines=False)
train_qn = pd.read_csv(train_qn_path, sep="\n", header=None, error_bad_lines=False)
train_ans = pd.read_csv(train_ans_path, sep="\n", header=None, error_bad_lines=False)

train = pd.DataFrame({
    "context": train_context[0],
    "qn": train_qn[0],
    "ans": train_ans[0]
})
# Reset the index after dropping rows so that positional lookups via
# `train.loc[idx, ...]` below cannot hit a gap left by dropna (KeyError).
train = train.dropna().reset_index(drop=True)

# +
dev_context = pd.read_csv(dev_context_path, sep="\n", header=None, error_bad_lines=False)
dev_qn = pd.read_csv(dev_qn_path, sep="\n", header=None, error_bad_lines=False)
dev_ans = pd.read_csv(dev_ans_path, sep="\n", header=None, error_bad_lines=False)

dev = pd.DataFrame({
    "context": dev_context[0],
    "qn": dev_qn[0],
    "ans": dev_ans[0]
})
# -

# `DataFrame.append` is deprecated (removed in pandas 2.0); `pd.concat` is
# equivalent and available in every pandas version.
data = pd.concat([train, dev], ignore_index=True)

# ## Understanding the Data

# +
# random.randint is inclusive on both ends, so the upper bound must be
# len(train) - 1 to avoid an out-of-range index.
idx = random.randint(0, len(train) - 1)
train.loc[idx, 'context']
# -

train.loc[idx, 'qn']

# ### First Answer

" ".join(train.loc[idx, 'context'].split()[int(train.loc[idx, 'ans'].split()[0]):int(train.loc[idx, 'ans'].split()[1]) + 1])

# ## Context Histogram

def get_len(string):
    """Number of whitespace-separated tokens in `string`."""
    return len(string.split())

sns.set(style="ticks")
g = sns.distplot(data['context'].apply(get_len), kde=False)
plt.xlabel("Context Sequence Length", fontsize=25)

# ## Question Histogram

sns.distplot(data['qn'].apply(get_len), kde=False)
plt.xlabel("Question Sequence Length", fontsize=25)

# ## Answer Histogram

# +
def get_ans_len(string):
    """Answer span length: end index - start index + 1."""
    return int(string.split()[1]) + 1 - int(string.split()[0])

sns.distplot(data['ans'].apply(get_ans_len), kde=False)
plt.xlabel("Answer Length", fontsize=25)
# -

# Critically, we should be more interested in **WHERE** the answer occurred in the context

# +
def get_ans_ind(string):
    """Answer span *end* index (second token of the span string)."""
    return int(string.split()[1])

sns.distplot(data['ans'].apply(get_ans_ind), kde=False)
plt.xlabel("Answer End Index", fontsize=25)

# +
# NOTE: intentionally re-defined to return the *start* index, which the
# remaining plots use.
def get_ans_ind(string):
    """Answer span *start* index (first token of the span string)."""
    return int(string.split()[0])

sns.distplot(data['ans'].apply(get_ans_ind), kde=False)
plt.xlabel("Answer Start Index", fontsize=25)
# -

# Fix: `ans` must be computed *before* it is plotted -- the original cell
# order referenced `ans` before assignment, raising a NameError.
ans = data['ans'].apply(get_ans_ind) / data['context'].apply(get_len)
ans = ans[ans < 1]
len(ans)

sns.distplot(ans, kde=False)
plt.xlabel("Answer Occurance in Context", fontsize=25)
exploratory_visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.2 64-bit # name: python392jvsc74a57bd0aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49 # --- # # [LEGALST-123] Lab 05: Large N and Hypothesis Testing # This lab will cover the basics of statistical sampling, the law of averages, and hypothesis testing. You should gain an intuition around how samples relate to populations, and the basics of statistical inference in the social sciences. from collections import Counter import numpy as np import pandas as pd from scipy import stats # %matplotlib inline import matplotlib.pyplot as plot plot.style.use('fivethirtyeight') # ## Data # We'll continue using the ANES data for this lab! anes = pd.read_csv('../data/anes/ANES_legalst123_cleaned.csv') anes.head() anes.columns.tolist() # ## Sampling and Empirical Distributions # ### Data Manipulation and Plotting Review # Let's look at how liberal respondents characterized themselves as post-election. Write code that saves the "post_liberal_rating" column in the ANES data to a Series variable. Keep in mind that valid answers have domain [0,100] so be sure to subset to only those values. liberal = anes.loc[:, "post_liberal_rating"] liberal = liberal.where(liberal < 150) # Plot a histogram of the data: liberal.hist() # ### Question 1 # What patterns do you notice? Where is the center of the distribution? What does this suggest about how Americans tend to self-identify? # Answer: The data are unimodal, with responses around "50" being the most common. In part, this may be because people tend to offer middle options when presented with these sorts of survey questions. It could also suggest that Americans tend to identify more toward the middle of the political spectrum than either end of it. 
# ### Law of Averages
# Write a function, "empirical_hist_anes" that takes a Series and a sample size as its argument, and then draws a histogram based on the results. Consult Adhikari and DeNero for help!

def empirical_hist_anes(series, n):
    """Draw a histogram of a random size-`n` sample taken from `series`."""
    sampled = series.sample(n)
    sampled.hist()

# Check how many rows are in the table with the "size" method, and then use your self-defined function to plot histograms taking sample sizes 10, 100, 1000, and the total number of rows.

liberal.size

empirical_hist_anes(liberal, 10)

empirical_hist_anes(liberal, 100)

empirical_hist_anes(liberal, 1000)

empirical_hist_anes(liberal, liberal.size)

# ### Question 2
# What happens to the histograms (compared to the original in Q1) as you increase the sample size? How does this relate to the Law of Averages? What is the relationship between sample size and population parameter estimation?
#
# ## Hypothesis Testing
# In this section, we'll cover the basic tools for hypothesis testing.
#
# The goal in conducting a hypothesis test is to answer the question, "Was it likely to observe my test statistic due to chance?" We say something is statistically significant if it is sufficiently far enough away from the center of an empirical distribution, and therefore unlikely to have occurred just by chance.
#
# The basic way to frame a hypothesis test is as follows:
#
# 1. Define a null $(H_O)$ and alternative $(H_A)$ hypothesis. The null hypothesis is usually framed as "no statistical relationship between the observed data and the background distribution" and the alternative hypothesis is the opposite. More concretely, the null is our default position, and assumes that the observed statistic likely came from the background distribution.
#
# 2. Calculate a test statistic (for example, t-test, $\chi^2$, etc.)
#
# 3. Check if the test statistic is far enough away from the center of the distribution. Traditionally, this was done by checking against a reference table, but in Python, we'll use p-values.
Typically, a p value of less than .05 (meaning that only 5% of observations should fall where the test statistic does) is used as the threshold for statistical significance in the social sciences. # # 4. Either reject or fail to reject the null hypothesis. # ### Jury Selection # First, we'll use the jury selection example from the Adhikari and DeNero book. This example is based on the U.S. Supreme Court case, Swain v. Alabama. <NAME> was convicted by an all-white jury, and challenged his conviction on the basis that it was statistically unlikely that a jury would be all-white by chance, given that the racial composition of the county was 18% black. Juries were selected from a panel of 100. In this case, only 8 jurors on the panel were black. # # Was it likely that the panel would only include 8 black jurors out of 100, given that 18% of the county was black? # + # Create the table jury = pd.DataFrame(data = {'Ethnicity': ['Asian', 'Black', 'Latino', 'White', 'Other'], 'Eligible': [0.15, 0.18, 0.12, 0.54, 0.01], 'Panels': [0.26, 0.08, 0.08, 0.54, 0.04]} ) jury # - # Horizontal Bar Chart jury.plot.barh('Ethnicity') # Augment with the difference between the "panels" columns and "eligible" column jury_with_diffs = jury.assign(Difference = jury.loc[:, 'Panels'] - jury.loc[:, 'Eligible']) jury_with_diffs # **Testing the hypothesis:** # In this scenario, the null hypothesis is that the jury panel was drawn uniformly from the eligible jury pool. Think of this as a statistical model. We will ask: under a model of uniform jury selection, how likely is it that Swain's jury panel would be selected? If we find it to be sufficiently unlikely, you may conclude that the model does not describe reality, i.e. that his jury panel was not selected uniformly. # # **Simulate selection of 1000 jury panels uniformly from the eligible jury pool.** This should generate a DataFrame with a column for each race, and each row describing the proportion of each race represented in one jury panel. 
# +
def sample_proportions(probabilities, samples):
    """Draw `samples` items across categories with the given `probabilities`
    and return the observed proportion of each category.

    This mirrors datascience.sample_proportions; see
    https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.multinomial.html
    """
    counts = np.random.multinomial(samples, probabilities)
    return counts / samples

def sample_jury_panel(panel_size=100):
    """Generate one sample panel of potential jurors by race, drawn uniformly from the eligible pool."""
    return sample_proportions(jury['Eligible'], panel_size)

panels = [sample_jury_panel() for _ in range(1000)]
simulated_fair_panels = pd.DataFrame(panels, columns=jury['Ethnicity'])
simulated_fair_panels.head(2)
# -

# **Visualize the distribution for each race.** As a first look, how common would a panel like the one in Swain v. Alabama appear to be?

simulated_fair_panels.boxplot()

# We can't quite compute a t-test yet. We have a set of samples from our simulation of colorblind jury selection, and we have a single sample of a real jury panel. t-tests are for comparing a pair of sample sets which each have a large number of samples.
#
# Ideally, we would have use the set of real jury panels from the jurisdiction in question as another sample set, and run a t-test between that sample set and the 1000 simulated jury panels. As a *substitute* for this, assume that the particular jury panel in Swain's case is *representative* of jury panels in the district (ask yourself: is this a fair assumption?); simulate the selection of jury panels with a mean of `jury['Panel']`, and run a t-test between those juries and your simulated fair juries. You should get very confident p-values. Why?
# # Hint: use https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.stats.ttest_ind.html#scipy.stats.ttest_ind # + unfair_panels = pd.DataFrame([ sample_proportions(jury['Panels'], 100) for _ in range(1000) ], columns=jury['Ethnicity']) stats.ttest_ind(simulated_fair_panels, unfair_panels, equal_var=False) # - # try a chi square test for seeing whether we can reject the null that the draw came from the known population newjury = 100*jury stats.chisquare(newjury['Panels'], f_exp=newjury['Eligible'], ddof=0, axis=0) #note that the chisquare test assumes values larger than 5, and since these are percentages we can express them # as jurors out of 100 people drawn # Since the legal question in this case is concerned with specifically the representation of black jurors, specifically on Swain's jury panel, we can use some simpler statistics. **Visualize the distribution of the 'Black' column in the simulated jury panels, and compute the portion of simulated jury panels having under 9% black jurors.** # + print( "Portion of uniformly selected panels with under 9% black jurors: ", simulated_fair_panels[simulated_fair_panels['Black'] < 0.09].count()['Black'] / simulated_fair_panels['Black'].count() ) simulated_fair_panels['Black'].hist(bins=[ n / 100 for n in range(0, 40, 5) ]) # - # ### Hypothesis Testing on ANES Data # Now let's try with the ANES data! Write code that creates a new DataFrame with the "post_liberal_rating" and "post_conservative_rating" as columns, and only includes values below 150. # + liberal_v_conservative = anes.loc[:, ["post_liberal_rating", "post_conservative_rating"]] liberal_v_conservative = liberal_v_conservative.where(liberal_v_conservative["post_liberal_rating"] < 150) liberal_v_conservative = liberal_v_conservative.where(liberal_v_conservative["post_conservative_rating"] < 150) liberal_v_conservative.head() # - # ### Question 3 # Plot a histogram of both the post liberal rating and post conservative rating side by side. 
Experiment with different bin widths. Visually, what can you infer about the shape of each data? liberal_v_conservative.hist(sharex=True, sharey=True) # ### Question 4 # Now write code to do a t-test between liberal and conservative. For the t-test to work, you have to remove NaN values first. liberal_v_conservative.dropna(inplace=True) stats.ttest_ind(liberal_v_conservative['post_liberal_rating'], liberal_v_conservative['post_conservative_rating'], equal_var = False) # What does the pvalue of this t-test indicate? Can we reject the null hypothesis that the mean of the two distributions differs significantly among samples given a 95% confidence? # ### Sort of odd way to use a t-test; maybe let's fix this to show Republican vs Democrat feeling thermometer on "liberals" # Wilson fixed the problem above, but here what are we really asking? We are only asking are these two means drawn from the same underlying population but it is two different feeling thermometer questions.We have not defined the groups we are comparing! # # We can't really interpret what this means, since we want to know whether we can distinguish two groups in the population using a particular measure, like a feeling thermometer. So we actually need to do some more work, like choosing groups to see if they differ on a variable. Political party ID seems like a good choice! # what does the pre_party_id variable look like? 
this is the 7-pt scale summary measure (V161158x in codebook) anes["pre_party_id"].hist() # + # we need to recode the pre_party_id variable so that it is just Democrat, Republican, Independent def change_values(column, new_values): anes[column] = anes[column].map(new_values, na_action="ignore") parties = { 1.0: "dem", 2.0: "dem", 3.0: "dem", 4.0: "ind", 5.0: "rep", 6.0: 'rep', 7.0: 'rep' } change_values("pre_party_id", parties) anes['pre_party_id'].hist() # - # add the pre_party_id variable to the liberal_v_conservative dataframe liberal_v_conservative['pre_party_id'] = anes['pre_party_id'] liberal_v_conservative.dropna(inplace=True) liberal_v_conservative.head() # now we can do a boxplot to see what the mean liberal feeling thermom. # and conservative feeling thermometer scores look like # liberal_v_conservative liberal_v_conservative.boxplot(column='post_liberal_rating', by='pre_party_id', figsize=(9,6)) liberal_v_conservative.boxplot(column='post_conservative_rating', by='pre_party_id', figsize=(9,6)) # + # now we can do a two sample t-test to see whether we can reject the idea at a 95% confidence level that dem and rep # partisan identifiers represent the same underlying population # first, for clarity, get the series of feeling thermometer ratings on liberals for dems and reps, even though # this just makes it easier on the eyes--Pythonic way is not to create new variables dems_on_liberal = liberal_v_conservative['post_liberal_rating'].where(liberal_v_conservative['pre_party_id']=='dem') reps_on_liberal = liberal_v_conservative['post_liberal_rating'].where(liberal_v_conservative['pre_party_id']=='rep') inds_on_liberal = liberal_v_conservative['post_liberal_rating'].where(liberal_v_conservative['pre_party_id']=='ind') # using .where method seems to result in a series with lots of NaN wherever condition is not met # should probably use another method, right? 
# get rid of the NaN values in each vector of feeling thermometer scores dems_on_liberal.dropna(inplace=True) reps_on_liberal.dropna(inplace=True) inds_on_liberal.dropna(inplace=True) print("mean Democrat feeling thermometer toward liberals", dems_on_liberal.mean()) print("mean Republican feeling thermometer toward liberals", reps_on_liberal.mean()) print("mean Independent feeling thermometer toward liberals", inds_on_liberal.mean()) print("type variable dems_on_liberal", type(dems_on_liberal)) print("length of dems_on_liberal ", len(dems_on_liberal)) print("length of reps_on_liberal ", len(reps_on_liberal)) print("length of inds_on_liberal ", len(inds_on_liberal)) print("two sample t-test for Dems & Reps ", stats.ttest_ind(dems_on_liberal, reps_on_liberal, equal_var = False, nan_policy='raise')) print("two sample t-test for Dems & Inds ", stats.ttest_ind(dems_on_liberal, inds_on_liberal, equal_var = False, nan_policy='raise')) print("two sample t-test for Inds & Reps ", stats.ttest_ind(inds_on_liberal, reps_on_liberal, equal_var = False, nan_policy='raise')) # + # now let's do feeling thermometer ratings on conservatives for dems, inds, reps, even though # this just makes it easier on the eyes--Pythonic way is not to create new variables dems_on_conserv = liberal_v_conservative['post_conservative_rating'].where(liberal_v_conservative['pre_party_id']=='dem') reps_on_conserv = liberal_v_conservative['post_conservative_rating'].where(liberal_v_conservative['pre_party_id']=='rep') inds_on_conserv = liberal_v_conservative['post_conservative_rating'].where(liberal_v_conservative['pre_party_id']=='ind') # using .where method seems to result in a series with lots of NaN wherever condition is not met # should probably use another method, right? 
# get rid of the NaN values in each vector of feeling thermometer scores
# (.where() kept the full index with NaN in non-matching rows; dropna()
# is what restricts each series to one party)
dems_on_conserv.dropna(inplace=True)
reps_on_conserv.dropna(inplace=True)
inds_on_conserv.dropna(inplace=True)
print("mean Democrat feeling thermometer toward conservatives", dems_on_conserv.mean())
print("mean Republican feeling thermometer toward conservatives", reps_on_conserv.mean())
print("mean Independent feeling thermometer toward conservatives", inds_on_conserv.mean())
print("length of dems_on_conserv ", len(dems_on_conserv))
print("length of reps_on_conserv ", len(reps_on_conserv))
print("length of inds_on_conserv ", len(inds_on_conserv))
# equal_var=False selects Welch's t-test; nan_policy='raise' guards against
# any NaN that survived the dropna() calls above
print("two sample t-test for Dems & Reps ", stats.ttest_ind(dems_on_conserv, reps_on_conserv, equal_var = False, nan_policy='raise'))
print("two sample t-test for Dems & Inds ", stats.ttest_ind(dems_on_conserv, inds_on_conserv, equal_var = False, nan_policy='raise'))
print("two sample t-test for Inds & Reps ", stats.ttest_ind(inds_on_conserv, reps_on_conserv, equal_var = False, nan_policy='raise'))
# -

# add the post_scientists feeling thermometer variable to the liberal_v_conservative dataframe
# ratings of 150 or more are masked to NaN here -- presumably missing-data
# codes in the ANES file; confirm against the codebook
liberal_v_conservative['post_scientists'] = anes['post_scientists_rating'].where(anes['post_scientists_rating']<150)
liberal_v_conservative.dropna(inplace=True)
liberal_v_conservative.head()

# +
# now let's do feeling thermometer ratings on scientists for dems, inds, reps, even though
# this feeling thermometer should have less partisan divide
liberal_v_conservative.boxplot(column='post_scientists', by='pre_party_id', figsize=(9,6))

# +
# here we should expect not to be able to reject the hypothesis of no difference in underlying pop means,
# but that seems to be true only for Republicans and Independents
dems_on_scientists = liberal_v_conservative['post_scientists'].where(liberal_v_conservative['pre_party_id']=='dem')
reps_on_scientists = liberal_v_conservative['post_scientists'].where(liberal_v_conservative['pre_party_id']=='rep')
inds_on_scientists = liberal_v_conservative['post_scientists'].where(liberal_v_conservative['pre_party_id']=='ind')
# using .where method seems to result in a series with lots of NaN wherever condition is not met
# should probably use another method, right?

# get rid of the NaN values in each vector of feeling thermometer scores
dems_on_scientists.dropna(inplace=True)
reps_on_scientists.dropna(inplace=True)
inds_on_scientists.dropna(inplace=True)
print("mean Democrat feeling thermometer toward scientists", dems_on_scientists.mean())
print("mean Republican feeling thermometer toward scientists", reps_on_scientists.mean())
print("mean Independent feeling thermometer toward scientists", inds_on_scientists.mean())
print("length of dems_on_scientists ", len(dems_on_scientists))
print("length of reps_on_scientists ", len(reps_on_scientists))
print("length of inds_on_scientists ", len(inds_on_scientists))
print("two sample t-test for Dems & Reps ", stats.ttest_ind(dems_on_scientists, reps_on_scientists, equal_var = False, nan_policy='raise'))
print("two sample t-test for Dems & Inds ", stats.ttest_ind(dems_on_scientists, inds_on_scientists, equal_var = False, nan_policy='raise'))
print("two sample t-test for Inds & Reps ", stats.ttest_ind(inds_on_scientists, reps_on_scientists, equal_var = False, nan_policy='raise'))
# -

# ## Central Limit Theorem

# The central limit theorem (CLT) is a fundamental concept in statistics. It basically says that the means of repeated samples will converge upon a normal distribution centered around the population mean. This is a powerful result that allows us to use a sample mean without measuring other sample means. This insight is particularly important in the social sciences, and justifies the use of regression for causal inference.

# Using liberal respondents ("post_liberal_rating") again, let's illustrate this concept. Write code that does the following:
#
# 1. Define a sample size, and number of repetitions. Also, create an empty array to store the sample means.
#
# 2. Write a for loop that loops over the number of repetitions and:
#     a. Samples the liberal respondents by the sample size
#     b. Calculates its mean
#     c. Appends the calculated mean to the array that stores sample means
#
# Using this code, experiment with various sample sizes and number of repetitions. Plot each result. For instance, try the following:
#
# 1. Sample size = 20, repetitions = 10
# 2. Sample size = 100, repetitions = 10
# 3. Sample size = 100, repetitions = 100000
# 4. Sample size = 500, repetitions = 100000
# 5. Sample size = 1000, repetitions = 150000

# +
def simulate_sample_means(sample_size, repetitions):
    """Draw `repetitions` random samples of size `sample_size` from the
    `liberal` ratings series, collect each sample mean, and plot a histogram
    of the collected means.

    Returns the list of sample means so callers can inspect it.
    """
    # Accumulate into a plain list: list.append is O(1) amortized, whereas
    # the original np.append() copied the whole array on every iteration
    # (O(n^2) overall -- noticeable at 100k+ repetitions).
    means = []
    for _ in range(repetitions):
        sample = liberal.sample(sample_size)
        means.append(np.mean(sample))
    plot.hist(means)
    return means


# +
simulate_sample_means(20, 10)

# +
simulate_sample_means(100, 10)

# +
simulate_sample_means(100, 100000)

# +
simulate_sample_means(500, 100000)

# +
# NOTE(review): the instructions above suggest sample size 1000 here, but the
# original code used 1500 -- kept as-is; confirm which was intended
simulate_sample_means(1500, 150000)
# -

# ### Question 5: What happens as you increase the sample size and number of repetitions? How does this property justify the use of statistical methods across a range of problems?

# Answer:
labs/05_Large n/05_Large_n_solutions_jon.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="ur8xi4C7S06n" # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="title:generic,gcp" # # E2E ML on GCP: MLOps stage 6 : Get started with TensorFlow serving functions with Vertex AI Prediction # # <table align="left"> # <td> # <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage6/get_started_with_tf_serving_function.ipynb"> # <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> # View on GitHub # </a> # </td> # <td> # <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage6/get_started_with_tf_serving_function.ipynb"> # Open in Vertex AI Workbench # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="tvgnzT1CKxrO" # ## Overview # # This tutorial demonstrates how to add a serving function to a model deployed to a `Vertex AI Endpoint`. # + [markdown] id="c9402cfbdc2d" # ### Objective # # In this tutorial, you learn how to use `Vertex AI Prediction` on a `Vertex AI Endpoint` resource with a serving function. 
# # This tutorial uses the following Google Cloud ML services and resources: # # - `Vertex AI Prediction` # - `Vertex AI Models` # - `Vertex AI Endpoints` # # The steps performed include: # # - Download a pretrained image classification model from TensorFlow Hub. # - Create a serving function to receive compressed image data, and output decomopressed preprocessed data for the model input. # - Upload the TensorFlow Hub model and serving function as a `Vertex AI Model` resource. # - Creating an `Endpoint` resource. # - Deploying the `Model` resource to an `Endpoint` resource. # - Make an online prediction to the `Model` resource instance deployed to the `Endpoint` resource. # + [markdown] id="dataset:iris,lcn" # ### Dataset # # This tutorial uses a pre-trained image classification model from TensorFlow Hub, which is trained on ImageNet dataset. # # Learn more about [ResNet V2 pretained model](https://tfhub.dev/google/imagenet/resnet_v2_101/classification/5). # + [markdown] id="costs" # ### Costs # # This tutorial uses billable components of Google Cloud: # # * Vertex AI # * Cloud Storage # # Learn about [Vertex AI # pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage # pricing](https://cloud.google.com/storage/pricing), and use the [Pricing # Calculator](https://cloud.google.com/products/calculator/) # to generate a cost estimate based on your projected usage. # + [markdown] id="install_aip" # ## Installation # # Install the following packages to execute this notebook. # + id="install_aip" import os # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" # ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG # ! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG # ! 
pip3 install tensorflow-hub $USER_FLAG # + [markdown] id="hhq5zEbGg0XX" # ### Restart the kernel # # After you install the additional packages, you need to restart the notebook kernel so it can find the packages. # + id="EzrelQZ22IZj" # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) # + [markdown] id="before_you_begin" # ## Before you begin # # ### GPU runtime # # *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** # # ### Set up your Google Cloud project # # **The following steps are required, regardless of your notebook environment.** # # 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. # # 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) # # 3. [Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com) # # 4. If you are running this notebook locally, you will need to install the [Cloud SDK]((https://cloud.google.com/sdk)). # # 5. Enter your project ID in the cell below. Then run the cell to make sure the # Cloud SDK uses the right project for all the commands in this notebook. # # **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`. # + [markdown] id="project_id" # #### Set your project ID # # **If you don't know your project ID**, you may be able to get your project ID using `gcloud`. 
# + id="set_project_id" PROJECT_ID = "[your-project-id]" # @param {type:"string"} # + id="autoset_project_id" if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) # + id="set_gcloud_project_id" # ! gcloud config set project $PROJECT_ID # + [markdown] id="region" # #### Region # # You can also change the `REGION` variable, which is used for operations # throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. # # - Americas: `us-central1` # - Europe: `europe-west4` # - Asia Pacific: `asia-east1` # # You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. # # Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations). # + id="region" REGION = "[your-region]" # @param {type: "string"} if REGION == "[your-region]": REGION = "us-central1" # + [markdown] id="timestamp" # #### Timestamp # # If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial. # + id="timestamp" from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") # + [markdown] id="gcp_authenticate" # ### Authenticate your Google Cloud account # # **If you are using Google Cloud Notebooks**, your environment is already authenticated. Skip this step. # # **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. 
# # **Otherwise**, follow these steps: # # In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page. # # **Click Create service account**. # # In the **Service account name** field, enter a name, and click **Create**. # # In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**. # # Click Create. A JSON file that contains your key downloads to your local environment. # # Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. # + id="gcp_authenticate" # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. import os import sys # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): # %env GOOGLE_APPLICATION_CREDENTIALS '' # + [markdown] id="bucket:mbsdk" # ### Create a Cloud Storage bucket # # **The following steps are required, regardless of your notebook environment.** # # When you initialize the Vertex AI SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions. # # Set the name of your Cloud Storage bucket below. 
Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. # + id="bucket" BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"} BUCKET_URI = f"gs://{BUCKET_NAME}" # + id="autoset_bucket" if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]": BUCKET_URI = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP # + [markdown] id="create_bucket" # **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. # + id="create_bucket" # ! gsutil mb -l $REGION $BUCKET_URI # + [markdown] id="validate_bucket" # Finally, validate access to your Cloud Storage bucket by examining its contents: # + id="validate_bucket" # ! gsutil ls -al $BUCKET_URI # + [markdown] id="setup_vars" # ### Set up variables # # Next, set up some variables used throughout the tutorial. # ### Import libraries and define constants # + id="import_aip:mbsdk" import google.cloud.aiplatform as aip import tensorflow as tf import tensorflow_hub as hub # + [markdown] id="init_aip:mbsdk" # ### Initialize Vertex AI SDK for Python # # Initialize the Vertex AI SDK for Python for your project and corresponding bucket. # + id="init_aip:mbsdk" aip.init(project=PROJECT_ID, staging_bucket=BUCKET_URI) # + [markdown] id="accelerators:training,cpu,prediction,cpu,mbsdk" # #### Set hardware accelerators # # You can set hardware accelerators for training and prediction. # # Set the variables `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: # # (aip.AcceleratorType.NVIDIA_TESLA_K80, 4) # # # Otherwise specify `(None, None)` to use a container image to run on a CPU. # # Learn more about [hardware accelerator support for your region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators). 
# # *Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3. This is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support. # + id="accelerators:training,cpu,prediction,cpu,mbsdk" if os.getenv("IS_TESTING_DEPLOY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPLOY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (None, None) # + [markdown] id="container:training,prediction" # #### Set pre-built containers # # Set the pre-built Docker container image for prediction. # # # For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers). # + id="container:training,prediction" if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = "2.5".replace(".", "-") if TF[0] == "2": if DEPLOY_GPU: DEPLOY_VERSION = "tf2-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf2-cpu.{}".format(TF) else: if DEPLOY_GPU: DEPLOY_VERSION = "tf-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf-cpu.{}".format(TF) DEPLOY_IMAGE = "{}-docker.pkg.dev/vertex-ai/prediction/{}:latest".format( REGION.split("-")[0], DEPLOY_VERSION ) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU) # + [markdown] id="machine:training" # #### Set machine type # # Next, set the machine type to use for prediction. # # - Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for prediction. # - `machine type` # - `n1-standard`: 3.75GB of memory per vCPU. # - `n1-highmem`: 6.5GB of memory per vCPU # - `n1-highcpu`: 0.9 GB of memory per vCPU # - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \] # # *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*. 
# + id="machine:training" if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Train machine type", DEPLOY_COMPUTE) # + [markdown] id="d8128b8ff025" # ## Get pretrained model from TensorFlow Hub # # For demonstration purposes, this tutorial uses a pretrained model from TensorFlow Hub (TFHub), which is then uploaded to a `Vertex AI Model` resource. Once you have a `Vertex AI Model` resource, the model can be deployed to a `Vertex AI Endpoint` resource. # # ### Download the pretrained model # # First, you download the pretrained model from TensorFlow Hub. The model gets downloaded as a TF.Keras layer. To finalize the model, in this example, you create a `Sequential()` model with the downloaded TFHub model as a layer, and specify the input shape to the model. # + id="c55fa4c826f7" tfhub_model = tf.keras.Sequential( [hub.KerasLayer("https://tfhub.dev/google/imagenet/resnet_v2_101/classification/5")] ) tfhub_model.build([None, 224, 224, 3]) tfhub_model.summary() # + [markdown] id="63de49055083" # ### Save the model artifacts # # At this point, the model is in memory. Next, you save the model artifacts to a Cloud Storage location. # + id="64618c713db9" MODEL_DIR = BUCKET_URI + "/model" tfhub_model.save(MODEL_DIR) # + [markdown] id="how_serving_function_works" # ## Upload the model for serving # # Next, you will upload your TF.Keras model from the custom job to Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex AI, your serving function ensures that the data is decoded on the model server before it is passed as input to your model. 
# # ### How does the serving function work # # When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`. # # The serving function consists of two parts: # # - `preprocessing function`: # - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph). # - Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc. # - `post-processing function`: # - Converts the model output to format expected by the receiving application -- e.q., compresses the output. # - Packages the output for the the receiving application -- e.g., add headings, make JSON object, etc. # # Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content. # # One consideration you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported. 
# + [markdown] id="serving_function_image:post" # ### Serving function for image data # # #### Preprocessing # # To pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes, and then preprocessed to match the model input requirements, before it is passed as input to the deployed model. # # To resolve this, you define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU). # # When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model: # # - `io.decode_jpeg`- Decompresses the JPG image which is returned as a Tensorflow tensor with three channels (RGB). # - `image.convert_image_dtype` - Changes integer pixel values to float 32, and rescales pixel data between 0 and 1. # - `image.resize` - Resizes the image to match the input shape for the model. # # At this point, the data can be passed to the model (`m_call`), via a concrete function. The serving function is a static graph, while the model is a dynamic graph. The concrete function performs the tasks of marshalling the input data from the serving function to the model, and marshalling the prediction result from the model back to the serving function. 
# + id="serving_function_image" repo="snippets_custom.ipynb" CONCRETE_INPUT = "numpy_inputs" def _preprocess(bytes_input): decoded = tf.io.decode_jpeg(bytes_input, channels=3) decoded = tf.image.convert_image_dtype(decoded, tf.float32) resized = tf.image.resize(decoded, size=(224, 224)) return resized @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def preprocess_fn(bytes_inputs): decoded_images = tf.map_fn(_preprocess, bytes_inputs, dtype=tf.float32, back_prop=False) return {CONCRETE_INPUT: decoded_images} # User needs to make sure the key matches model's input @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(bytes_inputs): images = preprocess_fn(bytes_inputs) prob = m_call(**images) return prob m_call = tf.function(tfhub_model.call).get_concrete_function([tf.TensorSpec(shape=[None, 224, 224, 3], dtype=tf.float32, name=CONCRETE_INPUT)]) tf.saved_model.save(tfhub_model, MODEL_DIR, signatures={ 'serving_default': serving_fn }) # + [markdown] id="serving_function_signature:image" repo="snippets_custom.ipynb" # ## Get the serving function signature # # You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer. # # For your purpose, you need the signature of the serving function. Why? Well, when we send our data for prediction as a HTTP request packet, the image data is base64 encoded, and our TF.Keras model takes numpy input. Your serving function will do the conversion from base64 to a numpy array. # # When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request. 
# + id="serving_function_signature:image" repo="snippets_mbsdk.ipynb"
# reload the SavedModel and pull the first (only) input key of the default signature;
# this is the key prediction requests must use
loaded = tf.saved_model.load(MODEL_DIR)

serving_input = list(loaded.signatures['serving_default'].structured_input_signature[1].keys())[0]
print('Serving function input:', serving_input)

# + [markdown] id="e8ce91147c93"
# ### Upload the TensorFlow Hub model to a `Vertex AI Model` resource
#
# Finally, you upload the model artifacts from the TFHub model and serving function into a `Vertex AI Model` resource.
#
# *Note:* When you upload the model artifacts to a `Vertex AI Model` resource, you specify the corresponding deployment container image.

# + id="ad61e1429512"
model = aip.Model.upload(
    display_name="example_" + TIMESTAMP,
    artifact_uri=MODEL_DIR,
    serving_container_image_uri=DEPLOY_IMAGE,
)

print(model)

# + [markdown] id="628de0914ba1"
# ## Creating an `Endpoint` resource
#
# You create an `Endpoint` resource using the `Endpoint.create()` method. At a minimum, you specify the display name for the endpoint. Optionally, you can specify the project and location (region); otherwise the settings are inherited by the values you set when you initialized the Vertex AI SDK with the `init()` method.
#
# In this example, the following parameters are specified:
#
# - `display_name`: A human readable name for the `Endpoint` resource.
# - `project`: Your project ID.
# - `location`: Your region.
# - `labels`: (optional) User defined metadata for the `Endpoint` in the form of key/value pairs.
#
# This method returns an `Endpoint` object.
#
# Learn more about [Vertex AI Endpoints](https://cloud.google.com/vertex-ai/docs/predictions/deploy-model-api).

# + id="0ea443f9593b"
endpoint = aip.Endpoint.create(
    display_name="example_" + TIMESTAMP,
    project=PROJECT_ID,
    location=REGION,
    labels={"your_key": "your_value"},
)

print(endpoint)

# + [markdown] id="ca3fa3f6a894"
# ## Deploying `Model` resources to an `Endpoint` resource.
#
# You can deploy one or more `Vertex AI Model` resource instances to the same endpoint. Each `Vertex AI Model` resource that is deployed will have its own deployment container for the serving binary.
#
# *Note:* For this example, you specified the deployment container for the TFHub model in the previous step of uploading the model artifacts to a `Vertex AI Model` resource.
#
# In the next example, you deploy the `Vertex AI Model` resource to a `Vertex AI Endpoint` resource. The `Vertex AI Model` resource already has defined for it the deployment container image. To deploy, you specify the following additional configuration settings:
#
# - The machine type.
# - The (if any) type and number of GPUs.
# - Static, manual or auto-scaling of VM instances.
#
# In this example, you deploy the model with the minimal amount of specified parameters, as follows:
#
# - `model`: The `Model` resource.
# - `deployed_model_displayed_name`: The human readable name for the deployed model instance.
# - `machine_type`: The machine type for each VM instance.
#
# Due to the requirements to provision the resource, this may take up to a few minutes.

# + id="4e93b034a72f"
response = endpoint.deploy(
    model=model,
    deployed_model_display_name="example_" + TIMESTAMP,
    machine_type=DEPLOY_COMPUTE,
)

print(endpoint)
# -

# ### Prepare test data for prediction
#
# Next, you will load a compressed JPEG image into memory and then base64 encode it. For demonstration purposes, you use an image from the Flowers dataset.

# ! gsutil cp gs://cloud-ml-data/img/flower_photos/daisy/100080576_f52e8ee070_n.jpg test.jpg

# +
import base64

# read the raw JPEG bytes and base64-encode them for safe transport in the
# JSON prediction request
with open('test.jpg', 'rb') as f:
    data = f.read()
b64str = base64.b64encode(data).decode('utf-8')
# # #### Request # # Since in this example your test item is in a Cloud Storage bucket, you open and read the contents of the image using `tf.io.gfile.Gfile()`. To pass the test data to the prediction service, you encode the bytes into base64 -- which makes the content safe from modification while transmitting binary data over the network. # # The format of each instance is: # # { serving_input: { 'b64': base64_encoded_bytes } } # # Since the `predict()` method can take multiple items (instances), send your single test item as a list of one test item. # # #### Response # # The response from the `predict()` call is a Python dictionary with the following entries: # # - `ids`: The internal assigned unique identifiers for each prediction request. # - `predictions`: The predicted confidence, between 0 and 1, per class label. # - `deployed_model_id`: The Vertex AI identifier for the deployed `Model` resource which did the predictions. # + id="predict_request:mbsdk,custom,icn" repo="snippets_mbsdk.ipynb" # The format of each instance should conform to the deployed model's prediction input schema. instances = [{serving_input: {'b64': b64str}}] prediction = endpoint.predict(instances=instances) print(prediction) # + [markdown] id="TpV-iwP9qw9c" # ## Cleaning up # # To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud # project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. # # Otherwise, you can delete the individual resources you created in this tutorial: # + id="sx_vKniMq9ZX" delete_bucket = True delete_model = True delete_endpoint = True if delete_endpoint: try: endpoint.undeploy_all() endpoint.delete() except Exception as e: print(e) if delete_model: try: model.delete() except Exception as e: print(e) if delete_bucket or os.getenv("IS_TESTING"): # ! gsutil rm -rf {BUCKET_URI} # -
notebooks/community/ml_ops/stage6/get_started_with_tf_serving_function.ipynb
# # Nested cross-validation # # Cross-validation can be used both for hyperparameter tuning and for # estimating the generalization performance of a model. However, using # it for both purposes at the same time is problematic, as the resulting # evaluation can underestimate some overfitting that results from # the hyperparameter tuning procedure itself. # # Philosophically, hyperparameter tuning is a form of machine learning # itself and therefore, we need another outer loop of cross-validation to # properly evaluate the generalization performance of the full modeling # procedure. # # This notebook highlights nested cross-validation and its impact on the # estimated generalization performance compared to naively using a single level # of cross-validation, both for hyperparameter tuning and evaluation of the # generalization performance. # # We will illustrate this difference using the breast cancer dataset. # + from sklearn.datasets import load_breast_cancer data, target = load_breast_cancer(return_X_y=True) # - # First, we use `GridSearchCV` to find the best parameters via cross-validation # on a minal parameter grid. # + from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC param_grid = {"C": [0.1, 1, 10], "gamma": [.01, .1]} model_to_tune = SVC() search = GridSearchCV( estimator=model_to_tune, param_grid=param_grid, n_jobs=2 ) search.fit(data, target) # - # We recall that, internally, `GridSearchCV` trains several models for each on # sub-sampled training sets and evaluate each of them on the matching testing # sets using cross-validation. This evaluation procedure is controlled via # using the `cv` parameter. The procedure is then repeated for all possible # combinations of parameters given in `param_grid`. # # The attribute `best_params_` gives us the best set of parameters that # maximize the mean score on the internal test sets. 
print(f"The best parameters found are: {search.best_params_}") # We can also show the mean score obtained by using the parameters `best_params_`. print(f"The mean CV score of the best model is: {search.best_score_:.3f}") # At this stage, one should be extremely careful using this score. The # misinterpretation would be the following: since this mean score was computed # using cross-validation test sets, we could use it to assess the # generalization performance of the model trained with the best # hyper-parameters. # # However, we should not forget that we used this score to pick-up the best # model. It means that we used knowledge from the test sets (i.e. test scores) # to select the hyper-parameter of the model it-self. # # Thus, this mean score is not a fair estimate of our testing error. Indeed, it # can be too optimistic, in particular when running a parameter search on a # large grid with many hyper-parameters and many possible values per # hyper-parameter. A way to avoid this pitfall is to use a "nested" # cross-validation. # # In the following, we will use an inner cross-validation corresponding to the # previous procedure above to only optimize the hyperparameters. We will also # embed this tuning procedure within an outer cross-validation, which is # dedicated to estimate the testing error of our tuned model. # # In this case, our inner cross-validation always gets the training set of the # outer cross-validation, making it possible to always compute the final # testing scores on completely independent sets of samples. 
#
# Let us do this in one go as follows:

# +
from sklearn.model_selection import cross_val_score, KFold

# Declare the inner and outer cross-validation strategies
inner_cv = KFold(n_splits=5, shuffle=True, random_state=0)
outer_cv = KFold(n_splits=3, shuffle=True, random_state=0)

# Inner cross-validation for parameter search
model = GridSearchCV(
    estimator=model_to_tune, param_grid=param_grid, cv=inner_cv, n_jobs=2
)

# Outer cross-validation to compute the testing score
test_score = cross_val_score(model, data, target, cv=outer_cv, n_jobs=2)
print(f"The mean score using nested cross-validation is: "
      f"{test_score.mean():.3f} +/- {test_score.std():.3f}")
# -

# The reported score is more trustworthy and should be close to production's
# expected generalization performance. Note that in this case, the two score
# values are very close for this first trial.
#
# We would like to better assess the difference between the nested and
# non-nested cross-validation scores to show that the latter can be too
# optimistic in practice. To do this, we repeat the experiment several times
# and shuffle the data differently to ensure that our conclusion does not
# depend on a particular resampling of the data.

# +
test_score_not_nested = []
test_score_nested = []

N_TRIALS = 20
for i in range(N_TRIALS):
    # For each trial, we use cross-validation splits on independently
    # randomly shuffled data by passing distinct values to the random_state
    # parameter.
    inner_cv = KFold(n_splits=5, shuffle=True, random_state=i)
    outer_cv = KFold(n_splits=3, shuffle=True, random_state=i)

    # Non-nested parameter search and scoring
    model = GridSearchCV(estimator=model_to_tune, param_grid=param_grid,
                         cv=inner_cv, n_jobs=2)
    model.fit(data, target)
    test_score_not_nested.append(model.best_score_)

    # Nested CV with parameter optimization
    test_score = cross_val_score(model, data, target, cv=outer_cv,
                                 n_jobs=2)
    test_score_nested.append(test_score.mean())
# -

# We can merge the data together and make a box plot of the two strategies.

# +
import pandas as pd

all_scores = {
    "Not nested CV": test_score_not_nested,
    "Nested CV": test_score_nested,
}
all_scores = pd.DataFrame(all_scores)

# +
import matplotlib.pyplot as plt

color = {"whiskers": "black", "medians": "black", "caps": "black"}
all_scores.plot.box(color=color, vert=False)
plt.xlabel("Accuracy")
_ = plt.title("Comparison of mean accuracy obtained on the test sets with\n"
              "and without nested cross-validation")
# -

# We observe that the generalization performance estimated without using nested
# CV is higher than what we obtain with nested CV. The reason is that the
# tuning procedure itself selects the model with the highest inner CV score. If
# there are many hyper-parameter combinations and if the inner CV scores have
# comparatively large standard deviations, taking the maximum value can lure
# the naive data scientist into over-estimating the true generalization
# performance of the result of the full learning procedure. By using an outer
# cross-validation procedure one gets a more trustworthy estimate of the
# generalization performance of the full learning procedure, including the
# effect of tuning the hyperparameters.
#
# As a conclusion, when optimizing parts of the machine learning pipeline (e.g.
# hyperparameter, transform, etc.), one needs to use nested cross-validation to
# evaluate the generalization performance of the predictive model. Otherwise,
# the results obtained without nested cross-validation are often overly
# optimistic.
notebooks/cross_validation_nested.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Concept figure for jDAS: a two-panel schematic showing a hidden data
# partition J (left, "Input data") and its complementary partition J^c
# (right, "Reconstruction"), connected by a box representing the jDAS model.

# %matplotlib widget

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

# +
rng = np.random.default_rng(42)

imshow_kwargs = {
    "interpolation": "none",
    "vmin": 0,
    "vmax": 1,
    "cmap": "binary",
}

# White, nearly opaque patches drawn on top (high zorder) to mask grid cells.
rect_kwargs = {
    "facecolor": "white",
    "zorder": 999,
    "alpha": 0.95
}

# Build an N x N checkerboard: ones on even columns of row 0, then each
# subsequent row is the previous row rolled by one.
N = 6
x = np.zeros((N, N))
for i in range(N):
    if i % 2 == 0:
        x[0, i] = 1
for i in range(N):
    x[i] = np.roll(x[0], i)

# Two clouds of 1000 random points each, scattered uniformly over the grid
# (rows 0-1 are x/y of the red cloud, rows 2-3 of the cyan cloud).
noise = rng.uniform(size=(4, 1000)) * N

# rect1 masks the hidden partition; rect2-rect5 mask its complement.
rect1 = Rectangle(xy=(0.5, 0.5), width=2, height=2, **rect_kwargs)
rect2 = Rectangle(xy=(-0.5, 2.5), width=N, height=3, **rect_kwargs)
rect3 = Rectangle(xy=(2.5, -0.5), width=3, height=3, **rect_kwargs)
rect4 = Rectangle(xy=(-0.5, -0.5), width=1, height=3, **rect_kwargs)
rect5 = Rectangle(xy=(0.5, -0.5), width=2, height=1, **rect_kwargs)

# Green box drawn outside the left axes (clip_on=False) for the model symbol.
offset = 6.68
model = Rectangle(xy=(offset, 1.0), width=2, height=3, facecolor="C2",
                  linewidth=2, edgecolor="k", clip_on=False, alpha=0.5)

plt.close("all")
fig, axes = plt.subplots(ncols=2, figsize=(9, 4))

# Left panel: full input with the hidden partition masked out.
ax = axes[0]
ax.imshow(x, **imshow_kwargs)
for i in np.arange(-1, N, 1):
    # Gray grid lines between cells.
    ax.plot([i + 0.5, i + 0.5], [-0.5, N - 0.5], c="gray", lw=2)
    ax.plot([-0.5, N - 0.5], [i + 0.5, i + 0.5], c="gray", lw=2)
ax.scatter(noise[0] - 0.5, noise[1] - 0.5, s=1, c="r")
ax.scatter(noise[2] - 0.5, noise[3] - 0.5, s=1, c="c")
ax.add_patch(rect1)
ax.text(x=1.5, y=1.6, s="Hidden\npartition J", ha="center", va="center",
        color="k", fontsize=12, zorder=1000)
ax.text(x=1.5, y=1.0, s="(not accessible)", ha="center", va="center",
        color="k", fontsize=8, zorder=1000)
ax.axis("off")
ax.set_xlim((-1, N))
ax.set_ylim((-1, N))
ax.set_title("Input data", fontsize=18)

# Right panel: the complementary partition, with scatter points inside the
# 1..3 square removed from both clouds.
ax = axes[1]
ax.imshow(x, **imshow_kwargs)
for i in np.arange(-1, N, 1):
    ax.plot([i + 0.5, i + 0.5], [-0.5, N - 0.5], c="gray", lw=2)
    ax.plot([-0.5, N - 0.5], [i + 0.5, i + 0.5], c="gray", lw=2)
for i, c in enumerate(("r", "c")):
    inds = (noise[2*i] >= 1) & (noise[2*i] <= 3)
    inds = inds & (noise[2*i+1] >= 1) & (noise[2*i+1] <= 3)
    ax.scatter(noise[2*i][~inds] - 0.5, noise[2*i+1][~inds] - 0.5, s=1, c=c)
for patch in (rect2, rect3, rect4, rect5):
    ax.add_patch(patch)
ax.text(x=3.5, y=4.0, s="Complementary\npartition J$^c$", zorder=1000,
        ha="center", va="center", fontsize=12)
ax.axis("off")
ax.set_xlim((-1, N))
ax.set_ylim((-1, N))
ax.set_title("Reconstruction", fontsize=18)

plt.tight_layout()
plt.subplots_adjust(wspace=0.4)

# Model box and connector lines between the two panels (all clip_on=False so
# they render in the inter-axes gap).
axes[0].add_patch(model)
axes[0].text(x=offset+1, y=2.5, s="$jDAS$\nmodel", ha="center", va="center",
             clip_on=False, fontsize=14)
axes[0].plot([5.5, offset], [-0.5, 1.0], "k-", clip_on=False)
axes[0].plot([5.5, offset], [5.5, 4.0], "k-", clip_on=False)
axes[0].plot([offset+2, offset+2+(offset-5.5)], [1.0, -0.5], "k-", clip_on=False)
axes[0].plot([offset+2, offset+2+(offset-5.5)], [4.0, 5.5], "k-", clip_on=False)

plt.savefig("jDAS_concept.png")
plt.show()
# -
docs/source/img/plot_checkerboard.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# In this notebook, we can see the different multiprocessing action between notebook and script on Windows.

# With script, the subprocess will import the calling module. So we can see 'say_hi' runs both in main process and subprocess.

# But in notebook, the `say_hi` runs only in main process.

# As a result, the functions or objects used in subprocess must be defined in the separate files.

# Note: Importing a module means it will execute the code in the module. Not like fork, functions are copied not executed.<br>
# Fork and Spawn are actions in OS. Importing a module is an action in the Python interpreter.

from mycellmagic import *

# +
import multiprocessing as mp
import os
import time
from mpfunc import *

my_name = "Yi"

def say_hi():
    # Demonstrates which process runs this function and which globals it sees.
    print(f"I'm in process {os.getpid()}, I can see {my_name}")

say_hi()

# in fact, __name__ == "__main__" is not necessary in notebook.
# because the module isn't imported in sub process.
# It's just to keep consistent with the code in scripts to avoid confusion.
if __name__ == "__main__":
    p = mp.Process(target=dir)
    # FIX: the attribute is `daemon`, not `demon` -- the old spelling silently
    # created an unused attribute and never actually set the daemon flag.
    p.daemon = True
    p.start()
    print('Done')
    p.join()
# -

# Running in Notebook, the say_hi() is only executed once.

# +
# %%save2file notebookvsscript.py
import multiprocessing as mp
import os
import time

my_name = "Yi"

def say_hi():
    print(f"I'm in process {os.getpid()}, I can see {my_name}")

say_hi()

def current_module_name():
    # In a spawned child the importing module is renamed to `__mp_main__`.
    print(__name__)

if __name__ == "__main__":
    # start new process
    p = mp.Process(target=current_module_name)
    p.start()
    print('Done')
    p.join()
# -

cmd = "python notebookvsscript.py"
# !{cmd}

# Running with script, we can see `say_hi` is executed both in parent process and sub process.<br>
# In the sub process, the module name is changed to `__mp_main__`.<br>
# And there is `if __name__ == "__main__"`, so `start new process` won't be executed in the sub process again.
demos/00_notebook vs script.ipynb
# # All results on Synthetic datasets - Prob. images

# Presenting results of all state-of-the-art methods together with our APDL method

# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import os, sys, glob
import pandas, numpy
from skimage import io
import matplotlib.pylab as plt
from matplotlib import gridspec
sys.path += [os.path.abspath('.'), os.path.abspath('..')]  # Add path to root
import bpdl.data_utils as tl_data
from notebooks.notebook_utils import filter_df_results_4_plotting, plot_bpdl_graph_results

# p_csv = os.path.expanduser('~/Dropbox/Documents/lab_CMP-BIA/paper_2017_drosophila_APDL/data/experiments_synth_APD_prob_results_NEW_OVERALL.csv')
p_csv = os.path.expanduser(os.path.join('results', 'experiments_synth_APD_prob_results_NEW_OVERALL.csv'))
# FIX: Python 2 `print expr` statement -> Python 3 print() call; the rest of
# this notebook (and the declared kernel) is Python 3.
print(os.path.exists(p_csv), '<-', p_csv)

# DATASET = 'datasetFuzzy_raw'
# p_data = '/mnt/F464B42264B3E590/TEMP/atomicPatternDictionary_v0'

# ## Loading data

df_all = pandas.read_csv(p_csv, index_col=None)
df_all.dropna(subset=['nb_labels', 'atlas_ARS', 'reconstruct_diff'], inplace=True)
print('-> loaded DF with', len(df_all), 'items and columns:\n', df_all.columns.tolist())

d_unique = {col: df_all[col].unique() for col in df_all.columns}
df_all.sort_values('nb_labels', inplace=True)
print('-> unique:', {k: len(d_unique[k]) for k in d_unique if len(d_unique[k]) > 10})

# ## Parse name and noise level

# FIX: under Python 3 `map()` returns a lazy iterator which does not expand
# into a DataFrame column; materialise it as a list first.
df_all['version'] = list(map(os.path.basename, df_all['path_in']))
print('Versions:', df_all['version'].unique().tolist())
print('Datasets:', df_all['dataset'].unique().tolist())

# Split dataset names of the form '<name>-<noise>' into a base name and a
# numeric noise level (None when no noise suffix is present).
noise, dataset_name = [], []
for d in df_all['dataset'].values.tolist():
    if '-' in d:
        noise.append(float(d.split('-')[-1]))
        dataset_name.append(d.split('-')[0])
    else:
        noise.append(None)
        dataset_name.append(d)
df_all['dataset'] = dataset_name
df_all['noise'] = noise
print('Datasets:', df_all['dataset'].unique().tolist())
print('Noise levels:', df_all['noise'].unique().tolist())

# ## Dependency on iter. parameter

# take out the series with various param combination
# LIST_GRAPHS = ['atlas ARS', 'atlas accuracy', 'atlas f1_weighted', 'atlas precision_weighted', 'atlas recall_weighted', 'reconstruct_diff', 'time']
LIST_GRAPHS = ['atlas ARS', 'atlas f1_weighted', 'reconstruct_diff', 'time']

# ## Dependency on number of samples

df_select = df_all[df_all['dataset'] == 'datasetFuzzy_raw']
df_res, dict_samples = filter_df_results_4_plotting(df_select, n_group='version', n_class='method',
                                                    iter_var='nb_labels', cols=LIST_GRAPHS)
print(dict_samples)
plot_bpdl_graph_results(df_res, 'version', 'method', l_graphs=LIST_GRAPHS,
                        iter_var='nb_labels', figsize=(6, 2))

# ## Dependency on level of noise

df_select = df_all[df_all['dataset'] == 'datasetFuzzy_raw_gauss']
df_select = df_select[df_select['version'] == 'atomicPatternDictionary_v0']
df_select = df_select[df_select['nb_labels'] == 6]
df_res, dict_samples = filter_df_results_4_plotting(df_select, n_group='version', n_class='method',
                                                    iter_var='noise', cols=LIST_GRAPHS)
print(dict_samples)
plot_bpdl_graph_results(df_res, 'version', 'method', l_graphs=LIST_GRAPHS,
                        iter_var='noise', figsize=(6, 2))

df_select = df_all[df_all['dataset'] == 'datasetFuzzy_raw_gauss']
df_select = df_select[df_select['version'] == 'atomicPatternDictionary_v1']
df_select = df_select[df_select['nb_labels'] == 13]
df_res, dict_samples = filter_df_results_4_plotting(df_select, n_group='version', n_class='method',
                                                    iter_var='noise', cols=LIST_GRAPHS)
print(dict_samples)
plot_bpdl_graph_results(df_res, 'version', 'method', l_graphs=LIST_GRAPHS,
                        iter_var='noise', figsize=(6, 2))

df_select = df_all[df_all['dataset'] == 'datasetFuzzy_raw_gauss']
df_select = df_select[df_select['version'] == 'atomicPatternDictionary_v2']
df_select = df_select[df_select['nb_labels'] == 23]
df_res, dict_samples = filter_df_results_4_plotting(df_select, n_group='version', n_class='method',
                                                    iter_var='noise', cols=LIST_GRAPHS)
print(dict_samples)
plot_bpdl_graph_results(df_res, 'version', 'method', l_graphs=LIST_GRAPHS,
                        iter_var='noise', figsize=(6, 2))
notebooks/results_synth_prob_all.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chapter 1 # *Astronomical Data in Python* is an introduction to tools and practices for working with astronomical data. Topics covered include: # # * Writing queries that select and download data from a database. # # * Using data stored in an Astropy `Table` or Pandas `DataFrame`. # # * Working with coordinates and other quantities with units. # # * Storing data in various formats. # # * Performing database join operations that combine data from multiple tables. # # * Visualizing data and preparing publication-quality figures. # As a running example, we will replicate part of the analysis in a recent paper, "[Off the beaten path: Gaia reveals GD-1 stars outside of the main stream](https://arxiv.org/abs/1805.00425)" by <NAME> and <NAME>. # # As the abstract explains, "Using data from the Gaia second data release combined with Pan-STARRS photometry, we present a sample of highly-probable members of the longest cold stream in the Milky Way, GD-1." # # GD-1 is a [stellar stream](https://en.wikipedia.org/wiki/List_of_stellar_streams), which is "an association of stars orbiting a galaxy that was once a globular cluster or dwarf galaxy that has now been torn apart and stretched out along its orbit by tidal forces." # [This article in *Science* magazine](https://www.sciencemag.org/news/2018/10/streams-stars-reveal-galaxy-s-violent-history-and-perhaps-its-unseen-dark-matter) explains some of the background, including the process that led to the paper and an discussion of the scientific implications: # # * "The streams are particularly useful for ... galactic archaeology --- rewinding the cosmic clock to reconstruct the assembly of the Milky Way." # # * "They also are being used as exquisitely sensitive scales to measure the galaxy's mass." # # * "... 
the streams are well-positioned to reveal the presence of dark matter ... because the streams are so fragile, theorists say, collisions with marauding clumps of dark matter could leave telltale scars, potential clues to its nature." # ## Data # # The datasets we will work with are: # # * [Gaia](https://en.wikipedia.org/wiki/Gaia_(spacecraft)), which is "a space observatory of the European Space Agency (ESA), launched in 2013 ... designed for astrometry: measuring the positions, distances and motions of stars with unprecedented precision", and # # * [Pan-STARRS](https://en.wikipedia.org/wiki/Pan-STARRS), The Panoramic Survey Telescope and Rapid Response System, which is a survey designed to monitor the sky for transient objects, producing a catalog with accurate astronometry and photometry of detected sources. # # Both of these datasets are very large, which can make them challenging to work with. It might not be possible, or practical, to download the entire dataset. # One of the goals of this workshop is to provide tools for working with large datasets. # ## Prerequisites # # These notebooks are meant for people who are familiar with basic Python, but not necessarily the libraries we will use, like Astropy or Pandas. If you are familiar with Python lists and dictionaries, and you know how to write a function that takes parameters and returns a value, you know enough Python to get started. # # We assume that you have some familiarity with operating systems, like the ability to use a command-line interface. But we don't assume you have any prior experience with databases. # # We assume that you are familiar with astronomy at the undergraduate level, but we will not assume specialized knowledge of the datasets or analysis methods we'll use. # ## Outline # # The first lesson demonstrates the steps for selecting and downloading data from the Gaia Database: # # 1. First we'll make a connection to the Gaia server, # # 2. 
We will explore information about the database and the tables it contains, # # 3. We will write a query and send it to the server, and finally # # 4. We will download the response from the server. # # After completing this lesson, you should be able to # # * Compose a basic query in ADQL. # # * Use queries to explore a database and its tables. # # * Use queries to download data. # # * Develop, test, and debug a query incrementally. # ## Query Language # # In order to select data from a database, you have to compose a query, which is like a program written in a "query language". # The query language we'll use is ADQL, which stands for "Astronomical Data Query Language". # # ADQL is a dialect of [SQL](https://en.wikipedia.org/wiki/SQL) (Structured Query Language), which is by far the most commonly used query language. Almost everything you will learn about ADQL also works in SQL. # # [The reference manual for ADQL is here](http://www.ivoa.net/documents/ADQL/20180112/PR-ADQL-2.1-20180112.html). # But you might find it easier to learn from [this ADQL Cookbook](https://www.gaia.ac.uk/data/gaia-data-release-1/adql-cookbook). # ## Installing libraries # # The library we'll use to get Gaia data is [Astroquery](https://astroquery.readthedocs.io/en/latest/). # # If you are running this notebook on Colab, you can run the following cell to install Astroquery and the other libraries we'll use. # # If you are running this notebook on your own computer, you might have to install these libraries yourself. # # If you are using this notebook as part of a Carpentries workshop, you should have received setup instructions. # # TODO: Add a link to the instructions. 
# # + tags=["hide-cell"] # If we're running on Colab, install libraries import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: # !pip install astroquery astro-gala pyia # - # ## Connecting to Gaia # # Astroquery provides `Gaia`, which is an [object that represents a connection to the Gaia database](https://astroquery.readthedocs.io/en/latest/gaia/gaia.html). # # We can connect to the Gaia database like this: from astroquery.gaia import Gaia # Running this import statement has the effect of creating a [TAP+](http://www.ivoa.net/documents/TAP/) connection; TAP stands for "Table Access Protocol". It is a network protocol for sending queries to the database and getting back the results. We're not sure why it seems to create two connections. # ## Databases and Tables # # What is a database, anyway? Most generally, it can be any collection of data, but when we are talking about ADQL or SQL: # # * A database is a collection of one or more named tables. # # * Each table is a 2-D array with one or more named columns of data. # # We can use `Gaia.load_tables` to get the names of the tables in the Gaia database. With the option `only_names=True`, it loads information about the tables, called the "metadata", not the data itself. tables = Gaia.load_tables(only_names=True) # + tags=["hide-output"] for table in (tables): print(table.get_qualified_name()) # - # So that's a lot of tables. The ones we'll use are: # # * `gaiadr2.gaia_source`, which contains Gaia data from [data release 2](https://www.cosmos.esa.int/web/gaia/data-release-2), # # * `gaiadr2.panstarrs1_original_valid`, which contains the photometry data we'll use from PanSTARRS, and # # * `gaiadr2.panstarrs1_best_neighbour`, which we'll use to cross-match each star observed by Gaia with the same star observed by PanSTARRS. # # We can use `load_table` (not `load_tables`) to get the metadata for a single table. The name of this function is misleading, because it only downloads metadata. 
meta = Gaia.load_table('gaiadr2.gaia_source') meta # Jupyter shows that the result is an object of type `TapTableMeta`, but it does not display the contents. # # To see the metadata, we have to print the object. print(meta) # Notice one gotcha: in the list of table names, this table appears as `gaiadr2.gaiadr2.gaia_source`, but when we load the metadata, we refer to it as `gaiadr2.gaia_source`. # # **Exercise:** Go back and try # # ``` # meta = Gaia.load_table('gaiadr2.gaiadr2.gaia_source') # ``` # # What happens? Is the error message helpful? If you had not made this error deliberately, would you have been able to figure it out? # ## Columns # # The following loop prints the names of the columns in the table. # + tags=["hide-output"] for column in meta.columns: print(column.name) # - # You can probably guess what many of these columns are by looking at the names, but you should resist the temptation to guess. # To find out what the columns mean, [read the documentation](https://gea.esac.esa.int/archive/documentation/GDR2/Gaia_archive/chap_datamodel/sec_dm_main_tables/ssec_dm_gaia_source.html). # # If you want to know what can go wrong when you don't read the documentation, [you might like this article](https://www.vox.com/future-perfect/2019/6/4/18650969/married-women-miserable-fake-paul-dolan-happiness). # **Exercise:** One of the other tables we'll use is `gaiadr2.gaiadr2.panstarrs1_original_valid`. Use `load_table` to get the metadata for this table. How many columns are there and what are their names? # # Hint: Remember the gotcha we mentioned earlier. # + tags=["remove-cell"] # Solution meta2 = Gaia.load_table('gaiadr2.panstarrs1_original_valid') print(meta2) # + tags=["hide-cell"] # Solution for column in meta2.columns: print(column.name) # - # ## Writing queries # # By now you might be wondering how we actually download the data. With tables this big, you generally don't. Instead, you use queries to select only the data you want. 
# # A query is a string written in a query language like SQL; for the Gaia database, the query language is a dialect of SQL called ADQL. # # Here's an example of an ADQL query. query1 = """SELECT TOP 10 source_id, ref_epoch, ra, dec, parallax FROM gaiadr2.gaia_source""" # **Python note:** We use a [triple-quoted string](https://docs.python.org/3/tutorial/introduction.html#strings) here so we can include line breaks in the query, which makes it easier to read. # # The words in uppercase are ADQL keywords: # # * `SELECT` indicates that we are selecting data (as opposed to adding or modifying data). # # * `TOP` indicates that we only want the first 10 rows of the table, which is useful for testing a query before asking for all of the data. # # * `FROM` specifies which table we want data from. # # The third line is a list of column names, indicating which columns we want. # # In this example, the keywords are capitalized and the column names are lowercase. This is a common style, but it is not required. ADQL and SQL are not case-sensitive. # To run this query, we use the `Gaia` object, which represents our connection to the Gaia database, and invoke `launch_job`: job1 = Gaia.launch_job(query1) job1 # The result is an object that represents the job running on a Gaia server. # # If you print it, it displays metadata for the forthcoming table. print(job1) # Don't worry about `Results: None`. That does not actually mean there are no results. # # However, `Phase: COMPLETED` indicates that the job is complete, so we can get the results like this: results1 = job1.get_results() type(results1) # **Optional detail:** Why is `table` repeated three times? The first is the name of the module, the second is the name of the submodule, and the third is the name of the class. Most of the time we only care about the last one. It's like the Linnean name for gorilla, which is *Gorilla Gorilla Gorilla*. 
# The result is an [Astropy Table](https://docs.astropy.org/en/stable/table/), which is similar to a table in an SQL database except: # # * SQL databases are stored on disk drives, so they are persistent; that is, they "survive" even if you turn off the computer. An Astropy `Table` is stored in memory; it disappears when you turn off the computer (or shut down this Jupyter notebook). # # * SQL databases are designed to process queries. An Astropy `Table` can perform some query-like operations, like selecting columns and rows. But these operations use Python syntax, not SQL. # # Jupyter knows how to display the contents of a `Table`. results1 # Each column has a name, units, and a data type. # # For example, the units of `ra` and `dec` are degrees, and their data type is `float64`, which is a 64-bit floating-point number, used to store measurements with a fraction part. # # This information comes from the Gaia database, and has been stored in the Astropy `Table` by Astroquery. # **Exercise:** Read [the documentation of this table](https://gea.esac.esa.int/archive/documentation/GDR2/Gaia_archive/chap_datamodel/sec_dm_main_tables/ssec_dm_gaia_source.html) and choose a column that looks interesting to you. Add the column name to the query and run it again. What are the units of the column you selected? What is its data type? # ## Asynchronous queries # # `launch_job` asks the server to run the job "synchronously", which normally means it runs immediately. But synchronous jobs are limited to 2000 rows. For queries that return more rows, you should run "asynchronously", which mean they might take longer to get started. # # If you are not sure how many rows a query will return, you can use the SQL command `COUNT` to find out how many rows are in the result without actually returning them. We'll see an example of this later. # # The results of an asynchronous query are stored in a file on the server, so you can start a query and come back later to get the results. 
# # For anonymous users, files are kept for three days. # # As an example, let's try a query that's similar to `query1`, with two changes: # # * It selects the first 3000 rows, so it is bigger than we should run synchronously. # # * It uses a new keyword, `WHERE`. query2 = """SELECT TOP 3000 source_id, ref_epoch, ra, dec, parallax FROM gaiadr2.gaia_source WHERE parallax < 1 """ # A `WHERE` clause indicates which rows we want; in this case, the query selects only rows "where" `parallax` is less than 1. This has the effect of selecting stars with relatively low parallax, which are farther away. We'll use this clause to exclude nearby stars that are unlikely to be part of GD-1. # # `WHERE` is one of the most common clauses in ADQL/SQL, and one of the most useful, because it allows us to select only the rows we need from the database. # # We use `launch_job_async` to submit an asynchronous query. job2 = Gaia.launch_job_async(query2) print(job2) # And here are the results. results2 = job2.get_results() results2 # You might notice that some values of `parallax` are negative. As [this FAQ explains](https://www.cosmos.esa.int/web/gaia/archive-tips#negative%20parallax), "Negative parallaxes are caused by errors in the observations." Negative parallaxes have "no physical meaning," but they can be a "useful diagnostic on the quality of the astrometric solution." # # Later we will see an example where we use `parallax` and `parallax_error` to identify stars where the distance estimate is likely to be inaccurate. # **Exercise:** The clauses in a query have to be in the right order. Go back and change the order of the clauses in `query2` and run it again. # # The query should fail, but notice that you don't get much useful debugging information. # # For this reason, developing and debugging ADQL queries can be really hard. 
A few suggestions that might help: # # * Whenever possible, start with a working query, either an example you find online or a query you have used in the past. # # * Make small changes and test each change before you continue. # # * While you are debugging, use `TOP` to limit the number of rows in the result. That will make each attempt run faster, which reduces your testing time. # # * Launching test queries synchronously might make them start faster, too. # ## Operators # # In a `WHERE` clause, you can use any of the [SQL comparison operators](https://www.w3schools.com/sql/sql_operators.asp); here are the most common ones: # # | Symbol | Operation # |--------| :--- # | `>` | greater than # | `<` | less than # | `>=` | greater than or equal # | `<=` | less than or equal # | `=` | equal # | `!=` or `<>` | not equal # # Most of these are the same as Python, but some are not. In particular, notice that the equality operator is `=`, not `==`. # Be careful to keep your Python out of your ADQL! # # You can combine comparisons using the logical operators: # # * AND: true if both comparisons are true # * OR: true if either or both comparisons are true # # Finally, you can use `NOT` to invert the result of a comparison. # **Exercise:** [Read about SQL operators here](https://www.w3schools.com/sql/sql_operators.asp) and then modify the previous query to select rows where `bp_rp` is between `-0.75` and `2`. # # You can [read about this variable here](https://gea.esac.esa.int/archive/documentation/GDR2/Gaia_archive/chap_datamodel/sec_dm_main_tables/ssec_dm_gaia_source.html). 
# + tags=["hide-cell"] # Solution # This is what most people will probably do query = """SELECT TOP 10 source_id, ref_epoch, ra, dec, parallax FROM gaiadr2.gaia_source WHERE parallax < 1 AND bp_rp > -0.75 AND bp_rp < 2 """ # + tags=["hide-cell"] # Solution # But if someone notices the BETWEEN operator, # they might do this query = """SELECT TOP 10 source_id, ref_epoch, ra, dec, parallax FROM gaiadr2.gaia_source WHERE parallax < 1 AND bp_rp BETWEEN -0.75 AND 2 """ # - # This [Hertzsprung-Russell diagram](https://sci.esa.int/web/gaia/-/60198-gaia-hertzsprung-russell-diagram) shows the BP-RP color and luminosity of stars in the Gaia catalog. # # Selecting stars with `bp-rp` less than 2 excludes many [class M dwarf stars](https://xkcd.com/2360/), which are low temperature, low luminosity. A star like that at GD-1's distance would be hard to detect, so if it is detected, it it more likely to be in the foreground. # ## Cleaning up # # Asynchronous jobs have a `jobid`. job1.jobid, job2.jobid # Which you can use to remove the job from the server. Gaia.remove_jobs([job2.jobid]) # If you don't remove it job from the server, it will be removed eventually, so don't feel too bad if you don't clean up after yourself. # ## Formatting queries # # So far the queries have been string "literals", meaning that the entire string is part of the program. # But writing queries yourself can be slow, repetitive, and error-prone. # # It is often a good idea to write Python code that assembles a query for you. One useful tool for that is the [string `format` method](https://www.w3schools.com/python/ref_string_format.asp). # # As an example, we'll divide the previous query into two parts; a list of column names and a "base" for the query that contains everything except the column names. # # Here's the list of columns we'll select. 
columns = 'source_id, ra, dec, pmra, pmdec, parallax, parallax_error, radial_velocity'

# And here's the base; it's a string that contains at least one format specifier in curly brackets (braces).

query3_base = """SELECT TOP 10
{columns}
FROM gaiadr2.gaia_source
WHERE parallax < 1
  AND bp_rp BETWEEN -0.75 AND 2
"""

# This base query contains one format specifier, `{columns}`, which is a placeholder for the list of column names we will provide.
#
# To assemble the query, we invoke `format` on the base string and provide a keyword argument that assigns a value to `columns`.

query3 = query3_base.format(columns=columns)

# The result is a string with line breaks.  If you display it, the line breaks appear as `\n`.

query3

# But if you print it, the line breaks appear as... line breaks.

print(query3)

# Notice that the format specifier has been replaced with the value of `columns`.
#
# Let's run it and see if it works:

job3 = Gaia.launch_job(query3)
print(job3)

results3 = job3.get_results()
results3

# Good so far.

# **Exercise:** This query always selects sources with `parallax` less than 1.  But suppose you want to take that upper bound as an input.
#
# Modify `query3_base` to replace `1` with a format specifier like `{max_parallax}`.  Now, when you call `format`, add a keyword argument that assigns a value to `max_parallax`, and confirm that the format specifier gets replaced with the value you provide.

# + tags=["hide-cell"]
# Solution

query4_base = """SELECT TOP 10
{columns}
FROM gaiadr2.gaia_source
WHERE parallax < {max_parallax}
  AND bp_rp BETWEEN -0.75 AND 2
"""

# + tags=["hide-cell"]
# Solution

query4 = query4_base.format(columns=columns, max_parallax=0.5)

# FIX: print the query we just assembled.  The original printed `query`,
# which is a stale variable left over from an earlier solution cell, so the
# exercise never actually showed the `{max_parallax}` substitution.
print(query4)
# -

# **Style note:** You might notice that the variable names in this notebook are numbered, like `query1`, `query2`, etc.
# # The advantage of this style is that it isolates each section of the notebook from the others, so if you go back and run the cells out of order, it's less likely that you will get unexpected interactions. # # A drawback of this style is that it can be a nuisance to update the notebook if you add, remove, or reorder a section. # # What do you think of this choice? Are there alternatives you prefer? # ## Summary # # This notebook demonstrates the following steps: # # 1. Making a connection to the Gaia server, # # 2. Exploring information about the database and the tables it contains, # # 3. Writing a query and sending it to the server, and finally # # 4. Downloading the response from the server as an Astropy `Table`. # ## Best practices # # * If you can't download an entire dataset (or it's not practical) use queries to select the data you need. # # * Read the metadata and the documentation to make sure you understand the tables, their columns, and what they mean. # # * Develop queries incrementally: start with something simple, test it, and add a little bit at a time. # # * Use ADQL features like `TOP` and `COUNT` to test before you run a query that might return a lot of data. # # * If you know your query will return fewer than 3000 rows, you can run it synchronously, which might complete faster (but it doesn't seem to make much difference). If it might return more than 3000 rows, you should run it asynchronously. # # * ADQL and SQL are not case-sensitive, so you don't have to capitalize the keywords, but you should. # # * ADQL and SQL don't require you to break a query into multiple lines, but you should. # # Jupyter notebooks can be good for developing and testing code, but they have some drawbacks. In particular, if you run the cells out of order, you might find that variables don't have the values you expect. # # There are a few things you can do to mitigate these problems: # # * Make each section of the notebook self-contained. 
Try not to use the same variable name in more than one section. # # * Keep notebooks short. Look for places where you can break your analysis into phases with one notebook per phase. # + active="" #
_build/jupyter_execute/01_query.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Denoising fMRI data using aCompCor and functional connectivity estimation
# ================================================
#
# This is a script for group denoising and FC estimation on longitudinal fMRI data preprocessed in fmriprep.

# Step 1: Importing packages
# ------------------------------------------

# +
# %reset
# %matplotlib inline

from os import listdir
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing # for normalization of confounds columns

from nilearn import datasets # for fetching atlas
from nilearn import plotting
from nilearn import input_data
from nilearn import signal
from nilearn.input_data import NiftiLabelsMasker
from nilearn.connectome import ConnectivityMeasure

# NOTE(review): wildcard import from a local module — `motion_24_friston`,
# `scrubbing` and `standardize` used below come from here.
from denoise import *
# -

# Step 2: Creating atlas
# -------------------------------------------

# +
# Loading Power ROIs coordinates
power = datasets.fetch_coords_power_2011()
power_coords = np.vstack((power.rois['x'], power.rois['y'], power.rois['z'])).T

# Creating masker file: spheres of radius 5 around each ROI coordinate, with
# smoothing, detrending, standardization and a 0.009-0.08 Hz band-pass
# applied at extraction time (t_r = repetition time, here 2 — presumably
# seconds; confirm against the acquisition protocol).
power_spheres = input_data.NiftiSpheresMasker(
    seeds = power_coords,
    smoothing_fwhm = 6,
    radius = 5,
    detrend = True,
    standardize = True,
    low_pass = 0.08,
    high_pass = 0.009,
    t_r = 2
)

parcellation = power_spheres
# -

# Step 3: Loading data
# -----------------------

# +
# Loading functional data
top_dir = '/home/finc/Downloads/fmriprep/'
out_dir = '/home/finc/Downloads/fmriprep/'

sess = ['ses-1', 'ses-2', 'ses-3', 'ses-4']
tasks = ['dualnback']
subs = listdir(top_dir)

# fmriprep output suffix selecting which preprocessed variant to load.
suffix = 'space-MNI152NLin2009cAsym_preproc.nii.gz'
#suffix = 'space-MNI152NLin2009cAsym_variant-smoothAROMAnonaggr_preproc.nii.gz'
# -

# Step 4: Obtaining timeseries from ROIs
# ---------------------------
# Creating 5D matrix with mean timeseries within each ROI

# +
# NOTE(review): `subs` was already assigned in Step 3; this repeat is
# redundant.  It also assumes top_dir contains only subject directories —
# TODO confirm there are no stray files in top_dir.
subs = listdir(top_dir)

# Dimensions: (subject, session, task, volume, ROI).
# 340 appears to be the expected number of volumes per run and 264 the number
# of Power ROIs — TODO confirm both against the acquired data.
timeseries_all = np.zeros((len(subs), len(sess), len(tasks), 340, 264))

for sub in range(len(subs)):
    for ses in range(len(sess)):
        for task in range(len(tasks)):
            sub_dir = top_dir + subs[sub] + '/' + sess[ses] + '/func/'
            data = sub_dir + subs[sub] + '_' + sess[ses] + '_task-' + tasks[task] + '_bold_' + suffix

            # Loading confound data
            confounds_path = sub_dir + subs[sub] + '_' + sess[ses] + '_task-' + tasks[task] + '_bold_confounds.tsv'
            confounds = pd.read_csv(confounds_path, delimiter = '\t')

            # Select columns of interest
            confounds_motion = confounds[confounds.filter(regex='X|Y|Z|RotX|RotY|RotZ').columns]
            confounds_anat = confounds[confounds.filter(regex='CSF|WhiteMatter|aCompCor|Cos').columns]
            confounds_fd = confounds[confounds.filter(regex='Framewise').columns]

            # Calculate 24 Friston motion parameters
            confounds_motion_24 = motion_24_friston(confounds_motion)

            # Detect outliers
            confounds_scrub = scrubbing(confounds_fd, before = False)

            # Standardize confounds
            confounds_anat = standardize(confounds_anat)

            # Add scrubbing columns
            confounds_clean = pd.concat([confounds_anat, confounds_motion_24, confounds_scrub], axis = 1)

            # Save preprocessed confound file
            confounds_clean.to_csv(sub_dir + 'confounds_' + tasks[task] + '_clean_acompcor.csv', sep = ',', index = False)
            confounds_clean_path = sub_dir + 'confounds_' + tasks[task] + '_clean_acompcor.csv'

            # Extract ROI timeseries with the confounds regressed out by the masker.
            timeseries = parcellation.fit_transform(data, confounds = confounds_clean_path)
            timeseries_all[sub, ses, task, :, :] = timeseries

np.save(out_dir + 'LearningBrain_all_timeseries_aCompCor_power.npy', timeseries_all)
# -

# Step 5: Creating task conditions vectors
# -----------------------------------------------------------

# +
# Block onsets for the 1-back and 2-back conditions (divided by TR below to
# obtain volume indices, so these are presumably in seconds — confirm).
onsets_1back = np.array([4, 72, 140, 208, 276, 344, 412, 480, 548, 616])
onsets_2back = np.array([38, 106, 174, 242, 310, 378, 446, 514, 582, 650])

vol_num = 340   # volumes per run; matches the timeseries_all dimension above
TR = 2          # repetition time; matches t_r of the masker above
duration = 30   # block duration, in the same units as the onsets

# Columns: 0 = 1-back, 1 = 2-back, 2 = neither (filled in below).
vector_n_back = np.zeros((vol_num, 3))

# Mark the volumes covered by each block.  The membership tests compare the
# integer volume index `i` against onsets/TR (a float array); this works here
# because every onset is a multiple of TR.
for i in range(len(vector_n_back)):
    if i in onsets_1back/TR:
        for k in range(int(duration/TR)):
            vector_n_back[i + k, 0] = 1
    if i in onsets_2back/TR:
        for k in range(int(duration/TR)):
            vector_n_back[i + k, 1] = 1
    # NOTE(review): this `else: continue` belongs to the 2-back `if` and is a
    # no-op at the end of the loop body — it could be deleted without changing
    # behavior.
    else:
        continue

# Every volume not covered by a task block gets the third (baseline) label.
for i in range(len(vector_n_back)):
    if vector_n_back[i,0] == 0 and vector_n_back[i,1] == 0:
        vector_n_back[i,2] = 1

# Boolean masks over volumes for each condition.
dual1back = vector_n_back[:,0].astype(bool)
dual2back = vector_n_back[:,1].astype(bool)
# -

# Step 6: Obtaining correlation matrices
# ---------------------------
# Creating 5D matrix with static correlation matrices for each task condition

# +
# NOTE(review): `correlarion` is a typo for `correlation`; kept as-is in this
# documentation pass since the name is used consistently below.
# Dimensions: (subject, session, condition [0 = 1-back, 1 = 2-back], ROI, ROI).
correlarion_matrices_dual = np.zeros((len(subs), len(sess), 2, len(timeseries_all[0, 0, 0, 0, : ]), len(timeseries_all[0, 0, 0, 0, : ])))

for sub in range(len(timeseries_all[:, 0, 0, 0, 0])):
    for ses in range(len(timeseries_all[0,:,0,0,0])):
        # Select the volumes belonging to each condition (task index 0 = dualnback).
        timeseries_dual1back = timeseries_all[sub, ses, 0, dual1back, :]
        timeseries_dual2back = timeseries_all[sub, ses, 0, dual2back, :]

        correlation_measure = ConnectivityMeasure(kind = 'correlation')
        fc1 = correlation_measure.fit_transform([timeseries_dual1back])[0]
        np.fill_diagonal(fc1, 0)  # zero out self-correlations

        fc2 = correlation_measure.fit_transform([timeseries_dual2back])[0]
        np.fill_diagonal(fc2, 0)

        correlarion_matrices_dual[sub, ses, 0, :, :] = fc1
        correlarion_matrices_dual[sub, ses, 1, :, :] = fc2

np.save(out_dir + 'LearningBrain_matrices_dual_aCompCor_power.npy', correlarion_matrices_dual)
# -

# Step 7: Plotting
# ---------------------------

# +
# 1-back
plotting.plot_matrix(correlarion_matrices_dual[1, 1, 0, :, :])

# +
# 2-back
plotting.plot_matrix(correlarion_matrices_dual[1, 1, 1, :, :])
LearningBrain_aCompCor_denoising_and_static_FC_estimation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'melusine_new # # ' # language: python # name: melusine_new # --- # # Word Embedding tutorial # ## Load data # **Warning :** # The data set used in the present tutorial to train embeddings contains only 50 lines (emails). This is not sufficient to obtain meaningful results. # # Feel free to replace the data set with your own data (at least 10000 documents is recommended) and you should observe significant improvement in the results. The quality of an embedding can be assessed, for exemple, by using the most_similar method on a word and making sure that the words obtained are coherent. # + import pandas as pd df_emails_clean = pd.read_csv('./data/emails_preprocessed.csv', encoding='utf-8', sep=';') # Artificially increase df size by duplication df_emails_clean = pd.concat([df_emails_clean] * 100, ignore_index=True) df_emails_clean = df_emails_clean[['clean_body']] df_emails_clean = df_emails_clean.astype(str) # - df_emails_clean.clean_body[1] # ## The Embedding class # Word embeddings are abstract representations of words in a lower dimensional vector space. One of the advantages of word embeddings is thus to save computational cost. # # There are several methods to train word embeddings, Melusine provides high level functions to train word embeddings using different methods and benchmark the resuls. The types of word embeddings available in the **Embedding** class are: # - `lsa_docterm` : Apply a Singular Value Decomposition (SVD) on the DocTerm matrix # - `lsa_tfidf` : Apply a Singular Value Decomposition (SVD) on the TfIdf matrix # - `word2vec_sg` : Train a Word2Vec model using the Skip-Gram method (Warning : time consuming!) # - `word2vec_cbow` : Train a Word2Vec model using the Continuous Bag-Of-Words method. 
# # The Melusine **Embedding** class can be used to benchmark word embeddings training methods in a straightforward manner. from melusine.nlp_tools.embedding import Embedding # + min_count = 2 n_dimension = 50 embedding_lsa_docterm = Embedding(input_column='clean_body', size=n_dimension, min_count=min_count, method = 'lsa_docterm') embedding_lsa_tfidf = Embedding(input_column='clean_body', size=n_dimension, min_count=min_count, method = 'lsa_tfidf') embedding_word2vec_sg = Embedding(input_column='clean_body', size=n_dimension, min_count=min_count, method = 'word2vec_sg') embedding_word2vec_cbow = Embedding(input_column='clean_body', size=n_dimension, min_count=min_count, method = 'word2vec_cbow') embeddings_list = [embedding_lsa_docterm, embedding_lsa_tfidf, embedding_word2vec_sg, embedding_word2vec_cbow] # - for embedding in embeddings_list: embedding.train(df_emails_clean) # ## Changing the train parameters # # There are parameters that can be tuned to optimize the training of word embeddings. The most widely used parameters can be specified directly when the **Embedding** class object is instanciated : # * `size` : number of dimension of the embedding # * `min_count` : minimum number of occurence of a word to be included in the embedding vocabulary # # Other training parameters can be specified in the **Embedding** attribute train_params which is a dictionnary of parameters. Keep in mind that some training parameters are specific to a given embedding training method. 
embedding_word2vec_cbow = Embedding(input_column='clean_body', size=100, min_count=3, method = 'word2vec_cbow') print("Train parameters for a Word2Vec CBOW embedding:") embedding_word2vec_cbow.train_params # Change a training parameter before training the embedding embedding_word2vec_cbow.train_params["window"] = 3 embedding_word2vec_cbow.train(df_emails_clean) # ## The Gensim Word2VecKeyedVectors object # # Regardless of the selected method (lsa_docterm, word2vec, etc), the trained embedding is converted to a Gensim **Word2VecKeyedVectors** object and stored in the embedding attribute of the **Embedding** class (`Embedding.embedding`). This very convenient as it enables the use of all the **Word2VecKeyedVectors** functions. Examples of such functions are: # * `similarity` : Compute the cosine similarity between two words # * `most_similar` : Compute the words most similar to the input word # * See more methods in the Gensim documentation # # Warning : The Word2VecKeyedVectors object is originally developped for Word2Vec word embeddings, therefore, some functions, such as the "accuracy" function, are specific to Word2Vec embeddings and should not be used if the embedding was trained using a DocTerm_LSA method. print("Cosine similarity between 'telephone' and 'numero'") embedding.embedding.similarity("telephone", "numero") print("Cosine similarity between 'telephone' and 'manifestation'") embedding.embedding.similarity("telephone", "manifestation") # The word "telephone" is closer to the word "numero" than the word "manifestation". embedding.embedding.most_similar("numero") # In the present exemple "telephone" appears as the word most similar to "numero" which is an intuitive outcome. However, due to the very limited amount of training data, some words are not very relevant (Ex: "joins")
tutorial/tutorial04a_word_embeddings.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Convert the audio file from MP3 to WAV (the speech API below expects WAV).
import ffmpeg  # NOTE(review): this module appears unused; pydub shells out to the ffmpeg binary — confirm before removing.
import pydub

source_file_path = r"realtest.mp3"
destin_path = r"realtest.wav"

sound = pydub.AudioSegment.from_mp3(source_file_path)
sound.export(destin_path, format='wav')

# +
# -*- coding: utf-8 -*-
# Call the iFlytek (Xunfei) speech-to-text (IAT) REST API.
import requests
import time
import hashlib
import base64

# iFlytek API endpoint and credentials.
# SECURITY(review): APPID/API_KEY are hard-coded secrets committed to this
# notebook.  They should be rotated and loaded from the environment or a
# config file instead of being stored in source.
URL = "http://api.xfyun.cn/v1/service/v1/iat"
APPID = "5c755331"
API_KEY = "793f84511d10ef2731a526d6ba44b6ff"


def getHeader(aue, engineType):
    """Build the signed request headers required by the iFlytek IAT API.

    Parameters
    ----------
    aue : str
        Audio encoding parameter, e.g. "raw".
    engineType : str
        Recognition engine parameter, e.g. "sms16k".

    Returns
    -------
    dict
        HTTP headers; X-CheckSum is the MD5 hex digest of
        API_KEY + current timestamp + base64-encoded parameter JSON,
        which is the signing scheme this API expects.
    """
    curTime = str(int(time.time()))
    param = "{\"aue\":\"" + aue + "\"" + ",\"engine_type\":\"" + engineType + "\"}"
    paramBase64 = str(base64.b64encode(param.encode('utf-8')), 'utf-8')

    m2 = hashlib.md5()
    m2.update((API_KEY + curTime + paramBase64).encode('utf-8'))
    checkSum = m2.hexdigest()

    header = {
        'X-CurTime': curTime,
        'X-Param': paramBase64,
        'X-Appid': APPID,
        'X-CheckSum': checkSum,
        'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
    }
    return header


def getBody(filepath):
    """Read an audio file and return the base64-encoded request body.

    FIX: the file is now opened with a context manager so the handle is
    always closed; the original left it open.
    """
    with open(filepath, 'rb') as binfile:
        data = {'audio': base64.b64encode(binfile.read())}
    return data


# Request parameters
aue = "raw"
engineType = "sms16k"
audioFilePath = r"realtest.wav"

r = requests.post(URL, headers=getHeader(aue, engineType), data=getBody(audioFilePath))
print(r.content.decode('utf-8'))
raw_content = r.content.decode('utf-8')
# -

# Parse the returned JSON and extract the recognized text.
import json
content = json.loads(raw_content)
type(content)
text = content['data']

# Tokenize the recognition result with jieba.
import jieba
seg_list = jieba.cut(text, cut_all=False)
# for word in seg_list:
#     print(word)

print("/".join(seg_list))
XunfeiAPITest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # LeetCode #921. Minimum Add to Make Parentheses Valid
# (FIX: the original title said "#763. Partition Labels", which does not match
# the question, the URL, or the results below.)
#
# ## Question
# https://leetcode.com/problems/minimum-add-to-make-parentheses-valid/
#
# Given a string S of '(' and ')' parentheses, we add the minimum number of parentheses ( '(' or ')', and in any positions ) so that the resulting parentheses string is valid.
#
# Formally, a parentheses string is valid if and only if:
#
# It is the empty string, or
# It can be written as AB (A concatenated with B), where A and B are valid strings, or
# It can be written as (A), where A is a valid string.
# Given a parentheses string, return the minimum number of parentheses we must add to make the resulting string valid.
#
# Example 1:
#
# Input: "())"
# Output: 1
#
# Example 2:
#
# Input: "((("
# Output: 3
#
# Example 3:
#
# Input: "()"
# Output: 0
#
# Example 4:
#
# Input: "()))(("
# Output: 4
#
# Note:
#
# S.length <= 1000
# S only consists of '(' and ')' characters.

# ## My Solution

def minAddToMakeValid(S):
    """Return the minimum number of parentheses to add to make S valid.

    Repeatedly deletes matched "()" pairs; the characters that remain are
    exactly the unmatched parentheses, so the answer is the leftover length.
    len(S)//2 passes are enough: each pass removes every currently adjacent
    "()" pair, and S contains at most len(S)//2 pairs in total.
    """
    for i in range(len(S)//2):
        S = S.replace("()", "")
    return len(S)

# test code (Output: 3)
S = "(()))(("
minAddToMakeValid(S)

# ## My Result
#
# __Runtime__ : 16 ms, faster than 96.24% of Python online submissions for Minimum Add to Make Parentheses Valid.
#
# __Memory Usage__ : 11.7 MB, less than 57.33% of Python online submissions for Minimum Add to Make Parentheses Valid.

# ## @abcd28s's Solution

def minAddToMakeValid(S):
    """Same deletion idea, but loop only while a "()" pair remains.

    Stops as soon as the string is fully reduced instead of always running
    len(S)//2 passes.
    """
    while "()" in S:
        S = S.replace("()", "")
    return len(S)

# test code (Output: 3)
S = "(()))(("
minAddToMakeValid(S)

# ## @abcd28s' Result
#
# __Runtime__ : 12 ms, faster than 99.04% of Python online submissions for Minimum Add to Make Parentheses Valid.
#
# __Memory Usage__ : 11.7 MB, less than 80.46% of Python online submissions for Minimum Add to Make Parentheses Valid.
LeetCode/LeetCode_921MinimumAddToMakeParenthesesValid.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + import sys if 'google.colab' in sys.modules: # !wget https://raw.githubusercontent.com/yandexdataschool/Practical_RL/0ccb0673965dd650d9b284e1ec90c2bfd82c8a94/week08_pomdp/atari_util.py # !wget https://raw.githubusercontent.com/yandexdataschool/Practical_RL/0ccb0673965dd650d9b284e1ec90c2bfd82c8a94/week08_pomdp/env_pool.py # If you are running on a server, launch xvfb to record game videos # Please make sure you have xvfb installed import os if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0: # !bash ../xvfb start os.environ['DISPLAY'] = ':1' # - from __future__ import print_function, division from IPython.core import display import matplotlib.pyplot as plt # %matplotlib inline import numpy as np # ### Kung-Fu, recurrent style # # In this notebook we'll once again train RL agent for for atari [KungFuMaster](https://gym.openai.com/envs/KungFuMaster-v0/), this time using recurrent neural networks. 
# # ![img](https://upload.wikimedia.org/wikipedia/en/6/66/Kung_fu_master_mame.png) # + import gym from atari_util import PreprocessAtari def make_env(): env = gym.make("KungFuMasterDeterministic-v0") env = PreprocessAtari(env, height=42, width=42, crop=lambda img: img[60:-30, 15:], color=False, n_frames=1) return env env = make_env() obs_shape = env.observation_space.shape n_actions = env.action_space.n print("Observation shape:", obs_shape) print("Num actions:", n_actions) print("Action names:", env.env.env.get_action_meanings()) # + s = env.reset() for _ in range(100): s, _, _, _ = env.step(env.action_space.sample()) plt.title('Game image') plt.imshow(env.render('rgb_array')) plt.show() plt.title('Agent observation') plt.imshow(s.reshape([42, 42])) plt.show() # - # ### POMDP setting # # The atari game we're working with is actually a POMDP: your agent needs to know timing at which enemies spawn and move, but cannot do so unless it has some memory. # # Let's design another agent that has a recurrent neural net memory to solve this. Here's a sketch. 
# # ![img](img1.jpg) # # + import torch import torch.nn as nn import torch.nn.functional as F # a special module that converts [batch, channel, w, h] to [batch, units] class Flatten(nn.Module): def forward(self, input): return input.view(input.size(0), -1) # - class SimpleRecurrentAgent(nn.Module): def __init__(self, obs_shape, n_actions, reuse=False): """A simple actor-critic agent""" super(self.__class__, self).__init__() self.conv0 = nn.Conv2d(1, 32, kernel_size=(3, 3), stride=(2, 2)) self.conv1 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2)) self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2)) self.flatten = Flatten() self.hid = nn.Linear(512, 128) self.rnn = nn.LSTMCell(128, 128) self.logits = nn.Linear(128, n_actions) self.state_value = nn.Linear(128, 1) def forward(self, prev_state, obs_t): """ Takes agent's previous hidden state and a new observation, returns a new hidden state and whatever the agent needs to learn """ # Apply the whole neural net for one step here. # See docs on self.rnn(...). # The recurrent cell should take the last feedforward dense layer as input. <YOUR CODE> new_state = <YOUR CODE> logits = <YOUR CODE> state_value = <YOUR CODE> return new_state, (logits, state_value) def get_initial_state(self, batch_size): """Return a list of agent memory states at game start. 
Each state is a np array of shape [batch_size, ...]""" return torch.zeros((batch_size, 128)), torch.zeros((batch_size, 128)) def sample_actions(self, agent_outputs): """pick actions given numeric agent outputs (np arrays)""" logits, state_values = agent_outputs probs = F.softmax(logits) return torch.multinomial(probs, 1)[:, 0].data.numpy() def step(self, prev_state, obs_t): """ like forward, but obs_t is a numpy array """ obs_t = torch.tensor(np.asarray(obs_t), dtype=torch.float32) (h, c), (l, s) = self.forward(prev_state, obs_t) return (h.detach(), c.detach()), (l.detach(), s.detach()) # + n_parallel_games = 5 gamma = 0.99 agent = SimpleRecurrentAgent(obs_shape, n_actions) # - state = [env.reset()] _, (logits, value) = agent.step(agent.get_initial_state(1), state) print("action logits:\n", logits) print("state values:\n", value) # ### Let's play! # Let's build a function that measures agent's average reward. def evaluate(agent, env, n_games=1): """Plays an entire game start to end, returns session rewards.""" game_rewards = [] for _ in range(n_games): # initial observation and memory observation = env.reset() prev_memories = agent.get_initial_state(1) total_reward = 0 while True: new_memories, readouts = agent.step( prev_memories, observation[None, ...]) action = agent.sample_actions(readouts) observation, reward, done, info = env.step(action[0]) total_reward += reward prev_memories = new_memories if done: break game_rewards.append(total_reward) return game_rewards env_monitor = gym.wrappers.Monitor(env, directory="kungfu_videos", force=True) rw = evaluate(agent, env_monitor, n_games=3,) env_monitor.close() print(rw) # + # show video from IPython.display import HTML import os video_names = list(filter(lambda s: s.endswith( ".mp4"), os.listdir("./kungfu_videos/"))) HTML(""" <video width="640" height="480" controls> <source src="{}" type="video/mp4"> </video> """.format("./kungfu_videos/" + video_names[-1])) # this may or may not be _last_ video. 
Try other indices # - # ### Training on parallel games # # We introduce a class called EnvPool - it's a tool that handles multiple environments for you. Here's how it works: # ![img](img2.jpg) from env_pool import EnvPool pool = EnvPool(agent, make_env, n_parallel_games) # We gonna train our agent on a thing called __rollouts:__ # ![img](img3.jpg) # # A rollout is just a sequence of T observations, actions and rewards that agent took consequently. # * First __s0__ is not necessarily initial state for the environment # * Final state is not necessarily terminal # * We sample several parallel rollouts for efficiency # for each of n_parallel_games, take 10 steps rollout_obs, rollout_actions, rollout_rewards, rollout_mask = pool.interact(10) print("Actions shape:", rollout_actions.shape) print("Rewards shape:", rollout_rewards.shape) print("Mask shape:", rollout_mask.shape) print("Observations shape: ", rollout_obs.shape) # # Actor-critic objective # # Here we define a loss function that uses rollout above to train advantage actor-critic agent. 
# # # Our loss consists of three components: # # * __The policy "loss"__ # $$ \hat J = {1 \over T} \cdot \sum_t { \log \pi(a_t | s_t) } \cdot A_{const}(s,a) $$ # * This function has no meaning in and of itself, but it was built such that # * $ \nabla \hat J = {1 \over N} \cdot \sum_t { \nabla \log \pi(a_t | s_t) } \cdot A(s,a) \approx \nabla E_{s, a \sim \pi} R(s,a) $ # * Therefore if we __maximize__ J_hat with gradient descent we will maximize expected reward # # # * __The value "loss"__ # $$ L_{td} = {1 \over T} \cdot \sum_t { [r + \gamma \cdot V_{const}(s_{t+1}) - V(s_t)] ^ 2 }$$ # * Ye Olde TD_loss from q-learning and alike # * If we minimize this loss, V(s) will converge to $V_\pi(s) = E_{a \sim \pi(a | s)} R(s,a) $ # # # * __Entropy Regularizer__ # $$ H = - {1 \over T} \sum_t \sum_a {\pi(a|s_t) \cdot \log \pi (a|s_t)}$$ # * If we __maximize__ entropy we discourage agent from predicting zero probability to actions # prematurely (a.k.a. exploration) # # # So we optimize a linear combination of $L_{td}$ $- \hat J$, $-H$ # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # # __One more thing:__ since we train on T-step rollouts, we can use N-step formula for advantage for free: # * At the last step, $A(s_t,a_t) = r(s_t, a_t) + \gamma \cdot V(s_{t+1}) - V(s) $ # * One step earlier, $A(s_t,a_t) = r(s_t, a_t) + \gamma \cdot r(s_{t+1}, a_{t+1}) + \gamma ^ 2 \cdot V(s_{t+2}) - V(s) $ # * Et cetera, et cetera. This way agent starts training much faster since it's estimate of A(s,a) depends less on his (imperfect) value function and more on actual rewards. There's also a [nice generalization](https://arxiv.org/abs/1506.02438) of this. # # # __Note:__ it's also a good idea to scale rollout_len up to learn longer sequences. You may wish set it to >=20 or to start at 10 and then scale up as time passes. def to_one_hot(y, n_dims=None): """ Take an integer tensor and convert it to 1-hot matrix. 
""" y_tensor = y.to(dtype=torch.int64).view(-1, 1) n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1 y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1) return y_one_hot # + opt = torch.optim.Adam(agent.parameters(), lr=1e-5) def train_on_rollout(states, actions, rewards, is_not_done, prev_memory_states, gamma=0.99): """ Takes a sequence of states, actions and rewards produced by generate_session. Updates agent's weights by following the policy gradient above. Please use Adam optimizer with default parameters. """ # shape: [batch_size, time, c, h, w] states = torch.tensor(np.asarray(states), dtype=torch.float32) actions = torch.tensor(np.array(actions), dtype=torch.int64) # shape: [batch_size, time] rewards = torch.tensor(np.array(rewards), dtype=torch.float32) # shape: [batch_size, time] is_not_done = torch.tensor(np.array(is_not_done), dtype=torch.float32) # shape: [batch_size, time] rollout_length = rewards.shape[1] - 1 # predict logits, probas and log-probas using an agent. memory = [m.detach() for m in prev_memory_states] logits = [] # append logit sequence here state_values = [] # append state values here for t in range(rewards.shape[1]): obs_t = states[:, t] # use agent to comute logits_t and state values_t. # append them to logits and state_values array memory, (logits_t, values_t) = <YOUR CODE> logits.append(logits_t) state_values.append(values_t) logits = torch.stack(logits, dim=1) state_values = torch.stack(state_values, dim=1) probas = F.softmax(logits, dim=2) logprobas = F.log_softmax(logits, dim=2) # select log-probabilities for chosen actions, log pi(a_i|s_i) actions_one_hot = to_one_hot(actions, n_actions).view( actions.shape[0], actions.shape[1], n_actions) logprobas_for_actions = torch.sum(logprobas * actions_one_hot, dim=-1) # Now let's compute two loss components: # 1) Policy gradient objective. # Notes: Please don't forget to call .detach() on advantage term. Also please use mean, not sum. 
# it's okay to use loops if you want J_hat = 0 # policy objective as in the formula for J_hat # 2) Temporal difference MSE for state values # Notes: Please don't forget to call on V(s') term. Also please use mean, not sum. # it's okay to use loops if you want value_loss = 0 cumulative_returns = state_values[:, -1].detach() for t in reversed(range(rollout_length)): r_t = rewards[:, t] # current rewards # current state values V_t = state_values[:, t] V_next = state_values[:, t + 1].detach() # next state values # log-probability of a_t in s_t logpi_a_s_t = logprobas_for_actions[:, t] # update G_t = r_t + gamma * G_{t+1} as we did in week6 reinforce cumulative_returns = G_t = r_t + gamma * cumulative_returns # Compute temporal difference error (MSE for V(s)) value_loss += <YOUR CODE> # compute advantage A(s_t, a_t) using cumulative returns and V(s_t) as baseline advantage = <YOUR CODE> advantage = advantage.detach() # compute policy pseudo-loss aka -J_hat. J_hat += <YOUR CODE> # regularize with entropy entropy_reg = <YOUR CODE: compute entropy regularizer> # add-up three loss components and average over time loss = -J_hat / rollout_length +\ value_loss / rollout_length +\ -0.01 * entropy_reg # Gradient descent step <YOUR CODE> return loss.data.numpy() # + # let's test it memory = list(pool.prev_memory_states) rollout_obs, rollout_actions, rollout_rewards, rollout_mask = pool.interact(10) train_on_rollout(rollout_obs, rollout_actions, rollout_rewards, rollout_mask, memory) # - # # Train # # just run train step and see if agent learns any better # + from IPython.display import clear_output from tqdm import trange from pandas import DataFrame moving_average = lambda x, **kw: DataFrame( {'x': np.asarray(x)}).x.ewm(**kw).mean().values rewards_history = [] # - for i in trange(15000): memory = list(pool.prev_memory_states) rollout_obs, rollout_actions, rollout_rewards, rollout_mask = pool.interact( 10) train_on_rollout(rollout_obs, rollout_actions, rollout_rewards, 
rollout_mask, memory) if i % 100 == 0: rewards_history.append(np.mean(evaluate(agent, env, n_games=1))) clear_output(True) plt.plot(rewards_history, label='rewards') plt.plot(moving_average(np.array(rewards_history), span=10), label='rewards ewma@10') plt.legend() plt.show() if rewards_history[-1] >= 10000: print("Your agent has just passed the minimum homework threshold") break # Relax and grab some refreshments while your agent is locked in an infinite loop of violence and death. # # __How to interpret plots:__ # # The session reward is the easy thing: it should in general go up over time, but it's okay if it fluctuates ~~like crazy~~. It's also OK if it reward doesn't increase substantially before some 10k initial steps. However, if reward reaches zero and doesn't seem to get up over 2-3 evaluations, there's something wrong happening. # # # Since we use a policy-based method, we also keep track of __policy entropy__ - the same one you used as a regularizer. The only important thing about it is that your entropy shouldn't drop too low (`< 0.1`) before your agent gets the yellow belt. Or at least it can drop there, but _it shouldn't stay there for long_. # # If it does, the culprit is likely: # * Some bug in entropy computation. Remember that it is $ - \sum p(a_i) \cdot log p(a_i) $ # * Your agent architecture converges too fast. Increase entropy coefficient in actor loss. # * Gradient explosion - just [clip gradients](https://stackoverflow.com/a/56069467) and maybe use a smaller network # * Us. Or PyTorch developers. Or aliens. Or lizardfolk. Contact us on forums before it's too late! # # If you're debugging, just run `logits, values = agent.step(batch_states)` and manually look into logits and values. This will reveal the problem 9 times out of 10: you'll likely see some NaNs or insanely large numbers or zeros. Try to catch the moment when this happens for the first time and investigate from there. 
# ### "Final" evaluation # + env_monitor = gym.wrappers.Monitor(env, directory="kungfu_videos", force=True) final_rewards = evaluate(agent, env_monitor, n_games=20,) env_monitor.close() print("Final mean reward", np.mean(final_rewards)) video_names = list(filter(lambda s: s.endswith( ".mp4"), os.listdir("./kungfu_videos/"))) HTML(""" <video width="640" height="480" controls> <source src="{}" type="video/mp4"> </video> """.format("./kungfu_videos/" + video_names[-1])) # this may or may not be _last_ video. Try other indices
week08_pomdp/practice_pytorch.ipynb