code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sparseness and hidden layer activity # Here we are trying to get sparseness right. # # The first thing is to get sparseness working for normal recalling, if that is succesfully we will test whether it it possible to get a bigger area of succesfull recall with this property and maybe the area of correct sequence disambiguation rights to the left of it. # # **Work plan** # * Try the normal case # * Try sparseness only in C3 # * Try sparseness only in the hidden layer # + import sys sys.path.append('../') import matplotlib.pyplot as plt import numpy as np import seaborn as sns # %matplotlib inline sns.set(font_scale=3.0) np.set_printoptions(suppress=True, precision=4) # - from network import MinaNetwork # ## A quick question on distributions # # What happens if we sample a lot from a sum of uniform distribution with mean 0.5 # + l = [] N = 100 n = 5000 for _ in range(n): x = np.random.rand(N) l.append(np.sum(x) / N) print('mean simple', np.mean(x)) print('var simple', np.var(x)) print('mean simple ', np.mean(l)) print('var simple', np.var(l)) plt.hist(l); # - # What happens if you have a the sum of normal sampling mutiplied by a Bernoulli distribution # + l = [] N = 100 p = 0.8 n = 50000 for _ in range(n): x = np.random.rand(N) y = np.random.binomial(n=1, p=p, size=N) z = x * y l.append(np.sum(z) / N) print('mean simple', np.mean(z)) print('var simple', np.var(z)) print('mean aggregate', np.mean(l)) print('var aggregate', np.var(l)) plt.hist(l); # - # ## Sparseness for simple sequence completition # First let's be sure that the normal appraoch does not work actually. 
# + n_input = 200 # Inputs size n_recurrent = 200 # C3 size v = 35.0 # Input - C3 connection b = 35.0 # Input - C1 connection Kr = 0.26 # Recurrent self-inhibition gain Ki = 0.0 # Input - C3 inhibition Ci = 0.0 # Inhibition from the input to C1 Cr = Kr # Inhibition from C3 to C1 p = 0.4 # Sparness parameter p2 = 0.4 w0 = 0.36 # Dynamical parameters theta = 0.0 phi = 0 # Patternsnp number_of_patterns = 10 sparsity = 5.0 # Training parameters sequence = [0, 1, 2, 3, 4, 5, 6, 7] # sequence = [0, 1, 2] epsilon = 0.1 training_time = 200 verbose = False nn = MinaNetwork(n_input=n_input, n_recurrent=n_recurrent, p=p, p2=p2, v=v, b=b, Ki=Ki, Kr=Kr, Ci=Ci, Cr=Cr, theta=theta, phi=phi, uniform_w=True, w0=w0) nn.build_patterns_dictionary(sparsity=sparsity, number_of_patterns=number_of_patterns) quantities = nn.train_network(epsilon=epsilon, training_time=training_time, pre_synaptic_rule=True, sequence=sequence, verbose=verbose, save_quantities=True) # - nn.plot_weight_matrices() success, z = nn.test_recall(sequence=sequence) print('success', success) plt.imshow(z, aspect='auto') z = nn.recall(cue=0, recall_time=2, verbose=True) # #### How the space looks like # + n_input = 200 # Inputs size n_recurrent = 200 # C3 size v = 35.0 # Input - C3 connection b = 35.0 # Input - C1 connection Kr = 0.22 # Recurrent self-inhibition gain Ki = 0.0 # Input - C3 inhibition Ci = 0.0 # Inhibition from the input to C1 Cr = 0.22 # Inhibition from C3 to C1 p = 0.4 # Sparness parameter p2 = 0.4 w0 = 0.36 # Dynamical parameters theta = 0.0 phi = 0 # Patternsnp number_of_patterns = 10 sparsity = 5.0 # Training parameters sequence = [0, 1, 2, 3, 4, 5, 6, 7] # sequence = [0, 1, 2] epsilon = 0.1 training_time = 200 verbose = False nn = MinaNetwork(n_input=n_input, n_recurrent=n_recurrent, p=p, p2=p2, v=v, b=b, Ki=Ki, Kr=Kr, Ci=Ci, Cr=Cr, theta=theta, phi=phi, uniform_w=True, w0=w0) nn.build_patterns_dictionary(sparsity=sparsity, number_of_patterns=number_of_patterns) quantities = 
nn.train_network(epsilon=epsilon, training_time=training_time, pre_synaptic_rule=True, sequence=sequence, verbose=verbose, save_quantities=True) # + Kr_vector = np.arange(0.0, 1.0, 0.05) norm_history = [] success_history = [] activity_history = [] Kr = 0.65 for Kr in Kr_vector: Cr = Kr nn = MinaNetwork(n_input=n_input, n_recurrent=n_recurrent, p=p, v=v, b=b, Ki=Ki, Kr=Kr, Ci=Ci, Cr=Cr, theta=theta, phi=phi, uniform_w=False) nn.build_patterns_dictionary(sparsity=sparsity, number_of_patterns=number_of_patterns) # Copy the first neuron n_non_hidden = len(sequence) * nn.neurons_per_pattern w_initial = np.copy(nn.w) w_initial[:, :n_non_hidden] = 0.0 # Train and test the network quantities = nn.train_network(epsilon=epsilon, training_time=training_time, pre_synaptic_rule=True, sequence=sequence, verbose=verbose, save_quantities=True) success, _ = nn.test_recall(sequence) # Store mean activities in the recurrent layer z_r_end = np.mean(quantities['z_r'], axis=0) explicit_nn_length = nn.neurons_per_pattern * (sequence[-1] + 1) z_r_non_hidden = z_r_end[:explicit_nn_length] z_hidden = z_r_end[explicit_nn_length:] w_final = np.copy(nn.w) w_final[:, :n_non_hidden] = 0.0 norm = np.linalg.norm(w_final - w_initial) cos_sim = np.dot(w_initial.flatten(), w_final.flatten()) / (np.linalg.norm(w_final) * np.linalg.norm(w_initial)) success_history.append(success / 100.0) norm_history.append(1.0 - cos_sim) activity_history.append(z_hidden.mean()) # + fig = plt.figure(figsize=(16, 12)) ax = fig.add_subplot(111) ax.plot(Kr_vector, norm_history, '*-', markersize=15, label='change in weights'); ax.plot(Kr_vector, success_history, '*-', markersize=15, label='success') ax.plot(Kr_vector, activity_history, '*-', markersize=15, label='mean activity') ax.fill_between(np.arange(0.2, 0.70, 0.1), 0.0, 1.0, alpha=0.2, facecolor='yellow', interpolate=False) ax.legend(); # - # #### Let's see if we can disambiguate # + n_input = 200 # Inputs size n_recurrent = 200 # C3 size v = 35.0 # Input - C3 
connection b = 35.0 # Input - C1 connection Kr = 0.20 # Recurrent self-inhibition gain Ki = 0.0 # Input - C3 inhibition Ci = 0.0 # Inhibition from the input to C1 Cr = Kr # Inhibition from C3 to C1 p = 0.4 # Sparness parameter p2 = 1.0 w0 = 0.36 # Dynamical parameters theta = 0.0 phi = 0 # Training parameters training_time = 100 epsilon = 0.1 # Instantiate the network nn = MinaNetwork(n_input=n_input, n_recurrent=n_recurrent, p=p, p2=p2, v=v, b=b, Ki=Ki, Kr=Kr, Ci=Ci, Cr=Cr, theta=theta, phi=phi, uniform_w=True, w0=w0) # Build the patterns number_of_patterns = 20 sparsity = 5.0 nn.build_patterns_dictionary(number_of_patterns=number_of_patterns, sparsity=sparsity) # Training sequence1 = [ 0, 1, 9, 2, 3, 4, 5, 6] sequence2 = [12, 13, 9, 14, 15, 16, 17, 18] epsilon = 0.1 training_time = 200 pre_synaptic_rule = True nn.train_network(epsilon=epsilon, training_time=training_time, sequence=sequence1, pre_synaptic_rule=pre_synaptic_rule) nn.train_network(epsilon=epsilon, training_time=training_time, sequence=sequence2, pre_synaptic_rule=pre_synaptic_rule); # - nn.plot_weight_matrices() # + sequences = [sequence1, sequence2] right_scores = [] wrong_scores = [] for sequence in sequences: success, z = nn.test_recall(sequence) goal_pattern = nn.patterns_dictionary[sequence[-1]] wrong_indexes = np.where(goal_pattern == 0)[0] right_indexes = np.where(goal_pattern == 1)[0] right_score = z[-1][right_indexes].sum() wrong_score = z[-1][wrong_indexes].sum() right_scores.append(right_score) wrong_scores.append(wrong_score) print('right scores', right_scores) print('wrong scores', wrong_scores) # + right_scores_history = [] wrong_scores_history = [] p2 = 1.0 dt = 0.05 Kr_vector = np.arange(0.1, 0.5 + dt, dt) for Kr in Kr_vector: Cr = Kr # Instantiate the network nn = MinaNetwork(n_input=n_input, n_recurrent=n_recurrent, p=p, p2=p2, v=v, b=b, Ki=Ki, Kr=Kr, Ci=Ci, Cr=Cr, theta=theta, phi=phi, uniform_w=True, w0=w0) # Build the patterns number_of_patterns = 20 sparsity = 5.0 
nn.build_patterns_dictionary(number_of_patterns=number_of_patterns, sparsity=sparsity) # Training sequence1 = [ 0, 1, 9, 2, 3, 4, 5, 6] sequence2 = [12, 13, 9, 14, 15, 16, 17, 18] epsilon = 0.1 training_time = 200 pre_synaptic_rule = True nn.train_network(epsilon=epsilon, training_time=training_time, sequence=sequence1, pre_synaptic_rule=pre_synaptic_rule) nn.train_network(epsilon=epsilon, training_time=training_time, sequence=sequence2, pre_synaptic_rule=pre_synaptic_rule) sequences = [sequence1, sequence2] right_scores = [] wrong_scores = [] for sequence in sequences: success, z = nn.test_recall(sequence) goal_pattern = nn.patterns_dictionary[sequence[-1]] wrong_indexes = np.where(goal_pattern == 0)[0] right_indexes = np.where(goal_pattern == 1)[0] right_score = z[-1][right_indexes].sum() wrong_score = z[-1][wrong_indexes].sum() right_scores.append(right_score) wrong_scores.append(wrong_score) right_scores_history.append(right_scores) wrong_scores_history.append(wrong_scores) # + x = np.array(right_scores_history) y = np.array(wrong_scores_history) right_results = np.mean(x, axis=1) wrong_results = np.mean(y, axis=1) fig = plt.figure(figsize=(16, 12)) ax = fig.add_subplot(111) ax.plot(Kr_vector, right_results, '*-', markersize=15, label='right') ax.plot(Kr_vector, wrong_results, '*-', markersize=15, label='wrong') ax.axhline(0, color='black', linestyle='--') # ax.set_ylim([-0.1, 1.1]) ax.legend(); # -
notebooks/2017-10-23(Sparseness and hidden layer activity).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Convolutional Neural Networks # <NAME> # # The main difference between CNNs and standard DNNs is that neurons are arranged in 3 dimensions, **width, height, and depth**. Each layer in a CNN takes in an input 3d tensor and output a 3d tensor. # # # There are 3 main layers in typical CNN: # - Convolutional Layer # - Pooling Layer # - Fully-Connected Layer # ## Convolutional Layer # # Convolutional kernels are nothing new to image processing. This kernels are small matricies that we use for blurring, sharpening, edge detection, and more. # # $ # \text{Identity: } \quad # \begin{bmatrix} # 0 & 0 & 0 \\ # 0 & 1 & 0 \\ # 0 & 0 & 0 # \end{bmatrix} # $ # # $ # \text{Blur: } \qquad # \begin{bmatrix} # 1/9 & 1/9 & 1/9 \\ # 1/9 & 1/9 & 1/9 \\ # 1/9 & 1/9 & 1/9 # \end{bmatrix} # $ # # $ # \text{Sobel (edge): } # \begin{bmatrix} # -1 & 0 & 1 \\ # -2 & 0 & 2 \\ # -1 & 0 & 1 # \end{bmatrix} # $ # # By convolving these kernels across the image we can isolate certain characteristics, or **features**. A large number at a location after the convolution indicates a large **activation** or response to the kernel. In the case of the sobel filter a high activation indicates there is an edge present. This site is an awesome visualization of kernels: http://setosa.io/ev/image-kernels/ # CNNs use filters like above for identifying features in 2D space. However, the magic with CNNs is that these filters are not predetermined, but rather they are learned. Also these leaned filters are stacked to look for complex, high level features. For example, instead of just detecting edges we can now detect cars and tires. 
# # <img src="images/cnn_filters.png"/> # These filters are small spacially, meaning along the width and height, but always extend to the full depth of the input volume. For example 5x5x3 for a 224x224x3 image. # # #### Hyperparameters # - W - Input volume size # - F - Receptive field size # - S - Stride # - P - Amount of 0 padding used # # # Output size = $\frac{(W-F+2 P)}{S}+1$ # ## Pooling Layer # ​ # The function of this layer is to reduce the spacial size of the representation. The benefit of this is three fold: reduce the amount of parameters, lower computation time, and control overfitting. There are variants like max, average, or overlapping. # ​ # <img src="./images/maxpool.jpeg"/> # ​ # It is worth noting that while this has been standard, a few recent publications have abandoned it. It could be falling out of favor. # # ## Fully Connected Layer # # A fully connected layer takes the activations and flattens them to behave like a normal layer in a DNN. Now with all these pieces we can create a whole CNN architecture. 
# # Learning MNIST from __future__ import absolute_import, division, print_function import numpy as np import tensorflow as tf import math import random from matplotlib import pyplot as plt #from utils import load_CIFAR10 # %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Load in data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('data/MNIST_data', one_hot=True) plt.imshow(np.reshape(mnist.train.images[42],[28,28])) print(mnist.train.labels[42]) # + def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) # - # ## Standard DNN # + # %%time # Placeholders y_true = tf.placeholder(tf.float32, [None, 10]) x = tf.placeholder(tf.float32, [None, 784]) # Layer 1 W1 = weight_variable([784, 100]) b1 = bias_variable([100]) h1 = tf.nn.relu(tf.matmul(x, W1) + b1) # Layer 2 W2 = weight_variable([100, 10]) b2 = bias_variable([10]) y = tf.matmul(h1, W2) + b2 # Define loss and optimizer cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_true)) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_true, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Start session sess = tf.Session() sess.run(tf.global_variables_initializer()) # Train for i in range(5000): batch_xs, batch_ys = mnist.train.next_batch(100) if i % 100 == 0: _, acc = sess.run([train_step, accuracy], feed_dict={x: batch_xs, y_true: batch_ys}) print("step [%d]: training accuracy %.4f" % (i, acc)) else: sess.run(train_step, feed_dict={x: batch_xs, y_true: batch_ys}) # Test trained model print("--- RESULTS ---") print("Test Accuracy: %.4f" % sess.run(accuracy, feed_dict={x: 
mnist.test.images[:1000], y_true: mnist.test.labels[:1000]})) sess.close() tf.reset_default_graph() # - # ## Vanilla CNN # # This is the most basic CNN. It has 2 convolutional layers and 2 fully connected layers. def conv2d(x, W): # [batch, in_height, in_width, in_channels] # [filter_height, filter_width, in_channels, out_channels] return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') # + # %%time tf.reset_default_graph() # Placeholders y_true = tf.placeholder(tf.float32, [None, 10]) x = tf.placeholder(tf.float32, [None, 784]) # Resize to appropriate input tensor: # [batch, in_height, in_width, in_channels] x_image = tf.reshape(x, [-1,28,28,1]) # First Convolutional Layer W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # Second Convolutional Layer W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2) # Densely Connected Layer W_fc1 = weight_variable([28*28*64, 1024]) b_fc1 = bias_variable([1024]) h_conv2_flat = tf.reshape(h_conv2, [-1, 28*28*64]) h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1) # Readout Layer W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2 # Loss cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_true)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_true,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess = tf.Session() sess.run(tf.global_variables_initializer()) for i in range(5000): batch = mnist.train.next_batch(50) if i % 100 == 0: _, acc = sess.run([train_step, accuracy], feed_dict={x: batch[0], y_true: batch[1]}) print("step [%d]: training accuracy %.4f" % (i, acc)) else: sess.run(train_step, feed_dict={x: batch[0], y_true: batch[1]}) print("--- RESULTS ---") 
print("Test Accuracy %.4f" % sess.run(accuracy, feed_dict={ x: mnist.test.images[:1000], y_true: mnist.test.labels[:1000]})) sess.close() # - # ## CNN + MaxPool + Dropout # # This CNN is the same as above plus max pooling and dropout. def max_pool_2x2(x): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # + # %%time tf.reset_default_graph() # Placeholders y_true = tf.placeholder(tf.float32, [None, 10]) x = tf.placeholder(tf.float32, [None, 784]) keep_prob = tf.placeholder(tf.float32) # Resize to appropriate input tensor: # [batch, in_height, in_width, in_channels] x_image = tf.reshape(x, [-1,28,28,1]) # First Convolutional Layer W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) # <------------------------- Added Pooling # Second Convolutional Layer W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) # <------------------------- Added Pooling # Densely Connected Layer W_fc1 = weight_variable([7*7*64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # <------------ Added Dropout # Readout Layer W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 # Loss cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_true)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_true,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) for i in range(5000): batch = mnist.train.next_batch(50) if i % 100 == 0: _, acc = 
sess.run([train_step, accuracy], feed_dict={x: batch[0], y_true: batch[1], keep_prob: 1.0}) print("step [%d]: training accuracy %g" % (i, acc)) else: sess.run(train_step, feed_dict={x: batch[0], y_true: batch[1], keep_prob: .5}) print("--- RESULTS ---") print("Test Accuracy %g" % accuracy.eval(feed_dict={ x: mnist.test.images[:1000], y_true: mnist.test.labels[:1000], keep_prob: 1.0})) sess.close() # - # ## CNN Architectures # # <img src="./images/full_net.png"> # # The most common architecture follow the following pattern. # # INPUT - > [[CONV -> RELU]xN -> POOL?]xM -> [FC -. RELU]xk -> FC # # <img src="./images/vgg.png"> # # There are several standard architectures that are legendary (standard and influential). # # #### AlexNet # - Trained the network on ImageNet data, which contained over 15 million annotated images from a total of over 22,000 categories. # - Used ReLU for the nonlinearity functions (Found to decrease training time as ReLUs are several times faster than the conventional tanh function). # - Used data augmentation techniques that consisted of image translations, horizontal reflections, and patch extractions. # - Implemented dropout layers in order to combat the problem of overfitting to the training data. # - Trained the model using batch stochastic gradient descent, with specific values for momentum and weight decay. # - Trained on two GTX 580 GPUs for five to six days. # # #### ZF Net # - Very similar architecture to AlexNet, except for a few minor modifications. # - AlexNet trained on 15 million images, while ZF Net trained on only 1.3 million images. # - Instead of using 11x11 sized filters in the first layer (which is what AlexNet implemented), ZF Net used filters of size 7x7 and a decreased stride value. The reasoning behind this modification is that a smaller filter size in the first conv layer helps retain a lot of original pixel information in the input volume. 
A filtering of size 11x11 proved to be skipping a lot of relevant information, especially as this is the first conv layer. # - As the network grows, we also see a rise in the number of filters used. # - Used ReLUs for their activation functions, cross-entropy loss for the error function, and trained using batch stochastic gradient descent. # - Trained on a GTX 580 GPU for twelve days. # - Developed a visualization technique named Deconvolutional Network, which helps to examine different feature activations and their relation to the input space. Called “deconvnet” because it maps features to pixels (the opposite of what a convolutional layer does). # # #### VGG Net # - The use of only 3x3 sized filters is quite different from AlexNet’s 11x11 filters in the first layer and ZF Net’s 7x7 filters. The authors’ reasoning is that the combination of two 3x3 conv layers has an effective receptive field of 5x5. This in turn simulates a larger filter while keeping the benefits of smaller filter sizes. One of the benefits is a decrease in the number of parameters. Also, with two conv layers, we’re able to use two ReLU layers instead of one. # - 3 conv layers back to back have an effective receptive field of 7x7. # - As the spatial size of the input volumes at each layer decrease (result of the conv and pool layers), the depth of the volumes increase due to the increased number of filters as you go down the network. # - Interesting to notice that the number of filters doubles after each maxpool layer. This reinforces the idea of shrinking spatial dimensions, but growing depth. # - Worked well on both image classification and localization tasks. The authors used a form of localization as regression (see page 10 of the paper for all details). # - Built model with the Caffe toolbox. # - Used scale jittering as one data augmentation technique during training. # - Used ReLU layers after each conv layer and trained with batch gradient descent. 
# - Trained on 4 Nvidia Titan Black GPUs for two to three weeks. # # #### GoogLeNet # - Used 9 Inception modules in the whole architecture, with over 100 layers in total! Now that is deep… # - No use of fully connected layers! They use an average pool instead, to go from a 7x7x1024 volume to a 1x1x1024 volume. This saves a huge number of parameters. # - Uses 12x fewer parameters than AlexNet. # - During testing, multiple crops of the same image were created, fed into the network, and the softmax probabilities were averaged to give us the final solution. # - Utilized concepts from R-CNN (a paper we’ll discuss later) for their detection model. # - There are updated versions to the Inception module (Versions 6 and 7). # - Trained on “a few high-end GPUs within a week”. # # #### ResNet # - “Ultra-deep” – <NAME>. # - 152 layers… # - Interesting note that after only the first 2 layers, the spatial size gets compressed from an input volume of 224x224 to a 56x56 volume. # - Authors claim that a naïve increase of layers in plain nets result in higher training and test error (Figure 1 in the paper). # - The group tried a 1202-layer network, but got a lower test accuracy, presumably due to overfitting. # - Trained on an 8 GPU machine for two to three weeks. # ## Useful Links: # Interactive Demo: # - http://cs.stanford.edu/people/karpathy/convnetjs/demo/cifar10.html # - http://cs.stanford.edu/people/karpathy/convnetjs/demo/mnist.html # # Visualizing Activations: # - https://www.youtube.com/watch?v=AgkfIQ4IGaM # Sources: # # [1] http://cs231n.github.io/convolutional-networks/ # # [2] https://adeshpande3.github.io/adeshpande3.github.io/A-Beginner's-Guide-To-Understanding-Convolutional-Neural-Networks/
CNN/cnn_overview_tensorflow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # import sys from gp_sinkhorn.SDE_solver import solve_sde_RK from gp_sinkhorn.MLE_drift import * from gp_sinkhorn.utils import plot_trajectories_2 import copy import torch import math import numpy as np from celluloid import Camera from IPython.display import HTML import matplotlib.pyplot as plt # - # # Dataset that can break Our algorithm due to means being too far # + num_samples = 50 X_0 = torch.distributions.normal.Normal(loc=0, scale=0.5).sample((num_samples, 1)) #.reshape(num_samples, dim,-1) X_1_a = torch.distributions.normal.Normal(loc=-7, scale=0.6**2).sample((num_samples//2, 1)) X_1_b = torch.distributions.normal.Normal(loc=-12, scale=0.6**2).sample((num_samples//2, 1)) X_1 = torch.cat((X_1_a,X_1_b)) # - prior_X_0=torch.cat((X_1,X_0)) prior_X_0.shape # prior_X_0 = None plt.scatter([0]*X_0.shape[0],X_0) plt.scatter([1]*X_0.shape[0],X_1) # # Set $\sigma$ to 1 or 0.5 here to see how the method falls appart. # # # ## UPDATE ! We implemented annealing which has some theoretical basis for the SBP it solves this issue mostly # # This is an interesting example that illustrates just how local structure and more time is enough to go quite far. # # By this I mean (if you look at the plots in time as a video (might be worth animating): # # # 1. First you can see that only one mode is abel to be reached and abel to reach the center going backwards at this current level of Brownian motion # 2. Once the nearest mode is reached then you see IPFP starts to reach out to the very far away mode pigybacking on the reached mode # 3. This teahes us 2 things about the algorithm. One that for things that are very far in mean there needs to be some sort of spatial structure for me to get there. 
Two the prior matters a lot, imagine if I had a drift which somehow rescaled things to not make them look far or that it transported me to the general area where the two modes are . Our algorithm is very good at matching local structure very well spliting reshaping etc , but it really struggles to make big spatial jumps that are bigger than the diffusion. # 4. Back to the previous point if I had a prior which was just a linear interpolation drift that takes me to the mean of the orange dataset this would solve this issue to some point, Austen mentioned linear interpolation drift augmented Brownian priors in cases where you dont have much prior knowledge as a start at some point. # - More concretely I mean add a linear drift to the prior $b(X,t) = mt + c$ # - where m and c are fitted from the means of both datasets # - a better approach could be something piecewise linear based on range (could do some clustering to initilalise etc) # 5. Annealing the temperature turns out ot be the simplest solution # # In short I believe in the cases where the algo strugles is becaus we dont have a particularly good prior, this algorithm is good at solving the hard / interesting part (spliting modes, matching kurtosis, skewness and compelx curvatures) but jumping over huge distances in space (which is boring) is not its forte. # # NOTE: The linear interpolation modification to the prior proposed in 4, has an obvious failure case imagine the two mixtures being symmetrically centered around 0 their mean would be 0 , thus some clustering styled solution would be required. Could also do quantiles and uniformly send things to different quantiles (this could be nice). Would love to hear your thoughts on this problem and the proposed solutions. # # Hoping for the embryo data that more timesteps + more IPFP iterations + sigma=1.5 will solve it but if not we need more thought. 
# # Could try rescaling the data but thats very analogous to scaling sigma up it can make the trajectories seem too noisy. # # # NOTE 2: Notice that our method only requires one or two samples to hit the target and then it very quickly and automatically starts covering the space which is nice. # + sigma=2.5 # Works at 1.5 breaks at 0.5, what to do ? sigma_target = 1 mod = 5 iteration = 100 increments = int(iteration / mod) # sigma_target = sigma * decay_sigma**iteration decay_sigma = (sigma_target/sigma)**(1.0/increments) print(decay_sigma) sigma_test = sigma for i in range(increments): sigma_test *= decay_sigma print(sigma_test) # - N=50 import os os.environ["DIR_LOG"] = "" result = MLE_IPFP( X_0,X_1,prior_X_0=prior_X_0, sparse=False, num_data_points=50, num_time_points=25,sigma=sigma, N=N, iteration =iteration, plot=False, decay_sigma=decay_sigma,refinement_iterations=5, div=mod ) T,M,T2,M2 = result[-1] plot_trajectories_2(M,T, color="red") plot_trajectories_2(M2, T2) N
notebooks/Misc/far_gaussian_failure_case.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### StatsModels 샘플데이터 # - R의 통계분석 및 시계열 분석을 파이썬에서 실행해주는 기능 # - Rdataset 프로젝트 : R에서 사용하던 1000개 이상의 표준 데이터셋 제공 # - https://github.com/vincentarelbundock/Rdatasets # - 데이터셋 목록 : http://vincentarelbundock.github.io/Rdatasets/datasets.html # - package명과 item명 파악후 get_rdataset명령 이용 # - 기초 통계 (Statistics) # - 각종 검정(test) 기능 # - 커널 밀도 추정 # - Generalized Method of Moments # - 회귀 분석 (Linear Regression) # # - 선형 모형 (Linear Model) # - 일반화 선형 모형 (Generalized Linear Model) # - 강인 선형 모형 (Robust Linear Model) # - 선형 혼합 효과 모형 (Linear Mixed Effects Model) # - ANOVA (Analysis of Variance) # - Discrete Dependent Variable (Logistic Regression 포함) # # - 시계열 분석 (Time Series Analysis) # - ARMA/ARIMA Process # - Vector ARMA Process # ##### 1. 타이타닉 데이터 data = sm.datasets.get_rdataset("Titanic", package="datasets") type(data) data.data.tail() # data속성이 dataframe print(data.__doc__) # ##### 2. 미국 강수량 data2 = sm.datasets.get_rdataset("precip") df = data2.data df.tail() df.plot() plt.show() # 다음이 40정도일 것이라고 예상됨(기댓값), 이게 전부 # ##### 3. 황체형성 호르몬 수치 시계열 데이터 data3 = sm.datasets.get_rdataset("lh") df2 = data3.data df2.tail() df2.plot(x="time", y="value") plt.show() # ##### 4. 
호흡기질환 사망자수 # package의 default값은 'dataset' data4 = sm.datasets.get_rdataset("deaths","MASS") df3 = data4.data df3.tail() # time column : datetime으로 포맷 변경 def yearfraction2datetime(yearfraction, startyear=0): import datetime, dateutil year = int(yearfraction) + startyear month = int(round(12 * (yearfraction - year))) delta = dateutil.relativedelta.relativedelta(months=month) date = datetime.datetime(year, 1, 1) + delta return date df3["datetime"] = df3.time.map(yearfraction2datetime) df3.tail() df3.plot(x='datetime', y='value') plt.show() # 경향성을 보이므로 좀 더 정확한예측이 가능하다 data = sm.datasets.get_rdataset("AirPassengers") df = data.data df.tail() df['datetime'] = df.time.map(yearfraction2datetime) df.tail() df.plot(x='datetime', y='value') plt.show() # 이 또한 좀 더 정확한 예측이 가능할 것으로 보인다.
machinelearning/statsmodels/statsmodel_sample.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: py:light
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Inversion of individual Hanle profiles
#
# Invert individual He I 10830 profiles that exhibit Hanle-effect signatures
# using Hazel.  The repeated per-cell code of the original notebook (loading,
# 4-panel Stokes plotting, Hazel input-file writing, parameter printing) is
# factored into small helpers.

import numpy as np
import hazel
import matplotlib.pyplot as plt
import h5py
from astropy.io import fits  # kept: used later when loading the observed maps

# Offset converting the relative wavelength axis of the .perf files to
# absolute wavelength (Angstrom) around the He I 10830 triplet.
WAVE_OFFSET = 10829.09


def load_profile(path):
    """Load one .perf profile: return (wavelength, stokes[4, n], noise[4, n])."""
    data = np.loadtxt(path, skiprows=1, unpack=True)
    ll = np.copy(data[0]) + WAVE_OFFSET
    return ll, np.copy(data[1:5]), np.copy(data[5:])


def plot_stokes(profiles, labels=None):
    """2x2 panel plot of Stokes I/Q/U/V.

    profiles: list of (wavelength, stokes) pairs, over-plotted per panel.
    labels:   optional legend labels, one per profile.
    """
    panel_names = ["Stokes I", "Stokes Q", "Stokes U", "Stokes V"]
    plt.figure(figsize=[14, 8])
    for panel in range(4):
        plt.subplot(2, 2, panel + 1)
        for idx, (ll, stokes) in enumerate(profiles):
            label = labels[idx] if (labels and panel == 0) else None
            plt.plot(ll, stokes[panel, :], 'o', label=label)
        plt.ylabel(panel_names[panel])
        if panel >= 2:
            plt.xlabel("Wavelength")
    if labels:
        plt.subplot(2, 2, 1)
        plt.legend()


def write_hazel_inputs(prefix, ll, stokes, noise, los_line, boundary_line):
    """Write the three input files Hazel expects.

    - '<prefix>.wavelength': the wavelength axis.
    - '<prefix>.weights':    per-point weights (uniform; fine-tune if needed).
    - '<prefix>_stokes.1d':  LOS geometry, boundary condition, and the observed
                             Stokes profiles with their noise estimates.
    The LOS line must be consistent with the observing geometry, and the
    boundary condition with any intensity renormalization applied by the caller.
    """
    n_wvl = len(ll)
    np.savetxt(f'{prefix}.wavelength', ll, header='lambda')
    with open(f'{prefix}.weights', 'w') as f:
        f.write('# WeightI WeightQ WeightU WeightV\n')
        for _ in range(n_wvl):
            f.write('1.0 1.0 1.0 1.0\n')
    with open(f'{prefix}_stokes.1d', 'wb') as f:
        f.write(b'# LOS theta_LOS, phi_LOS, gamma_LOS\n')
        f.write(los_line)
        f.write(b'\n')
        f.write(b'# Boundary condition I/Ic(mu=1), Q/Ic(mu=1), U/Ic(mu=1), V/Ic(mu=1)\n')
        f.write(boundary_line)
        f.write(b'\n')
        f.write(b'# SI SQ SU SV sigmaI sigmaQ sigmaU sigmaV\n')
        # Stack noise next to the measurements, one column set per Stokes.
        np.savetxt(f, np.vstack([stokes, noise]).T)


def run_inversion(conf_file, nrand):
    """Run Hazel over all pixels with `nrand` randomized initializations."""
    iterator = hazel.Iterator(use_mpi=False)
    mod = hazel.Model(conf_file, working_mode='inversion', verbose=1,
                      rank=iterator.get_rank(), randomization=nrand)
    iterator.use_model(model=mod)
    iterator.run_all_pixels()


def plot_fits(ll, stokes_obs, fit, n_rand):
    """Overlay the n_rand fitted profiles on the observations (x relative to 10830 A)."""
    plt.figure(figsize=[12, 8])
    for panel in range(4):
        plt.subplot(2, 2, panel + 1)
        plt.plot(ll - 10830, stokes_obs[panel, :], 'o')
        for r in range(n_rand):
            plt.plot(ll - 10830, fit[r, panel, :])


def print_chromosphere_params(result, i,
                              keys=('Bx', 'By', 'Bz', 'tau', 'v',
                                    'deltav', 'beta', 'a')):
    """Neatly print the fitted ch1 parameters of randomization i."""
    labels = {'Bx': 'Bx= ', 'By': 'By= ', 'Bz': 'Bz= ', 'tau': 'tau= ',
              'v': 'vlos ', 'deltav': 'vtherm= ', 'beta': 'beta= ', 'a': 'a= '}
    for key in keys:
        print(labels[key], result['ch1'][key][0, i, 0])


# ### Profile A1 (on disc) ---------------------------------------------------
ll, stokes, noise = load_profile("profiles/profA1.perf")
plot_stokes([(ll, stokes)])
n_wvl = len(ll)

# The 0.89388 factor renormalizes the observed intensity to the disc-centre
# continuum scale assumed by the boundary condition I/Ic(mu=1) = 1 below.
stokes_to_fit = stokes * 0.89388
write_hazel_inputs('10830_A1', ll, stokes_to_fit, noise,
                   los_line=b'50.0 0.0 0.0\n',
                   boundary_line=b'1.0 0.0 0.0 0.0\n')

nrand = 20
run_inversion('conf_test_A1.ini', nrand)

# NOTE(review): the original notebook reshaped the output assuming 2
# randomizations even though the inversion above was launched with 20 --
# confirm against the actual size of 'spec1/stokes' in the output file.
nrand = 2
result = h5py.File('output_onepixel.h5', 'r')
fit = np.copy(result['spec1']['stokes']).reshape(nrand, 4, n_wvl)
plot_fits(ll, stokes_to_fit, fit, nrand)
print(result['spec1']['chi2'][0])

good = [0]  # randomizations judged acceptable by eye
for i in good:
    print("----------------------------------")
    print("Fit # ", i)
    print("----------------------------------")
    print_chromosphere_params(result, i)
result.close()
# %rm output_onepixel.h5

# ### Off-limb profiles: spicule (A8) and prominence (A6, 22" above the limb)
ll, stokes, noise = load_profile("profiles/profA8.perf")
ll_prom, stokes_prom, noise_prom = load_profile("profiles/profA6.perf")
plot_stokes([(ll, stokes), (ll_prom, stokes_prom)],
            labels=['spicule', 'prominence'])
n_wvl = len(ll)

# Off-limb: no continuum renormalization, dark boundary (I/Ic = 0).
stokes_to_fit = stokes
write_hazel_inputs('10830_A8', ll, stokes_to_fit, noise,
                   los_line=b'90.0 0.0 90.0\n',
                   boundary_line=b'0.0 0.0 0.0 0.0\n')

nrand = 20
run_inversion('conf_test_A8.ini', nrand)
result = h5py.File('output_onepixel.h5', 'r')
fit = np.copy(result['spec1']['stokes']).reshape(nrand, 4, n_wvl)
plot_fits(ll, stokes_to_fit, fit, nrand)
print(result['spec1']['chi2'][0, :, 1])

# Randomizations that converged to an acceptable chi2 (6 and 11 discarded).
good = np.array([0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19])
Bx = result['ch1']['Bx'][0, good, 0]
By = result['ch1']['By'][0, good, 0]
Bz = result['ch1']['Bz'][0, good, 0]
print(Bx)
print(By)
print(Bz)

# 3-D quiver of the inferred field vectors, all anchored at the origin.
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the 3d projection
vectors = np.zeros([len(good), 6])
for i in range(len(good)):
    vectors[i] = [0, 0, 0, Bx[i], By[i], Bz[i]]
X, Y, Z, U, V, W = zip(*vectors)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.quiver(X, Y, Z, U, V, W)
ax.set_xlim([-20, 20])
ax.set_ylim([-20, 20])
ax.set_zlim([-20, 20])
plt.show()

# Same vectors projected on the plane of the sky (By-Bz plane).
plt.rcParams["figure.figsize"] = [6, 6]
plt.rcParams["figure.autolayout"] = True
vectors2d = np.zeros([len(good), 4])
for i in range(len(good)):
    vectors2d[i] = [0, 0, By[i], Bz[i]]
X, Y, U, V = zip(*vectors2d)
plt.figure()
ax = plt.gca()
ax.quiver(X, Y, U, V, angles='xy', scale_units='xy', scale=1)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
plt.xlabel("By")
plt.ylabel("Bz")
plt.draw()
plt.show()

# The A8 run does not fit beta, so it is omitted from the printout.
print_chromosphere_params(result, 0,
                          keys=('Bx', 'By', 'Bz', 'tau', 'v', 'deltav', 'a'))
result.close()
# %rm output_onepixel.h5
Hanle_single_profile_inversion.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: py:light
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Regular-expression exercises

# + tags=[]
import re
# -

# ### Q1.
# Match files with a .exe, .xml or .jar extension; the name may contain
# alphabets, digits and underscores (i.e. word characters).
files = ['employees.xml', 'calculator.jar', 'nfsmw.exe',
         'bkgrnd001.jpg', 'sales_report.ppt']
q1_pattern = r'\w+\.(exe|xml|jar)'
result_q1 = [f for f in files if re.fullmatch(q1_pattern, f)]
# Expected: ['employees.xml', 'calculator.jar', 'nfsmw.exe']
print(result_q1)

# ### Q2.
# Match addresses that contain "Koramangala", case-insensitively.
addresses = ['466, 5th block, Koramangala, Bangalore',
             '4th BLOCK, KORAMANGALA - 560034',
             '999, St. Marks Road, Bangalore']
q2_pattern = r'Koramangala'
result_q2 = [a for a in addresses if re.search(q2_pattern, a, flags=re.I)]
print(result_q2)

# ### Q3.
# Match integers or floats with at most two decimal places.
# The original attempt, '\d*\.?\d{0,2}' with re.search, was broken: every
# part of the pattern is optional, so it found a zero-width match in every
# input, including 'abc'.  Require at least one digit and anchor with
# fullmatch instead.
numbers = ['2', '2.3', '4.56', '.61', '4.567', '75.8792', 'abc']
q3_pattern = r'\d+(\.\d{1,2})?|\.\d{1,2}'
result_q3 = [num for num in numbers if re.fullmatch(q3_pattern, num)]
# Expected: ['2', '2.3', '4.56', '.61']
print(result_q3)

# ### Q4.
# Match company_model_number, all three parts mandatory.
phones = ['apple_iphone_6', 'samsung_note_4', 'google_pixel_2',
          'apple_6', 'iphone_6', 'google_pixel_']
q4_pattern = r'[A-Za-z]+_[A-Za-z]+_\d+'
result_q4 = [p for p in phones if re.fullmatch(q4_pattern, p)]
# Expected: ['apple_iphone_6', 'samsung_note_4', 'google_pixel_2']
print(result_q4)

# ### Q5.
# Valid email: 4-20 chars of [alphanumeric . _] before '@', then a 3-15
# char alphanumeric domain prefix, a '.', and a com/in/org suffix.
emails = ['<EMAIL>', '<EMAIL>', '<EMAIL>', '@gmail.com',
          '<EMAIL>!<EMAIL>', '<EMAIL>', 'neeraj@']
q5_pattern = r'[A-Za-z0-9._]{4,20}@[A-Za-z0-9]{3,15}\.(com|in|org)'
result_q5 = [e for e in emails if re.fullmatch(q5_pattern, e)]
print(result_q5)
9. NLP/1. Lexical Processing/1. Regular Expression/.ipynb_checkpoints/Bonus+exercise-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: py:light
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Dijkstra's single-source shortest paths on a weighted directed graph.

# +
from collections import defaultdict
import heapq


class Graph:
    """Directed weighted graph with a Dijkstra shortest-path solver."""

    def __init__(self):
        # Adjacency list: u -> list of [v, weight] edges.
        self.graph = defaultdict(list)
        # Initial distance estimate for every vertex seen so far.
        self.dist = {}

    def add_edge(self, u, v, weight):
        """Add a directed edge u -> v with the given weight."""
        self.graph[u].append([v, weight])
        self.dist[u] = float('inf')
        self.dist[v] = float('inf')
        # Touching the defaultdict guarantees v has an (empty) adjacency
        # list even if it has no outgoing edges.
        self.graph[v]

    def dijkstra(self, source):
        """Return [vertex, distance] pairs in order of finalization.

        Bug fix: the original consumed self.dist in place (del per vertex),
        so a second call on the same graph silently returned [].  We now
        work on a copy, and replace the O(V) linear min-scan per step with
        a binary heap, giving O(E log V) overall.  Vertices unreachable
        from `source` are still reported with distance infinity, matching
        the original output.
        """
        dist = dict(self.dist)
        dist[source] = 0
        finalized = []          # [vertex, distance] in pop order
        done = set()
        heap = [(0, source)]
        while heap:
            w, v = heapq.heappop(heap)
            if v in done:       # stale heap entry; vertex already settled
                continue
            done.add(v)
            finalized.append([v, w])
            for nxt, weight in self.graph[v]:
                if nxt not in done and w + weight < dist[nxt]:
                    dist[nxt] = w + weight
                    heapq.heappush(heap, (dist[nxt], nxt))
        # Unreachable vertices keep their infinite distance estimate.
        for v, d in dist.items():
            if v not in done:
                finalized.append([v, d])
        return finalized


if __name__ == '__main__':
    g = Graph()
    g.add_edge(1, 2, 2)
    g.add_edge(2, 3, 1)
    g.add_edge(1, 3, 4)
    g.add_edge(3, 5, 3)
    g.add_edge(5, 6, 5)
    g.add_edge(5, 4, 2)
    g.add_edge(4, 6, 1)
    g.add_edge(2, 4, 7)
    g.add_edge(2, 1, 3)
    dv = g.dijkstra(1)
    print(dv)
# -
flarow/Graph/Dijkstra's Algorithm.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: py:light
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ROS node: read fused camera/IMU packets from the ARAS VIO board over USB
# serial and republish them as aras_vio messages.

import serial
import time
import struct
import rospy
from sensor_msgs.msg import Image
from vio.msg import aras_vio
from cv_bridge import CvBridge
import cv2


def camera_set_analog_gain(port_object, val):
    """Send the 'G2 <val>' analog-gain command to the camera MCU."""
    port_object.write(''.join(['G2 ', str(val), '\r']).encode('utf-8'))


def camera_set_digital_gain(port_object, val):
    """Send the 'G3 <val>' digital-gain command to the camera MCU."""
    port_object.write(''.join(['G3 ', str(val), '\r']).encode('utf-8'))


def camera_set_exposure(port_object, val):
    """Send the 'G1 <val>' exposure command to the camera MCU."""
    port_object.write(''.join(['G1 ', str(val), '\r']).encode('utf-8'))


port = serial.Serial('/dev/ttyACM0', 115200, timeout=1)
camera_set_analog_gain(port, 30)
camera_set_digital_gain(port, 12)
camera_set_exposure(port, 480)

pub_imu = rospy.Publisher('VIO_IMU', aras_vio)
rospy.init_node('VIO_IMU_Node', anonymous=True)
vio = aras_vio()
port.flush()

# Bug fix: in the original, port.close() sat after a bare `while True:` and
# was unreachable.  Looping on rospy.is_shutdown() and closing in `finally`
# guarantees the serial port is released on shutdown or on any error.
try:
    while not rospy.is_shutdown():
        # NOTE(review): `terminator=` is the pre-3.0 pyserial keyword for
        # read_until (newer releases call it `expected`) -- confirm the
        # installed pyserial version before renaming.
        data = port.read_until(terminator=''.join(['abc\n']).encode('utf-8'))
        if len(data) == 44:
            # 9 int16 sensor axes, 2 int64 timestamps, 1 int32 status word.
            (ax, ay, az, wx, wy, wz, mx, my, mz,
             camera_ts, imu_ts, toggling_bits) = struct.unpack('3h3h3h2qi', data)
        else:
            print("Failed to receive")
            ax = ay = az = wx = wy = wz = mx = my = mz = 0
            camera_ts = imu_ts = toggling_bits = 0
        vio.acc = [ax, ay, az]
        vio.gyr = [wx, wy, wz]
        vio.mag = [mx, my, mz]
        vio.ts = [camera_ts, imu_ts, 0]
        vio.stamp = rospy.Time.now()
        pub_imu.publish(vio)
finally:
    port.close()
ros_nodes/aras_usb_imu/scripts/.ipynb_checkpoints/imu_node-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: py:light
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # User- and item-based collaborative filtering on MovieLens 100k.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error

names = ['user_id', 'item_id', 'rating', 'timestamp']
df = pd.read_csv('ml-100k/u.data', sep='\t', names=names)
df.head()

n_users = df.user_id.unique().shape[0]
n_items = df.item_id.unique().shape[0]
print(str(n_users), ' users')
print(str(n_items), ' items')

# Dense user x item rating matrix; 0 marks "not rated".
ratings = np.zeros((n_users, n_items))
for row in df.itertuples():
    ratings[row[1] - 1, row[2] - 1] = row[3]
ratings

sparsity = float(len(ratings.nonzero()[0]))
sparsity /= (ratings.shape[0] * ratings.shape[1])
sparsity *= 100
print('Sparsity : ', sparsity)


def train_test_split(ratings):
    """Move 10 random ratings of *each* user from train into test.

    Bug fix: the original sampled from ratings[0, :] (user 0's row) for every
    user, so the held-out items were wrong for all users but the first.
    """
    test = np.zeros(ratings.shape)
    train = ratings.copy()
    for user in range(ratings.shape[0]):
        held_out = np.random.choice(ratings[user, :].nonzero()[0],
                                    size=10, replace=False)
        train[user, held_out] = 0.
        test[user, held_out] = ratings[user, held_out]
    # Test and training are truly disjoint.
    assert np.all((train * test) == 0)
    return train, test


train, test = train_test_split(ratings)
train
test
train.shape
test.shape


def slow_similarity(ratings, kind='user'):
    """Reference O(n^2 m) cosine similarity, kept for exposition only.

    Bug fixes vs. the original: Python-3 range (was xrange), the dot product
    is accumulated with += (was =, which kept only the last term), the item
    mode works on the transpose, and the normalization uses the sqrt of the
    squared norms (true cosine) rather than the squared norms themselves.
    """
    mat = ratings if kind == 'user' else ratings.T
    n = mat.shape[0]
    sim = np.zeros((n, n))
    for u in range(n):
        for uprime in range(n):
            dot = 0.
            norm_u = 0.
            norm_uprime = 0.
            for i in range(mat.shape[1]):
                dot += mat[u, i] * mat[uprime, i]
                norm_u += mat[u, i] ** 2
                norm_uprime += mat[uprime, i] ** 2
            sim[u, uprime] = dot / np.sqrt(norm_u * norm_uprime)
    return sim


def fast_similarity(ratings, kind='user'):
    """Vectorized cosine similarity between users (rows) or items (columns)."""
    if kind == 'user':
        sim = ratings.dot(ratings.T)
    elif kind == 'item':
        sim = ratings.T.dot(ratings)
    norms = np.array([np.sqrt(np.diagonal(sim))])
    return sim / norms / norms.T


# #%timeit slow_user_similarity(train)
fast_similarity(train, kind='user')
user_similarity = fast_similarity(train, kind='user')
item_similarity = fast_similarity(train, kind='item')
print(item_similarity[:4, :4])


def predict_slow_simple(ratings, similarity, kind='user'):
    """Loop-based similarity-weighted average prediction.

    Bug fix: Python-3 range (the original used Python-2 xrange, a NameError
    under Python 3).
    """
    pred = np.zeros(ratings.shape)
    if kind == 'user':
        for i in range(ratings.shape[0]):
            for j in range(ratings.shape[1]):
                pred[i, j] = (similarity[i, :].dot(ratings[:, j])
                              / np.sum(np.abs(similarity[i, :])))
    elif kind == 'item':
        for i in range(ratings.shape[0]):
            for j in range(ratings.shape[1]):
                pred[i, j] = (similarity[j, :].dot(ratings[i, :].T)
                              / np.sum(np.abs(similarity[j, :])))
    return pred


def predict_fast_simple(ratings, similarity, kind='user'):
    """Vectorized form of predict_slow_simple."""
    if kind == 'user':
        return similarity.dot(ratings) / np.array([np.abs(similarity).sum(axis=1)]).T
    elif kind == 'item':
        return ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])


def get_mse(pred, actual):
    """MSE restricted to the entries that were actually rated (nonzero)."""
    pred = pred[actual.nonzero()].flatten()
    actual = actual[actual.nonzero()].flatten()
    return mean_squared_error(pred, actual)


item_prediction = predict_fast_simple(train, item_similarity, kind='item')
user_prediction = predict_fast_simple(train, user_similarity, kind='user')
print('User-based CF MSE: ', str(get_mse(user_prediction, test)))
print('Item-based CF MSE: ', str(get_mse(item_prediction, test)))


def predict_topk(ratings, similarity, kind='user', k=40):
    """Weighted-average prediction using only the k most similar neighbours."""
    pred = np.zeros(ratings.shape)
    if kind == 'user':
        for i in range(ratings.shape[0]):
            top_k_users = [np.argsort(similarity[:, i])[:-k - 1:-1]]
            for j in range(ratings.shape[1]):
                pred[i, j] = similarity[i, :][top_k_users].dot(ratings[:, j][top_k_users])
                pred[i, j] /= np.sum(np.abs(similarity[i, :][top_k_users]))
    if kind == 'item':
        for j in range(ratings.shape[1]):
            top_k_items = [np.argsort(similarity[:, j])[:-k - 1:-1]]
            for i in range(ratings.shape[0]):
                pred[i, j] = similarity[j, :][top_k_items].dot(ratings[i, :][top_k_items].T)
                pred[i, j] /= np.sum(np.abs(similarity[j, :][top_k_items]))
    return pred


pred = predict_topk(train, user_similarity, kind='user', k=40)
print('Top-k User-based CF MSE: ', str(get_mse(pred, test)))
pred = predict_topk(train, item_similarity, kind='item', k=40)
print('Top-k Item-based CF MSE: ', str(get_mse(pred, test)))


def _sweep_k(predict_fn, k_array):
    """Evaluate predict_fn over k_array.

    Returns (user_train, user_test, item_train, item_test) MSE lists.
    Replaces two verbatim copies of this loop in the original notebook
    (which also redefined get_mse identically in between).
    """
    user_train_mse, user_test_mse = [], []
    item_train_mse, item_test_mse = [], []
    for k in k_array:
        user_pred = predict_fn(train, user_similarity, kind='user', k=k)
        item_pred = predict_fn(train, item_similarity, kind='item', k=k)
        user_train_mse.append(get_mse(user_pred, train))
        user_test_mse.append(get_mse(user_pred, test))
        item_train_mse.append(get_mse(item_pred, train))
        item_test_mse.append(get_mse(item_pred, test))
    return user_train_mse, user_test_mse, item_train_mse, item_test_mse


def _plot_mse_curves(k_array, user_train_mse, user_test_mse,
                     item_train_mse, item_test_mse):
    """MSE-vs-k curves; train curves faded, test curves solid."""
    sns.set()
    pal = sns.color_palette("Set2", 2)
    plt.figure(figsize=(8, 8))
    plt.plot(k_array, user_train_mse, c=pal[0], label='User-based train',
             alpha=0.5, linewidth=5)
    plt.plot(k_array, user_test_mse, c=pal[0], label='User-based test',
             linewidth=5)
    plt.plot(k_array, item_train_mse, c=pal[1], label='Item-based train',
             alpha=0.5, linewidth=5)
    plt.plot(k_array, item_test_mse, c=pal[1], label='Item-based test',
             linewidth=5)
    plt.legend(loc='best', fontsize=20)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    plt.xlabel('k', fontsize=30)
    plt.ylabel('MSE', fontsize=30)
    plt.show()


k_array = [5, 15, 30, 50, 100, 200]
_plot_mse_curves(k_array, *_sweep_k(predict_topk, k_array))


def predict_nobias(ratings, similarity, kind='user'):
    """Prediction after removing each user's (or item's) mean-rating bias."""
    if kind == 'user':
        user_bias = ratings.mean(axis=1)
        ratings = (ratings - user_bias[:, np.newaxis]).copy()
        pred = similarity.dot(ratings) / np.array([np.abs(similarity).sum(axis=1)]).T
        pred += user_bias[:, np.newaxis]
    elif kind == 'item':
        item_bias = ratings.mean(axis=0)
        ratings = (ratings - item_bias[np.newaxis, :]).copy()
        pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
        pred += item_bias[np.newaxis, :]
    return pred


user_pred = predict_nobias(train, user_similarity, kind='user')
print('Bias-subtracted User-based CF MSE: ', str(get_mse(user_pred, test)))
item_pred = predict_nobias(train, item_similarity, kind='item')
print('Bias-subtracted Item-based CF MSE: ', str(get_mse(item_pred, test)))


def predict_topk_nobias(ratings, similarity, kind='user', k=40):
    """Top-k neighbour prediction with the mean-rating bias removed first."""
    pred = np.zeros(ratings.shape)
    if kind == 'user':
        user_bias = ratings.mean(axis=1)
        ratings = (ratings - user_bias[:, np.newaxis]).copy()
        for i in range(ratings.shape[0]):
            top_k_users = [np.argsort(similarity[:, i])[:-k - 1:-1]]
            for j in range(ratings.shape[1]):
                pred[i, j] = similarity[i, :][top_k_users].dot(ratings[:, j][top_k_users])
                pred[i, j] /= np.sum(np.abs(similarity[i, :][top_k_users]))
        pred += user_bias[:, np.newaxis]
    if kind == 'item':
        item_bias = ratings.mean(axis=0)
        ratings = (ratings - item_bias[np.newaxis, :]).copy()
        for j in range(ratings.shape[1]):
            top_k_items = [np.argsort(similarity[:, j])[:-k - 1:-1]]
            for i in range(ratings.shape[0]):
                pred[i, j] = similarity[j, :][top_k_items].dot(ratings[i, :][top_k_items].T)
                pred[i, j] /= np.sum(np.abs(similarity[j, :][top_k_items]))
        pred += item_bias[np.newaxis, :]
    return pred


_plot_mse_curves(k_array, *_sweep_k(predict_topk_nobias, k_array))

# Look up an IMDb id, then the TMDB poster base URL (w185 = poster size).
import requests
import json

response = requests.get('http://us.imdb.com/M/title-exact?Toy%20Story%20(1995)')
print(response.url.split('/')[-2])
response

headers = {'Accept': 'application/json'}
payload = {'api_key': 'Plz insert your key here '}
response = requests.get("http://api.themoviedb.org/3/configuration",
                        params=payload, headers=headers)
response = json.loads(response.text)
base_url = response['images']['base_url'] + 'w185'
movie_recommendation/movieRecommender.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: py:light
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## Plotting from MLflow Experiments on Azure (or not!)
#
# Retrieve evaluation-pipeline runs from MLflow (optionally with an azureml
# tracking backend), aggregate the scores, and plot metric-vs-epsilon curves
# per synthesizer.

# If using azureml, install azureml and azureml-mlflow first; otherwise
# plain mlflow suffices.  Set local_mlflow = False and uncomment the
# Workspace import to use the Azure backend.
local_mlflow = True
# from azureml.core import Workspace

import mlflow
from conf import KNOWN_DATASETS
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

if not local_mlflow:
    mlflow.set_tracking_uri(Workspace.from_config().get_mlflow_tracking_uri())

# Add your experiments below using the experiment id.
mlflow.set_experiment("Default")
og_df = mlflow.search_runs()
og_df.columns

# Scoring column (tags.max_accuracy, tags.model_auc, tags.pmse_score, ...)
# and dataset; only FINISHED runs are kept.  Fails if the dataset tag is
# absent from the runs.
score_col = 'tags.pmse_score'
dataset = 'car'

df = og_df[og_df['status'] == 'FINISHED']
df = df[df['tags.dataset'] == dataset]
df = df[[score_col, 'tags.dataset', 'tags.synthesizer', 'tags.epsilon']]
df = df.dropna()
df = df.reset_index()
df[score_col] = df[score_col].astype(float)
df

# Group on (dataset, synthesizer, epsilon), average the per-group scores,
# and extract the score measured on the real (non-synthetic) data before
# dropping those rows.
df = df.groupby(['tags.dataset', 'tags.synthesizer', 'tags.epsilon'],
                as_index=False)[score_col].agg(lambda x: list(x))
real_score = 0
for index, row in df.iterrows():
    row[score_col] = np.mean(row[score_col])
    if row['tags.synthesizer'].split('_')[0] == 'real':
        real_score = row[score_col]
df = df[df['tags.synthesizer'] != 'real_' + dataset]
df

# Dashed = plain synthesizer, solid = QUAIL-wrapped variant, same colour.
color_map = {
    'mwem': '--r',
    'dpgan': '--b',
    'pategan': '--g',
    'dpctgan': '--c',
    'patectgan': '--m',
    'quail_mwem': 'r',
    'quail_dpgan': 'b',
    'quail_pategan': 'g',
    'quail_dpctgan': 'c',
    'quail_patectgan': 'm',
}


def gen_plot_dict(runs, x, y, series, dataset, x_label, y_label):
    """Bucket (x, y, dataset) points by series value.

    x_label / y_label are accepted (the plot spec is splatted in whole) but
    only consumed later by gen_scatter_plot.
    """
    metrics = runs[[x, y, series, dataset]]
    plot_dict = {}
    for _, row in metrics.iterrows():
        plot_dict.setdefault(row[series], [])
        if row[x] and row[y]:
            plot_dict[row[series]].append((float(row[x]), float(row[y]),
                                           row[dataset]))
    return plot_dict


def gen_scatter_plot(plot_dict, plot_index, title, label_dict):
    """Draw one log-x line plot per (series, dataset) pair.

    Cleanup vs. the original: removed an unused hard-coded colour list, and
    passed plt.grid's first argument positionally (the `b=` keyword was
    renamed to `visible` in matplotlib 3.5).
    """
    plt.figure(figsize=(12, 8))
    for series in plot_dict:
        xs, ys = {}, {}
        points = sorted(plot_dict[series], key=lambda p: p[0])
        for x, y, d in points:
            xs.setdefault(d, []).append(x)
            ys.setdefault(d, []).append(y)
        for d in xs:
            if series:
                plt.plot(xs[d], ys[d], color_map[series],
                         label=series + '_' + d)
    plt.xscale("log")
    # NOTE: log y-scale is appropriate for pMSE; turn it off for other metrics.
    plt.yscale("log")
    plt.legend()
    plt.title(title)
    plt.xlabel(label_dict['x_label'])
    plt.ylabel(label_dict['y_label'])
    plt.minorticks_on()
    plt.grid(True, which='minor', color='#999999', linestyle='-', alpha=0.1)
    plt.figure(plot_index)
    plt.show()


def make_plots(runs, plots):
    """Render every plot described in the `plots` spec dict."""
    for i, p in enumerate(plots):
        plot_dict = gen_plot_dict(runs, **plots[p])
        gen_scatter_plot(plot_dict, i, p, plots[p])


# ### Important
# Graphing multiple plots at once is currently broken -- specify ONE metric
# per make_plots call.  Other useful specs: tags.max_accuracy (f1),
# tags.wasserstein_score, tags.aucroc, each with a matching y_label.
for d in np.unique(df['tags.dataset'].dropna()):
    make_plots(df, {
        "Car pMSE Across ϵ": {
            'x': 'tags.epsilon',
            'y': 'tags.pmse_score',
            'series': 'tags.synthesizer',
            'dataset': 'tags.dataset',
            'x_label': "epsilon (log scale)",
            'y_label': "pmse (log scale)",
        },
    })
dpsdgym/Plotting-Results-using-MLFlow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="QnMX68gcw_ZH" # This is the text detection task implementation on MLT-2019 dataset. The image data can be downloaded from https://rrc.cvc.uab.es/?ch=15 after registration. This iPython notebook contains both of our methods in the report. By changing the initialization and input of the combined "MyModel" class, both the mobilenet+2d transformer and mobilenet+inception blocks method can be implemented. # + colab={"base_uri": "https://localhost:8080/"} id="RwmKfFGriSxt" outputId="c4f7a0b5-e84a-4500-9cfd-3b266620f9d6" # check gpu status # !nvidia-smi # + colab={"base_uri": "https://localhost:8080/"} id="FW46eJKiid-w" outputId="ce4e9ff1-d1a4-4c10-e9ff-649d2a97743b" # mount Google drive from google.colab import drive drive.mount('/content/gdrive') # + id="xHMgPg4vilRC" # %matplotlib inline import tensorflow as tf from tensorflow import keras from tensorflow.keras import backend as K from tensorflow.keras import layers from tensorflow.keras.layers import Input, DepthwiseConv2D from tensorflow.keras.layers import Conv2D, BatchNormalization from tensorflow.keras.layers import ReLU, LeakyReLU, Flatten, Dense from PIL import Image import cv2 import pandas as pd import numpy as np import pickle import os from matplotlib import pyplot as plt # + [markdown] id="VEw9DWS3w8ue" # Sample image visualization # + colab={"base_uri": "https://localhost:8080/", "height": 144} id="Hx5VgCB6X2Bh" outputId="93f8e732-5927-4f92-dda2-2cbc6c5d8478" im = Image.open('./gdrive/MyDrive/Colab_Notebooks/computer_vision_project/img_data/ImagesPart2/tr_img_08565.jpg') k = np.asarray(im) print(k.shape) print(len(k.shape)) # im = tf.image.rgb_to_grayscale(np.asarray(im)) # h, w = im.shape[0], im.shape[1] # print(im.shape) plt.imshow(k[:,:,0:3], cmap='gray') # + 
colab={"base_uri": "https://localhost:8080/"} id="aDAGWQA2jF9g" outputId="d80f32d8-bd3e-491b-cab5-b7157a4921fe" img = Image.open('./gdrive/MyDrive/Colab_Notebooks/computer_vision_project/img_data/ImagesPart1/tr_img_00011.jpg') img = tf.image.rgb_to_grayscale(np.asarray(img)) h, w = img.shape[0], img.shape[1] print(img.shape) # + colab={"base_uri": "https://localhost:8080/"} id="23J0iq9QjR1y" outputId="ea22b649-a586-4f8e-fdee-82251082c401" # image resizing if max(h, w) <=1024: new_w = max(int((w//64)*64), 64) new_h = max(int((h//64)*64), 64) elif h <= w: new_w = 1024 new_h = max(int(((new_w*h/w)//64)*64), 64) else: new_h = 1024 new_w = max(int(((new_h*w/h)//64)*64), 64) print(new_h, new_w) h_blocks, w_blocks = int(new_h/64), int(new_w/64) print(h_blocks, w_blocks) # + colab={"base_uri": "https://localhost:8080/", "height": 191} id="yQVeOQb5lLsh" outputId="f1e79490-c930-4bff-de4f-d6a4cddb13ee" xxx = tf.image.resize(tf.reshape(img, [1,h,w,1]), (new_h, new_w), method = tf.image.ResizeMethod.BILINEAR) print(xxx.shape) plt.imshow(xxx[0,:,:,0], cmap='gray') # + colab={"base_uri": "https://localhost:8080/"} id="NhVHLyGJS2cA" outputId="b428a62c-0a47-403a-c9a1-c3223118ba34" x_np = xxx.numpy()/255 x_np.shape # + colab={"base_uri": "https://localhost:8080/"} id="WbvqSM3nmNX-" outputId="cd6e4267-6514-4fba-822c-5c2e4bc74b36" # convert to (64, 64, 1) patches img_patches = tf.convert_to_tensor([[xxx[0,i*64:(i+1)*64,j*64:(j+1)*64,:] for j in range(w_blocks)] for i in range(h_blocks)]) img_patches.shape # + colab={"base_uri": "https://localhost:8080/", "height": 306} id="80t6W7ufmnwj" outputId="d1da2dbe-3f19-48fc-bf77-80fac45d0cff" # patches visualization plt.figure(figsize = (10,5)) for i in range(h_blocks*w_blocks): plt.subplot(h_blocks, w_blocks, i+1) plt.imshow(img_patches[i//w_blocks][i%w_blocks][...,0], cmap='gray') # + id="Xq7Fxw_VnoFm" # Load dataframe from csv ground truth labels df = 
pd.read_csv('./gdrive/MyDrive/Colab_Notebooks/computer_vision_project/img_data/groundtruth_df.csv') df = df[df['word'] != '###'] # + colab={"base_uri": "https://localhost:8080/"} id="wseNzcpV4Wmv" outputId="4d54b920-a3b5-4cd1-9c7d-d3c6a4b644f0" df.size # + colab={"base_uri": "https://localhost:8080/"} id="eOx6fSOVnu60" outputId="0b1c7bbf-281c-4ca5-95d4-a0d3b972d1c7" # visualize sample ground truth gt_np = np.asarray(df[df['image_id']==11]) gt_np # + colab={"base_uri": "https://localhost:8080/"} id="YttKrqfVofdl" outputId="02c03e69-935f-48a5-d1d3-a69daf39e27f" # x->width, y->height # transform into blocks containing the image center centers_x = ((gt_np[:,1]+gt_np[:,3]+gt_np[:,5]+gt_np[:,7])/4)/w * new_w //64 centers_y = ((gt_np[:,2]+gt_np[:,4]+gt_np[:,6]+gt_np[:,8])/4)/h * new_h //64 truth = np.concatenate([[centers_y], [centers_x]], axis=0) truth, truth.shape # + colab={"base_uri": "https://localhost:8080/"} id="JxsYgXQHuXx1" outputId="07db77e5-1c42-4751-8b6a-6371100d0999" truth_locs = [[int(truth[0][i]), int(truth[1][i])] for i in range(truth.shape[1])] truth_locs # + colab={"base_uri": "https://localhost:8080/"} id="M4MFRE-1uzU_" outputId="ce28a683-5b81-47a9-8bc4-c5a110e1ca28" # visualize the padded (16, 16) masks and labels mask = np.zeros((16, 16)) binary_map = np.zeros((16, 16)) mask[0:h_blocks, 0:w_blocks] = 1 for i in range(len(truth_locs)): binary_map[truth_locs[i][0], truth_locs[i][1]] = 1 mask, binary_map # + colab={"base_uri": "https://localhost:8080/"} id="IzyLsVw4TdbN" outputId="45357a98-9e53-4a9b-a9f8-061f19a2f8b7" binary_map.reshape((256,)) # + [markdown] id="oOh9NYqLQENs" # Create and store grayscaled images, truth binary maps and masks for all images # + id="jac8_j60RgGG" parent_path = './gdrive/MyDrive/Colab_Notebooks/computer_vision_project/img_data/' # + id="v0gwPP_8QD11" # for i in range(10000): # print(f'saving img_{i} numpy files ...') # idx = str(i + 1).zfill(5) # if i < 5000: # name = f'{parent_path}/ImagesPart1/tr_img_{idx}' # else: # name 
# NOTE(review): collapsed jupytext cells — one-time dataset preprocessing.
# The commented-out loops (kept commented so they are not re-run) resized each
# of the 10,000 training images so both sides are multiples of 64 with the
# longer side capped at 1024, built the padded (16, 16) label/mask arrays, and
# saved image/label/mask .npy files plus the pickled per-image word-centre
# list.  The live code recomputes each image's block-grid shape
# [new_h//64, new_w//64] from the cached `sizes.npy` (column 0 = width,
# column 1 = height, per the `h, w = sizes[i,1], sizes[i,0]` unpack) and
# unpickles the precomputed word-centre list into `label_list`.
= f'{parent_path}/ImagesPart2/tr_img_{idx}' # if os.path.isfile(f'{name}.jpg'): # path = f'{name}.jpg' # elif os.path.isfile(f'{name}.png'): # path = f'{name}.png' # else: # path = f'{name}.gif' # img = np.asarray(Image.open(path)) # if len(img.shape) == 3: # img = tf.image.rgb_to_grayscale(img[:,:,0:3]) # h, w = img.shape[0], img.shape[1] # if max(h, w) <=1024: # new_w = max(int((w//64)*64), 64) # new_h = max(int((h//64)*64), 64) # elif h <= w: # new_w = 1024 # new_h = max(int(((new_w*h/w)//64)*64), 64) # else: # new_h = 1024 # new_w = max(int(((new_h*w/h)//64)*64), 64) # h_blocks, w_blocks = int(new_h/64), int(new_w/64) # xxx = tf.image.resize(tf.reshape(img, [1,h,w,1]), (new_h, new_w), # method = tf.image.ResizeMethod.BILINEAR) # xxx = xxx.numpy()/255 # # img_patches = tf.convert_to_tensor([[xxx[0,i*64:(i+1)*64,j*64:(j+1)*64,:] for j in range(w_blocks)] for i in range(h_blocks)]) # gt_np = np.asarray(df[df['image_id']==i+1]) # # x->width, y->height # centers_x = ((gt_np[:,1]+gt_np[:,3]+gt_np[:,5]+gt_np[:,7])/4)/w * new_w //64 # centers_y = ((gt_np[:,2]+gt_np[:,4]+gt_np[:,6]+gt_np[:,8])/4)/h * new_h //64 # truth = np.concatenate([[centers_y], [centers_x]], axis=0) # truth_locs = [[int(truth[0][j]), int(truth[1][j])] for j in range(truth.shape[1])] # mask = np.zeros((16, 16)) # binary_map = np.zeros((16, 16)) # mask[0:h_blocks, 0:w_blocks] = 1 # for j in range(len(truth_locs)): # binary_map[truth_locs[j][0], truth_locs[j][1]] = 1 # mask_1d = mask.reshape((256,)) # binary_map_1d = binary_map.reshape((256,)) # save_path = f'{parent_path}/npy_files' # with open(f'{save_path}/img_npys/img_{i+1}.npy', 'wb') as f: # np.save(f, xxx) # with open(f'{save_path}/label_npys/label_{i+1}.npy', 'wb') as f: # np.save(f, binary_map_1d) # with open(f'{save_path}/mask_npys/mask_{i+1}.npy', 'wb') as f: # np.save(f, mask_1d) # + id="sPfzwOOBocmF" sizes = np.load(f'./gdrive/MyDrive/Colab_Notebooks/computer_vision_project/img_data/sizes.npy') # + id="mUwuglt636Nw" num_blocks = [] for i
in range(10000): h, w = sizes[i,1], sizes[i,0] if max(h, w) <=1024: new_w = max(int((w//64)*64), 64) new_h = max(int((h//64)*64), 64) elif h <= w: new_w = 1024 new_h = max(int(((new_w*h/w)//64)*64), 64) else: new_h = 1024 new_w = max(int(((new_h*w/h)//64)*64), 64) num_blocks.append([new_h//64, new_w//64]) # + colab={"base_uri": "https://localhost:8080/"} id="gG_bVDjy5sQ8" outputId="95a9ba91-32e3-4065-d22b-8d85ef023281" num_blocks = np.asarray(num_blocks) num_blocks[0:10] # + colab={"base_uri": "https://localhost:8080/"} id="o650gMGVQi4Q" outputId="d2d9ef4c-a771-4b25-e33a-e0c6f58cda11" min(num_blocks[:,0]*num_blocks[:,1]), max(num_blocks[:,0]*num_blocks[:,1]) # + id="TEyPBLHQQCBi" # # create and store word center locations # word_center_locs = [] # for i in range(10000): # print(f'saving img_{i} numpy files ...') # h, w = sizes[i,1], sizes[i,0] # if max(h, w) <=1024: # new_w = max(int((w//64)*64), 64) # new_h = max(int((h//64)*64), 64) # elif h <= w: # new_w = 1024 # new_h = max(int(((new_w*h/w)//64)*64), 64) # else: # new_h = 1024 # new_w = max(int(((new_h*w/h)//64)*64), 64) # h_blocks, w_blocks = int(new_h/64), int(new_w/64) # gt_np = np.asarray(df[df['image_id']==i+1]) # # x->width, y->height # centers_x = ((gt_np[:,1]+gt_np[:,3]+gt_np[:,5]+gt_np[:,7])/4)/w * new_w //64 # centers_y = ((gt_np[:,2]+gt_np[:,4]+gt_np[:,6]+gt_np[:,8])/4)/h * new_h //64 # truth = np.concatenate([[centers_y], [centers_x]], axis=0) # truth_locs = [[int(truth[0][j]), int(truth[1][j])] for j in range(truth.shape[1])] # word_center_locs.append(truth_locs) # + id="glocorj0prGP" # word_center_locs[0:5] # + id="XVUw7hJGqB49" # with open(f"{parent_path}/npy_files/word_center_list", "wb") as fp: # Pickling # pickle.dump(word_center_locs, fp) # + id="pTbSvrE6qW2r" with open(f"{parent_path}/npy_files/word_center_list", "rb") as fp: # Unpickling label_list = pickle.load(fp) # + colab={"base_uri": "https://localhost:8080/"} id="Sz_trTZYqwSx" outputId="e9152e47-9b92-4524-b557-288fec5d15a6"
# NOTE(review): collapsed jupytext cells — model components.
# * mobilnet_block / mobile_net: a MobileNet-style CNN (depthwise conv ->
#   BatchNorm -> LeakyReLU -> pointwise conv -> BatchNorm -> LeakyReLU blocks)
#   that maps one (64, 64, 1) grayscale patch to a 512-d embedding via average
#   pooling, flatten and a Dense(512) head.
# * Encoder (Method 1): a stacked multi-head self-attention encoder over the
#   flattened 16*16 patch grid.  _position_embedding builds a fixed 2-D
#   sinusoidal embedding (first 256 channels encode x, last 256 encode y);
#   the attention mask zeroes padded blocks.  After the attention stack the
#   encoder concatenates the input embeddings back on, then Dense(256) ->
#   LeakyReLU -> Dense(1) produces one logit per block.  Followed by a smoke
#   test on random (2, 256, 512) inputs.
# * inception_blocks (Method 2): parallel 1x1 Conv2D and 3/5/7 SeparableConv2D
#   branches over the (16, 16, 512) feature map, concatenated, projected back
#   to 512 channels, residual-concatenated with the input, reshaped to
#   (256, ...) and reduced to one logit per block.
# Names Input/Conv2D/DepthwiseConv2D/BatchNormalization/LeakyReLU/keras/layers
# presumably come from `tensorflow.keras` imports in a truncated earlier
# chunk — TODO confirm.
label_list[0:5] # + [markdown] id="5edzn8xLw5eQ" # Model Construction # + id="_w_Xvk8Xw64i" # Build customized mobileNet model # MobileNet block def mobilnet_block (x, filters, strides): x = DepthwiseConv2D(kernel_size = 3, strides = strides, padding = 'same')(x) x = BatchNormalization(epsilon=1e-6)(x) x = LeakyReLU(alpha=0.1)(x) x = Conv2D(filters = filters, kernel_size = 1, strides = 1)(x) x = BatchNormalization(epsilon=1e-6)(x) x = LeakyReLU(alpha=0.1)(x) return x # + id="YmkcRUYNxjXp" #stem of the model def mobile_net(): input = Input(shape = (64, 64, 1)) x = Conv2D(filters = 32, kernel_size = 3, strides = 2, padding = 'same')(input) x = BatchNormalization(epsilon=1e-6)(x) x = LeakyReLU(alpha=0.1)(x) # main part of the model x = mobilnet_block(x, filters = 64, strides = 1) x = mobilnet_block(x, filters = 128, strides = 2) x = mobilnet_block(x, filters = 128, strides = 1) x = mobilnet_block(x, filters = 256, strides = 2) x = mobilnet_block(x, filters = 256, strides = 1) x = mobilnet_block(x, filters = 512, strides = 2) x = mobilnet_block(x, filters = 512, strides = 1) # for _ in range (5): # x = mobilnet_block(x, filters = 512, strides = 1) x = mobilnet_block(x, filters = 512, strides = 1) x = mobilnet_block(x, filters = 512, strides = 1) x = mobilnet_block(x, filters = 1024, strides = 2) x = mobilnet_block(x, filters = 1024, strides = 1) x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x) x = keras.layers.Flatten()(x) mobile_out = keras.layers.Dense(512)(x) return keras.Model(inputs=input, outputs=mobile_out) # + colab={"base_uri": "https://localhost:8080/"} id="OPsiGSizxzZB" outputId="11e663be-fe31-458a-a0d7-4738afbcd087" mobile_net = mobile_net() mobile_net.summary() # + [markdown] id="vhTb4VFmy3xB" # This is the 2d-transformer encoder used in Method 1 # + id="Wj92mr9NyAnl" class Encoder(tf.keras.Model): def __init__(self, head_size: int, num_heads: int, ff_dim: int, stack=3, dropout=0): super(Encoder, self).__init__() self.head_size = head_size
self.num_heads = num_heads self.ff_dim = ff_dim self.stack = stack self.scale = tf.sqrt(float(head_size)) self.pos_embeds = self._position_embedding() self.attention = [[layers.Attention(use_scale=False) for j in range(num_heads)] for i in range(stack)] self.attn_dense = [[[layers.Dense(head_size, use_bias=False) for k in range(2)] for j in range(num_heads)] for i in range(stack)] self.dropout = layers.Dropout(dropout) self.layer_norm = [[layers.LayerNormalization(epsilon=1e-6) for j in range(2)] for i in range(stack)] self.conv1 = [layers.Conv1D(filters=ff_dim, kernel_size=1) for i in range(stack)] self.leakyrelu = layers.LeakyReLU(alpha=0.1) self.conv2 = [layers.Conv1D(filters=head_size*num_heads, kernel_size=1) for i in range(stack)] self.avgpool = layers.GlobalAveragePooling1D() self.dense1 = layers.Dense(256) self.dropout2 = layers.Dropout(0.2) self.dense2 = layers.Dense(1) def get_angles(self, idx, d_model): angle_rates = 1 / np.power(10000, 4*idx/np.float32(d_model)) return angle_rates # 2d positional embedding def _position_embedding(self, d_model=512): cube = np.zeros((16, 16, d_model)) for x in range(16): for y in range(16): cube[x,y,0:256:2] = np.sin(x*self.get_angles(np.arange(0,128,1), d_model)) cube[x,y,1:256:2] = np.cos(x*self.get_angles(np.arange(0,128,1), d_model)) cube[x,y,256:512:2] = np.sin(y*self.get_angles(np.arange(0,128,1), d_model)) cube[x,y,257:512:2] = np.cos(y*self.get_angles(np.arange(0,128,1), d_model)) pos_enc = tf.cast(cube, dtype=tf.float32) return tf.reshape(tf.constant(pos_enc), [16*16, 512]) def call(self, inputs, mask, training=False): # inputs shape: (batch_size, 16*16, 512) initials = inputs batch_size, seq_len, d_model = inputs.shape pos_embeds = tf.broadcast_to(self.pos_embeds, [batch_size, seq_len, d_model]) pos_mask = tf.expand_dims(tf.constant(tf.cast(mask, dtype=tf.float32)), axis=-1) # print(pos_embeds.shape) # print(pos_embeds) inputs += pos_embeds * pos_mask for i in range(self.stack): # Attention x = inputs entries =
[[self.attn_dense[i][j][0](x), self.attn_dense[i][j][1](x)] for j in range(self.num_heads)] attentions = [self.attention[i][j](inputs = [entries[j][0]/self.scale, entries[j][1], entries[j][1]], mask = [mask, mask]) for j in range(self.num_heads)] x = tf.concat(attentions, axis=-1) # print(x.shape) # print(x) x = self.dropout(x, training=training) res = x + inputs # Feed Forward x = self.layer_norm[i][0](res) x = self.conv1[i](x) x = self.leakyrelu(x) x = self.dropout(x, training=training) x = self.conv2[i](x) inputs = self.layer_norm[i][1](x + res) # mask size is (batch_size, 16*16) # shape is (batch_size, 16*16, 512) x = tf.concat([inputs, initials], axis=-1) x = self.dropout2(x, training=training) x = self.dense1(x) x = self.leakyrelu(x) output = self.dense2(x) return output # + id="0qXaQsNW_go4" # pass sample inputs to build the model encoder = Encoder(head_size=64, num_heads=8, ff_dim=512, stack=2, dropout=0) a = np.random.normal(size=(2, 256, 512)) mk = np.zeros((2, 256), dtype=bool) mk[0, 0:10] = 1 mk[1, 0:20] = 1 a = tf.convert_to_tensor(a) mk = tf.convert_to_tensor(mk) # + colab={"base_uri": "https://localhost:8080/"} id="aLqbB8--_9eM" outputId="efa9c67f-7c5c-4382-a041-a2ed276ebdea" out_enc = encoder(a, mk, training=False) out_enc.shape # + colab={"base_uri": "https://localhost:8080/"} id="HfJjlTNCekcW" outputId="ba754887-bd67-4977-d5c0-8c43914837dc" encoder.summary() # + [markdown] id="hcvBC0hbzQQT" # This is the customized inception block used in Method 2 # + id="d__qzgZN9oGV" def inception_blocks(stacks=2, embed_dim=512): inputs = Input(shape = (16, 16, embed_dim)) x = inputs for i in range(stacks): conv_1 = layers.Conv2D(128, 1, padding="same")(x) conv_1 = BatchNormalization(epsilon=1e-6)(conv_1) conv_1 = LeakyReLU(alpha=0.1)(conv_1) conv_3 = layers.SeparableConv2D(128, 3, padding="same")(x) conv_3 = BatchNormalization(epsilon=1e-6)(conv_3) conv_3 = LeakyReLU(alpha=0.1)(conv_3) conv_5 = layers.SeparableConv2D(128, 5, padding="same")(x) conv_5 =
BatchNormalization(epsilon=1e-6)(conv_5) conv_5 = LeakyReLU(alpha=0.1)(conv_5) conv_7 = layers.SeparableConv2D(128, 7, padding="same")(x) conv_7 = BatchNormalization(epsilon=1e-6)(conv_7) conv_7 = LeakyReLU(alpha=0.1)(conv_7) # concatenate x = layers.Concatenate(axis=-1)([conv_1, conv_3, conv_5, conv_7]) x = layers.Conv2D(512, 1, padding="same")(x) x = BatchNormalization(epsilon=1e-6)(x) x = LeakyReLU(alpha=0.1)(x) outputs = layers.Concatenate(axis=-1)([x, inputs]) outputs = layers.Reshape((256, -1))(outputs) outputs = layers.Dropout(0.2)(outputs) outputs = layers.Dense(256)(outputs) outputs = LeakyReLU(alpha=0.1)(outputs) outputs = layers.Dense(1)(outputs) # Define the model model = keras.Model(inputs, outputs) return model # + colab={"base_uri": "https://localhost:8080/"} id="RM7VXtEvAPCv" outputId="d7d73e46-be4d-43ec-c970-f993bb9731d9" # Build model inception = inception_blocks() inception.summary() # + [markdown] id="ZC2rUaNwzZSP" # Build the combined model. If we set cnn_blendering to False, then this corresponds to the first method with 2d transformer encoder; if set cnn_blendering to True, then this corresponds to the second method with customized inception blocks.
# + id="RvuwZmT6zD4u" class MyModel(tf.keras.Model): def __init__(self, mobile_net, encoder, cnn_blendering=False): super(MyModel, self).__init__() self.mobile_net = mobile_net self.encoder = encoder self.cnn_blendering = cnn_blendering def call(self, inputs, training=False): # inputs format sample: [(5, 16, 64, 64, 1), (16, 7, 64, 64, 1)] batch_size = len(inputs) batch_seq_len = [0] batch_inputs = [] batch_h_shapes = [] batch_w_shapes = [] for j in range(batch_size): h_len = inputs[j].shape[0] w_len = inputs[j].shape[1] batch_h_shapes.append(h_len) batch_w_shapes.append(w_len) seq_len = h_len * w_len batch_seq_len.append(seq_len) for k in range(h_len): for l in range(w_len): batch_inputs.append(inputs[j][k,l,...]) segments = [np.sum(batch_seq_len[0:i+1]) for i in range(batch_size+1)] # [0, 80, 192] train_batch = tf.convert_to_tensor(batch_inputs) mobile_out = self.mobile_net(train_batch, training=training) t_inputs = [] attn_mask_2d = np.zeros((batch_size, 16, 16), dtype=bool) mask = [] for j in range(batch_size): vecs = mobile_out[segments[j]:segments[j+1],:] assert(vecs.shape[0] == batch_h_shapes[j]*batch_w_shapes[j]) vecs = tf.reshape(vecs, [batch_h_shapes[j], batch_w_shapes[j], 512]) # print(vecs.shape) # (5, 16, 512) paddings = tf.constant([[0, 16-batch_h_shapes[j]], [0, 16-batch_w_shapes[j]], [0, 0]]) # print(paddings.shape) vecs = tf.pad(vecs, paddings, "CONSTANT") # (16, 16, 512) if self.cnn_blendering: t_inputs.append(vecs) else: t_inputs.append(tf.reshape(vecs, [16*16, 512])) attn_mask_2d[j, 0:batch_h_shapes[j], 0:batch_w_shapes[j]] = 1 mask.append(tf.reshape(attn_mask_2d[j,...], [256])) train_tfm = tf.convert_to_tensor(t_inputs) attn_mask = tf.convert_to_tensor(mask) # print(train_tfm.shape) # (8, 16*16, 512) # print(attn_mask.shape) # (8, 16*16) # pass into the encoder corresponding to the value of self.cnn_blendering if self.cnn_blendering: output = self.encoder(train_tfm, training=training) else: output = self.encoder(train_tfm, attn_mask, 
training=training) return output # model_combined = MyModel(mobile_net, encoder) # + colab={"base_uri": "https://localhost:8080/"} id="FEqXLlRE0luY" outputId="5d654888-dc63-4514-88bf-2a5505d91a2e" # pass sample inputs to build the model model_combined = MyModel(mobile_net, inception, cnn_blendering=True) # model_combined = MyModel(mobile_net, encoder) out = model_combined([img_patches]) out.shape # + colab={"base_uri": "https://localhost:8080/"} id="YO8z9-XlAWuj" outputId="b4b9cec6-4fd8-4738-8dd5-23b62b7737f7" model_combined.summary() # + colab={"base_uri": "https://localhost:8080/"} id="YfYy8wT5ARG2" outputId="c2e76530-0074-4fb5-96e6-1c41b2e9d15c" out[0,0:10,:] # + id="EJV_yWjkRcEJ" # create dictionary to map indices to 4,000 training images and 1,000 validation images in our setup train_array = {} val_array = {} train_num_per_thousand = 400 val_num_per_thousand = 100 for i in range(10*train_num_per_thousand): train_array[i] = 1000*(i//train_num_per_thousand) + i%train_num_per_thousand for i in range(10*val_num_per_thousand): val_array[i] = 1000*(i//val_num_per_thousand) + i%val_num_per_thousand + train_num_per_thousand # + colab={"base_uri": "https://localhost:8080/"} id="sBSKpFE4U_Js" outputId="c0cc7136-a78a-44be-98b3-05d7ce7ce4a9" len(train_array), len(val_array), train_array[0], train_array[398], train_array[399], train_array[400], train_array[401] # + colab={"base_uri": "https://localhost:8080/"} id="DA-Hc19LI_29" outputId="8fcc8fc4-4516-4bf8-a0da-c4a5beb15c19" save_path = f'{parent_path}/npy_files' img_exp1 = tf.convert_to_tensor(np.load(f'{save_path}/img_npys/img_{1}.npy')) print(img_exp1.shape) h_blk, w_blk = img_exp1.shape[1]//64, img_exp1.shape[2]//64 # print(w_blk, h_blk) patches1 = tf.convert_to_tensor([[img_exp1[0,i*64:(i+1)*64,j*64:(j+1)*64,:] for j in range(w_blk)] for i in range(h_blk)]) img_exp2 = tf.convert_to_tensor(np.load(f'{save_path}/img_npys/img_{2}.npy')) print(img_exp2.shape) h_blk, w_blk = img_exp2.shape[1]//64, img_exp2.shape[2]//64 # 
# NOTE(review): collapsed jupytext cells — weight loading and the custom
# training loop.  Each of 40 epochs shuffles the 4000 training indices with a
# per-epoch seed, takes 512 of them in batches of 4, rebuilds each image's
# 64x64 patch grid plus its 16x16 label map (from `label_list`) and validity
# mask, and minimises a masked weighted binary cross-entropy
# (tf.nn.weighted_cross_entropy_with_logits, pos_weight=5.0) averaged per
# batch under tf.GradientTape/Adam; weights are checkpointed every 8th epoch
# and saved at the end.  NOTE(review): the inner patch comprehension reuses
# loop variables `i`/`j` from the enclosing batch loops — shadowing that works
# here only because the comprehension's own i/j rebind them; confirm before
# refactoring.
print(w_blk, h_blk) patches2 = tf.convert_to_tensor([[img_exp2[0,i*64:(i+1)*64,j*64:(j+1)*64,:] for j in range(w_blk)] for i in range(h_blk)]) patches1.shape, patches2.shape # + colab={"base_uri": "https://localhost:8080/"} id="_ceeleozR-51" outputId="bfc897d1-a27c-49f5-d7fc-1d34c72f12f6" model_combined = MyModel(mobile_net, encoder) out = model_combined([img_patches/255.]) out.shape # + colab={"base_uri": "https://localhost:8080/"} id="S1ln2XTcSH68" outputId="1c2b853c-db12-4224-86ea-7c03596c0099" # loading saved weights to resume training # model_combined.load_weights(f'{parent_path}/npy_files/model_t1_simplifiedv2_weights_middle_39') model_combined.load_weights(f'{parent_path}/npy_files/model_t1_simplified_blk_weights') model_combined.summary() # + id="v7vVi6KTAeLn" # create optimizer optimizer = keras.optimizers.Adam(learning_rate=2e-4) # + [markdown] id="Pbtj61Ko0WpZ" # Customized training loop # + colab={"base_uri": "https://localhost:8080/"} id="BBfsb5mTPR2T" outputId="d76d71ca-8c3d-40e5-ec47-39321e753d1f" save_path = f'{parent_path}/npy_files' epochs = 40 num = 512 batch_size = 4 for epoch in range(epochs): print("\nStart epoch", epoch) total_loss = 0 np.random.seed(epoch) arr = np.arange(4000) np.random.shuffle(arr) for i in range(num//batch_size): # 512/4 = 128, 512/8 = 64 with tf.GradientTape() as tape: batch_loss = 0 batch_train = [] batch_label = [] batch_mask = [] for j in range(batch_size): idx = batch_size*i+j img_idx = train_array[arr[idx]] img_np = np.load(f'{save_path}/img_npys/img_{img_idx+1}.npy') # img start from 1 here img_tensor = tf.convert_to_tensor(img_np) # print(img_tensor.shape) h_blk, w_blk = img_tensor.shape[1]//64, img_tensor.shape[2]//64 # print(h_blk, w_blk) patches = tf.convert_to_tensor([[img_tensor[0,i*64:(i+1)*64,j*64:(j+1)*64,:] for j in range(w_blk)] for i in range(h_blk)]) batch_train.append(patches) lb = label_list[img_idx] label_map = np.zeros((16, 16)) for k in range(len(lb)): label_map[lb[k][0], lb[k][1]] = 1 label_map =
label_map.reshape((256,)) mask = np.zeros((16, 16)) mask[0:h_blk, 0:w_blk] = 1 mask = mask.reshape((256,)) batch_label.append(label_map) batch_mask.append(mask) batch_label = tf.convert_to_tensor(batch_label, dtype=tf.float32) batch_mask = tf.convert_to_tensor(batch_mask, dtype=tf.float32) out = model_combined(batch_train, training=True) # out.shape: (batch_size, 256, 1) # label shape: (batch_size, 256, 1) # weighted binary cross entropy loss, weight is set to 5.0 loss_matrix = tf.nn.weighted_cross_entropy_with_logits(batch_label, out[...,0], pos_weight=tf.constant(5.0)) loss = K.sum(loss_matrix * batch_mask)/batch_size total_loss += loss * batch_size grads = tape.gradient(loss, model_combined.trainable_weights) optimizer.apply_gradients(zip(grads, model_combined.trainable_weights)) # Logging. if i % 16 == 0 or i == num//batch_size-1: # Print metrics print("loss at %d, epoch %d: %f" % (i, epoch, loss)) print("total loss after epoch %d: %.4f" % (epoch, total_loss)) print("average loss after epoch %d: %.4f" % (epoch, total_loss/num)) # save model weights periodically after certain epochs if epoch % 8 == 7: model_combined.save_weights(f"{save_path}/model_t1_simplified_blk_weights_middle_{epoch}") # save trained weights at the end model_combined.save_weights(f"{save_path}/model_t1_simplified_blk_weights") # using transformer, after 40 epochs, avg loss dropped from >70 to around 43 # using inception blocks, after 40 epochs, avg loss dropped from >70 to around 38 # + id="yT2GgSDlLy72" # model_combined.save_weights(f"{save_path}/model_t1_simplified_weights_small_trial") # + colab={"base_uri": "https://localhost:8080/"} id="oiOKh6RixipY" outputId="bbd22ff3-ca37-4f9c-fabe-5bdbe1e19ae9" train_array[0], train_array[399], val_array[96], val_array[99], val_array[100], val_array[101] # + [markdown] id="JHjgEsz30kzM" # Examine the model outputs on sample images # + colab={"base_uri": "https://localhost:8080/"} id="cUNH4GoIx5TI" outputId="bacbe62e-8b1c-4787-b9cd-5fd71ab24ce6"
# NOTE(review): collapsed jupytext cells — evaluation.
# 1) Qualitative check: loads a sample image (from .npy and from the raw JPEG,
#    resized to 64-multiples), patches it, runs the model and inspects the
#    16x16 logit map around the known label positions.
# 2) Batched prediction over the 1000 validation images (batches of 25),
#    collecting per-image (16, 16) logit maps, optionally pickled/unpickled.
# 3) Block-level precision / recall / F1 over valid blocks only, comparing
#    logits against a pre-sigmoid `threshold`; recorded results for the
#    transformer and inception variants follow, plus the sigmoid mapping of
#    the thresholds tried.
img_np = np.load(f'{parent_path}/npy_files/img_npys/img_{2}.npy') # first start from 1 img_tensor = tf.convert_to_tensor(img_np) # print(img_exp1.shape) h_blk, w_blk = img_tensor.shape[1]//64, img_tensor.shape[2]//64 print(h_blk, w_blk) patches = tf.convert_to_tensor([[img_tensor[0,i*64:(i+1)*64,j*64:(j+1)*64,:] for j in range(w_blk)] for i in range(h_blk)]) print(patches.shape) # + colab={"base_uri": "https://localhost:8080/"} id="jvbP_Yshz2yd" outputId="9024a0cf-ec8f-4749-8e7a-94e72091f2dc" label_list[train_array[1]] # + id="rophtisa0M0P" img = Image.open('./gdrive/MyDrive/Colab_Notebooks/computer_vision_project/img_data/ImagesPart1/tr_img_00002.jpg') img = tf.image.rgb_to_grayscale(np.asarray(img)) h, w = img.shape[0], img.shape[1] if max(h, w) <=1024: new_w = max(int((w//64)*64), 64) new_h = max(int((h//64)*64), 64) elif h <= w: new_w = 1024 new_h = max(int(((new_w*h/w)//64)*64), 64) else: new_h = 1024 new_w = max(int(((new_h*w/h)//64)*64), 64) img = tf.image.resize(tf.reshape(img, [1,h,w,1]), (new_h, new_w), method = tf.image.ResizeMethod.BILINEAR) # + colab={"base_uri": "https://localhost:8080/"} id="aPGL85F81s0V" outputId="53281393-5c25-4fb2-d99c-26212f771686" print(new_h, new_w) h_blocks, w_blocks = int(new_h/64), int(new_w/64) print(h_blocks, w_blocks) # + colab={"base_uri": "https://localhost:8080/", "height": 340} id="B4pt8hNW1w9h" outputId="10bc4378-cdc4-4736-dd04-7736e11ddbfc" img_patches = tf.convert_to_tensor([[img[0,i*64:(i+1)*64,j*64:(j+1)*64,:] for j in range(w_blocks)] for i in range(h_blocks)]) print(img_patches.shape) plt.figure(figsize = (10,5)) for i in range(h_blocks*w_blocks): plt.subplot(h_blocks, w_blocks, i+1) plt.imshow(img_patches[i//w_blocks][i%w_blocks][...,0], cmap='gray') # + colab={"base_uri": "https://localhost:8080/"} id="k8Pf2Dss2M4F" outputId="59a319a0-bdf4-4ab5-f04b-5b382d5adc1c" output = model_combined([img_patches/255], training=False) # the demo is in range [0,255], needs to divide by 255 before # feed into the model
output.shape # + colab={"base_uri": "https://localhost:8080/"} id="PxIarwxp2haF" outputId="83a7898e-1b82-457d-8fff-b75ac080fa5e" output = output[0,:,0].numpy().reshape(16, 16) output = tf.convert_to_tensor(output, dtype=tf.float32) output # + colab={"base_uri": "https://localhost:8080/"} id="8BYNXI1g2p-h" outputId="64d3502b-e6e8-4b56-ec1f-c3bc19d7357c" # we can see that generally the values corresponding to the actual labels have positive value # [[6, 1], [6, 4], [7, 2], [7, 3], [7, 4]] y = output[6:8,:] y # + colab={"base_uri": "https://localhost:8080/"} id="7IrhXc5KoGzc" outputId="5c73b0e0-b832-4fbd-f337-8457f21fc6d1" # [[6, 1], [6, 4], [7, 2], [7, 3], [7, 4]] y > 0 # + colab={"base_uri": "https://localhost:8080/"} id="Lpdd7Hkf1q_y" outputId="9224ab49-0b0a-43dc-958b-20f1c6b73b11" # batched prediction of 1,000 validation images val_outputs = [] val_batch = 25 for i in range(len(val_array)//val_batch): # 1000//25 = 40 print(i) batch_val = [] for j in range(val_batch): idx = val_batch*i+j img_idx = val_array[idx] img_np = np.load(f'{save_path}/img_npys/img_{img_idx+1}.npy') # img start from 1 here img_tensor = tf.convert_to_tensor(img_np) h_blk, w_blk = img_tensor.shape[1]//64, img_tensor.shape[2]//64 patches = tf.convert_to_tensor([[img_tensor[0,i*64:(i+1)*64,j*64:(j+1)*64,:] for j in range(w_blk)] for i in range(h_blk)]) batch_val.append(patches) outputs = model_combined(batch_val, training=False) assert(outputs.shape == (val_batch, 256, 1)) for j in range(val_batch): output_slice = outputs[j,:,0].numpy().reshape(16, 16) val_outputs.append(output_slice) # + id="55CZfcyC54xP" # import pickle # with open(f"{parent_path}/npy_files/val_outputs_v2_blk", "wb") as fp: # Pickling # pickle.dump(val_outputs, fp) # + colab={"base_uri": "https://localhost:8080/"} id="kGhB6x_t8yS_" outputId="eecd4410-fe63-4b12-96c7-71d2b9c7a093" with open(f"{parent_path}/npy_files/val_outputs_v2_blk", "rb") as fp: # Unpickling val_outputs = pickle.load(fp) len(val_outputs),
val_outputs[0].shape # + [markdown] id="_msoxJoB1DZ3" # Compute F1-score on validation images. Note that we can change the threshold to determine if we are more strict or relaxed on having positive outputs. Due to the tensorflow loss function setup, the threshold here is the value before sigmoid activation (so a threshold of 0. actually means a probability of 0.5). Since we are evaluating on validation sets, we are free to change the threshold to find the best value as long as we keep it fixed during actual testing. # + colab={"base_uri": "https://localhost:8080/"} id="u89gZP9o1GfF" outputId="2df79c67-5402-4d1d-9d81-61be31fbcae5" TP, FP, FN = [0, 0, 0] threshold = 1. for idx in range(len(val_array)): val_map = val_outputs[idx] val_idx = val_array[idx] h_blk, w_blk = num_blocks[val_idx, 0], num_blocks[val_idx, 1] lb = label_list[val_idx] label_map = np.zeros((16, 16)) for k in range(len(lb)): label_map[lb[k][0], lb[k][1]] = 1 for i in range(h_blk): for j in range(w_blk): if label_map[i][j] == 1 and val_map[i][j] > threshold: TP += 1 elif label_map[i][j] == 1 and val_map[i][j] <= threshold: FN += 1 elif label_map[i][j] == 0 and val_map[i][j] > threshold: FP += 1 print(TP, FP, FN, TP+FN) precision, recall = TP/(TP+FP), TP/(TP+FN) f1_score = 2*precision*recall/(precision+recall) print(precision, recall, f1_score) # + id="zUaSh0cY_kfG" # transformer version # TP=3261, FP=4099, FN=3402, total_tags=6663; # threshold = 1. (before sigmoid): # precision=0.3923, recall=0.5669, f1_score=0.4637; # threshold = 1.39 (before sigmoid): [highest one here] # precision=0.4665, recall=0.4818, f1_score=0.4740; # threshold = 1.5 (before sigmoid): # precision=0.4889, recall=0.4570, f1_score=0.4724; # threshold = 1.75 (before sigmoid): # precision=0.5341, recall=0.4025, f1_score=0.4591; # + id="H3jCFIDdLb_x" # inception block version # TP=3509, FP=3824, FN=3154, total_tags=6663; # threshold = 0.5 (before sigmoid): # precision=0.4054, recall=0.6252, f1_score=0.4919; # threshold = 1.
(before sigmoid): [highest one here] # precision=0.4785, recall=0.5266, f1_score=0.5014; # threshold = 1.39 (before sigmoid): # precision=0.5372, recall=0.4430, f1_score=0.4856; # threshold = 1.5 (before sigmoid): # precision=0.5514, recall=0.4162, f1_score=0.4743; # + colab={"base_uri": "https://localhost:8080/"} id="Zo2_8EbaAGbz" outputId="af2d1f0d-8ea7-463e-f745-3475c055af58" # the mapping from threshold before sigmoid to the corresponding probability after sigmoid tf.sigmoid([0.5, 1., 1.39, 1.5, 1.75])
MLT_text_detection_simplified.ipynb
# NOTE(review): collapsed jupytext notebook — smoke-tests the third-party
# `ctd` package readers against bundled test data in ../tests/data: XBT EDF,
# FSI text, Seabird CNV (split into down/up casts), .ros rosette summary
# (grouped/averaged by index) and .btl bottle files, plotting casts along the
# way.  The trailing semicolon on ax.axis(...) suppresses notebook echo.
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline from pathlib import Path import pandas as pd import ctd path = Path('..', 'tests', 'data') # - cast = ctd.from_edf(path.joinpath('XBT.EDF.gz')) ax = cast['temperature'].plot_cast() ax.axis([20, 24, 19, 0]); cast = ctd.from_fsi(path.joinpath('FSI.txt.gz')) downcast, upcast = cast.split() ax = downcast['TEMP'].plot_cast() ax.grid(True) cast = ctd.from_cnv(path.joinpath('CTD_big.cnv.bz2')) downcast, upcast = cast.split() ax = downcast['t090C'].plot_cast() ax.grid(True) # + from ctd import rosette_summary ros = rosette_summary(path.joinpath('CTD', 'g01l01s01.ros')) ros = ros.groupby(ros.index).mean() ros # + bottles = ctd.from_btl(path.joinpath('btl', 'bottletest.btl')) bottles.head()
notebooks/00-reading-data.ipynb
# NOTE(review): collapsed jupytext cells — chapter introduction: a spring
# launcher fires a projectile at a target 1080 inches away; the height was
# measured at several distances over four trials (all in inches, launch point
# and target at height 0.0).  The measurements from ./data/launcherData.csv
# are inlined below by the `%load` magic: column 1 is distance from the
# target, the remaining columns are the four trials' heights.
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # UNDERSTANDING EXPERIMENTAL DATA # This chapter is about **understanding experimental data**. # # We will # # * make extensive use of `plotting to visualize the data`, and # # * show how to use `linear regression` to build a model of experimental data. # ## The Behavior of Projectiles # # We decided to use one of our springs to build `a device capable of launching a projectile`. # # We used the device **four** times to fire a projectile at **a target** 1080 inches from the **launching point**. # # Each time, we measured `the height of the projectile` at various `distances from the launching point`. # # * The `launching point` and the `target` were at `the same height`, which we treated as `0.0` in our measurements. # # ![projectile](./img/projectile.jpg) # # The data was stored in a file **./data/launcherData.csv** # # * The `first column` contains `distances of the projectile from the target`. # # * The `other columns` contain `the height of the projectile at that distance` for each of the four trials. # # All of the measurements are in **inches**. # # %load ./data/launcherData.csv Distance,Trial1,Trial2,Trial3,Trial4 1080,0.0,0.0,0.0,0.0 1044,2.25,3.25,4.5,6.5 1008,5.25,6.5,6.5,8.75 972,7.5,7.75,8.25,9.25 936,8.75,9.25,9.5,10.5 900,12.0,12.25,12.5,14.75 864,13.75,16.0,16.0,16.5 828,14.75,15.25,15.5,17.5 792,15.5,16.0,16.6,16.75 756,17.0,17.0,17.5,19.25 720,17.5,18.5,18.5,19.0 540,19.5,20.0,20.25,20.5 360,18.5,18.5,19.0,19.0 180,13.0,13.0,13.0,13.0 0,0.0,0.0,0.0,0.0 # The following code was used to plot : # # * the **mean altitude of the projectile(Y)** against the **distance from the point of launch(X)**. # # * the best `linear` and `quadratic` fits to the points.
#
# ```python
# d2h={'d':None,'h':[[],[],[],[]]}
# ```

# +
import csv


def get_trajectory_data(file_name):
    """Read trajectory measurements from *file_name*.

    The CSV's first column holds distances from the target; every remaining
    column holds the measured heights of one trial at those distances.

    Returns a tuple ``(d2hs, trials, rows)`` where ``d2hs['d']`` is the list
    of distances, ``d2hs['h'][i]`` the height list of trial ``i``, ``trials``
    the number of trial columns, and ``rows`` the number of data rows read.
    """
    # Use a context manager so the file is closed even if a row fails to
    # parse (the original open()/close() pair leaked the handle on error).
    # newline='' is what the csv module expects for the file object.
    with open(file_name, 'r', newline='') as csvfile:
        csvdata = csv.DictReader(csvfile)
        # fields
        fields = csvdata.fieldnames
        trials = len(fields) - 1
        d2hs = {'d': [], 'h': [[] for i in range(trials)]}
        # values
        rows = 0
        for row in csvdata:
            # the distance in first column
            d2hs['d'].append(float(row[fields[0]]))
            for i in range(trials):
                cur_height = float(row[fields[i+1]])
                d2hs['h'][i].append(cur_height)
            rows += 1
    return d2hs, trials, rows


# +
file_name = './data/launcherData.csv'
d2hs, trials, rows = get_trajectory_data(file_name)
print(f"trials:{trials}, rows={rows}")

# dump the parsed data as a tab-separated table: header row of field names,
# then one row per distance with the four trial heights alongside it
for key in d2hs.keys():
    print(key, end="\t")
print("")
for i in range(len(d2hs['d'])):
    print(d2hs['d'][i], end="\t")
    for j in range(len(d2hs['h'])):
        cur_column = d2hs['h'][j]
        print(cur_column[i], end="\t")
    print("")
# -

# ### 1 Linear Regression to Find a Fit
#
# Whenever we fit any curve (including a line) to data we need some way to decide
# `which curve is the best fit for the data`.
#
# This means that we need to define <b style="color:blue">an objective function</b> that provides
# `a quantitative assessment of how well the curve fits the data`.
#
# Once we have such `a function`, finding the best fit can be formulated as
#
# * finding a curve that <b style="color:blue">minimizes (or maximizes)</b> the value of that function, i.e., as **an optimization problem**
#
# The most commonly used objective function is called <b style="color:blue">least squares</b>,
#
# The objective function is then defined as
#
# $$\sum_{i=0}^{len(observed)-1}(observed[i]-predicted[i])^2$$
#
#
# **Numpy.polyfit**
#
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html
#
# `Numpy` provides a function, `polyfit`, that finds the best Least squares polynomial fit.
# ```python
# numpy.polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False)
# ```
# `polyfit` implements **linear regression** by least squares: it fits a
# polynomial of degree `deg` to the points (x, y),
#
# $$p(x) = p[0] * x^{deg} + ... + p[deg]$$
#
# and returns the coefficient vector $p$ (highest power first) that minimises
# the summed squared error.
#
# ```python
# numpy.polyfit(observedXVals, observedYVals, n)
# numpy.polyfit(observedXVals, observedYVals, 1) # y = ax + b
# numpy.polyfit(observedXVals, observedYVals, 2) # y = ax^2 + bx+c
# ```

# +
import numpy as np

# six equally spaced sample points and their measured values
x = np.arange(6, dtype=float)   # 0.0, 1.0, ..., 5.0
y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])

# straight-line (degree-1) least-squares fit: fit == [slope, intercept]
fit = np.polyfit(x, y, 1)
fit
# -

# quadratic (degree-2) least-squares fit: fit == [a, b, c] for a*x^2 + b*x + c
fit = np.polyfit(x, y, 2)
fit

# **numpy.polyval(p, x)**
#
# Evaluates the polynomial with coefficient sequence $p$ at each value of the
# sequence $x$.
# NOTE(review): collapsed jupytext cells — `np.polyval` demo, an elementwise
# NumPy-array evaluation of the quadratic (a*v**2 + b*v + c, replacing the
# commented-out explicit loop), then the plotting pipeline:
# `get_mean_height` averages the four trials per distance and
# `process_trajectories` scatter-plots the means against distance with the
# degree-1 and degree-2 least-squares fits overlaid (matplotlib).  Note
# `process_trajectories` reads the global `trials` for its title.
# > https://numpy.org/doc/stable/reference/generated/numpy.polyval.html # v=np.array([1.1,2.3]) z= np.polyval(fit,v) z # **Elementwise operations** on arrays # # linear fit : `a,b = np.polyfit(x,y, 1)` # # quadratic fit: `a,b,c = np.polyfit(x, y, 2)` # # ```python # y = a*x^2 + b*x+c # ``` # x,y : the array # a,b,c=np.polyfit(x, y, 2) v=np.array([1.1,2.3]) #z=[] #for item in v: # z.append(a*(item**2)+b*item+c z=a*(v**2)+b*v+c z # ### 2 Using numpy.polyfit to fit the experimental data # # # # + import numpy as np import matplotlib.pyplot as plt def get_mean_height(d2hs,trials,rows): distances = np.array(d2hs['d']) mean_heights = np.empty(rows) for i in range(rows): mean_heights[i] = d2hs['h'][0][i] for j in range(trials-1): mean_heights[i] += d2hs['h'][j+1][i] mean_heights[i] /= trials return distances,mean_heights def process_trajectories(distances, mean_heights): plt.title(f'Trajectory of Projectile (Mean Height of {trials} Trials)') plt.xlabel('Inches from Launch Point') plt.ylabel('Mean Height of the projectile') # the experimental data plt.plot(distances, mean_heights, 'ro') # Linear Fit fit = np.polyfit(distances, mean_heights, 1) altitudes=np.polyval(fit,distances) plt.plot(distances, altitudes, 'g-*', label=f"Linear Fit") # Quadratic Fit fit = np.polyfit(distances, mean_heights, 2) altitudes=np.polyval(fit,distances) plt.plot(distances, altitudes, 'b:x', label=f"Quadratic Fit") plt.legend() # - distances,mean_heights =get_mean_height(d2hs,trials,rows) process_trajectories(distances,mean_heights) # A quick look at the plot on the right makes it quite clear that # # * a **quadratic** fit is far better than a linear one # # <b style="color:blue;font-size:120%">how bad a fit is the line and how good is the quadratic fit?</b> # # * **Coefficient of Determination(决定系数)** # --- # #### something about plotting lines with matplotlib.pyplot # # * [Unit2-1-Matplotlib](./Unit2-1-Matplotlib.ipynb) # # The function `process_trajectories` plot lines with a legend(图例说明) # #
**line styles** # # * the linear fit for the data.: # ```python # plt.plot(distances, altitudes, 'g-*', label = 'Linear Fit') # ``` # # * the quadratic fit for the data.: # ```python # plt.plot(distances, altitudes, 'b:x', label = 'Quadratic Fit') # ``` # # **Legend of matplotlib.pyplot** # # * **label**:a legend of line # # * **Place a legend on the fig** # # * `plt.legend(loc ='best')` # # * Location String: **`'best'`** # # > reference: [matplotlib.pyplot.legend](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.legend.html): # # --- def r_squared(measured, predicted): """Assumes measured a one-dimensional array of measured values predicted a one-dimensional array of predicted values Returns coefficient of determination""" # RSS: residual sum of squares estimate_error = ((predicted - measured)**2).sum() # the mean of the observed values. mean_measured = measured.mean() # TSS: total sum of squares variability = ((measured - mean_measured)**2).sum() return 1.0 - estimate_error/variability # + import numpy as np import matplotlib.pyplot as plt def process_trajectories( distances , mean_heights): plt.title(f'Trajectory of Projectile (Mean Height of {trials} Trials)') plt.xlabel('Inches from Launch Point') plt.ylabel('Mean Height of the projectile') # the experimental data plt.plot(distances, mean_heights, 'ro') # Linear Fit fit= np.polyfit(distances, mean_heights, 1) altitudes =np.polyval(fit,distances) r_line = r_squared(mean_heights, altitudes) plt.plot(distances, altitudes, 'g-*', label=f"Linear Fit, $R^2$= {r_line:.2f}") # Quadratic Fit fit = np.polyfit(distances, mean_heights, 2) altitudes =np.polyval(fit,distances) r_quad = r_squared(mean_heights, altitudes) plt.plot(distances, altitudes, 'b:x',label=f"Quadratic Fit, $R^2$= {r_quad:.2f}") plt.legend() # - process_trajectories(distances , mean_heights) # This tells us that # # * 1 less than 2% of the variation in the measured data can be explained by the linear model, # # * 2 more than **98%** of the 
variation can be explained by the quadratic model

# #### Matplotlib.pyplot: Formatting text: LaTeX

# ```python
# plt.plot(distances, altitudes, 'g-*', label=f"Linear Fit, $R^2$= {r_line:.2f}")
# ```

# Matplotlib has great support for LaTeX. All we need to do is to use dollar signs to encapsulate the LaTeX in any text (legend, title, label, etc.).
#
# * For example, `$R^2$`. $R^2$
notebook/Unit2-2-Understanding_Experimental_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: html # language: python # name: html # --- # + [markdown] _cell_guid="d4c08f48-fe23-4ddb-ac46-d97f05397514" _uuid="f2156d1dd26a1243e18512002e10872c5bd7271e" # # Optimization of CNN - TPE # # In this notebook, we will optimize the hyperparameters of a CNN using the define-by-run model from Optuna. # + # For reproducible results. # See: # https://keras.io/getting_started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development import os os.environ['PYTHONHASHSEED'] = '0' import numpy as np import tensorflow as tf import random as python_random # The below is necessary for starting Numpy generated random numbers # in a well-defined initial state. np.random.seed(123) # The below is necessary for starting core Python generated random numbers # in a well-defined state. python_random.seed(123) # The below set_seed() will make random number generation # in the TensorFlow backend have a well-defined initial state. 
# For further details, see: # https://www.tensorflow.org/api_docs/python/tf/random/set_seed tf.random.set_seed(1234) # + _cell_guid="f67b9393-8ea1-4e23-b856-2ce149cfe421" _execution_state="idle" _uuid="72334cb006d02a4bcfc2a2fe622524eba824c6f8" import itertools from functools import partial import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix # - from keras.utils.np_utils import to_categorical from keras.models import Sequential, load_model from keras.layers import Dense, Flatten, Conv2D, MaxPool2D from keras.optimizers import Adam, RMSprop import optuna # + [markdown] _cell_guid="6d2fb3e6-ab71-4974-b5a2-4af1ebdb99f4" _execution_state="idle" _uuid="86061d98eccaa02efe0dab0fa3884e71fcf4c310" # # Data Preparation # # The dataset contains information about images, each image is a hand-written digit. The aim is to have the computer predict which digit was written by the person, automatically, by "looking" at the image. # # Each image is 28 pixels in height and 28 pixels in width (28 x 28), making a total of 784 pixels. Each pixel value is an integer between 0 and 255, indicating the darkness in a gray-scale of that pixel. # # The data is stored in a dataframe where each each pixel is a column (so it is flattened and not in the 28 x 28 format). # # The data set the has 785 columns. The first column, called "label", is the digit that was drawn by the user. The rest of the columns contain the pixel-values of the associated image. 
# + _cell_guid="5e51d00e-62fd-4141-bf73-50ac4f2da7d0" _execution_state="idle" _uuid="84bbd5ab8d7895bd430d5ecfe2f7ddf77baa7b74" # Load the data data = pd.read_csv("../mnist.csv") # first column is the target, the rest of the columns # are the pixels of the image # each row is 1 image data.head() # + # split dataset into a train and test set X_train, X_test, y_train, y_test = train_test_split( data.drop(['label'], axis=1), # the images data['label'], # the target test_size = 0.1, random_state=0) X_train.shape, X_test.shape # + _cell_guid="86570a36-5c20-460a-9dfd-2070548532a7" _execution_state="idle" _uuid="1213b979d5ed3e0d13824d17d694c79d2ece92fa" # number of images for each digit g = sns.countplot(x=y_train) plt.xlabel('Digits') plt.ylabel('Number of images') # + [markdown] _cell_guid="5aea4062-1790-4987-b739-c4bebd79030f" _uuid="b7b1b1d36243c885e57374c8b60c5a7e10abe922" # There are roughly the same amount of images for each of the 10 digits. # + [markdown] _cell_guid="6812040d-80ad-43d2-a571-275f4f20067b" _uuid="2954681f25f0dcbe986e6914396cdbce61db591f" # ## Image re-scaling # # We re-scale data for the CNN, between 0 and 1. # + _cell_guid="cdc4340b-6e24-4e12-be99-ac806098ff17" _execution_state="idle" _uuid="b5d4f8fcf2a967e2c7d57daedf95aa8c5ab7f8cb" # Re-scale the data # 255 is the maximum value a pixel can take X_train = X_train / 255 X_test = X_test / 255 # + [markdown] _cell_guid="7413df94-bcb9-4f75-b174-c127d4445766" _uuid="a66741bf1ac597094f3a3166877008feef27c519" # ## Reshape # # The images were stored in a pandas dataframe as 1-D vectors of 784 values. For a CNN with Keras, we need tensors with the following dimensions: width x height x channel. # # Thus, we reshape all data to 28 x 2 8 x 1, 3-D matrices. # # The 3rd dimension corresponds to the channel. RGB images have 3 channels. MNIST images are in gray-scale, thus they have only one channel in the 3rd dimension. 
# + _cell_guid="34b6a5f7-8fd2-4387-8ef4-c9dc19584fed" _execution_state="idle" _uuid="f0a6ad80dab8e0f2c2e46165ccd9cd82dd162bc3" # Reshape image in 3 dimensions: # height: 28px X width: 28px X channel: 1 X_train = X_train.values.reshape(-1,28,28,1) X_test = X_test.values.reshape(-1,28,28,1) # + [markdown] _cell_guid="bdb422e2-bdec-444f-97a5-283a1e54bf2c" _uuid="39b7a31e843bac6b705461bcce89da216b91799e" # ## Target encoding # + # the target is 1 variable with the 9 different digits # as values y_train.unique() # + _cell_guid="4b7f3e78-44dc-4561-b1f0-9429ee024cf4" _execution_state="idle" _uuid="cabefd1478d5c1bdfe57fd6a34395340916a854c" # For Keras, we need to create 10 dummy variables, # one for each digit # Encode labels to one hot vectors (ex : digit 2 -> [0,0,1,0,0,0,0,0,0,0]) y_train = to_categorical(y_train, num_classes = 10) y_test = to_categorical(y_test, num_classes = 10) # the new target y_train # + [markdown] _cell_guid="adbeacf0-0dc0-4675-b2df-9c9663750f32" _uuid="60eed15ec5bc0d354385301789ecb8538fc02267" # Let's print some example images. # + _cell_guid="5f76131b-4ba0-45f1-a98c-bd4e7d561793" _execution_state="idle" _uuid="e0dae8943d3d35f075dba3d7ba31bde1d4bf2ff4" # Some image examples g = plt.imshow(X_train[0][:,:,0]) # + # Some image examples g = plt.imshow(X_train[10][:,:,0]) # + [markdown] _cell_guid="d5265777-aeb3-449d-b171-d88cad74c0a4" _uuid="5fa18b37a9acd9e098bac1d12264b0dd4310fdd3" # # Define-by-Run design # # We create the CNN and add the sampling space for the hyperparameters as we go. This is the Desing-by-run concept. # + # we will save the model with this name path_best_model = 'cnn_model_2.h5' # starting point for the optimization best_accuracy = 0 # + # function to create the CNN def objective(trial): # Start construction of a Keras Sequential model. model = Sequential() # Convolutional layers. 
# We add the different number of conv layers in the following loop: num_conv_layers = trial.suggest_int('num_conv_layers', 1, 3) for i in range(num_conv_layers): # Note, with this configuration, we sample different filters, kernels # stride etc, for each convolutional layer that we add model.add(Conv2D( filters=trial.suggest_categorical('filters_{}'.format(i), [16, 32, 64]), kernel_size=trial.suggest_categorical('kernel_size{}'.format(i), [3, 5]), strides=trial.suggest_categorical('strides{}'.format(i), [1, 2]), activation=trial.suggest_categorical( 'activation{}'.format(i), ['relu', 'tanh']), padding='same', )) # we could also optimize these parameters if we wanted: model.add(MaxPool2D(pool_size=2, strides=2)) # Flatten the 4-rank output of the convolutional layers # to 2-rank that can be input to a fully-connected Dense layer. model.add(Flatten()) # Add fully-connected Dense layers. # The number of layers is a hyper-parameter we want to optimize. # We add the different number of layers in the following loop: num_dense_layers = trial.suggest_int('num_dense_layers', 1, 3) for i in range(num_dense_layers): # Add the dense fully-connected layer to the model. # This has two hyper-parameters we want to optimize: # The number of nodes (neurons) and the activation function. model.add(Dense( units=trial.suggest_int('units{}'.format(i), 5, 512), activation=trial.suggest_categorical( 'activation{}'.format(i), ['relu', 'tanh']), )) # Last fully-connected dense layer with softmax-activation # for use in classification. model.add(Dense(10, activation='softmax')) # Use the Adam method for training the network. 
    optimizer_name = trial.suggest_categorical(
        'optimizer_name', ['Adam', 'RMSprop'])

    if optimizer_name == 'Adam':
        # NOTE(review): `lr` is the legacy Keras keyword; recent Keras
        # versions renamed it to `learning_rate` -- confirm against the
        # installed Keras version.
        # NOTE(review): the learning rate is drawn uniformly on a linear
        # scale; a log scale (suggest_float(..., log=True)) is the usual
        # choice for learning rates -- consider confirming.
        optimizer = Adam(lr=trial.suggest_float('learning_rate', 1e-6, 1e-2))
    else:
        optimizer = RMSprop(
            lr=trial.suggest_float('learning_rate', 1e-6, 1e-2),
            momentum=trial.suggest_float('momentum', 0.1, 0.9),
        )

    # In Keras we need to compile the model so it can be trained.
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # train the model
    # we use 3 epochs to be able to run the notebook in a "reasonable"
    # time. If we increase the epochs, we will have better performance
    # this could be another parameter to optimize in fact.
    history = model.fit(
        x=X_train,
        y=y_train,
        epochs=3,
        batch_size=128,
        validation_split=0.1,
    )

    # Get the classification accuracy on the validation-set
    # after the last training-epoch.
    accuracy = history.history['val_accuracy'][-1]

    # Save the model if it improves on the best-found performance.
    # We use the global keyword so we update the variable outside
    # of this function.
    global best_accuracy

    # If the classification accuracy of the saved model is improved ...
    if accuracy > best_accuracy:

        # Save the new model to harddisk.
        # Training CNNs is costly, so we want to avoid having to re-train
        # the network with the best found parameters. We save it instead
        # as we search for the best hyperparam space.
        model.save(path_best_model)

        # Update the classification accuracy.
        best_accuracy = accuracy

    # Delete the Keras model with these hyper-parameters from memory.
    del model

    # The study is created with direction='maximize', so we return the raw
    # validation accuracy. (Negating the score is only needed with
    # minimizing optimizers such as scikit-optimize; the previous comment
    # here claimed negation was required, which does not apply to this
    # Optuna setup.)
    return accuracy

# +
# we need this to store the search
# we will use it in the following notebook

study_name = "cnn_study_2"  # unique identifier of the study.
storage_name = "sqlite:///{}.db".format(study_name) # + study = optuna.create_study( direction='maximize', study_name=study_name, storage=storage_name, load_if_exists=True, ) study.optimize(objective, n_trials=30) # - # # Analyze results study.best_params study.best_value # + results = study.trials_dataframe() results['value'].sort_values().reset_index(drop=True).plot() plt.title('Convergence plot') plt.xlabel('Iteration') plt.ylabel('Accuracy') # - results.head() # + [markdown] _cell_guid="e758621d-b27b-40ff-a93f-bebd2e0e5243" _uuid="0a1834f2a9f2db15dcaba4a84004b9627d714469" # # Evaluate the model # + # load best model model = load_model(path_best_model) # - model.summary() # + # make predictions in test set result = model.evaluate(x=X_test, y=y_test) # + # print evaluation metrics for name, value in zip(model.metrics_names, result): print(name, value) # + [markdown] _cell_guid="5688faa0-b33b-4e92-b125-7fa0b37e7df3" _uuid="3306d29b732341663e50866140dc569360701a81" # ## Confusion matrix # + # Predict the values from the validation dataset y_pred = model.predict(X_test) # Convert predictions classes to one hot vectors y_pred_classes = np.argmax(y_pred, axis = 1) # Convert validation observations to one hot vectors y_true = np.argmax(y_test, axis = 1) # compute the confusion matrix cm = confusion_matrix(y_true, y_pred_classes) cm # + _cell_guid="11361e73-8250-4bf5-a353-b0f8ea83e659" _execution_state="idle" _uuid="16e161179bf1b51ba66c39b2cead883f1db3a9c7" # let's make it more colourful classes = 10 plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) plt.title('Confusion matrix') plt.colorbar() tick_marks = np.arange(classes) plt.xticks(tick_marks, range(classes), rotation=45) plt.yticks(tick_marks, range(classes)) for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > 100 else "black", ) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # + 
[markdown] _cell_guid="1b8a5cdc-9122-4e31-b9fa-0f6b57d33fc8" _uuid="ecb928433299b163ecc1f6c4e66d4ddcf38fe898" # Here we can see that our CNN performs very well on all digits. # -
Section-12-Optuna/04-Optimizing-a-CNN-extended.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] tags=[]
# # PROBLEMAS DIVERSOS
# -

# ## FICHEROS
# -------------------------

# ### 1.
# Escribir una función que pida un número entero entre 1 y 10 y guarde en un fichero con el nombre tabla-n.txt la tabla de multiplicar de ese número, donde n es el número introducido.

# +
import os

n = int(input("ingrese un numero del 1 al 10: "))
file_name = "./tabla-{}.txt".format(n)

# Write the multiplication table of n, one product per line.
with open(file_name, mode="w") as f:
    for i in range(1, 11):
        cadena = "{} x {} = {}\n".format(n, i, i * n)
        f.write(cadena)
# -

# <h3>2.</h3>
# Escribir una función que pida un número entero entre 1 y 10, lea el fichero tabla-n.txt con la tabla de multiplicar de ese número, donde n es el número introducido, y la muestre por pantalla. Si el fichero no existe debe mostrar un mensaje por pantalla informando de ello.

# +
import os

n = int(input("ingrese un numero del 1 al 10: "))
file_name = "./tabla-{}.txt".format(n)

# Read the table written by exercise 1 and print it. Checking with
# os.path.isfile first lets us report a missing file instead of
# raising FileNotFoundError, as the exercise statement requires.
if os.path.isfile(file_name):
    with open(file_name) as f:
        print(f.read())
else:
    print("El fichero no existe")
# -

# <h3>3.</h3>
#
# Escribir una función que pida dos números n y m entre 1 y 10, lea el fichero tabla-n.txt con la tabla de multiplicar de ese número, y muestre por pantalla la línea m del fichero. Si el fichero no existe debe mostrar un mensaje por pantalla informando de ello.
# +
import os

n = int(input("ingrese un numero del 1 al 10: "))
m = int(input("ingrese un numero del 1 al 10: "))

file_name = "./tabla-{}.txt".format(n)

# Read tabla-n.txt and show only line m. Checking with os.path.isfile
# first lets us report a missing file instead of raising
# FileNotFoundError, as the exercise statement requires.
if os.path.isfile(file_name):
    with open(file_name) as f:
        lineas = f.readlines()
    # The statement numbers lines from 1; lists are 0-based.
    print(lineas[m - 1])
else:
    print("El fichero no existe")
# -

# + [markdown] tags=[]
# ## Expresiones Regulares
# ---------------------------------------------
#
# -

from modulo import datos
import re

path = './src/re/short_tweets.csv'

# <h3>1.</h3>
#
# Escriba una expresión regular que encuentre todas las coincidencias que sigan el siguiente patrón.
#
# Ej. <code>@robot3!</code>

# Cadena entrada
s = '@robot9! @robot4& I have a good feeling that the show isgoing to be amazing! @robot9$ @robot7%'

patron = r"@robot\d"
print(re.findall(patron, s))

# <h3>2.</h3>
# Escriba una expresión regular para cada caso:
#
# - todos los usuarios que sigan el siguente patron. <code>User_mentions:9</code>
#
# - encuentre los numero de likes: <code>likes: 5</code>
#
# - que permita encontrar el numero de retweets. <code>number of retweets: 4</code>

# Cadena entrada
s = "Unfortunately one of those moments wasn't a giant squid monster. User_mentions:2, likes: 9, number of retweets: 7"

# One pattern per requested case.
print(re.findall(r"User_mentions:\d+", s))        # user mentions
print(re.findall(r"likes: \d+", s))               # number of likes
print(re.findall(r"number of retweets: \d+", s))  # number of retweets

# <h3>3.</h3>
#
# Escriba una expresión regular que encuente los nombres de archivos txt en la cadena:
#
# - Nombre de archivo txt siempre inicia al principio de la cadena
# - Siempre comienzan con una secuencia de 2 o 3 vocales mayúsculas o minúsculas (a e i o u).
# - Archivo siempre termina con ".txt" .
#

analisis_sentimientos = datos.read_pandas(path, 780, 782)

# Anchored at the start of the string: 2-3 vowels (either case), then
# any non-space characters up to the ".txt" suffix.
regex = r"^[aeiouAEIOU]{2,3}\S*\.txt"

for tweet in analisis_sentimientos:
    print(tweet)
    # Encuentre todos los casos
    print(re.findall(regex, tweet))

# <h3>4.</h3>
#
# Escriba una expresión regular que valide una lista de correos electrónicos
# - Primera parte:
#     - Caracteres en mayuscula y minúscula
#     - Números
#     - Caracteres especiales: !, #, %, &, *, $, .
# - Debe contener @
# - Dominio:
#     - Puede ser cualquier conjunto de caracteres
#     - Solo puede terminar con <code>.com</code>
#
# ENTRADA:
#
# - ['<EMAIL>', '<EMAIL>', '!#mary-=@msca.net']
#
# SALIDA:
#
# - The email <code><EMAIL></code> is a valid email
# - The email <code><EMAIL></code> is a valid email
# - The email <code>!#mary-=<EMAIL></code> is invalid
#

# +
# Regular expression implementing the statement above:
#   local part : letters, digits and the special chars ! # % & * $ .
#   separator  : a literal @
#   domain     : any characters, but the string must end in ".com"
# re.match anchors at the start; the $ anchors the .com at the end.
regex = r"[A-Za-z0-9!#%&*$.]+@.+\.com$"

emails = ['<EMAIL>', '<EMAIL>', '!#mary-=@<EMAIL>']

for example in emails:
    # Match the regex to the string
    if re.match(regex, example):
        # Report the address as valid
        print("The email {email_example} is a valid email".format(email_example=example))
    else:
        print("The email {email_example} is invalid".format(email_example=example))
# -
Problemas Diversos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # *Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [<NAME>](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).* # # Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning). # %load_ext watermark # %watermark -a '<NAME>' -v -p tensorflow # # Model Zoo -- Multilayer Perceptron with Batch Normalization # + import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data ########################## ### DATASET ########################## mnist = input_data.read_data_sets("./", one_hot=True) ########################## ### SETTINGS ########################## # Hyperparameters learning_rate = 0.1 training_epochs = 10 batch_size = 64 # Architecture n_hidden_1 = 128 n_hidden_2 = 256 n_input = 784 n_classes = 10 # Other random_seed = 123 ########################## ### GRAPH DEFINITION ########################## g = tf.Graph() with g.as_default(): tf.set_random_seed(random_seed) # Batchnorm settings training_phase = tf.placeholder(tf.bool, None, name='training_phase') # Input data tf_x = tf.placeholder(tf.float32, [None, n_input], name='features') tf_y = tf.placeholder(tf.float32, [None, n_classes], name='targets') # Multilayer perceptron layer_1 = tf.layers.dense(tf_x, n_hidden_1, activation=None, # Batchnorm comes before nonlinear activation 
use_bias=False, # Note that no bias unit is used in batchnorm kernel_initializer=tf.truncated_normal_initializer(stddev=0.1)) layer_1 = tf.layers.batch_normalization(layer_1, training=training_phase) layer_1 = tf.nn.relu(layer_1) layer_2 = tf.layers.dense(layer_1, n_hidden_2, activation=None, use_bias=False, kernel_initializer=tf.truncated_normal_initializer(stddev=0.1)) layer_2 = tf.layers.batch_normalization(layer_2, training=training_phase) layer_2 = tf.nn.relu(layer_2) out_layer = tf.layers.dense(layer_2, n_classes, activation=None, name='logits') # Loss and optimizer loss = tf.nn.softmax_cross_entropy_with_logits(logits=out_layer, labels=tf_y) cost = tf.reduce_mean(loss, name='cost') # control dependency to ensure that batchnorm parameters are also updated with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) train = optimizer.minimize(cost, name='train') # Prediction correct_prediction = tf.equal(tf.argmax(tf_y, 1), tf.argmax(out_layer, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy') # + import numpy as np ########################## ### TRAINING & EVALUATION ########################## with tf.Session(graph=g) as sess: sess.run(tf.global_variables_initializer()) np.random.seed(random_seed) # random seed for mnist iterator for epoch in range(training_epochs): avg_cost = 0. 
total_batch = mnist.train.num_examples // batch_size for i in range(total_batch): batch_x, batch_y = mnist.train.next_batch(batch_size) _, c = sess.run(['train', 'cost:0'], feed_dict={'features:0': batch_x, 'targets:0': batch_y, 'training_phase:0': True}) avg_cost += c train_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.train.images, 'targets:0': mnist.train.labels, 'training_phase:0': False}) valid_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.validation.images, 'targets:0': mnist.validation.labels, 'training_phase:0': False}) print("Epoch: %03d | AvgCost: %.3f" % (epoch + 1, avg_cost / (i + 1)), end="") print(" | Train/Valid ACC: %.3f/%.3f" % (train_acc, valid_acc)) test_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.test.images, 'targets:0': mnist.test.labels, 'training_phase:0': False}) print('Test ACC: %.3f' % test_acc) # -
code/model_zoo/tensorflow_ipynb/multilayer-perceptron-batchnorm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Derivative of CTC Loss # # 关于CTC的介绍已经有很多不错的教程了,但是完整的描述CTCLoss的前向和反向过程的很少,而且有些公式推导省略和错误。本文主要关注CTC Loss的梯度是如何计算的,关于CTC的介绍这里不做过多赘述,具体参看文末参考。 # # CTC主要应用于语音和OCR中,已语音[Deepspeech2](https://arxiv.org/abs/1512.02595)模型为例,CTC的网络一般如下图所示,包含softmax和CTCLoss两部分。反向传播需要求得loss L相对于logits $u^i$​的梯度。下面先介绍CTCLoss的前向计算。 # # > 图片来源于文末参考 # # ![img](./img/ctc_loss_backward_1.png) # # ## CTC Loss 的计算 # # CTC中path的定义与概率的计算如下: # # <img src="./img/ctc_loss_prob_pi_x.png" alt="image-20211104200811966" style="zoom:50%;" /> # # path 是 $ L'^T$​​的元素,用 $ \pi $​​表示。 $ \textbf{x} $​​ 是输入特征,$\textbf{y}$​​ 是输出label, 都是序列。 $ L $​​ 是输出的 vocab, L‘ 是 $ L \cup {blank}$​​。 $y_{\pi_{t}}^t$​​ 表示在t时刻,$\pi_{t}$​​ label时的观察概率。其中$\pi_{t}$​​ 表示 $\pi$​​ path在t时刻的label。$\pi$​​ 是 $\textbf{y}$​​ 与 $ \textbf{x}$​​ 的一个alignment,长度是$T$​​,取值空间为$L'$​​​。path也称为alignment。 # # 公式(2)解释了给定输入 $\textbf{x}$​ ,输出 $ \pi $​ path 的概率,即从时间t=1到T每个时间点的概率 $y_{\pi_{t}}^t$​ 相乘。 # # 求出单条path后,就可以计算$p(l \mid x)$​ 的概率,计算如下: # # <img src="./img/ctc_loss_prob_l_x.png" alt="image-20211104202358513" style="zoom:50%;" /> # # 这里边 $\mathcal{B}$ 就是映射, 即所有多对一的映射(many-to-one mapping )的集合。 这样就算出来对应一个真正的 label $\textbf{l}$ 的概率了,这里是求和。 求和的原因就是 aab 和 abb 都是对应成ab, 所以 aab 的概率 + abb 的概率才是生成ab的概率。 # # 公式(3)解释了给定输入 $\mathbf{x}$​​​​​​ ,求输出$\mathbf{l}$​​​​​​ 的概率, 即所有集合 $\mathcal{B}^{-1} (\mathbf{l})$​​​​​​​​​​ 中 path的概率和。 # # ### CTC forward-backward 算法 # # CTC的优化采用算最大似然估计[MLE (maximum likelihood estimation)](https://en.wikipedia.org/wiki/Maximum_likelihood_estimation), 这个和神经网络本身的训练过程是一致的。 # # 这个CTC 计算过程类似HMM的 [forward-backward algorithm](https://en.wikipedia.org/wiki/Forward%E2%80%93backward_algorithm),下面就是这个算法的推导过程: # # <img src="./img/ctc_loss_alpha_definition.png" alt="image-20211104203040307" style="zoom:50%;" /> # # 上图中的定义很清楚, 但是$ 
\alpha_{t-1}(s) $ and $ \alpha_{t-1}(s-1)$ 和 $\alpha_t(s)$ 的关系也不那么好看出来,下图给出了具体的关于 $\alpha_t(s)$ 的推导过程: # # <img src="./img/ctc_loss_alpha_recurse.png" alt="image-20211108155714843" style="zoom:50%;" /> # # <img src="./img/ctc_loss_alpha_recurse_2.png" alt="image-20211109153011816" style="zoom:50%;" /> # # 这里的公式比较适合用下面的图来理解,$\alpha_1(1)$​​​​ 其实对应的就是下图中左上角白色的圆圈。 就是上来第一个是blank 的概率, 而 $\alpha_1(2)$​​​​是label l 的第一个字母。 这里边我们假设每个字母之间都插入了空白,即label l扩展成l',例如,l=[a, b, b, c], l'=[-, a, -, b, -, b, -, c, -]。 然后对于其他圆点,在时间是1 的情况下概率都是 0. Figure 3中横轴是时间 t,从左到右是1到T;纵轴是s(sequence),从上到下是 1 到 $\mathbf{\mid l' \mid}$​​​​. # # <img src="./img/ctc_loss_cat_lattice.png" alt="image-20211108155918442" style="zoom:50%;" /> # # 接下来我们分析递归公式 (resursion),更多介绍可以参看 [2]. 公式6分情况考虑: # # * 第一种情况就是当前的label是blank, 或者 $\mathbf{l'}_{s}= \mathbf{l'}_{s-2}$​​​​​​​(相邻是重复字符): # # ![img](https://distill.pub/2017/ctc/assets/cost_no_skip.svg) # # 这个时候他的概率来自于过去t-1的两个label 概率, 也就是 $a_{t-1} (s)$​​ 和 $a_{t-1} (s-1)$​​​ 。 # # $ a_{t-1} (s)$​​ 就是说当前的 sequence 已经是s 了,figure 3中表现为横跳, blank -->blank(例如t=3, s=3); # # 而 $a_{t-1} (s-1) $是说明当前的字符还不够, 需要再加一个, 所以在figure 3中就是斜跳,从黑色圆圈到白色圆圈(例如,t=3, s=5)。 # # 仔细观察figure 3, 除了第一排的白色圆圈, 其他白色圆圈都有两个输入, 就是上述的两种情况。 当然判断blank 的方法也可以是判断$I'_{s-2} = I'_{s}$​. 这种情况也是说明$I'_{s}$​​​ 是blank, 因为每一个字符必须用 blank 隔开, 即使是相同字符。 # # * 第二章情况 也可以用类似逻辑得出, 只不过当前的状态s 是黑色圆圈, 有三种情况输入。 # # ![img](https://distill.pub/2017/ctc/assets/cost_regular.svg) # # 最终的概率就如公式8 所示, 这个计算过程就是 CTC forward algroirthm, 基于 Fig. 3 的左边的初始条件。 # # <img src="./img/ctc_loss_forward_loss.png" alt="image-20211108162544982" style="zoom:50%;" /> # # 基于Fig. 3 右边的初始条件,我们还是可以计算出一个概率, 那个就是 **CTC backward**. 这里我就不详细介绍了, 直接截图。 # # <img src="./img/ctc_loss_backward_recurse.png" alt="image-20211108162859876" style="zoom:50%;" /> # # 这样一直做乘法, 数字值越来越小,很快就会underflow。 这个时候就需要做 scaling. 
# # <img src="./img/ctc_loss_rescale_loss.png" alt="image-20211108163526616" style="zoom:50%;" /> # # 算出了forward probability 和 backward probability 有什么用呢, 解释如下图。 # # <img src="./img/ctc_loss_forward_backward.png" alt="image-20211108164110404" style="zoom:50%;" /> # # 上图是说 forward probability and backward probability 的乘积, 代表了这个 sequence $\mathbf{l}$ t时刻,是s label 的 所有paths 的概率。 这样的话 我们就计算了 Fig. 3 中的每个圆圈的概率。为什么$\alpha_t(s)\beta_t(s)$ 中多出一个 $y^t_{\mathbf{l'_s}}$ ,这是因为它在 $\alpha$ 和 $\beta$ 中都包含该项,合并公式后就多出一项。 # # <img src="./img/ctc_loss_forward_backward_to_loss.png" alt="image-20211109143104052" style="zoom:50%;" /> # # $p(\mathbf{l}|\mathbf{x})$​ 可以通过任意时刻 t 的所有 s 的 foward-backward 概率计算得来。取负对数后就是单个样本的NLL(Negative Log Likelihood)。 # # ### 总结 # # 总结一下,根据前向概率计算CTCLoss函数,可以得出如下结论: # # 1. 对于时序长度为T的输入序列x和输出序列z,前向概率: # $$ # \begin{split} # \alpha_t(s) &= \sum_{ \underset{\pi_t=l'_s}{\pi \in \mathcal{B}^{-1}(z)} } p(\pi_{1:t}|x) \newline # \alpha_1(1) &= y_{-}^1 ; \quad \alpha_1(2)=y^1_{l'_2}, \quad \alpha_1(s)=0, \forall s > 2 \newline # \alpha_t(s) &= 0, \quad \forall s < |l'| - 2(T-t) - 1 ,\quad \text{or} \quad \forall s < 1 \newline # \alpha_t(s) &= # \begin{cases} # (\alpha_{t-1}(s) + \alpha_{t-1}(s-1) ) y^t_{l'_s} & \text{if $l'_s=b$ or $l'_{s-2} = l'_s$​} \newline # (\alpha_{t-1}(s) + \alpha_{t-1}(s-1) + \alpha_{t-1}(s-2))y^t_{l'_s} & \text{otherwise}\newline # \end{cases} # \end{split} # $$ # # 2. 利用 $\alpha_t(s)$ 计算CTCLoss: # $$ # -ln(p(l \mid x)) = -ln(\alpha_{T}(|l'|)+\alpha_{T}(|l'|-1)) # $$ # # 根据后向概率计算CTCLoss函数,可以得出如下结论: # # 1. 
对于时序长度为T的输入序列x和输出序列z,后向概率: # $$ # \begin{split} # \beta_t(s) &= \sum_{ \underset{\pi_t=l'_s}{\pi \in \mathcal{B}^{-1}(z)} } p(\pi_{t:T}|x) \newline # \beta_T(|l'|) &= y_{-}^T ; \quad \beta_T(|l'|-1)=y^T_{l'_{|l'|-1}}, \quad \beta_T(s)=0, \forall s < |l'| - 1 \newline # \beta_t(s) &= 0, \text{$\forall s > 2t$ or $\forall s < |l'|$} \newline # \beta_t(s) &= # \begin{cases} # (\beta_{t+1}(s) + \beta_{t+1}(s+1) ) y^t_{l'_s} & \text{if $l'_s=b$ or $l'_{s+2} = l'_s$} \newline # (\beta_{t+1}(s) + \beta_{t+1}(s+1) + \beta_{t+1}(s+2))y^t_{l'_s} & \text{otherwise}\newline # \end{cases} # \end{split} # $$ # # 2. 利用 $\beta_t(s)$计算CTCLoss: # # $$ # -ln(p(l \mid x)) = -ln(\beta_{1}(1)+\beta_{1}(2)) \newline # $$ # # 根据任意时刻的前向概率和后向概率计算CTC Loss函数,得到如下结论: # # 1. 对于任意时刻t,利用前向概率和后向概率计算CTCLoss: # # $$ # p(l \mid x) = \sum_{s=1}^{|l'|} \frac{\alpha_t(s)\beta_t(s)}{y_{l'_s}^t} \newline # -ln(p(l \mid x)) = -ln( \sum_{s=1}^{|l'|} \frac{\alpha_t(s) \beta_t(s)}{y_{l'_s}^t} ) # $$ # 我们已经得到CTCLoss的计算方法,接下来对其进行求导。 # # ## CTC梯度计算 # # ### 微分公式 # # 在计算梯度前,我们先回顾下基本的微分公式: # $$ # C' = 0 \\ # x' = 1 \newline # x^n = n \cdot x^{n-1} \newline # (e^x)' = e^x \newline # log(x)' = \frac{1}{x} \newline # (u + v)' = u' + v' \newline # (\frac{u}{v})' = \frac{u'v-uv'}{v^2} \newline # \frac{\mathrm{d}f(g(x))}{\mathrm{d}x} = \frac{\mathrm{d}f(g(x))}{\mathrm{d}g(x)} \cdot \frac{\mathrm{d}g(x)}{\mathrm{d}x} # $$ # # ### CTC梯度 # # 最大似然估计训练就是最大化训练集中每一个分类的对数概率,即最小化Eq. 12。 # # <img src="./img/ctc_loss_gradient_of_y_hat.png" alt="image-20211108164206136" style="zoom:50%;" /> # # 最后就是算微分了, 整个推导过程就是加法和乘法, 都可以微分。 $\mathit{O}^{ML}$关于神经网络的输出 $y^t_k$的梯度见Eq. 
13。因为训练样本是相互独立的,所以可以单独考虑每个样本,公式如Eq.13。 # # 下面是CTCLoss的梯度计算: # # <img src="./img/ctc_loss_gradient_with_y.png" alt="image-20211109143622448" style="zoom:50%;" /> # # ### CTC梯度推导 # # 回顾下之前的公式,便于理解后续推导过程。 # # $$ # p(l \mid x) = \sum_{s=1}^{|l'|} \frac{\alpha_t(s)\beta_t(s)}{y_{l'_s}^t} \\ # \begin{equation} # \alpha_t(s) \beta_t(s) = \sum_{ \underset{\pi_t=l'_s}{\pi \in \mathcal{B}^{-1}(l):} } y^t_{l'_s} \prod_{t=1}^T y^t_{\pi_t} # \end{equation} # $$ # # 其中Eq. 15的计算过程如下: # # $$ # \begin{align*} # \frac{\partial p( # l \mid x)}{\partial y_k^t} # & = \sum_{s \in lab(z,k)} \frac{ \partial \frac{ \alpha_t(s) \beta_t(s)}{y_{k}^t}}{\partial y_k^t} # \newline # & = \sum_{s \in lab(z,k)} \frac{(\alpha_t(s)\beta_t(s))’y_k^t - \alpha_t(s)\beta_t(s){y_k^t}'}{{y_k^t}^2} # \newline # &= \sum_{s \in lab(z,k)} \frac{( \prod_{t'=1}^{t-1} y^{t'}_{\pi_{t'}} \cdot y_k^t \cdot y_k^t \cdot \prod_{t'=t+1}^{T} y^{t'}_{\pi_{t'}} )’ y_k^t - \alpha_t(s)\beta_t(s){y_k^t}'}{{y_k^t}^2} # \newline # &= \sum_{s \in lab(z,k)} \frac{2\alpha_t(s)\beta_t(s) - \alpha_t(s)\beta_t(s)}{{y_k^t}^2} # \newline # &= \sum_{s \in lab(z,k)} \frac{\alpha_t(s)\beta_t(s)}{{y_k^t}^2} # \newline # &= \frac{1}{{y_k^t}^2} \sum_{s \in lab(z,k)} \alpha_t(s)\beta_t(s) \tag{1} \newline # \end{align*} # $$ # # # NLL的公式推导如下: # $$ # \begin{split} # \frac{\partial {ln(p(l \mid x))} }{ \partial y^t_k } # &= \frac{1}{p(l \mid x)} \frac{ \partial{p(l \mid x)} }{ \partial y_k^t } \newline # &= \frac{1}{p(l \mid x) {y^t_k}^2 } \sum_{s \in lab(z,k)} \alpha_t(s)\beta_t(s) # \end{split} # \tag{2} # $$ # # # 已经算出了CTCLoss对于 $y_k^t$​ 的梯度,接下来我们需要计算 CTCLoss对于$u^t_k$​(logits)的梯度。套用链式法则,并替换$y^t_k$​ 为 $y^t_{k'}$​,结果如下图。图中 $k'$​ 表示vocab中的某一个token,$K$​​ 是vocab的大小。 # # ![](./img/ctc_loss_backward_2.png) # # 图中公式4根据链式法则得到: # $$ # - \frac{ \partial ln(p(l \mid x)) }{ \partial u^t_k } # = - \sum_{k'=1}^{K} \frac{ \partial ln(p(l \mid x)) }{ \partial y^t_{k'} } \frac{ \partial y^t_{k'} }{ \partial u^t_k } \tag{4} # $$ # 图中公式3是softmax的梯度,参考 
[4],计算过程如下: # $$ # softmax(j) = S_j = \frac{ e^{a_j} }{ \sum_{k=1}^K e^{a_k} }, \enspace \forall j \in 1 \dots K # $$ # # $$ # \begin{split} # \frac{ \partial S_i }{ \partial a_j} # &= \frac{ \partial (\frac{ e^{ a_i } }{ \sum_k e^{ a_k } }) } { \partial a_j } # \newline # &= # \begin{cases} # \frac{ e^a_i \sum - e^a_j e^a_i }{ \sum^2 } # &= \frac{ e^a_i }{ \sum } \frac{ \sum - e^a_j }{ \sum } \newline # &= S_i(1-S_j) & \text{i = j, $\sum$ stands for $\sum_{k=1}^K e^a_k$} # \newline # \frac{ 0 - e^a_j e^a_i }{ \sum^2 } # &= - \frac{ e^a_j }{ \sum } \frac{ e^a_i }{ \sum } \newline # &= -S_j S_i & \text{i $\neq$ j, $\sum$ stands for $\sum_{k=1}^K e^a_k$} # \end{cases} # \newline # &= # \begin{cases} # S_i(1 - S_j) & \text{$i = j$} # \newline # -S_j S_i = S_i (0 - S_j) & \text{$i \neq j$} # \end{cases} # \newline # &= S_i (\delta_{ij} - S_j ) # \end{split} # \tag{3} # $$ # $$ # \delta_{ij} = # \begin{cases} # 1 & \text{if i = j} \newline # 0 & \text{otherwise} # \end{cases} # $$ # # # # 下图中黄色框中的部分表示公式(1),即遍历所有的vocab中的token,其结果是$p(l \mid x)$​。这是因为label $l$​ 中的token一定在vocab中,且 $s \in lab(l, k')$​ 可以是空集。当 $k'$​ 在 l 中,s 则为label中token是$k'$​的概率;当$k'$​​​不在l中,s为空,概率为0。 # # ![img](./img/ctc_loss_backward_3.png) # # 公式(2),(3)带入(4),并结合公式(1)的结果如上图右边,即: # $$ # \begin{split} # - \frac{ \partial ln(p(l \mid x)) }{ \partial u^t_k } &= # - \sum_{k'=1}^K \frac{ \partial ln(p(l \mid x)) }{ \partial y^t_{k'} } \frac{ \partial y^t_{k'}}{ \partial u^t_k } \newline # &= - \sum_{k'=1}^K \frac{ y^t_{k'}( \delta_{kk'} - y^t_k ) }{ p(l \mid x) {y^t_{k'}}^2 } \sum_{s \in lab(l, k') } \alpha_t(s) \beta_t(s) \newline # &= - \sum_{k'=1}^K \frac{ \delta_{kk'} - y^t_k }{ p(l \mid x) y^t_{k'} } \sum_{s \in lab(l, k') } \alpha_t(s) \beta_t(s) \newline # &= \sum_{k'=1}^K \frac{ y^t_k - \delta_{kk'} }{ p(l \mid x) y^t_{k'} } \sum_{s \in lab(l, k') } \alpha_t(s) \beta_t(s) \newline # &= \sum_{k'=1}^K \frac{ y^t }{ p(l \mid x) y^t_{k'} } \sum_{s \in lab(l, k') } \alpha_t(s) \beta_t(s) - \sum_{k'=1}^K 
\frac{ \delta_{kk'} }{ p(l \mid x) y^t_{k'} } \sum_{s \in lab(l, k') } \alpha_t(s) \beta_t(s) \newline # &= \frac{ y^t_k }{ p(l \mid x) } ( \sum_{k'=1}^K \frac{1}{y^t_{k'}} \sum_{s \in lab(l, k') } \alpha_t(s) \beta_t(s) ) - \sum_{k'=1}^K \frac{ \delta_{kk'} }{ p(l \mid x) y^t_{k'} } \sum_{s \in lab(l, k') } \alpha_t(s) \beta_t(s) \newline # &= \frac{ y^t_k }{ p(l \mid x) } p(l \mid x) - \sum_{k'=1}^K \frac{ \delta_{kk'} }{ p(l \mid x) y^t_{k'} } \sum_{s \in lab(l, k') } \alpha_t(s) \beta_t(s) \newline # &= y^t_k - \frac{ 1 }{ p(l \mid x) y^t_k } \sum_{s \in lab(l, k)} \alpha_t(s) \beta_t(s) \newline # \end{split} # $$ # 最终,为了通过softmax层传播CTCLoss的梯度,需要计算目标函数与 logits $u^t_k$ 的偏微分,即Eq. 16: # $$ # \begin{align*} # \hat{\alpha}_t(s) & \overset{def}{=} \frac{ \alpha_t(s) }{ C_t } ,\enspace C_t \overset{def}{=} \sum_s \alpha_t(s) # \newline # \hat{\beta}_t(s) & \overset{def}{=} \frac{ \beta_t(s) }{ D_t } ,\enspace D_t \overset{def}{=} \sum_s \beta_t(s) # \newline # - \frac{ \partial ln(p(l \mid x)) }{ \partial u^t_k } &= y^t_k - \frac{1}{y^t_k \sum_{s=1}^{\mid l' \mid} \frac{ \hat{\alpha}_t(s) \hat{\beta}_t(s) }{ y^t_{l'_s} } } \sum_{s \in lab(l, k)} \hat{\alpha}_t(s) \hat{\beta}_t(s) \tag{16} # \newline # \end{align*} # $$ # ### 总结 # # * 通过动态规划算法计算$\alpha_t(s)$ 和 $\beta_t(s)$ # # * 通过$\alpha_t(s)$ 计算 $p(l \mid x)=\alpha_T(\mid l' \mid) + \alpha_T(\mid l' \mid -1)$ # # * 通过$\alpha_t(s)$ 和 $\beta_t(s)$ # # * 计算CTcLoss函数的导数: # $$ # \begin{split} # - \frac{ \partial ln(p(l \mid x)) }{ \partial u^t_k } # &= y^t_k - \frac{ 1 }{ p(l \mid x) y^t_k } \sum_{s \in lab(l, k)} \alpha_t(s) \beta_t(s) # \newline # &= y^t_k - \frac{1}{y^t_k \sum_{s=1}^{\mid l' \mid} \frac{ \hat{\alpha}_t(s) \hat{\beta}_t(s) }{ y^t_{l'_s} } } \sum_{s \in lab(l, k)} \hat{\alpha}_t(s) \hat{\beta}_t(s) # \newline # \end{split} # \tag{16} # $$ # ## Reference # # [[1] <NAME>, <NAME>, <NAME>, <NAME>. Connectionist Temporal lassification: Labeling Unsegmented Sequence Data with Recurrent Neural Networks. 
ICML 2006, Pittsburgh, USA, pp. 369-376.](http://www.cs.toronto.edu/~graves/icml_2006.pdf)
#
# [[2] Sequence Modeling With CTC](https://distill.pub/2017/ctc/)
#
# [[3] NLP 之 CTC Loss 的工作原理](https://www.jianshu.com/p/e073c9d91b20)
#
# [[4] The Softmax function and its derivative](https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/)
#
# [[5] CTC Algorithm Explained Part 1:Training the Network(CTC算法详解之训练篇)](https://xiaodu.io/ctc-explained/)
docs/topic/ctc/ctc_loss.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="M3XH_XLsy_Bn" # _Lambda School Data Science, Unit 2_ # # Sprint Challenge: Predict Steph Curry's shots 🏀 # For your Sprint Challenge, you'll use a dataset with all Steph Curry's NBA field goal attempts. # (Regular season and playoff games, from October 28, 2009, through June 5, 2019.) # You'll predict whether each shot was made, using information about the shot and the game. This is hard to predict! Try to get above 60% accuracy. The dataset was collected with the [nba_api](https://github.com/swar/nba_api) Python library. # + colab_type="code" id="Nw3CL7TE7tNq" colab={} # %%capture import sys if 'google.colab' in sys.modules: # Install packages in Colab # !pip install category_encoders==2.* # !pip install pandas-profiling==2.* # + colab_type="code" id="-Nm24pCHy_Bo" colab={} pycharm={"is_executing": false} # Read data import pandas as pd url = 'https://drive.google.com/uc?export=download&id=1fL7KPyxgGYfQDsuJoBWHIWwCAf-HTFpX' df = pd.read_csv(url) # Check data shape assert df.shape == (13958, 20) # + [markdown] colab_type="text" id="B8BvDKLFy_Bq" # To demonstrate mastery on your Sprint Challenge, do all the required, numbered instructions in this notebook. # To earn a score of "3", also do all the stretch goals. # You are permitted and encouraged to do as much data exploration as you want. # **1. Begin with baselines for classification.** Your target to predict is `shot_made_flag`. What is your baseline accuracy, if you guessed the majority class for every prediction? # **2. Hold out your test set.** Use the 2018-19 season to test. NBA seasons begin in October and end in June. You'll know you've split the data correctly when your test set has 1,709 observations. # **3. 
Engineer new feature.** Engineer at least **1** new feature, from this list, or your own idea. # - **Homecourt Advantage**: Is the home team (`htm`) the Golden State Warriors (`GSW`) ? # - **Opponent**: Who is the other team playing the Golden State Warriors? # - **Seconds remaining in the period**: Combine minutes remaining with seconds remaining, to get the total number of seconds remaining in the period. # - **Seconds remaining in the game**: Combine period, and seconds remaining in the period, to get the total number of seconds remaining in the game. A basketball game has 4 periods, each 12 minutes long. # - **Made previous shot**: Was <NAME>'s previous shot successful? # **4. Decide how to validate** your model. Choose one of the following options. Any of these options are good. You are not graded on which you choose. # - **Train/validate/test split: train on the 2009-10 season through 2016-17 season, validate with the 2017-18 season.** You'll know you've split the data correctly when your train set has 11,081 observations, and your validation set has 1,168 observations. # - **Train/validate/test split: random 80/20%** train/validate split. # - **Cross-validation** with independent test set. You may use any scikit-learn cross-validation method. # **5.** Use a scikit-learn **pipeline** to **encode categoricals** and fit a **Decision Tree** or **Random Forest** model. # **6.** Get your model's **validation accuracy.** (Multiple times if you try multiple iterations.) # **7.** Get your model's **test accuracy.** (One time, at the end.) # **8.** Given a **confusion matrix** for a hypothetical binary classification model, **calculate accuracy, precision, and recall.** # ### Stretch Goals # - Engineer 4+ new features total, either from the list above, or your own ideas. # - Make 2+ visualizations to explore relationships between features and target. # - Optimize 3+ hyperparameters by trying 10+ "candidates" (possible combinations of hyperparameters). 
You can use `RandomizedSearchCV` or do it manually. # - Get and plot your model's feature importances. # + [markdown] colab_type="text" id="t6Jt3qjQ-zig" # ## 1. Begin with baselines for classification. # >Your target to predict is `shot_made_flag`. What would your baseline accuracy be, if you guessed the majority class for every prediction? # + colab_type="code" id="I0BDeNFG_Kee" colab={} pycharm={"is_executing": false} print(pd.value_counts(df.shot_made_flag,normalize=True)) print('curry miss 52% of his show, whereas he made 47.2 throughout oct 28 - June5') # + [markdown] colab_type="text" id="Dz2QHBiVy_Br" # ## 2. Hold out your test set. # # >Use the 2018-19 season to test. NBA seasons begin in October and end in June. You'll know you've split the data correctly when your test set has 1,709 observations. # + pycharm={"name": "#%%\n", "is_executing": false} df['game_date'] # + colab_type="code" id="OPod6lBG_wTT" colab={} pycharm={"is_executing": false} df.head(3) df['game_date'] = pd.to_datetime(df['game_date']) start_date = '2018-10-15' end_date = '2019-07-01' mask = (df['game_date'] >= start_date) & (df['game_date'] <= end_date) test_2018_2019 = df.loc[mask] test_2018_2019.shape # + [markdown] colab_type="text" id="P9Nihzk6y_CF" # ## 3. Engineer new feature. # # >Engineer at least **1** new feature, from this list, or your own idea. # > # >- **Homecourt Advantage**: Is the home team (`htm`) the Golden State Warriors (`GSW`) ? # >- **Opponent**: Who is the other team playing the Golden State Warriors? # >- **Seconds remaining in the period**: Combine minutes remaining with seconds remaining, to get the total number of seconds remaining in the period. # >- **Seconds remaining in the game**: Combine period, and seconds remaining in the period, to get the total number of seconds remaining in the game. A basketball game has 4 periods, each 12 minutes long. # >- **Made previous shot**: Was <NAME>'s previous shot successful? 
# + pycharm={"name": "#%%\n", "is_executing": false} df.head(3) def engineer_features(df): #avoid copy warning df = df.copy() #Does GSW have homecourt advanture? df['HomeCourt'] = df['htm'] == 'GSW' #does curry clutching taken a shot under 20 second in very close game? df['clutch'] =( (df['minutes_remaining'] == 0) & (df['seconds_remaining'] <= 10) & (df['scoremargin_before_shot'] > -4) & (df['scoremargin_before_shot'] < 0) & (df['period'] == 4) ) return df df = engineer_features(df) test_2018_2019 = engineer_features(test_2018_2019) df.head() # + [markdown] colab_type="text" id="eLs7pt7NFJLF" # ## **4. Decide how to validate** your model. # >Choose one of the following options. Any of these options are good. You are not graded on which you choose. # >- **Train/validate/test split: train on the 2009-10 season through 2016-17 season, validate with the 2017-18 season.** You'll know you've split the data correctly when your train set has 11,081 observations, and your validation set has 1,168 observations. # >- **Train/validate/test split: random 80/20%** train/validate split. # >- **Cross-validation** with independent test set. You may use any scikit-learn cross-validation method. 
# + colab_type="code" id="LJ58CceDISXR" colab={} pycharm={"is_executing": false}
# Time-based train/validation split. NBA seasons run October-June, so the
# boundary dates fall safely inside the summer gap between seasons.
df['game_date'] = pd.to_datetime(df['game_date'])

start_date_train = '2008-10-15'
end_date_train = '2017-07-01'
mask_09_10 = (df['game_date'] >= start_date_train) & (df['game_date'] <= end_date_train)
# .copy() so the column assignments below act on an independent frame
# instead of a view of df (fixes pandas SettingWithCopyWarning).
train_2009_2017 = df.loc[mask_09_10].copy()
print(train_2009_2017.shape)

start_date_validate = '2017-10-15'
end_date_validate= '2018-07-01'
mask_17_18 = (df['game_date'] >= start_date_validate) & (df['game_date'] <= end_date_validate)
validate_2017_2018 = df.loc[mask_17_18].copy()

# Cast the target to string so downstream tools treat it as categorical.
train_2009_2017['shot_made_flag'] = train_2009_2017['shot_made_flag'].astype(str)
validate_2017_2018['shot_made_flag'] = validate_2017_2018['shot_made_flag'].astype(str)

# + pycharm={"name": "#%%\n", "is_executing": false}
test_2018_2019['shot_made_flag'] = test_2018_2019['shot_made_flag'].astype(str)

# + pycharm={"name": "#%%\n"}
# Feature selection: drop the target, identifier columns and the raw date,
# then keep every numeric column plus the low-cardinality categoricals.
target = 'shot_made_flag'

# (redundant axis=1 removed: drop(columns=...) already targets columns)
features_in = df.drop(columns=[target, 'game_date', 'player_name', 'game_id', 'game_event_id'])

# Numeric features first, then categoricals with <= 10 distinct values.
numeric_features = features_in.select_dtypes(include='number').columns.tolist()
cardinality = features_in.select_dtypes(exclude='number').nunique()
features_cardinality = cardinality[cardinality <= 10].index.tolist()
features = numeric_features + features_cardinality

# + pycharm={"name": "#%%\n", "is_executing": false}

# + pycharm={"name": "#%%\n", "is_executing": false}
# Assign features to X and target to y for each split.
X_train_09_17 = train_2009_2017[features]
y_train_09_17 = train_2009_2017[target]

X_Validate_17_18 = validate_2017_2018[features]
y_validate_17_18 = validate_2017_2018[target]

X_test = test_2018_2019[features]
y_test = test_2018_2019[target]

# + pycharm={"name": "#%%\n", "is_executing": false}
y_train_09_17.dtypes
# -

# ## 5. Use a scikit-learn pipeline to encode categoricals and fit a Decision Tree or Random Forest model.
# + pycharm={"name": "#%%\n", "is_executing": false}
from scipy.stats import randint,uniform
from sklearn.impute import SimpleImputer
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
import category_encoders as ce
from sklearn.pipeline import make_pipeline

# Pipeline: ordinal-encode categoricals -> mean-impute -> random forest.
pipeline_Ordinary_RandomForestClassifier = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=6, random_state=42, n_jobs=-1)
)

# BUG FIX: RandomizedSearchCV requires every value of param_distributions to
# be a list of candidates or a distribution exposing rvs(); the original dict
# held bare scalars ('mean', 100, 5), which raises a TypeError the moment
# this search is fit.
Parameter_distrubution_Ordinary_randomForestClassifer = {
    'simpleimputer__strategy': ['mean'],
    'randomforestclassifier__n_estimators': [100],
    'randomforestclassifier__max_depth': [5],
}

# NOTE(review): this first search is configured but never fit below;
# only search_RFClassifer is actually used.
search_Ordinary_randomForestClassifier = RandomizedSearchCV(
    pipeline_Ordinary_RandomForestClassifier,
    param_distributions=Parameter_distrubution_Ordinary_randomForestClassifer,
    n_iter=5,
    cv=5,
    scoring='accuracy',
    verbose=10,
    return_train_score=True,
    n_jobs=-1
)

# + pycharm={"name": "#%%\n", "is_executing": false}
# <editor-fold desc="###################Ordinal RandomForestclassifier, randomizedSerachRv###################################">
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)

# Search space: explicit candidate lists plus scipy distributions for the
# continuous/integer hyperparameters.
param_dist ={
    'simpleimputer__strategy': ['mean'],
    'randomforestclassifier__n_estimators': randint(50,500),
    'randomforestclassifier__max_depth': [5,10,15,None],
    'randomforestclassifier__max_features': uniform(0,1),
}

search_RFClassifer = RandomizedSearchCV(
    pipeline,
    param_distributions=param_dist,
    n_iter=5,
    cv=3,
    verbose=10,
    scoring='accuracy',
    return_train_score=True,
    n_jobs=-1
)
# </editor-fold>

# + pycharm={"name": "#%%\n", "is_executing": false}
search_RFClassifer.fit(X_train_09_17,y_train_09_17)

# + [markdown] colab_type="text" id="8kJXxFpty_CH"
# ## 6.Get your model's validation accuracy
# > (Multiple times if you try multiple iterations.)
# + colab_type="code" id="7560JKvxy_CJ" colab={} pycharm={"is_executing": false}
# Validation accuracy of the best estimator found by the randomized search.
score = search_RFClassifer.score(X_Validate_17_18,y_validate_17_18)
print('accuracy validation is', score)

# + [markdown] colab_type="text" id="YvyYY9tfy_CL"
# ## 7. Get your model's test accuracy
# > (One time, at the end.)

# + pycharm={"name": "#%%\n", "is_executing": false}

# + pycharm={"name": "#%%\n", "is_executing": false}
from sklearn.metrics import accuracy_score

# One-time evaluation on the held-out 2018-19 season.
y_pred_RFClassifer = search_RFClassifer.predict(X_test)
accuracy= accuracy_score(y_test, y_pred_RFClassifer)
print('test accuracy score is', accuracy)

# + [markdown] id="xGL5stLvJCn1" colab_type="text"
# ## 8. Given a confusion matrix, calculate accuracy, precision, and recall.
#
# Imagine this is the confusion matrix for a binary classification model. Use the confusion matrix to calculate the model's accuracy, precision, and recall.
#
# <table>
# <tr>
# <td colspan="2" rowspan="2"></td>
# <td colspan="2">Predicted</td>
# </tr>
# <tr>
# <td>Negative</td>
# <td>Positive</td>
# </tr>
# <tr>
# <td rowspan="2">Actual</td>
# <td>Negative</td>
# <td style="border: solid">85</td>
# <td style="border: solid">58</td>
# </tr>
# <tr>
# <td>Positive</td>
# <td style="border: solid">8</td>
# <td style="border: solid"> 36</td>
# </tr>
# </table>
#
# Reading the table: TN = 85, FP = 58, FN = 8, TP = 36.

# + [markdown] id="nEvt7NkUJNao" colab_type="text"
# ### Calculate accuracy

# + id="FFszS2A5JJmv" colab_type="code" colab={} pycharm={"is_executing": false}
# accuracy = (TP + TN) / total = (36 + 85) / 187 ≈ 0.647
(85+36)/(85+58+8+36)

# + [markdown] id="XjHTmk8sJO4v" colab_type="text"
# ### Calculate precision

# + id="7qX1gbcMJQS_" colab_type="code" colab={} pycharm={"is_executing": false}
# BUG FIX: precision = TP / (TP + FP) = 36 / (36 + 58) ≈ 0.383.
# The original computed 85/(85+6), which uses the wrong cells (85 is TN,
# and 6 does not appear in the matrix at all).
36/(36+58)

# + [markdown] id="pFug3ZKaJQ7A" colab_type="text"
# ### Calculate recall

# + id="L0OKc3JxJR4r" colab_type="code" colab={} pycharm={"is_executing": false}
# BUG FIX: recall = TP / (TP + FN) = 36 / (36 + 8) ≈ 0.818.
# The original 85/(85+8) is the negative-class ratio (specificity-like),
# not the recall of the positive class.
36/(36+8)

# + pycharm={"name": "#%%\n"}
s/DS_Sprint_Challenge_6_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# Binary classification with a wide 2-hidden-layer Keras MLP on a CSV/HDF5
# dataset with 8 input features, followed by smoothed loss-curve plots and a
# stratified 5-fold evaluation.

# +
import numpy as np
import os
import pandas as pandas  # NOTE(review): unconventional alias ('pd' is standard); unused below
from pandas import read_csv
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.callbacks import ModelCheckpoint
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from keras.models import model_from_json
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras import regularizers
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
# -

# TF1-style session setup: let GPU memory grow on demand instead of
# reserving it all up front.
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.per_process_gpu_memory_fraction=0.3
session = tf.Session(config=config)

# Project-local configuration: dataset and model/weight paths.
# NOTE(review): this rebinds `config`, shadowing the tf.ConfigProto above —
# the session already holds that object, so it works, but it is confusing.
from config import csv_config as config
hdf5_path = config.HDF5_PATH
model_path = config.MODEL_PATH
weight_path = config.WEIGHT_PATH

# Fixed seed for reproducible numpy-driven shuffling/splits.
seed = 7
np.random.seed(seed)

# +
from helpers import CSVDatasetReader

# Load the full dataset and hold out 20% for testing; binarize labels for
# the single sigmoid output unit.
reader = CSVDatasetReader(hdf5_path)
(X, Y) = reader.load()
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=seed)
y_train = LabelBinarizer().fit_transform(y_train)
y_test = LabelBinarizer().fit_transform(y_test)
# -

def smooth_curve(points, factor=0.9):
    """Exponentially smooth a sequence of values (for nicer loss plots).

    Each output is ``previous * factor + point * (1 - factor)``; the first
    point is passed through unchanged.
    """
    smoothed_points = []
    for point in points:
        if smoothed_points:
            previous = smoothed_points[-1]
            smoothed_points.append(previous * factor + point * (1 - factor))
        else:
            smoothed_points.append(point)
    return smoothed_points

epochs = 150

def model_deep():
    """Build and compile a 2x768-unit tanh MLP for binary classification.

    The first layer carries L2 weight regularization; both hidden layers use
    a max-norm (3) kernel constraint; output is a single sigmoid unit trained
    with binary cross-entropy and Adam.
    """
    model = Sequential()
    model.add(Dense(768, input_dim=8,
                    kernel_initializer='normal',
                    kernel_regularizer=regularizers.l2(0.01),
                    activation='tanh',
                    kernel_constraint=maxnorm(3)))
    model.add(Dense(768, kernel_initializer='normal', activation='tanh', kernel_constraint=maxnorm(3)))
    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
    return model

# NOTE(review): this rebinds `model_deep` from the factory function to a
# model INSTANCE, so a fresh model can no longer be constructed later.
model_deep = model_deep()

#filepath=weight_path + "deep-sgd-weights-improvement.hdf5"
#checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True, mode='max')
#callbacks_list = [checkpoint]

# Train with a 33% validation split carved from the training data.
history = model_deep.fit(X_train, y_train,
                         validation_split=0.33,
                         epochs=epochs,
                         batch_size=10,
                         #callbacks=callbacks_list,
                         verbose=0)
score = model_deep.evaluate(X_test, y_test, verbose=0)

# +
import matplotlib.pyplot as plt
# %matplotlib inline

# Plot smoothed training vs. validation loss curves.
print(history.history.keys())
plt.plot(smooth_curve(history.history['loss']))
plt.plot(smooth_curve(history.history['val_loss']))
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# -

# Held-out test accuracy, in percent.
print(score[1]*100)

num_epochs = 150

# Stratified 5-fold evaluation.
# NOTE(review): the SAME already-trained model instance is reused across
# folds (weights are never reset), so each fold has effectively seen data
# from earlier folds and the reported scores will be optimistic. Also, `Y`
# here is the raw label array rather than the binarized labels used above —
# confirm the two encodings agree.
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
all_scores = []
all_loss_histories = []
for train, test in kfold.split(X, Y):
    history = model_deep.fit(X[train], Y[train], epochs=num_epochs, batch_size=10, verbose=0)
    loss_history = history.history['loss']
    all_loss_histories.append(loss_history)
    # evaluate the model
    score = model_deep.evaluate(X[test], Y[test], verbose=0)
    all_scores.append(score[1]*100)

print("%.2f%% (+/- %.2f%%)" % (np.mean(all_scores), np.std(all_scores)))
process/binary_classification_wider.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="Qvexyww7a-0o" # <div> # <img src="https://drive.google.com/uc?export=view&id=1vK33e_EqaHgBHcbRV_m38hx6IkG0blK_" width="350"/> # </div> # # #**Artificial Intelligence - MSc** # CS6501 - MACHINE LEARNING AND APPLICATIONS # #**Business Analytics - MSc** # ET5003 - MACHINE LEARNING APPLICATIONS # ##***Annual Repeat*** # ###Instructor: <NAME> # # ###RepMLA_3.1 # # + cellView="form" id="LqXD_IwUQuBF" #@title Current Date Today = '2021-08-02' #@param {type:"date"} # + cellView="form" id="uzDKau31OjVO" #@markdown --- #@markdown ### Enter your details here: Student_ID = "0427845" #@param {type:"string"} Student_full_name = "<NAME>" #@param {type:"string"} #@markdown --- # + cellView="form" id="r39xGZckTpKx" #@title Notebook information Notebook_type = 'Example' #@param ["Example", "Lab", "Practice", "Etivity", "Assignment", "Exam"] Version = 'Draft' #@param ["Draft", "Final"] {type:"raw"} Submission = False #@param {type:"boolean"} # + [markdown] id="ZEjSEeEJWAFE" # ## Fuzzy Systems # + [markdown] id="rJyYhNTFWEwi" # The ‘tipping problem’ is commonly used to illustrate the power of fuzzy logic principles to generate complex behavior from a compact, intuitive set of expert rules. # # If you’re new to the world of fuzzy systems, you might want to check out this worked example. # + [markdown] id="0R2Zy1lrWIez" # ### The Tipping Problem # + [markdown] id="NysFacL1WPGU" # Let’s create a fuzzy control system which models how you might choose to tip at a restaurant. When tipping, you consider the service and food quality, rated between 0 and 10. You use this to leave a tip of between 0 and 25%. 
# # We would formulate this problem as: # # **Antecedents (Inputs)** # * service # * Universe (ie, crisp value range): How good was the service of the wait staff, on a scale of 0 to 10? # * Fuzzy set (ie, fuzzy value range): poor, acceptable, amazing # # * food quality # * Universe: How tasty was the food, on a scale of 0 to 10? # * Fuzzy set: bad, decent, great # # **Consequents (Outputs)** # * tip # * Universe: How much should we tip, on a scale of 0% to 25% # * Fuzzy set: low, medium, high # # **Rules** # * IF the service was good or the food quality was good, THEN the tip will be high. # * IF the service was average, THEN the tip will be medium. # * IF the service was poor and the food quality was poor THEN the tip will be low. # # **Usage** # * If I tell this controller that I rated: # * the service as 9.8, and # * the quality as 6.5, # * it would recommend I leave: # * a 20.2% tip. # + [markdown] id="4Cki-twAXmR5" # ### Creating the Tipping Controller Using the skfuzzy control API # + [markdown] id="8_29Sel4XpSR" # We can use the skfuzzy control system API to model this. First, let’s define fuzzy variables # + colab={"base_uri": "https://localhost:8080/"} id="3oWjB7V8XwSz" outputId="fcc11be1-5bd0-40d1-cd27-d2d20eacc183" # # !pip install scikit-fuzzy import numpy as np import skfuzzy as fuzz from skfuzzy import control as ctrl # + id="TgwqwNeAVnhL" ## Antecedent/Consequent objects hold universe variables # Antecedent-1 # how good was the service of the wait staff, on a scale of 0 to 10? service = ctrl.Antecedent(np.arange(0, 10+1, 1), 'service') # Antecedent-2 # how tasty was the food, on a scale of 0 to 10? quality = ctrl.Antecedent(np.arange(0, 10+1, 1), 'quality') ## Consequent # how much should we tip, on a scale of 0% to 25%? 
tip = ctrl.Consequent(np.arange(0, 25+1, 1), 'tip') # + [markdown] id="UKvGPKb-3ygQ" # We can use the fuzzy membership function generators, here you have the link to the module [membership](https://pythonhosted.org/scikit-fuzzy/api/skfuzzy.membership.html) # + id="5UHBh7I8REnX" ## auto-membership function population is possible with .automf(3, 5, or 7) # qualtity with 3 membership functions quality.automf(3) # service with 3 membership functions service.automf(3) # + id="RdtTxQ0QRGYc" # you can build your membership functions with custom parameters tip['low'] = fuzz.trimf(tip.universe, [0, 0, 13]) tip['medium'] = fuzz.trimf(tip.universe, [0, 13, 25]) tip['high'] = fuzz.trimf(tip.universe, [13, 25, 25]) # + [markdown] id="Uk1y2AC5YBM7" # To help understand what the membership looks like, use the view methods. # + colab={"base_uri": "https://localhost:8080/", "height": 545} id="oRZAzvOhYDbT" outputId="1501f5c6-4551-4fd9-d063-c6bc4e728765" ## You can see how these look with .view() # show 'quality' membership functions quality.view() # show membership functions and highlight one quality['average'].view() # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="UCy6lEUCYHTQ" outputId="398b4893-5898-412d-df45-25874febb626" # show 'service' membership functions service.view() # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="7vqpKIIGYKdf" outputId="da82441d-f8a8-49fe-ef32-e154a3bbd345" # show 'tip' membership functions tip.view() # + [markdown] id="cOEkMl2PYN8z" # ### Fuzzy rules # + [markdown] id="reolM905YRSs" # Now, to make these triangles useful, we define the fuzzy relationship between input and output variables. For the purposes of our example, consider three simple rules: # # 1. If the food is poor OR the service is poor, then the tip will be low # 2. If the service is average, then the tip will be medium # 3. If the food is good OR the service is good, then the tip will be high. 
# # Most people would agree on these rules, but the rules are fuzzy. Mapping the imprecise rules into a defined, actionable tip is a challenge. This is the kind of task at which fuzzy logic excels. # + id="yHYENYf-YT_4" ## You could propose your own fuzzy rules # If the food is poor OR the service is poor, then the tip will be low rule1 = ctrl.Rule(quality['poor'] | service['poor'], tip['low']) # If the service is average, then the tip will be medium rule2 = ctrl.Rule(service['average'], tip['medium']) # If the food is good OR the service is good, then the tip will be high. rule3 = ctrl.Rule(service['good'] | quality['good'], tip['high']) # + colab={"base_uri": "https://localhost:8080/", "height": 282} id="bvMieten6k6E" outputId="b84a5629-f9b2-482f-fcc9-7ed3cfad9194" # Rule 1 as a directed graph rule1.view() # + colab={"base_uri": "https://localhost:8080/", "height": 282} id="EdRF64Yc6mYJ" outputId="0bfdfd1c-8663-45c1-bcbb-d326c79db5be" # Rule 2 as a directed graph rule2.view() # + colab={"base_uri": "https://localhost:8080/", "height": 282} id="5PT7KUdl6q2g" outputId="f2319b8a-edc3-4545-a72a-77596580c0b9" # Rule 3 as a directed graph rule3.view() # + [markdown] id="YStQK3ElYYJL" # ### Control System Creation and Simulation¶ # + [markdown] id="Mwxz2ib3Ya_B" # Now that we have our rules defined, we can simply create a control system via: # + id="93p6g73FYdC4" # Fuzzy system for the tip problem tipping_ctrl = ctrl.ControlSystem([rule1, rule2, rule3]) # + [markdown] id="Hk7QzVG4Yguz" # In order to simulate this control system, we will create a <code>ControlSystemSimulation</code>. Think of this object representing our controller applied to a specific set of cirucmstances. For tipping, this might be tipping one friend at the local brew-pub. We would create another <code>ControlSystemSimulation</code> when we’re trying to apply our <code>tipping_ctrl</code> for another friend at the cafe because the inputs would be different. 
# + id="UbQRY5D2YjN3" # Calculate results from a ControlSystem tipping = ctrl.ControlSystemSimulation(tipping_ctrl) # + [markdown] id="Zir-tjz7Y8o_" # We can now simulate our control system by simply specifying the inputs and calling the compute method. Suppose we rated the quality 6.5 out of 10 and the service 9.8 of 10. # + id="Nl7-xFHtY_Qg" # Pass inputs to the ControlSystem using Antecedent labels # Note: if you like passing many inputs all at once, use .inputs(dict_of_data) tipping.input['quality'] = 6.5 tipping.input['service'] = 9.8 # Compute the fuzzy system tipping.compute() # + [markdown] id="cfFhKLGLZDEb" # Once computed, we can view the result as well as visualize it. # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="jB9UdcHwDcBh" outputId="96343ad2-5d95-46c7-c9e7-54a361499ce0" ## Antecedent 1 # service input = 9.8 service.view(sim=tipping) # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="mBzWkWdfDG3O" outputId="60835423-571f-4ac0-9474-2f01f10de0f7" ## Antecedent 2 # quality input = 6.5 quality.view(sim=tipping) # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="r6gJzkP7Cj__" outputId="d4cddde4-d2d5-43a1-edd4-d0f3303e836f" ## Consequent # result view tip.view(sim=tipping) # + colab={"base_uri": "https://localhost:8080/"} id="i6D5AeaFESqQ" outputId="abb4e77f-3473-47d1-c509-0b8e3d56c76c" # result print(tipping.output['tip']) # + [markdown] id="q3fLVemweU0x" # The resulting suggested tip is 19.85%. # + [markdown] id="eMLgAg5zeYMH" # ## Final thoughts # + [markdown] id="8kRUaGFXeb3v" # The power of fuzzy systems is allowing complicated, intuitive behavior based on a sparse system of rules with minimal overhead. # # Note our membership function universes were coarse, only defined at the integers, but <code>fuzz.interp_membership</code> allowed the effective resolution to increase on demand. # # This system can respond to arbitrarily small changes in inputs, and the processing burden is minimal. 
# # Python source code: download (generated using skimage 0.2)
WEEK_3/RepMLA_3_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import casadi as ca import sys sys.path.insert(0, '../python/pyecca') import matplotlib.pyplot as plt from pyecca.util import rk4 import numpy as np from casadi.tools.graph import dotgraph from IPython.display import Image def draw_graph(expr): return Image(dotgraph(expr).create_png()) # + def numerical(x_end, n_x): """ Edit this function and setup an optimal control problem that minimizes the time it takes for a ball rolling a long a curve to reach the end of the path assuming it starts at a height of 1 m and ends at a height of 0 m and the length of the path is x_end m. """ x = np.linspace(0, x_end, n_x) # x position where path changes dx = x[1] - x[0] # path steps width n_dy = n_x - 1 # number of height changes we need to find dy0 = -(1/n_dy)*np.ones(n_dy) # initial guess for height change along path dy_vect= ca.SX.sym('dy',n_dy) g = 9.81 t = 0 v = 0 y = 1 counter = 0 for counter in range(n_dy): dy = dy_vect[counter] v_bar = np.sqrt(2*g*(1-y))+np.sqrt(2*g*(1-y-dy)) d = np.sqrt(dx*dx+dy*dy) dt = d/v_bar t = t + dt #dv = (np.sqrt(2*g*dy*ca.sign(dy)))#ca.sign(dy) counter = counter + 1 y += dy nlp = {'x':dy_vect, 'f':t, 'g':y} S = ca.nlpsol('S', 'ipopt', nlp) res = S(x0=dy0, lbg=0, ubg=0) dy_opt = res['x'] # TODO, find optimal change in y along path y_opt = ca.vertcat(1, 1 + np.cumsum(dy_opt)) return x, y_opt #You give the solver three things, the casadi graph you wish to optimize t_final, the casadi graph of the constraint, #y_final, and the design vector that both the t_final and y_final depend on, in this case dy_vect #It then calculates all of the jacobians etc that the nonlinear solver needs and performs the optimization for you, #but requires an initial guess, in this case I have given you dy0, which is a good initial guess. 
# Here is a hint:
#   t = 0
#   y = 1
#   for i in range(n_dy):
#       dy = dy_vect[i]
#       d = ...
#       vbar = ...
#       y += dy
#       t += d/vbar
#   y_final = y
#   t_final = t
# -

# NLP declaration to solve for boundary condition of brachistochrone
def analytical(x_end, n_x):
    """Analytical brachistochrone: solve for the cycloid parameters, then sample it.

    Returns (xa, ya): n_x points of the cycloid running from height 1 m
    down to 0 m, reaching x = x_end.
    """
    radius = ca.SX.sym('c')
    angle_end = ca.SX.sym('theta_f')
    # Cycloid terminal point as a function of the two unknowns.
    x_terminal = radius * (angle_end - np.sin(angle_end))
    y_terminal = 1 - radius * (1 - np.cos(angle_end))
    # Pure feasibility problem: no objective, just hit (x_end, 0).
    problem = {
        'x': ca.vertcat(radius, angle_end),
        'f': 0,
        'g': ca.vertcat(x_terminal - x_end, y_terminal),
    }
    opts = {'print_time': 0, 'ipopt': {'sb': 'yes', 'print_level': 0}}
    solver = ca.nlpsol('S', 'ipopt', problem, opts)
    sol = solver(x0=(1, np.pi), lbg=(0, 0), ubg=(0, 0))
    c_opt = float(sol['x'][0])
    theta_f_opt = float(sol['x'][1])
    # Sample the cycloid at n_x evenly spaced parameter values.
    theta = np.linspace(0, theta_f_opt, n_x)
    xa = c_opt * (theta - np.sin(theta))
    ya = 1 - c_opt * (1 - np.cos(theta))
    return xa, ya


# +
n_x = 100  # number of points for approximation of path
x_end = 3  # final x position when height is zero

# Analytical and numerical solutions over the same grid.
xa, ya = analytical(x_end=x_end, n_x=n_x)
x, y_opt = numerical(x_end=x_end, n_x=n_x)

# Overlay both paths for comparison.
plt.title('brachistochrone')
plt.plot(x, y_opt, label='numerical')
plt.plot(xa, ya, 'r--', label='analytical', alpha=0.5)
plt.grid(True)
plt.xlabel('x, m')
plt.ylabel('z, m')
plt.legend()
# -
homework/6-Casadi-Brach.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# +
# NOTE(review): the original cell did `from __future__ import print_function,
# division` here, *after* the imports above.  That works cell-by-cell in a
# notebook but is a SyntaxError when the jupytext .py script is run as a
# module; it is a no-op on Python 3, so it has been dropped.
from builtins import range


def make_poly(X, deg):
    """Return the design matrix [1, X, X**2, ..., X**deg].

    X is a 1-D array of n samples; the result has shape (n, deg + 1).
    """
    n = len(X)
    data = [np.ones(n)]
    for d in range(deg):
        data.append(X**(d + 1))
    return np.vstack(data).T


def fit(X, Y):
    """Least-squares weights via the normal equations: solve (X'X) w = X'Y."""
    return np.linalg.solve(X.T.dot(X), X.T.dot(Y))


def fit_and_display(X, Y, sample, deg):
    """Fit a degree-`deg` polynomial to `sample` random points and plot it.

    Sampling is WITHOUT replacement: the original used the default
    replace=True, so duplicated points silently shrank the effective
    training set and could make the normal equations singular.
    """
    N = len(X)
    train_idx = np.random.choice(N, sample, replace=False)
    Xtrain = X[train_idx]
    Ytrain = Y[train_idx]

    plt.scatter(Xtrain, Ytrain)
    plt.show()

    # fit polynomial
    Xtrain_poly = make_poly(Xtrain, deg)
    w = fit(Xtrain_poly, Ytrain)

    # display the polynomial over the full domain
    X_poly = make_poly(X, deg)
    Y_hat = X_poly.dot(w)
    plt.plot(X, Y)
    plt.plot(X, Y_hat)
    plt.scatter(Xtrain, Ytrain)
    plt.title("deg = %d" % deg)
    plt.show()


def get_mse(Y, Yhat):
    """Mean squared error between targets Y and predictions Yhat."""
    d = Y - Yhat
    return d.dot(d) / len(d)


def plot_train_vs_test_curves(X, Y, sample=20, max_deg=20):
    """Plot train/test MSE as a function of polynomial degree.

    Train points are sampled without replacement so the held-out test set
    (all remaining indices) is genuinely disjoint from the training set.
    """
    N = len(X)
    train_idx = np.random.choice(N, sample, replace=False)
    Xtrain = X[train_idx]
    Ytrain = Y[train_idx]

    test_idx = [idx for idx in range(N) if idx not in train_idx]
    # test_idx = np.random.choice(N, sample)
    Xtest = X[test_idx]
    Ytest = Y[test_idx]

    mse_trains = []
    mse_tests = []
    for deg in range(max_deg + 1):
        Xtrain_poly = make_poly(Xtrain, deg)
        w = fit(Xtrain_poly, Ytrain)
        Yhat_train = Xtrain_poly.dot(w)
        mse_train = get_mse(Ytrain, Yhat_train)

        Xtest_poly = make_poly(Xtest, deg)
        Yhat_test = Xtest_poly.dot(w)
        mse_test = get_mse(Ytest, Yhat_test)

        mse_trains.append(mse_train)
        mse_tests.append(mse_test)

    plt.plot(mse_trains, label="train mse")
    plt.plot(mse_tests, label="test mse")
    plt.legend()
    plt.show()

    plt.plot(mse_trains, label="train mse")
    plt.legend()
    plt.show()


# +
N = 100
X = np.linspace(0, 6 * np.pi, N)
Y = np.sin(X)

plt.plot(X, Y)
plt.show()

for deg in (2, 3, 5, 6, 7, 8, 9):
    fit_and_display(X, Y, 10, deg)

plot_train_vs_test_curves(X, Y)
# -
Section 2: Elementary machine learning algorithms/linear_regression/linear_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# IPython magic: echo the working directory (only valid inside a notebook).
pwd

# Load the raw NBA Finals dataset.
nba_finals_db = pd.read_csv("csv_files/raw_file/NBA Finals and MVP.csv")
nba_finals_db

# Column subsets we want to export, one small table per topic.
champion_cols = ['Year', 'NBA Champion']
runner_up_cols = ['Year', 'NBA Vice-Champion']
mvp_cols = ['Year', 'MVP Name']
appearance_cols = ['Year', 'Western Champion', 'Eastern Champion']

champions_by_year = nba_finals_db[champion_cols]
champions_by_year

runner_up_by_year = nba_finals_db[runner_up_cols]
runner_up_by_year

mvp_by_year = nba_finals_db[mvp_cols]
mvp_by_year

finals_appearance = nba_finals_db[appearance_cols]
finals_appearance

# Persist each subset as its own CSV (no index column).
champions_by_year.to_csv("csv_files/champions_by_year.csv", index=False)
runner_up_by_year.to_csv("csv_files/runner_up_by_year.csv", index=False)
mvp_by_year.to_csv("csv_files/mvp_by_year.csv", index=False)
finals_appearance.to_csv("csv_files/finals_appearance.csv", index=False)
python/NBA_Final.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: ml_course_env
#     language: python
#     name: ml_course_env
# ---

# +
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from IPython.display import Image
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc, plot_precision_recall_curve, plot_roc_curve, precision_recall_curve
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from itertools import cycle
from sklearn.multiclass import OneVsOneClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn import svm
import pydotplus
from plotly.offline import plot as plotoff

# Global tick styling shared by every figure below.
plt.rc('xtick', color='k', labelsize='medium', direction='in')
plt.rc('xtick.major', size=8, pad=12)
plt.rc('xtick.minor', size=8, pad=12)
plt.rc('ytick', color='k', labelsize='medium', direction='in')
plt.rc('ytick.major', size=8, pad=12)
plt.rc('ytick.minor', size=8, pad=12)
# -

import scipy.stats as stats
import math

# +
X = np.random.randn(800) * 0.5 + 2
mu = np.arange(0, 4, 0.1)         # from 0 to 4 by increments of 0.1
sigma = np.arange(0.1, 2.1, 0.1)  # from 0.1 to 2.1 by increments of 0.1
mu, sigma = np.meshgrid(mu, sigma)  # all combinations of mu and sigma


def loglike_func(X, mu, sigma):
    """Gaussian log-likelihood of each (mu[i], sigma[i]) pair given data X.

    Values that come out NaN or below -10000 are clipped to -10000 so the
    surface plot stays well-behaved.
    """
    ll = []
    for i in range(len(mu)):
        ll.append(sum(np.log(stats.norm.pdf(X, mu[i], sigma[i]))))
        if math.isnan(ll[-1]) or ll[-1] < -10000:
            ll[-1] = -10000
    return ll


# Evaluate the log-likelihood on the whole parameter grid.
zs = np.array(loglike_func(X, np.ravel(mu), np.ravel(sigma)))
loglike = zs.reshape(mu.shape)
bestMu = np.ravel(mu)[np.argmax(zs)]
bestSigma = np.ravel(sigma)[np.argmax(zs)]

# 3D figure of the log-likelihood landscape.
from mpl_toolkits.mplot3d import Axes3D
print(r'Highest likelihood is for \mu and \sigma :', bestMu, bestSigma)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(mu, sigma, loglike, cmap='plasma')
ax.scatter(bestMu, bestSigma, max(zs), s=2000, c='r')  # dot at the ML value
ax.set_xlabel('$\mu$')
ax.set_ylabel('$\sigma$')
ax.set_zlabel('Loglike')
plt.title("Loglikelihood landscape")
plt.show()

# +
X = np.random.randn(800) * 0.5 + 2
mu = np.arange(0, 4, 0.1)
sigma = np.arange(0.1, 2.1, 0.1)
mu, sigma = np.meshgrid(mu, sigma)


# NOTE(review): this regularized-Gaussian variant is never called, and the
# name is shadowed again by the linear-model loss defined further below.
def loglike_func(X, mu, sigma, alpha, reg):
    """Regularized Gaussian log-likelihood: alpha * loglike - (1-alpha) * penalty."""
    ll = []
    for i in range(len(mu)):
        ll.append(alpha * sum(np.log(stats.norm.pdf(X, mu[i], sigma[i])))
                  - (1 - alpha) * (abs(mu[i])**reg + abs(1. / sigma[i])**reg))
        if math.isnan(ll[-1]) or ll[-1] < -10000:
            ll[-1] = -10000
    return ll
# -

# Synthetic regression data: y = -5x + 30*sqrt(x) + noise.
np.random.seed(42)
X = np.linspace(0, 10, 100)
a = -5
b = 30
Y = a * X**1 + b * X**0.5 + 1 * np.random.randn(len(X))
plt.rc("font", size=15)
plt.scatter(X, Y, label='y=-5*x+30*$\sqrt{x}$ +$\epsilon$')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend(loc='best')
plt.title('y=w1*x+w2*$\sqrt{x}$')
plt.show()

# +
a1 = np.arange(-18, 22, 1)
b1 = np.arange(5, 45, 1)


def loglike_func(Y, X, a1, b1, alpha, reg):
    """Regularized sum-of-squared-errors of y = w1*x + w2*sqrt(x).

    For each candidate (a1[i], b1[i]) returns
        alpha * SSE + (1 - alpha) * (|a1[i]|**reg + |b1[i]|**reg).
    (The original docstring wrongly described this as a likelihood in mu/sigma.)
    """
    ll = []
    for i in range(len(a1)):
        ll.append(alpha * sum([(Y[j] - X[j] * a1[i] - b1[i] * X[j]**0.5)**2
                               for j in range(len(X))])
                  + (1 - alpha) * (abs(a1[i])**reg + abs(b1[i])**reg))
    return ll
# -


# +
def make_meshgrid(x, y, n=100):
    """Create a mesh of points to plot in

    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    n: number of intermediary points (optional)

    Returns
    -------
    xx, yy : ndarray
    """
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, n),
                         np.linspace(y_min, y_max, n))
    return xx, yy


def plot_contours(ax, Y, X, function, xx, yy, alphas, reg, **params):
    """Filled-contour plot of `function` over the (xx, yy) grid.

    Returns the contour set and the [x, y] grid location of the minimum.
    """
    zs = np.array(function(Y, X, np.ravel(xx), np.ravel(yy), alphas, reg))
    out = ax.contourf(xx, yy, zs.reshape(xx.shape), 100, **params)
    ma = zs.argmin()
    mam = [np.ravel(xx)[ma], np.ravel(yy)[ma]]
    return out, mam
# -

# +
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable

fig, ax = plt.subplots(2, 3, 'all', figsize=(15, 10))
xx, yy = make_meshgrid(a1, b1, n=100)


def _draw_loss_panel(axis, data_weight, reg, title):
    """One loss-landscape panel: contours, ground truth, found minimum, colorbar.

    Extracted because the original repeated this code verbatim six times.
    """
    ct, ma = plot_contours(axis, Y, X, loglike_func, xx, yy, data_weight, reg,
                           cmap=plt.cm.coolwarm, alpha=0.8)
    axis.scatter(2, 25, c='k', s=40, edgecolors='k', label='ground truth')
    axis.scatter(ma[0], ma[1], c='y', s=40, edgecolors='k', label='space min')
    axis.set_xlim(xx.min(), xx.max())
    axis.set_ylim(yy.min(), yy.max())
    axis.legend(loc='best')
    divider = make_axes_locatable(axis)
    cax = divider.append_axes("right", size="7%", pad="2%")
    fig.colorbar(ct, label='Loss', cax=cax, orientation='vertical')
    axis.set_title(title)


# (row, col, data-term weight alpha, regularization exponent, title)
panels = [
    (0, 0, 1,            1, 'No regularization'),
    (0, 1, 7 * 10**-3,   1, 'Some L1 regularization'),
    (0, 2, 0,            1, 'Only L1 regularization'),
    (1, 0, 1,            2, 'No regularization'),
    (1, 1, 3 * 10**-1,   2, 'Some L2 regularization'),
    (1, 2, 0,            2, 'Only L2 regularization'),
]
for row, col, weight, reg, title in panels:
    _draw_loss_panel(ax[row, col], weight, reg, title)

fig.supxlabel('w1', fontsize=20)
fig.supylabel('w2', fontsize=20)
plt.tight_layout()
plt.show()
# -
python_notebooks/Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [nctest]
#     language: python
#     name: Python [nctest]
# ---

# Before running this notebook, it's helpful to
#
# `conda install -c conda-forge nb_conda_kernels`
#
# `conda install -c conda-forge ipywidgets`
#
# and set the kernel to the conda environment in which you installed glmtools (typically, `glmval`)
#
#
# To load data via siphon from opendap, you must
#
# `conda install -c conda-forge siphon`
#

# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from glmtools.io.glm import GLMDataset, fix_event_locations

# +
# Candidate local L2 files; the last uncommented assignment wins.
# filename = '/data/LCFA-production/OR_GLM-L2-LCFA_G16_s20152242125152_e20152242125357_c20152242125498.nc'
# filename = '/data/LCFA-production/OR_GLM-L2-LCFA_G16_s20171081818480_e20171081818500_c20171081819229.nc'
filename = '/data/LCFA-production/OR_GLM-L2-LCFA_G16_s20171161230400_e20171161231000_c20171161231027.nc'
# flash 6567 has 81 groups, 188 events
# 2017-04-26T12:30:56
# filename = '/data/LCFA-production/L2_test_Apr/FGE_LCFA/glm_fge_pnew_201703301520.nc'
# flash 1976 is weird
# -

# Load data from the most recent minute or two!
if True:  # flip to False to use the local `filename` above instead of THREDDS
    from siphon.catalog import TDSCatalog
    g16url = "http://thredds-test.unidata.ucar.edu/thredds/catalog/satellite/goes16/GRB16/GLM/LCFA/current/catalog.xml"
    satcat = TDSCatalog(g16url)
    filename = satcat.datasets[-1].access_urls['OPENDAP']

glm = GLMDataset(filename)
print(glm.dataset)
# print(glm.dataset.event_energy)
# print(glm.dataset.event_count.encoding)

# +
from glmtools.plot.locations import plot_flash
import ipywidgets as widgets
# print(widgets.Widget.widget_types.values())


def _browse_flashes(glm, flash_source):
    """Attach a slider widget that plots one flash at a time.

    flash_source is any dataset exposing .flash_id (the full glm.dataset or
    a spatial subset of it).  Extracted because the original notebook
    repeated this cell verbatim for both cases.
    """
    fl_id_vals = sorted(flash_source.flash_id.data)
    flash_slider = widgets.SelectionSlider(
        description='Flash',
        options=fl_id_vals,
    )

    # from functools import partial
    # glm_plotter = partial(plot_flash, glm)  # fails with a __name__ attr not found
    def do_plot(flash_id):
        fig = plot_flash(glm, flash_id)

    widgets.interact(do_plot, flash_id=flash_slider)


_browse_flashes(glm, glm.dataset)
# -

# # Find flashes in some location

flashes_subset = glm.subset_flashes(lon_range = (-105.5, -104.5),
                                    lat_range = (40.5, 41.5))
print(flashes_subset)

# +
# Same interactive browser, now restricted to the spatial subset.
_browse_flashes(glm, flashes_subset)
# -

# Here is an example of a low-level read with xarray, and dumping data in a
# pretty way with pandas.  Experimental and not well tested.
# + # some_flash = 16841 # some_flash = 16468 # d = xr.open_dataset(filename) singleton_flash_id = 15928 filename = '/data/LCFA-production/OR_GLM-L2-LCFA_G16_s20171161230400_e20171161231000_c20171161231027.nc' some_flash = 6359 max_area_flash = 6472 fov_dim = 'number_of_field_of_view_bounds' wave_dim = 'number_of_wavelength_bounds' time_dim = 'number_of_time_bounds' gr_dim = 'number_of_groups' ev_dim = 'number_of_events' fl_dim = 'number_of_flashes' # import pandas as pd # d.indexes['number_of_events'] = pd.Int64Index(data=d.event_id.data, name='number_of_events') # d.indexes['number_of_groups'] = pd.Int64Index(data=d.group_id.data, name='number_of_groups') # d.indexes['number_of_flashes'] = pd.Int64Index(data=d.flash_id.data, name='number_of_flashes') # empty = slice(0, 0) # print(d[{fov_dim:empty, time_dim:empty, wave_dim:empty}]) # d = d[{'number_of_events':slice(0,0), 'number_of_groups':slice(0,0), 'number_of_flashes':slice(None)}] # print(d) egf = glm.dataset#.group_id[glm.dataset.group_id==20208] egf_grgb=egf.groupby('group_parent_flash_id') gr_idx = egf_grgb.groups[some_flash] this_flash = egf[{'number_of_groups':gr_idx}] print(this_flash) # + # This is an example of a lower-level read, directly using NetCDF4 # instead of xarray. xarray does some additional metadata processing # and provides groupby, etc., which is especially helpful when # querying clustered datasets with common keys linking data tables. 
# For '/data/LCFA-production/OR_GLM-L2-LCFA_G16_s20152242125152_e20152242125357_c20152242125498.nc'
singleton_flash_id = 15928
singleton_group_id = 13280582
singleton_event_id = 13521629

# demonstrate it's not a problem with xarray
import netCDF4
nc = netCDF4.Dataset(filename)

# Boolean masks selecting the singleton event/group/flash rows.
this_ev = nc.variables['event_id'][:] == singleton_event_id
this_gr = nc.variables['group_id'][:] == singleton_group_id
this_fl = nc.variables['flash_id'][:] == singleton_flash_id

ev_id = nc.variables['event_id'][this_ev]
gr_id = nc.variables['group_id'][this_gr]
fl_id = nc.variables['flash_id'][this_fl]

ev_time_offset = nc.variables['event_time_offset']
print(ev_time_offset)
ev_times = netCDF4.num2date(ev_time_offset[:], units=ev_time_offset.units)
# BUG FIX: this was a Python 2 `print ev_times[0:5]` statement, which is a
# SyntaxError on Python 3.
print(ev_times[0:5])

# Parent-id links and coordinates for the selected rows.
ev_parent = nc.variables['event_parent_group_id'][this_ev]
gr_parent = nc.variables['group_parent_flash_id'][this_gr]
ev_lat = nc.variables['event_lat'][this_ev]
gr_lat = nc.variables['group_lat'][this_gr]
fl_lat = nc.variables['flash_lat'][this_fl]
# print(ev_id)
# print(ev_parent)
# print(gr_id)
# print(gr_parent)
# print(fl_id)
# print(ev_lat)
# print(gr_lat)
# print(fl_lat)

# print(nc)
# Dimensions: (number_of_events: 1, number_of_field_of_view_bounds: 2, number_of_flashes: 1, number_of_groups: 1, number_of_time_bounds: 2, number_of_wavelength_bounds: 2)
# Coordinates:
#     event_id (number_of_events) int32 13521629
#     event_time_offset (number_of_events) datetime64[ns] 2015-08-12T21:25:14.732000 ...
#     event_lat (number_of_events) float64 -47.5
#     event_lon (number_of_events) float64 -130.7
#     event_parent_group_id (number_of_events) int32 13280582
#     group_id (number_of_groups) int32 13280582
#     group_time_offset (number_of_groups) datetime64[ns] 2015-08-12T21:25:14.732000 ...
# group_lat (number_of_groups) float32 3.81698 # group_lon (number_of_groups) float32 -126.438 # group_parent_flash_id (number_of_groups) int16 15928 # flash_id (number_of_flashes) int16 15928 # flash_time_offset_of_first_event (number_of_flashes) datetime64[ns] 2015-08-12T21:25:14.732000 ... # flash_time_offset_of_last_event (number_of_flashes) datetime64[ns] 2015-08-12T21:25:14.732000 ... # flash_lat (number_of_flashes) float32 3.81698 # flash_lon (number_of_flashes) float32 -126.438 # product_time datetime64[ns] 2015-08-12T21:25:15.260505 ... # lightning_wavelength float64 777.4 # group_time_threshold float64 0.0 # flash_time_threshold float64 3.33 # lat_field_of_view float64 0.0 # lon_field_of_view float64 -75.0 # * number_of_events (number_of_events) int64 0 # * number_of_groups (number_of_groups) int64 0 # * number_of_flashes (number_of_flashes) int64 0 # * number_of_time_bounds (number_of_time_bounds) int64 0 1 # * number_of_wavelength_bounds (number_of_wavelength_bounds) int64 0 ... # * number_of_field_of_view_bounds (number_of_field_of_view_bounds) int64 0 ... 
# +
filename = '/data/LCFA-production/OR_GLM-L2-LCFA_G16_s20171161230400_e20171161231000_c20171161231027.nc'
some_flash = 6359

import netCDF4
nc = netCDF4.Dataset(filename)

# from PUG spec
lon_fov = (-156.06, -22.94)
dlon_fov = lon_fov[1] - lon_fov[0]
lat_fov = (-66.56, 66.56)
# zeroth element of the above are the add_offset values in the spec, and in the data file

group_lons = nc.variables['group_lon'][:]
group_lats = nc.variables['group_lat'][:]
flash_lons = nc.variables['flash_lon'][:]
flash_lats = nc.variables['flash_lat'][:]
event_lons = nc.variables['event_lon']
event_lats = nc.variables['event_lat']
# Read the raw packed int16 values; we re-apply the scaling manually below
# after reinterpreting them as unsigned.
event_lons.set_auto_scale(False)
event_lats.set_auto_scale(False)
# event_lats_fixed2, event_lons_fixed2 = fix_event_locations(event_lats, event_lons)

# unsigned = 2**16
# event_lons = event_lons[:].astype('int32')
# event_lons[event_lons < 0] += unsigned
# event_lats = event_lats[:].astype('int32')
# event_lats[event_lats < 0] += unsigned
# Same thing as above: reinterpret the signed int16 storage as uint16.
event_lons_fixed = event_lons[:].view('<u2')
event_lats_fixed = event_lats[:].view('<u2')

# Rescale packed values to degrees.
scale_factor = 0.00203128  # from file and spec; same for both
event_lons_fixed = (event_lons_fixed) * scale_factor + lon_fov[0]
event_lats_fixed = (event_lats_fixed) * scale_factor + lat_fov[0]


def _hist_row(event_vals, group_vals, flash_vals, coord_name):
    """One figure with event/group/flash histograms of a single coordinate.

    Extracted because the original repeated this 3-subplot block verbatim
    for longitude and latitude.
    """
    plt.figure(figsize=(11, 4))
    for pos, vals, label in ((131, event_vals, 'events'),
                             (132, group_vals, 'groups'),
                             (133, flash_vals, 'flashes')):
        plt.subplot(pos)
        hist_plot = plt.hist(vals, bins=100, label=label)
        plt.title(coord_name)
        plt.legend()
    ax = plt.gca()
    ax.get_xaxis().get_major_formatter().set_useOffset(False)


_hist_row(event_lons_fixed, group_lons, flash_lons, 'longitude')
_hist_row(event_lats_fixed, group_lats, flash_lats, 'latitude')

print(event_lons)
print(event_lats)
# -

nc['event_parent_group_id']

# +
# From earlier development - not needed.


def member_mask_for_cluster(dataset, cluster_id_column_name, cluster_ids):
    """ Given cluster_ids which correspond to values in
        dataset.cluster_id_column_name, return a boolean membership mask. """
    cluster_id_col = getattr(dataset, cluster_id_column_name)
    mask = np.zeros(cluster_id_col.shape[0], dtype=bool)
    for cluster_id in cluster_ids:
        mask |= (cluster_id == cluster_id_col)
    return mask


def gen_flashes(dataset):
    # need to set index on flash_id, group_id, and event_id coordinates.
    # BUG FIX: the original body ignored its `dataset` parameter and used the
    # (undefined here) global `d` throughout.
    split_gr = dataset.groupby('group_parent_flash_id')
    # split_gr gives us a mapping from a flash index to the group indices
    # corresponding to that flash.  Index refers to columns marked with
    # an "*" in print(dataset)
    split_ev = dataset.groupby('event_parent_group_id')
    # careful: xarray uses groups to refer to the result of the groupby,
    # and does not refer here to GLM groups.
    # NOTE(review): `itertools` is used below but never imported in this
    # notebook — confirm before reviving this dev code.
    for fl_idx, gr_idxs in split_gr.groups.items():
        # now we can use the flash index and group indices directly.
        # need to pull out a combined set of event indices corresponding
        # to all groups
        ev_iter = (split_ev.groups[gr_id] for gr_id in gr_idxs)
        ev_idxs = list(itertools.chain.from_iterable(ev_iter))
        this_flash = dataset[{'number_of_events': ev_idxs,
                              'number_of_groups': gr_idxs,
                              'number_of_flashes': fl_idx}]
        yield this_flash


def get_flash(self, flash_id):
    good_gr = (self.group_parent_flash_id == flash_id)
    group_ids = self.group_id[good_gr]
    group_areas = self.group_area[good_gr]
    for group_area, group_id in zip(group_areas, group_ids):
        good_ev = (self.event_parent_group_id == group_id)
        # BUG FIX: was `d.event_energy` — use the dataset passed in.
        pixel_energies = self.event_energy[good_ev]
        print(pixel_energies, type(group_area))


# get_flash(d, some_flash)
# NOTE(review): `d` is only defined in commented-out code above, so this
# loop raises NameError as written; kept for reference.
for fl in gen_flashes(d):
    # BUG FIX: was the Python 2 statement `print fl` (SyntaxError on Python 3).
    print(fl)
# can we subset everything along the number_of_events dimension?
examples/basic_read_plot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import requests as r

# Fetch the day-one COVID time series for Brazil.
url = 'https://api.covid19api.com/dayone/country/brazil'
resp = r.get(url)
resp.status_code

raw_data = resp.json()
raw_data[0]

# Keep only the columns we care about, with a header row first.
final_data = []
for obs in raw_data:
    final_data.append([obs['Confirmed'], obs['Deaths'], obs['Recovered'],
                       obs['Active'], obs['Date']])
final_data.insert(0, ['confirmados', 'obitos', 'recuperados', 'ativos', 'data'])
final_data

# +
# Column indices into each final_data row.
CONFIRMADOS = 0
OBITOS = 1
RECUPERADOS = 2
ATIVOS = 3
DATA = 4

# Truncate the ISO timestamp to its date part (YYYY-MM-DD).
for i in range(1, len(final_data)):
    final_data[i][DATA] = final_data[i][DATA][:10]
# -
final_data

import datetime as dt

print(dt.time(12, 6, 21, 7), 'Hora:minuto:segundo.microsegundo')
print('-----')
print(dt.date(2020, 4, 25), 'Ano-mês-dia')
print('-----')
print(dt.datetime(2020, 4, 25, 12, 6, 21, 7), 'Ano-mês-dia Hora:minuto:segundo.microsegundo')

# +
natal = dt.date(2020, 12, 25)
reveillon = dt.date(2021, 1, 1)
print(reveillon - natal)
print((reveillon - natal).days)
print((reveillon - natal).seconds)
print((reveillon - natal).microseconds)
# -

import csv

# +
# Persist the table, then parse the date strings into datetime objects.
with open('brasilcovid.csv', 'w') as file:
    writer = csv.writer(file)
    writer.writerows(final_data)

for i in range(1, len(final_data)):
    final_data[i][DATA] = dt.datetime.strptime(final_data[i][DATA], '%Y-%m-%d')
# -
final_data


def get_datasets(y, labels):
    """Build the Chart.js `datasets` list from one series or a list of series."""
    if isinstance(y[0], list):
        datasets = []
        for i in range(len(y)):
            datasets.append({
                'label': labels[i],
                'data': y[i]
            })
        return datasets
    return [
        {
            'label': labels[0],
            'data': y
        }
    ]


def set_title(title=''):
    """Chart.js title options; the title is displayed only when non-empty."""
    display = 'true' if title != '' else 'false'
    return {
        'title': title,
        'display': display
    }


def create_chart(x, y, labels, kind='bar', title=''):
    """Assemble a complete Chart.js chart configuration dict."""
    datasets = get_datasets(y, labels)
    options = set_title(title)
    return {
        'type': kind,
        'data': {
            'labels': x,
            'datasets': datasets
        },
        'options': options
    }


def get_api_chart(chart):
    """Fetch the rendered chart PNG from the quickchart.io API."""
    url_base = 'https://quickchart.io/chart'
    # BUG FIX: the chart config must be sent as the `c` query parameter;
    # the original used '?=', which the API does not recognise.
    resp = r.get(f'{url_base}?c={str(chart)}')
    return resp.content


def save_image(path, content):
    """Write raw image bytes to `path`."""
    with open(path, 'wb') as image:
        image.write(content)


from PIL import Image
from IPython.display import display


def display_image(path):
    """Open an image from disk and show it inline.

    BUG FIX: the original had `img_pil: Image.open(path)` — a bare
    annotation that never opened the file — and then displayed the
    misspelled name `img_pill` (NameError).
    """
    img_pil = Image.open(path)
    display(img_pil)


# +
# Every 10th observation keeps the chart legible.
y_data_1 = []
for obs in final_data[1::10]:
    y_data_1.append(obs[CONFIRMADOS])

y_data_2 = []
for obs in final_data[1::10]:
    y_data_2.append(obs[RECUPERADOS])

labels = ['Confirmados', 'Recuperados']

x = []
for obs in final_data[1::10]:
    x.append(obs[DATA].strftime('%d/%m/%Y'))

chart = create_chart(x, [y_data_1, y_data_2], labels,
                     title='Gráfico confirmado vs recuperados')
chart_content = get_api_chart(chart)
save_image('meu-primeiro-grafico.png', chart_content)
display_image('meu-primeiro-grafico.png')
# -

from urllib.parse import quote


def get_api_qrcode(link):
    """Fetch a QR-code PNG encoding `link` from the quickchart.io API."""
    text = quote(link)  # parsing do link para url
    url_base = 'https://quickchart.io/qr'
    resp = r.get(f'{url_base}?text={text}')
    return resp.content


url_base = 'https://quickchart.io/chart'
# BUG FIX: `link` was a requests.Response object and get_api_qrcode() was
# called with no argument (TypeError).  Build the chart URL as a string and
# encode that in the QR code.
link = f'{url_base}?c={str(chart)}'
get_api_qrcode(link)
basics/basics5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Unsupervised Learning Project: Creating Customer Segments

# +
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display  # Allows the use of display() for DataFrames

# Import supplementary visualizations code visuals.py
import visuals as vs

# Pretty display for notebooks
# %matplotlib inline

# Load the wholesale customers dataset
try:
    data = pd.read_csv("customers.csv")
    data.drop(['Region', 'Channel'], axis = 1, inplace = True)
    print("Wholesale customers dataset has {} samples with {} features each.".format(*data.shape))
except Exception:
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit while hiding the real failure.
    print("Dataset could not be loaded. Is the dataset missing?")
# -

# ## Data Exploration

# Display a description of the dataset
display(data.describe())

# ### Selecting Samples

# +
indices = [26, 176, 392]

# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print("Chosen samples of wholesale customers dataset:")
display(samples)
# -

# ### Feature Relevance

# +
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

# Predict 'Milk' from the remaining features; a high R^2 would mean the
# feature is redundant.
new_data = data.drop(['Milk'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(new_data, data['Milk'],
                                                    test_size=0.25, random_state=101)
regressor = DecisionTreeRegressor(random_state=101).fit(X_train, y_train)
score = regressor.score(X_test, y_test)
print(score)
# -

# ### Visualize Feature Distributions

# Produce a scatter matrix for each pair of features in the data
pd.plotting.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');

# #### Correlations
# - Milk and Groceries
# - Milk and Detergents_Paper
# - Grocery and Detergents_Paper

# ## Data Preprocessing
# ### Feature Scaling

# +
# Scale the data using the natural logarithm.  np.log applies elementwise
# to a DataFrame directly, so the original `data.apply(lambda x: np.log(x))`
# detour is unnecessary.
log_data = np.log(data)

# Scale the sample data using the natural logarithm
log_samples = np.log(samples)

# Produce a scatter matrix for each pair of newly-transformed features
pd.plotting.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# -

# ### Observation
# After applying a natural logarithm scaling to the data, the distribution
# of each feature should appear much more normal.

# Display the log-transformed sample data
display(log_samples)

# ### Outlier Detection

# +
# Select the indices for data points you wish to remove
outliers = []

# For each feature find the data points with extreme high or low values
for feature in log_data.keys():
    # Calculate Q1 (25th percentile of the data) for the given feature
    Q1 = np.percentile(log_data[feature], 25)
    # Calculate Q3 (75th percentile of the data) for the given feature
    Q3 = np.percentile(log_data[feature], 75)
    # Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)
    step = (Q3 - Q1) * 1.5

    # Display the outliers
    print("Data points considered outliers for the feature '{}':".format(feature))
    out = log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]
    display(out)
    outliers = outliers + list(out.index.values)

# Keep only the indices flagged for MORE THAN ONE feature.
outliers = list(set([x for x in outliers if outliers.count(x) > 1]))
print("Outliers: {}".format(outliers))

# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
print("The good dataset now has {} observations after removing outliers.".format(len(good_data)))
# -

# Upon quick inspection, our sample doesn't contain any of the outlier values.
# ## Feature Transformation # ### PCA # + from sklearn.decomposition import PCA # Apply PCA by fitting the good data with the same number of dimensions as features pca = PCA().fit(good_data) # Transform log_samples using the PCA fit above pca_samples = pca.transform(log_samples) # Generate PCA results plot pca_results = vs.pca_results(good_data, pca) # - # Display sample log-data after having a PCA transformation applied display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values)) # ### Dimensionality Reduction # + # Apply PCA by fitting the good data with only two dimensions pca = PCA(n_components=2).fit(good_data) # Transform the good data using the PCA fit above reduced_data = pca.transform(good_data) # Transform log_samples using the PCA fit above pca_samples = pca.transform(log_samples) # Create a DataFrame for the reduced data reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2']) # - # Display sample log-data after applying PCA transformation in two dimensions display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2'])) # ### Visualizing a Biplot # Create a biplot vs.biplot(good_data, reduced_data, pca) # ## Clustering # ### K-Means or Gaussian Mixture Model? 
# ### Creating Clusters # + n_clusters = [8,6,4,3,2] from sklearn.mixture import GaussianMixture from sklearn.metrics import silhouette_score for n in n_clusters: # Apply your clustering algorithm of choice to the reduced data clusterer = GaussianMixture(n_components=n).fit(reduced_data) # Predict the cluster for each data point preds = clusterer.predict(reduced_data) # Find the cluster centers centers = clusterer.means_ # Predict the cluster for each transformed sample data point sample_preds = clusterer.predict(pca_samples) # Calculate the mean silhouette coefficient for the number of clusters chosen score = silhouette_score(reduced_data,preds) print("The silhouette_score for {} clusters is {}".format(n,score)) # - # ### Cluster Visualization # Display the results of the clustering from implementation vs.cluster_results(reduced_data, preds, centers, pca_samples) # ### Data Recovery # + # Inverse transform the centers log_centers = pca.inverse_transform(centers) # Exponentiate the centers true_centers = np.exp(log_centers) # Display the true centers segments = ['Segment {}'.format(i) for i in range(0,len(centers))] true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys()) true_centers.index = segments display(true_centers) # - # Display the predictions for i, pred in enumerate(sample_preds): print("Sample point", i, "predicted to be in Cluster", pred)
customer_segments/customer_segments.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt plt.rcParams['figure.dpi' ] = 300 plt.rcParams['font.family']='sans-serif' plt.rcParams['font.sans-serif'] = ['Heiti TC'] df = pd.read_excel('../datasets/ctr.xlsx') def plot_ctr(df, isWorkday, device): df = df[df['isWorkday'] == isWorkday] df = df[df['device'] == device] df = df.sort_values(by=df.columns[0], ascending=True) floor_ids = set(df['floor_id']) for floor_id in floor_ids: part_df = df[df['floor_id']==floor_id] x = (part_df[part_df.columns[0]] % 1000).astype(str) y = part_df[part_df.columns[-1]] plt.plot(x, y, label='floor_id=%d' %(floor_id)) plt.ylim([0.2, 1.1]) plt.xticks(rotation=270) plt.title('Top1点击率(isWorkday=%d, device=%s)' %(isWorkday, device)) plt.legend(loc='best', ncol=4, prop={'size': 8}) plt.show() plot_ctr(df, 1, 'All')
codes/da_ctr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt # # Goals # # The goal for this notebook is to introduce various loss functions and work through a Gradient Descent algorithm. # # Gradient Descent will be implemented for you in most Data Science or Machine Learning toolkits. # # Scikit-learn has information on its approach on its documentation site: [here](https://scikit-learn.org/stable/modules/sgd.html) # # Strictly speaking Gradienct Descent is about _fitting_ a model to some data (given a way to measure 'loss'), and can be seen as _independent_ of the type of model in question. That said it is often used in training Linear models. # # At the end of this you should have to following tools in your toolbelt: # # * An understanding of why we need 'loss' functions # * An understanding of what's desirable in a loss function # * How Gradient Descent works # # Additionally, we will talk a little bit about Linear Regressions at the end. # ## Loss Functions # # Imagine that we wanted to classify a bunch of documents as one of the following: # # * Pro video games # * Against video games # # We would train some model that, given a document, would tell us which of the above labels applies. In more mathematical terms: each document is a point in some multi-dimensional space, our model would define a _hyperplane_ that splits the space into the 'pro video games' portion and the 'against video games' portion. # # But how do we know if our model is any good? Well, we need a _loss function_. # # In the case of a classifier such as the above, we have a simple loss function: the number of misclassified documents, based on some pre-labelled set. If we minimize the number of misclassifications, we're 'doing better' than we otherwise could. 
# This notion of 'the number of wrong answers' is known as $0/1$ loss. Remembering that $\theta$ represents our parameters to a model, this loss function is usually written as:
#
# $ \text{arg} \text{min}_{\theta} \sum_{i=1}^{n} 1 [y^{(i)}\cdot \langle \theta, x^{(i)} \rangle \leq 0] $
#
# The problem with this function is that it's very fragile, small changes in $\theta$ can result in huge changes in the total loss. It's also not _continuous_ (convince yourself that this is the case), which means that it's going to be hard to iteratively improve.
#
# So, despite being the ideal loss function, it's not going to work. Instead we look for some _surrogate_ loss functions that might be a bit better suited. Let's look at a few and compare them to $0/1$ loss. We'll define them as python functions so that we can plot them.

# +
# A whole bunch of predictions, evenly spaced over [-2, 2).
predictions = np.arange(-2,2,0.01)

def run_and_plot(f):
    """Evaluate loss function `f(prediction, actual=1)` over `predictions` and plot it."""
    outputs = [f(p, 1) for p in predictions]
    plt.plot(predictions,outputs)
    plt.show()
# -

# the 0/1 loss function
# Takes in a predicted value (`p`) and the actual value (`a`)
def zero_one(p, a):
    """0/1 loss: 1 if the prediction disagrees in sign with the actual value, else 0."""
    if p*a <= 0:
        return 1
    else:
        return 0

run_and_plot(zero_one)

# Hinge loss
def hinge(p,a):
    """Hinge loss: max(0, 1 - p*a); linear penalty inside the margin, zero outside."""
    return max(0, 1 - p*a)

run_and_plot(hinge)

# Exponential, luckily numpy already has e defined for us
def exponential(p,a):
    """Exponential loss: e^(-p*a); grows without bound for confident wrong answers."""
    return pow(np.e, -(p * a))

run_and_plot(exponential)

# Squared loss, very common!
def squared_loss(p,a):
    """Squared loss: (a - p)^2; smooth and differentiable everywhere."""
    return pow(a - p, 2)

run_and_plot(squared_loss)

# As you can see, squared loss is nice because at any point along the function you could figure out 'which way to go'!
#
# However, squared loss isn't always used because it doesn't always make sense. What if the 'difference' between your prediction and the actual value is meaningless? Then squared-loss may not be appropriate. As always, you'll need to think about what your data means and what your model's prediction means.
#
# # Gradient Descent
#
# Now let's look at Gradient Descent, a versatile machine learning technique. Gradient Descent gives us a way of _iteratively_ minimizing (or maximizing) a function. Often that function is some sort of statistical model (e.g. a linear regression). To keep things really simple, we are going to minimize the following function:

# np.dot would do this too, but the explicit form is easier to read.
def sum_of_squares(v):
    """Return the sum of the squared components of the vector `v`."""
    squares = [component * component for component in v]
    return np.sum(squares)

# ## The 'Gradient' in 'Gradient Descent'
#
# As we discussed in class, the 'gradient' generalizes the derivative of a function: it is a vector of partial derivatives. So if we could calculate the gradient at any point of a function, it would tell us which 'way to go' in order to minimize that function (this should be ringing all sorts of bells with what we wanted out of loss functions).
#
# If there's a unique minimum (which isn't always the case), it may not even matter where we start — we can just pick a random point, calculate the gradient, and go in the appropriate direction.
#
# What would be nice is if, for any function we write in a programming language, we could _automatically_ get its gradient. In fact there's a whole set of programming languages for this exact purpose (sometimes called 'differentiable programming languages' or 'probabilistic programming languages'). However, given an arbitrary function written in Python, we won't be able to automatically compute the gradient. As such, estimating the gradient is a big part of doing gradient descent.
#
# We estimate the gradient in the same way we would estimate the derivative: calculate the difference between two _very close points_ (you may remember your secondary school definition of a derivative where you take the limit of a difference that approaches 0, it's very much like that).
# Just to remind you:

# For some function, and for some input to that function, compute the
# difference quotient for the given `h`.
def estimate_derivative(f, x, h):
    """Approximate f'(x) with a one-sided difference quotient of step `h`."""
    rise = f(x + h) - f(x)
    return rise / h

# This is great for one dimension, but we'll be working in multiple dimensions, so we have to estimate the vector of _partial derivatives_. It sounds scarier than it is:

# +
# Difference quotient of the _ith_ parameter: only the ith entry of the
# input vector gets 'wiggled' by `h`.
def estimate_partial(f, v, i, h):
    """Approximate the i-th partial derivative of `f` at the vector `v`."""
    nudged = list(v)
    nudged[i] = nudged[i] + h
    return (f(nudged) - f(v)) / h

# The gradient estimate is just the partial estimate taken at every index.
def estimate_gradient(f, v, h):
    """Approximate the gradient of `f` at `v`, one component at a time."""
    return [estimate_partial(f, v, idx, h) for idx in range(len(v))]
# -

# Performing a single step of the algorithm is easy when you have an input vector, a gradient, and a step-size:

# A single step of Gradient Descent.
# We assume that the input vector and the length of the gradient match!!!
# The step-size is a parameter of the function.
def single_step(vec, grad, ss):
    """Move `vec` by `ss * grad` and return the result (a numpy array)."""
    # scale the gradient by the scalar step-size, then add it to the input
    return vec + np.multiply(grad, ss)

# Now that we have a way to estimate the gradient, we can start figuring out how we're going to navigate the search for the minimum.
# One way is to just pick a random starting point and go in the _opposite_ direction of the gradient (think about why it needs to be the opposite):

# +
# random starting point in [-10, 10)^3
start = [np.random.uniform(-10,10) for i in range(3)]

# each 'epoch' is defined as the following:
#
# * estimate the gradient
# * take a step (opposite of the gradient, hence the negative step size)
v = start
for e in range(1000):
    g = estimate_gradient(sum_of_squares, v, 0.001)
    v = single_step(v, g, -0.01)

# We would hope that the resulting vector is close to 0 (because that's the minimum for our function)
print(v)
# -

# The more we run this, the closer to 0 we should get (though it's possible we never reach zero!)

# +
# Run another 1000 epochs, continuing from the `v` reached above.
for e in range(1000):
    g = estimate_gradient(sum_of_squares, v, 0.001)
    v = single_step(v, g, -0.01)

print(v)
# -

# There are a few reasons we may never reach 0: numerical instability, floating point errors, etc. One such reason that is very much within our control is our step size. You could imagine a step size that's just too big, and we _would_ get to 0 but we 'over step' and end up on the other side. A very common approach to solving this issue is to gradually decrease the step size as you iterate.
#
# Let's write it out:

def estimate_linear_gradient(x, y, theta):
    """Gradient of the squared loss for a single (x, y) observation.

    `theta` is (slope, intercept); returns [d/dm, d/db] of (prediction - y)**2.
    """
    m, b = theta  # slope and intercept
    prediction = m * x + b  # your high school math teacher would be proud
    # The _residual error_ is the difference between our prediction and the
    # value in the dataset. The squared loss itself would be residual ** 2,
    # but we only need its derivative, which calculus hands us directly:
    residual = prediction - y
    return [2 * residual * x, 2 * residual]

# The above code uses the gradient of the Squared Loss function, which determines the loss of our linear model at a single point. We'll also want to determine the loss for the entire dataset. For this _a very common_ approach is to use 'Mean Squared Error' (MSE). The gradient of MSE is the mean of the individual gradients computed above.
#
# In this example, we'll use an actual linear function, and then a random starting point (remember a 'point' in this case is just a vector representing a slope and intercept).
#
# Let's do it:

# +
# Warning: I used `np.mean` below at first and the algorithm didn't work, and
# it took me a _long_ time to figure out that the bug was that I assumed it
# was a vector mean.
# I would have found the bug way sooner had I actually asserted something I
# said above and made sure that the gradient and the input vector were the
# same shape! All that said, here's an actual vector (element-wise) mean:

def v_sum(vs):
    """Element-wise sum of a list of equal-length vectors."""
    return np.array([sum(column) for column in zip(*vs)])

def v_mean(vs):
    """Element-wise mean of a list of equal-length vectors."""
    return np.multiply(v_sum(vs), 1/len(vs))

# A linear relationship between `x` and `y`.
# The true model we will try to recover: y = 42x + 7.
m = 42
b = 7
linear_rel = [(x, m * x + b) for x in range(-50, 50)]

# a random starting vector for the model we want to train
start_theta = [np.random.uniform(-1,1), np.random.uniform(-1,1)]

step_size = 0.001

# Batch gradient descent: every epoch computes the per-point gradients over
# the WHOLE dataset, averages them (MSE gradient), and takes one step.
theta = start_theta
for e in range(5000):
    predictions = [estimate_linear_gradient(x, y, theta) for x, y in linear_rel]
    mse_gradient = v_mean(predictions)
    theta = single_step(theta, mse_gradient, -step_size)

def is_close(a,b):
    """True when `b` is within 0.01 of `a`."""
    return (a - 0.01) < b < (a + 0.01)

s, i = theta
if is_close(m,s) and is_close(b,i):
    print("we got close!")
else:
    print("we got so far!")
# -

# ## Downsides and alternatives
#
# This is flexible, but _expensive_. We are calculating the residuals for the _entire dataset_ for _each epoch_. This is a cheap linear function and I still have to wait a few seconds on my machine. Imagine if this was an expensive model!
#
# What we've implemented above is called 'Batch Gradient Descent'. Because it is expensive folks have come up with the following alternatives:
#
# * Minibatch: get a smaller batch by randomly sampling from the original dataset for each epoch
# * Stochastic: each gradient step is based on _one_ training example
#
# A good exercise would be to modify the above code to work as minibatch or as stochastic gradient descent. Let me know if you do so!
#
# As always, there's no 'right' answer here. All three come up in the real world. A big factor in your decision is going to be how expensive making a prediction is.

# # Linear Regressions
#
# The last thing we want to cover in this lecture is some information about Linear Regressions. We've already seen some aspects (what a residual is, how to fit a model to data, etc.) but we want to talk a little bit more about them.
#
# While implementing our own gradient descent is fun, really you should use the Linear Model provided by some library. There are many to choose from:
#
# * SciKit Learn
# * Patsy (very cool!)
# * Statsmodels # # We'll be using SciKit Learn here, but you should definitely check out Patsy, it makes some things _way_ easier. # # While Linear Regressions are great (they help us model all sorts of things, they scale well, they are easy to understand, etc.) you must be aware that whenver you use a Linear Regression you are _assuming_ the following 4 things: # # 1. That a linear relationship exists # 2. Independence of residuals # 3. Constant Variance of residuals (this is known as 'Homoscedasticity') # 4. That the residuals are normally distributed (this is known as 'Normality') # # The first seems obvious, but it bears keeping in mind: If there _isn't_ a linear relationship in your data, then your results are meaningless _even if they show that a linear model fits very well_ this is the classic 'correlation does not equal causation'. You have to have a plausible explanation for the linear relationship. # # The second can be tricky. When using a linear model you are assuming that at each 'step' in one dimension, it does not have an effect on the _residuals_. In other words, if there is a correlation between one of the dimensions and the _residuals_ in the other dimension, this is bad and makes the model untrustworthy. This crops up a lot when dealing with time-series data. # # The last two deal with the assumptions regarding how the residuals are distributed. If they are not normally distributed, and do not have a constant variance, there may be an issue with your model. # # Sometimes the solutions is to add an _interaction_ term to your model. import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model # A linear relationship between `x` and `y`. 
# (same as before)
m = 5
b = 7
r = range(-50, 50)

# notice this is a list of lists,
# the inner list length is the dimensionality
ind = [ [i] for i in r]
# y = 5x + 7 plus uniform noise in [-10, 10)
dep = [m * x + b + np.random.uniform(-10,10) for x in r]

# Here we have some data, our independent variable `ind` and our dependent variable `dep` and we'd like to fit a linear model to it, sklearn has us covered (pretend we got the data from a dataset and now that we defined it ourselves):

lm = linear_model.LinearRegression()
lm.fit(ind,dep)

# Now we can plot the regression compared to our dataset:

# +
fig, ax = plt.subplots()
ax.scatter(ind, dep)
ax.set_xlabel("Independent")
ax.set_ylabel("Dependent")
ax.set_title("A Linear Regression")
ax.plot(ind, lm.predict(ind), color='red')
# -

# Now consider the following:

# +
# A linear relationship between `x` and `y`. (same as before)
m = 5
m2 = 10
b = 7
r = range(-50, 50)

# doesn't like cats
ind = [ [i] for i in r] + [ [i] for i in r ]
# likes cats: the second half of the data follows a steeper slope (m2)
dep = [m * x + b + np.random.uniform(-10,10) for x in r] + [m2 * x + b + np.random.uniform(-10,10) for x in r]

lm = linear_model.LinearRegression()
lm.fit(ind,dep)

fig, ax = plt.subplots()
ax.scatter(ind, dep)
ax.set_xlabel("Independent")
ax.set_ylabel("Dependent")
ax.set_title("A Linear Regression")
ax.plot(ind, lm.predict(ind), color='red')
# -

# This is a rather extreme example, meant to illustrate a point. This linear Regression is wrong, even though it has OK predictive power. The issue is that some other factor is _interacting_ with our model. What we need to do is teach the model about this interaction. This is known as an _interaction term_. In this case the interaction has to do with the _evenness_ of the value. While this is a numerical property, I want you to pretend it's a categorical property like, I don't know _country_ or _continent_ perhaps.
# # The idea is that we can convert any categorical property into a numerical one by creating a new dimension in our dataset that is 0 when the value is not in the category and 1 when it is. Let's look: # + # This is the main change!!! ind = [ [i,0] for i in r] + [ [i,1] for i in r] # notice our inner list is longer now! # - # So, now our independent variables have another dimension to them. Great! # + lm2 = linear_model.LinearRegression() lm2.fit(ind,dep) fig, ax = plt.subplots() just_data = [i[0] for i in ind] ax.scatter(just_data, dep) ax.set_xlabel("Independent") ax.set_ylabel("Dependent") ax.set_title("A Linear Regression") ax.plot(just_data, lm2.predict(ind), color='red') # - # It may not look very different in this form (though the red line is thicker), but we have reduced the residual error! Take a look! print(f"Without interaction term at x = 1: {lm.predict([[1]])}\n") print(f"With interaction term at x = 1: {lm2.predict([[1,0]])} (doesn't like cats), {lm2.predict([[1,1]])} (likes cats)\n") # Before our linear model (`lm`) couldn't take 'liking cats' into account, so it had to give a prediction that was 'in the middle'. Now, like all good models, it (`lm2`) can give better predictions when it _does_ take it into account. # # I didn't do this with Pandas, but a useful function when trying to add interaction terms with pands is `pd.get_dummies` (not a joke). This function will _automatically_ create the appropriate columns for you out of a categorical variable. However, beware of the dummy variable trap! I've fallen for it many times. You don't need a column for _every_ possible value of the categorical value, just enough to disambiguate. It's not just innefficient to have too many columns, it will result in a very innacurate model! # # To relate it to what we did above, we didn't need a "likes cats" _and_ a "doesn't like cats" dimension, as just one dimension with 0 or 1 was enough. 
If it was "likes cats", "doesn't like cats", and "is neutral", you wouldn't need three new columns, only two! One value can always be the 'default' so if you aren't "is neutral" or "doesn't like cats" that _means_ you're "likes cats".
notebooks/gradient-descent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import jax.numpy as np import numpy as onp from jax import grad, jit, vmap from jax import random import matplotlib.pyplot as plt import poppy # %matplotlib inline import matplotlib as mpl mpl.style.use('seaborn-colorblind') phasemap = mpl.cm.rainbow phasemap.set_bad(color='k') #To make sure we have always the same matplotlib settings #(the ones in comments are the ipython notebook settings) mpl.rcParams['figure.figsize']=(12.0,9.0) #(6.0,4.0) mpl.rcParams['font.size']=20 #10 mpl.rcParams['savefig.dpi']= 200 #72 mpl.rcParams['axes.labelsize'] = 18 mpl.rcParams['axes.labelsize'] = 18 mpl.rcParams['xtick.labelsize'] = 14 mpl.rcParams['ytick.labelsize'] = 14 from matplotlib import rc mpl.rcParams["font.family"] = "Times New Roman" colours = mpl.rcParams['axes.prop_cycle'].by_key()['color'] from astropy import units as units shift = np.fft.fftshift fft = np.fft.fft2 ifft = np.fft.ifft2 fftfreq = np.fft.fftfreq dtor = np.pi/180.0 import warnings warnings.filterwarnings("ignore") # + osys = poppy.OpticalSystem() osys.add_pupil(poppy.CircularAperture(radius=3)) # pupil radius in meters osys.add_detector(pixelscale=0.025, fov_arcsec=0.75) # image plane coordinates in arcseconds # - osys.planes[1].pixelscale osys.input_wavefront().amplitude osys.intermediate_wfs psf,intermediate = osys.propagate_mono(2e-6) plt.imshow(psf.amplitude**0.25) # + def objective(wavelength): psf,intermediate = osys.propagate_mono(wavelength*1e-6) return (np.sum(psf.amplitude**2.)) objective(2.) # - thisgrad = grad(objective) thisgrad(2.0) # %%time print(thisgrad(2.0)) # %%time vals = [] wavels = np.linspace(1.5,2.5,100) for wavel in wavels: vals.append(objective(wavel)) vals = np.array(vals) plt.plot(wavels,vals,'.') plt.plot(wavels,-0.01927825*(wavels-2.0)+0.96685994)
notebooks/morphine.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: cmip6
#     language: python
#     name: cmip6
# ---

# # Compare North Atlantic chlorophyll values to krill relative abundance from CPR surveys

# +
import os
import numpy as np
import netCDF4 as nc
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import matplotlib.cm as cm
import seaborn as sb
sb.set(style='ticks')
import cartopy.crs as ccrs
import cartopy.feature as cfeature

os.getcwd()

# +
#%% get data

### krill data (CPR survey counts, sampling effort and the bin coordinates)
os.chdir('/gws/pw/j05/cop26_hackathons/bristol/project09/krill')
data = nc.Dataset('CPR_bin.nc')
krill = data.variables['count'][...]
effort = data.variables['effort'][...]
krill_lon = data.variables['longitude'][...]
krill_lat = data.variables['latitude'][...]
krill_year = data.variables['time'][...]

### wrap the longitudes to agree with the chlorophyll data
krill_lon = np.ma.concatenate((krill_lon[:,200::], krill_lon[:,0:200]+360.0), axis=1)
krill = np.ma.concatenate((krill[:,:,200::], krill[:,:,0:200]), axis=2)
effort = np.ma.concatenate((effort[:,:,200::], effort[:,:,0:200]), axis=2)

# monthly chlorophyll climatology: (month, lat, lon)
chl = np.zeros((12,180,360))

### Ocean chlorophyll data: one ESA-CCI monthly climatology file per month
### (files ...-01-... through ...-12-...), loaded in a loop instead of twelve
### copy-pasted blocks.
os.chdir('/gws/pw/j05/cop26_hackathons/bristol/project09/chlorophyll')
for month in range(12):
    fname = ('ETOPO_ESACCI-OC-MAPPED-CLIMATOLOGY-1M_MONTHLY_4km_PML_OCx_QAA-'
             '%02i-fv4.2.nc' % (month + 1))
    # NOTE: like the original, intermediate datasets are not closed; `data`
    # is left bound to the December file so the grid can be read below and
    # the later `data.close()` still works.
    data = nc.Dataset(fname, 'r')
    chl[month,:,:] = data.variables['chlor_a'][...]

# Grid coordinates are identical across files; read them from the last one.
lon = data.variables['ETOPO60X'][...]
lat = data.variables['ETOPO60Y'][...]
data.close() ### mask bad values chl = np.ma.masked_where(chl > 1e5, chl) # + #%% create average values over the year for chlorophyll and sum of all krill counts across years chl_ave = np.ma.average(chl, axis=0) ### counts / effort and averaged over all years krill_abu = krill / effort krill_abu = np.ma.average(krill_abu, axis=0) ### remove krill relative abundance where it equals 1 and select only north atlantic region krill_abu = np.ma.masked_where(krill_abu == 1.0, krill_abu) llon, llat = np.meshgrid(lon,lat) krill_abu = np.ma.masked_where(llon < 270.0, krill_abu) krill_abu = np.ma.masked_where(llat < 35, krill_abu) krill_abu = np.ma.masked_where(llat > 70, krill_abu) ### mask chlorophyll where krill are not mask = np.ma.getmask(krill_abu) chl_m = np.ma.masked_where(mask, chl_ave) krill_abu = np.ma.masked_where(np.ma.getmask(chl_m), krill_abu) # + #%% calculate density of points from scipy.stats import gaussian_kde def get_gaussian(x1,y1): x = x1 y = y1 x = x.compressed() y = y.compressed() xy = np.vstack([x, y]) z = gaussian_kde(xy)(xy) idx = z.argsort() x, y, z = x[idx], y[idx], z[idx] return x,y,z [chlx,chly,chlz] = get_gaussian(chl_m, krill_abu) ### deal with zeros tmp = chlx*chly chlx = np.ma.compressed(np.ma.masked_where(tmp == 0.0, chlx)) chly = np.ma.compressed(np.ma.masked_where(tmp == 0.0, chly)) chlz = np.ma.compressed(np.ma.masked_where(tmp == 0.0, chlz)) chl_log10 = np.log10(chlx) kri_log10 = np.log10(chly) # + #%% have a look proj = ccrs.Robinson(central_longitude=20) levs1 = np.arange(0,101,5)*0.1 levs2 = np.arange(-100,1,5)*0.01 colmap1 = cm.viridis colmap2 = cm.viridis fstic = 13 fslab = 15 fig = plt.figure(figsize=(6.5,9)) gs = GridSpec(3,10) ax1 = plt.subplot(gs[0,0:8], projection=proj) ax1.set_extent([-90,20,30,70]) p1 = plt.contourf(krill_lon, krill_lat, krill_abu, transform=ccrs.PlateCarree(), cmap=colmap1, levels=levs1, vmin=np.min(levs1), vmax=np.max(levs1), extend='both') #c1 = plt.contour(lon, lat, s2n_npp_jan[0,:,:], 
transform=ccrs.PlateCarree(), colors='k', linewidths=0.75, levels=[-1,1]) ax1.add_feature(cfeature.LAND, color='silver', zorder=2) ax1.coastlines(zorder=2) ax2 = plt.subplot(gs[1,0:8], projection=proj) ax2.set_extent([-90,20,30,70]) p2 = plt.contourf(lon, lat, np.log10(chl_ave[:,:]), transform=ccrs.PlateCarree(), cmap=colmap2, levels=levs2, vmin=np.min(levs2), vmax=np.max(levs2), extend='both') c2 = plt.contour(lon, lat, np.log10(chl_ave[:,:]), transform=ccrs.PlateCarree(), colors='k', linewidths=0.75, levels=levs2[::2]) ax2.add_feature(cfeature.LAND, color='silver', zorder=2) ax2.coastlines(zorder=2) ax3 = plt.subplot(gs[2,1:]) ax3.spines['top'].set_visible(False) ax3.spines['right'].set_visible(False) ax3.tick_params(labelsize=fstic) plt.scatter(chl_log10, kri_log10, c=chlz, cmap='copper_r', s=20, alpha=0.5) ''' from scipy.optimize import curve_fit def linreg(x,a,b): return a*x + b coef, cova = curve_fit(linreg, chl_log10, kri_log10, method='lm') print(coefs) xx = np.linspace(np.min(chl_log10), np.max(chl_log10), 100) yy = linreg(xx, coef[0], coef[1]) plt.plot(xx, yy, color='firebrick', linewidth=1.5, linestyle='-', alpha=0.75) ''' plt.plot((-1,-1),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.plot((-0.8,-0.8),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.plot((-0.6,-0.6),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.plot((-0.4,-0.4),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.plot((-0.2,-0.2),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.plot((0,0),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.plot((0.2,0.2),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.plot((0.4,0.4),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.plot((0.6,0.6),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.plot((0.8,0.8),(-1,3), 'k--', alpha=0.5, linewidth=0.5) plt.xlim(-1,1) plt.ylim(-1,2) plt.ylabel('log$_{10}$(counts per effort)', fontsize=fslab) plt.xlabel('log$_{10}$(chlorophyll-a) (mg m$^{-3}$)', fontsize=fslab) x = 0.05; y = 1.05 plt.text(x,y,'a', transform=ax1.transAxes, fontweight='bold', 
fontsize=fslab+2, ha='center', va='center') plt.text(x,y,'b', transform=ax2.transAxes, fontweight='bold', fontsize=fslab+2, ha='center', va='center') plt.text(x,y,'c', transform=ax3.transAxes, fontweight='bold', fontsize=fslab+2, ha='center', va='center') plt.subplots_adjust(top=0.95) cbax1 = fig.add_axes([0.8, 0.725, 0.05, 0.2]) cbar1 = plt.colorbar(p1, cax=cbax1, orientation='vertical', ticks=levs1[::2]) cbar1.ax.set_ylabel('counts per effort', fontsize=fslab) cbar1.ax.tick_params(labelsize=fstic) cbax2 = fig.add_axes([0.8, 0.43, 0.05, 0.2]) cbar2 = plt.colorbar(p2, cax=cbax2, orientation='vertical', ticks=levs2[::2]) cbar2.ax.set_ylabel('log$_{10}$(mg m$^{-3}$)', fontsize=fslab) cbar2.ax.tick_params(labelsize=fstic) fig.savefig('Chlorophyll_krill.png', dpi=300, bbox_inches='tight')
notebooks/chlorophyll_krill.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="rDwYCeIX-kH1"
# # Handling outliers
#
# * Statistically, an outlier is a data point that differs greatly from the other observations.
# * Outliers can cause problems in statistical analysis (they affect the mean and the standard deviation).
#

# + [markdown] id="SNqVictr-kH5"
# ### Identifying outliers
# * Values that fall outside the interval [Q1 - 1.5*IQR, Q3 + 1.5*IQR]
# * Values that fall outside the 5th-95th percentile range

# + id="84pWAY6R-kH6"
import pandas as pd
import numpy as np

# + colab={"base_uri": "https://localhost:8080/"} id="-kn2Fs1h-kIA" outputId="b9953cda-1fa2-4b59-f05e-42d42c9409b3"
# Arbitrary data containing an outlier
x = pd.Series([23,1,3,5,34,6,32,7,45,34,78])
x.describe()

# + [markdown] id="Dpj-hGKd-kIF"
# Conventionally, values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are considered outliers.

# + id="Xx8rgePS-kIF"
# Build a function that returns the index values of the outliers.
def idx_of_outliers(x):
    """Return the indices of IQR-based outliers in the 1-D array `x`.

    A value is an outlier when it lies outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR]. Returns the tuple produced by
    ``np.where`` (same shape of result as before).
    """
    q1, q3 = np.percentile(x, [25,75])
    IQR = q3 - q1
    # Fix: the bounds were previously built as one-element lists
    # (`q1 - [IQR * 1.5]`), which numpy silently coerced to length-1
    # arrays. Plain scalars give the same result without the accidental
    # broadcasting.
    lower_bound = q1 - 1.5 * IQR
    upper_bound = q3 + 1.5 * IQR
    return np.where((x > upper_bound) | (x < lower_bound))

# + colab={"base_uri": "https://localhost:8080/"} id="CGDAGfBe-kIJ" outputId="8c58f0e3-cd33-487a-fd80-ccef333ea8b9"
idx_of_outliers(x.values)

# + [markdown] id="vt8oihUU-kIM"
# Alternatively, values outside the 5th-95th percentile range can be treated as outliers.
# + id="1hkTVIng-kIN" outputId="74552776-5415-4810-afa4-280e4bc48bd6" print('5th percentile: ', x.quantile(q=0.05)) print('95th percentile: ', x.quantile(q=0.95)) # + id="dYfJhoI9-kIQ" outputId="78f0bd65-ac34-4312-a826-4f112a9b0d69" x[(x < x.quantile(q=0.05)) | (x > x.quantile(q=0.95))] # + [markdown] id="78WO0DCR-kIU" # ### outlier 처리하기 # * outliers의 값을 제거 # * 자연로그를 취해서 값을 감소시키는 방법 등으로 변환 # # + id="Ynd05g-B-kIU" outputId="008401cb-cf74-4e63-a790-a80fd92df81c" houses = pd.DataFrame() houses['Price'] = [534433, 392333, 293222, 4322032] houses['Bedrooms'] = [2, 3.5, 2, 116] houses['Square_Feets'] = [1500, 2500, 1500, 48000] houses # + [markdown] id="MgvhEEdT-kIZ" # outlier를 처리하는 가장 간단한 방법은 outlier를 삭제하는 것이다. # + id="BCXHvYoT-kIa" outputId="d68b812a-4bc6-4688-a188-d6f322fa548c" houses.describe() # + id="7_fR9RYu-kIe" outputId="84434b77-4554-4933-c03d-c891cbe26b7a" q1 = houses['Bedrooms'].quantile(0.25) q3 = houses['Bedrooms'].quantile(0.75) iqr = q3 - q1 # Apply filter with respect to IQR filter = (houses['Bedrooms'] >= q1 - 1.5*iqr) & (houses['Bedrooms'] <= q3 + 1.5*iqr) houses.loc[filter] # + [markdown] id="phy85E9f-kIi" # outlier의 영향이 줄어들도록 column을 변환한다. # + id="5qolKa-N-kIj" outputId="60e793bc-c380-4d8d-dbe8-8bc0fd0b1668" # 로그변환 houses['Log_Square_Feets'] = [np.log(x) for x in houses['Square_Feets']] houses # + [markdown] id="4duRunOp-kIn" # ### 결론 # * outlier의 확인 및 처리 방법에는 정답이 없다. # * 여러가지 방법을 고려하여 적절한 방법을 선택한다. # # + id="2Pw-7KfS-kIo"
02ML/01DataPreprocess/02Outlier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Analysis of MovieLens dataset (Beginner's Analysis)
# https://www.kaggle.com/jneupane12/analysis-of-movielens-dataset-beginner-sanalysis

import sys

print(sys.executable)

# ## 1. First we import necessary Libraries

import pandas as pd  # pandas is a data manipulation library
import numpy as np  # provides numerical arrays and functions to manipulate the arrays efficiently
import random
import matplotlib.pyplot as plt  # data visualization library
import wordcloud  # used to generate word cloud
import time
import datetime
import re

# ## 2. Reading and Exploring the Data

# ### 2.1 Load Movies Data

movies = pd.read_csv("datasets/movies.csv")
movies.sample(5)

movies.shape, movies.info(), len(movies['movieId'].unique().tolist())

# ### 2.2 Load Ratings Data

ratings = pd.read_csv("datasets/ratings.csv")
ratings.info()

ratings.sample(5)

ratings.describe()

ratings.shape, len(ratings['movieId'].unique().tolist())

# ### 2.3 Load Tags Data

tags = pd.read_csv("datasets/tags.csv")
# NOTE(review): this line repeats the ratings stats from section 2.2; it
# probably should report tags.shape / unique tag movieIds -- confirm.
ratings.shape, len(ratings['movieId'].unique().tolist())

tags.sample(5)

# ## 3. Cleaning of data

movies.isnull().any()

ratings.isnull().any()

tags.isnull().any()

# +
# # lets drop null rows
# tags=tags.dropna()
# -

# ## 4. Data Analysis

# ### 4.1 Make dates more readable
# https://stackoverflow.com/a/62968313/2049763

def timestamp_to_date_converter(t):
    """Convert a Unix epoch timestamp to a human-readable local-time string."""
    # https://realpython.com/python-time-module/
    return datetime.datetime.fromtimestamp(t).strftime("%A, %B %d, %Y %I:%M:%S")


# Add a formatted 'date' column alongside the raw epoch 'timestamp'.
ratings['date'] = ratings['timestamp'].apply(timestamp_to_date_converter)
tags['date'] = tags['timestamp'].apply(timestamp_to_date_converter)

ratings.sort_values(by=['timestamp'], ascending=True).tail(5)

# Epoch timestamp for 2018-01-01 (local time), used below to filter recent ratings.
s = "01/01/2018"
t = time.mktime(datetime.datetime.strptime(s, "%m/%d/%Y").timetuple())

ratings.loc[ratings.timestamp>t].sort_values(by=['timestamp'], ascending=True).head(5)

ratings.loc[ratings.timestamp>t].sort_values(by=['timestamp'], ascending=True).shape

# ### 4.2 Reading Movie Release Year
# https://stackoverflow.com/a/8569258

def title_to_release_year(s):
    """Extract the release year from a title such as 'Toy Story (1995)'.

    Returns the last parenthesised digit group as a *string*, or None when the
    title carries no year.  (re.findall never returns None, so the `is None`
    guard is redundant but harmless.)
    """
    m = re.findall(r"\(([0-9]+)\)", s)
    # https://www.guru99.com/python-regular-expressions-complete-tutorial.html
    if m is None or len(m) <= 0:
        return None
    return m[-1]


movies['year'] = movies['title'].apply(title_to_release_year)
movies.sample(5)

movies.isnull().any()

# https://datatofish.com/rows-with-nan-pandas-dataframe/
movies[movies.isnull().any(axis=1)]

# Drop movies without a parseable release year.
movies = movies.dropna()

# ## 5. Basic Recommendation
#
# https://www.kaggle.com/subhamoybhaduri/diff-approaches-of-building-recommender-system

ratings.shape

# +
# https://stackoverflow.com/a/39881230
# ratings = ratings.loc[ratings.movieId.isin(movies.movieId)]

# Inner join keeps only ratings whose movie survived the year cleanup above.
movies_ratings = ratings.merge(movies, on = 'movieId', how = 'inner')
movies_ratings.shape
# -

movies_ratings = movies_ratings.sort_values(['year', 'timestamp'], ascending=[True, True])
movies_ratings.head(5)

# +
nb_users = movies_ratings['userId'].nunique()
nb_movies = movies_ratings['movieId'].nunique()

nb_users, nb_movies

# +
# users x movies rating matrix; unrated entries become 0.
ratings_matrix = movies_ratings.pivot_table(index=['userId'],columns=['movieId'],values='rating').reset_index(drop=True)
ratings_matrix.fillna(0, inplace = True)
ratings_matrix.sample(5)
# -

data_matrix = np.array(ratings_matrix)
print(data_matrix.shape)

# ### 5.1 Gaussian Mixture Model and Expectation-Maximization Algorithm

# +
from sklearn.model_selection import train_test_split
from sklearn.mixture import GaussianMixture
from scipy.special import logsumexp
import itertools
# -

# split the data into train and test set (rows = users)
train, test = train_test_split(data_matrix, test_size=0.2, random_state=42, shuffle=True)
test.shape, test

# +
# # https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html#How-many-components?
# Fit a 2-component GMM to the training users' rating vectors.
gmm_model = GaussianMixture(n_components=2, covariance_type='full', tol=0.001,
                            reg_covar=1e-06, max_iter=100, n_init=1, init_params='kmeans',
                            weights_init=None, means_init=None, precisions_init=None,
                            random_state=42, warm_start=False, verbose=0, verbose_interval=10)
gmm_model.fit(train)
# -

print(gmm_model.means_.shape)
print(gmm_model.covariances_.shape)
print(gmm_model.weights_.shape)

gmm_model.predict(test)

# +
# Fill Missing Values i.e Recommend
# Precompute each component's covariance inverse and determinant, used below
# to evaluate the multivariate Gaussian log-density by hand (K is 2 here, so
# the two components are handled by the explicit if/else inside the loop).
inver0, inver1 = gmm_model.covariances_[0], gmm_model.covariances_[1]
inver0, inver1 = np.linalg.inv(inver0), np.linalg.inv(inver1)

deter0, deter1 = gmm_model.covariances_[0], gmm_model.covariances_[1]
deter0, deter1 = np.linalg.det(deter0), np.linalg.det(deter1)

n, d = train.shape
K = gmm_model.means_.shape[0]
print(n, d, K)

mean = gmm_model.means_
variance = gmm_model.covariances_  # NOTE(review): assigned but never used below
weight = np.log(gmm_model.weights_)  # log mixture weights

calc = np.zeros((n, K))
ind = np.zeros((n, d))
soft = calc    # NOTE(review): alias is overwritten after the loop; no effect
X_pred = ind   # X_pred keeps pointing at this (n, d) zeros array even after
               # `ind` is rebound below -- it is filled in by the rounding loop.
add = np.zeros((n,))
dim = np.zeros((n,))

# ind marks observed (non-zero) ratings; dim counts observed ratings per user.
ind = np.where(train != 0, 1, 0)
dim = np.sum(ind, axis=1)

for i in range(n):
    for j in range(K):
        res = train[i] - mean[j]
        # Zero out unobserved entries so only rated movies contribute.
        res = np.multiply(res, ind[i])
        # Multivariate Gaussian log-density (quadratic form A, normaliser C);
        # 1e-16 guards the log against a vanishing determinant.
        if j == 0:
            A = (res.T @ inver0) @ res
            C = (dim[i]/2)*np.log(2*np.pi) + np.log(deter0 + 1e-16)/2
        else:
            A = (res.T @ inver1) @ res
            C = (dim[i]/2)*np.log(2*np.pi) + np.log(deter1 + 1e-16)/2
        B = 2
        # log( weight_j * N(x_i | mu_j, Sigma_j) ), kept in log-domain
        calc[i, j] = weight[j] + (-A/B) - C

add = logsumexp(calc, axis = 1)
# Since the entire computation is done in log-domain to avoid numerical
# instability we need to bring it back to its original domain.
soft = np.exp(np.subtract(np.transpose(calc), add))  # (K, n) posterior responsibilities
lg = np.sum(add)  # total log-likelihood (NOTE(review): computed but unused)

# Expected rating vector per user: responsibility-weighted mix of component means.
X_calc = np.transpose(soft) @ gmm_model.means_

# +
# We will use the predicted value if the entry is 0 in the original rating matrix.
# NOTE(review): X_calc has train.shape[0] rows while data_matrix has all users'
# rows; this np.where only broadcasts if those shapes happen to match -- confirm
# against the source notebook.
data_matrix_pred_GMM = np.where(data_matrix == 0, X_calc, data_matrix)
for i in range(data_matrix_pred_GMM.shape[0]):
    for j in range(data_matrix_pred_GMM.shape[1]):
        data_matrix_pred_GMM[i, j] = round(data_matrix_pred_GMM[i, j])

# For measuring the performance we have to use the predicted matrix
for i in range(X_calc.shape[0]):
    for j in range(X_calc.shape[1]):
        X_pred[i, j] = round(X_calc[i, j])

# +
ind_matrix = np.zeros((nb_users, nb_movies))
ind_matrix = np.where(data_matrix != 0, 1, 0)

# Score predictions only on originally observed entries.
x = np.multiply(X_pred, ind_matrix)
RMSE_GMM = np.sqrt(np.mean((x - data_matrix)**2))
print("RMSE of GMM Model is %f." %RMSE_GMM)
# -

# ### 5.2 Altenating Least Squares using Non-Negative Matrix Factorization
notebooks/02. Building Recommender of Movie Lens dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Al3927/charges_regression_analysis/blob/main/Main.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="yISGxg67rl9R" # # <center><font size ="9"> ĐỒ ÁN 01: REGRESSION</font></center> # <br /> # # __TÊN MÔN HỌC:__ NHẬP MÔN HỌC MÁY # # __ĐỀ TÀI:__ CHI PHÍ SỬ DỤNG DỊCH VỤ Y TẾ # # __GIẢNG VIÊN:__ NGUYỄN TIẾN HUY # # __THỨ TỰ NHÓM:__ 07 # # __THÀNH VIÊN:__ # # - 18120184 Nguyễn N<NAME> # - 18120189 Trần Đăng Khoa # - 18120264 Nguyễn Duy Vũ # - 18120283 Nguyễn Chiêu Bản # - 18120286 Nguyễn Quốc Bảo # # __PHÂN CÔNG:__ # # Công việc | Thực hiện | Mức độ hoàn thành # ------------ | ------------- | ------------ # Khám phá dữ liệu cơ bản | Vũ | 100% # Tiền xử lý dữ liệu | Vũ | 100% # Mô hình hóa dữ liệu | Bản, Bảo | 100% # Phân tích dữ liệu tìm Insight| Khang, Khoa | 100% # # + [markdown] id="rM4_J3Mf_1xP" # ## Get Data # + colab={"base_uri": "https://localhost:8080/"} id="mmYogQFq8nzl" outputId="5a7965f4-2326-43c0-c976-40e92317aec9" # !gdown --id 19ig1GFDCioN_O2LjhF_JSzhwl_erengT # !gdown --id 1xvHczHpMFgPIQhyMKWT__fHimq0yVVK9 # + colab={"base_uri": "https://localhost:8080/"} id="KuOC9BrcjFpL" outputId="013396b7-400b-41c8-dc19-2e5c38717671" # !pip install --upgrade scikit-learn # + [markdown] id="O-ANv-qw_p9j" # ## Import # + id="T3jAZ3YA_GbB" import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder, StandardScaler, OrdinalEncoder from sklearn.preprocessing import PowerTransformer from sklearn.impute import 
SimpleImputer from sklearn.pipeline import Pipeline, make_pipeline from sklearn.compose import ColumnTransformer, make_column_transformer from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn import metrics from sklearn.compose import TransformedTargetRegressor from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.cross_decomposition import PLSRegression from sklearn.svm import SVR from sklearn import set_config set_config(display='diagram') # + [markdown] id="nov_1W8RAE5O" # ## Data Exploration # + colab={"base_uri": "https://localhost:8080/", "height": 194} id="037tOG1O_KSd" outputId="fccad880-9c5f-4c76-c19e-d5904b241ef8" train_df = pd.read_csv('train.csv') train_df.head(5) # + [markdown] id="dsdWK4kdAY5a" # ### Kích thước của tập dữ liệu # + colab={"base_uri": "https://localhost:8080/"} id="vDd1CWf7AVbJ" outputId="93f9f0a2-0035-4cbb-cbd2-f272b1bda624" train_df.shape # + [markdown] id="5eIYrFeJBD9c" # ### Ý nghĩa của mỗi cột # + colab={"base_uri": "https://localhost:8080/", "height": 252} id="pVoTs1IkBHGB" outputId="5fc55c6f-283b-46ef-dabb-bb5994d3bd2e" columns = train_df.columns mean_column = ["Tuổi", "Giới tính", "Chỉ số khối cơ thể", "Số lượng trẻ con/người phụ thuộc", 'Tình trạng hút thuốc', 'Khu vực sinh sống', 'Chi phí y tế cá nhân'] mean_df = pd.DataFrame(list(zip(columns, mean_column)), columns=['Column', 'Meaning of Column']) mean_df # + [markdown] id="bxNkvFVcA0c8" # ### Dữ liệu có bị lặp hay không ? # + colab={"base_uri": "https://localhost:8080/"} id="4nXcte0EA62c" outputId="fcde6a57-736e-432f-fcfe-e6216aca9512" train_df.duplicated().sum() # + [markdown] id="3x-cmgHoBLgH" # ### Dữ liệu có bị thiếu hay không ? # + colab={"base_uri": "https://localhost:8080/"} id="SoQAlkg7BOPn" outputId="9e0f8c7a-874b-48dc-83e2-c0774aaba60f" train_df.isna().sum() # + [markdown] id="ExS_6ZDRBm20" # ### Các cột có kiểu dữ liệu gì ? 
# + id="A5PRGHnWEpkw" colab={"base_uri": "https://localhost:8080/"} outputId="1cf5e838-5d27-41d7-aafc-29fb41d182dd" train_df.dtypes # + [markdown] id="JUTfl_rU6DIp" # Các cột "sex" "smoker" "region" mang kiểu object -> Cần chuyển quả kiểu numeric để thuận lợi cho việc huấn luyện các mô hình # + [markdown] id="49OXNfm2_dWe" # ### Giá trị của các cột có kiểu dữ liệu object # + [markdown] id="eJ9Gv8uy_ph1" # **Sex** # + colab={"base_uri": "https://localhost:8080/"} id="8yawR9on_dAP" outputId="b08e8966-689a-41df-c226-1805083dcc13" train_df['sex'].value_counts() # + [markdown] id="UmmT5LDW_zA_" # Có thể thấy giá trị male và female cách biệt nhau không quá lớn điều này sẽ không ảnh hưởng đến dữ liệu # + [markdown] id="gojCcAiX_9UN" # **Smoker** # + colab={"base_uri": "https://localhost:8080/"} id="OvIZ39aMAEUp" outputId="f6d6a8a7-862c-4fe7-d322-bcd235508caa" train_df['smoker'].value_counts() # + [markdown] id="8X6g4jEWAWK5" # Lượng người hút thuốc và không hút thuốc chênh lệch nhau rất lớn # + [markdown] id="IxPzTsG9AAOW" # **Region** # + colab={"base_uri": "https://localhost:8080/"} id="sdVLdfWqAQG5" outputId="fefb5217-2ef4-4b5c-c82e-9c5e4f16ffb2" train_df['region'].value_counts() # + [markdown] id="-rjbj9MgAd96" # Không có giá trị bất thường ở đây # + [markdown] id="BB7oJk7DFNmy" # ### Xem xét sự phân bổ của các cột dạng số # + colab={"base_uri": "https://localhost:8080/", "height": 223} id="dHA23yvMF6DH" outputId="8989af02-afd4-4b90-8fc9-6613a0912be0" def missing_ratio(df): return (df.isna().mean() * 100).round(1) def lower_quartile(df): return df.quantile(0.25).round(1) def median(df): return df.quantile(0.5).round(1) def upper_quartile(df): return df.quantile(0.75).round(1) col_numeric = train_df[['age', 'bmi', 'children', 'charges']] col_numeric.agg([missing_ratio, 'min', lower_quartile, median, upper_quartile, 'max']).round(1) # + [markdown] id="zML-92tAF50s" # # + [markdown] id="9skbk4S_6-d1" # ## Preprocessing # + [markdown] id="gLS4WSIW8ouV" # ### Tách tập 
input output # + colab={"base_uri": "https://localhost:8080/"} id="dz3MRIPL7AjN" outputId="ee410bd1-60e0-433c-95e1-c56f3df766f5" train_y_df = train_df['charges'] train_X_df = train_df.drop('charges', axis = 1) train_X_df.shape, train_y_df.shape # + id="vp9wu91AX0aU" X, y = train_df.iloc[:, :-1], train_df.iloc[:, -1] # + [markdown] id="V-H88bBeIKFe" # ### Xử lý các cột có kiểu dữ liệu Categorical # + [markdown] id="eVe0hscAHt9A" # Ở bước khám phá dữ liệu ta có thể thấy cột "sex" "smoker" "region" mang kiểu object. Vì vậy ta sẽ tiến hành xử lý các cột này # + id="F6ozEOrDQyW_" onehot_features = ['region'] onehot_transformer = Pipeline( [ ('imputer_cat', SimpleImputer(strategy = 'constant')), ('onehot', OneHotEncoder(handle_unknown = 'ignore')) ] ) imp_mode = SimpleImputer(missing_values=np.nan, strategy='most_frequent') # + id="Sbz047sskm4q" ordinal_features = ['sex', 'smoker'] ordinal_transformer = Pipeline( [ ('imputer_cat', SimpleImputer(strategy = 'constant')), ('ordinal', OrdinalEncoder()) ] ) # + [markdown] id="xnm-lkjuH4sL" # ### Xử lý các cột có kiểu dữ liệu Numerical # + id="BQZcOZKGH9rP" numeric_features = ['age', 'bmi', 'children'] numeric_transformer = Pipeline( [ ('imputer_num', SimpleImputer(strategy = 'median')), ('scaler', StandardScaler()) ] ) numeric_transformer_insight = Pipeline( [ ('imputer_num', SimpleImputer(strategy = 'median')), ] ) # + [markdown] id="KtahC3-yIYIG" # ### Preprocessor # + id="r9GMoftgIYyh" preprocessor = ColumnTransformer( [ ('ordinals', ordinal_transformer, ordinal_features), ('onehot', onehot_transformer, onehot_features), ('numericals', numeric_transformer, numeric_features) ], remainder = 'drop' ) preprocessor_insight = ColumnTransformer( [ ('ordinals', ordinal_transformer, ordinal_features), ('numericals', numeric_transformer_insight, numeric_features), ('unorder_categorical', imp_mode, onehot_features) ], remainder = 'drop' ) # + colab={"base_uri": "https://localhost:8080/", "height": 399} id="rbIL_9_tTFju" 
outputId="3678e14b-cab0-4153-89c8-6f508d978e00" preprocessed_train_X = preprocessor_insight.fit_transform(X) insight_df = pd.DataFrame(preprocessed_train_X) columns = ['sex', 'smoker', 'age', 'bmi', 'children', 'region'] insight_df.columns = columns insight_df['sex'] = insight_df['sex'].astype(int) insight_df['smoker'] = insight_df['smoker'].astype(int) insight_df['age'] = insight_df['age'].astype(int) insight_df['bmi'] = insight_df['bmi'].astype(float) insight_df['children'] = insight_df['children'].astype(int) insight_X_df = insight_df.copy() insight_X_no_object_df = insight_df.drop('region', 1) insight_df['charges'] = y insight_df # X.columns # + [markdown] id="Dth4hFU9IAr1" # ## **Phân tích dữ liệu** # + [markdown] id="mukvC_8GtUiK" # ### Vẽ biểu đồ một biến và nhận xét # + colab={"base_uri": "https://localhost:8080/", "height": 270} id="0Nle3qW1g02G" outputId="6ae7ed79-0c1f-4711-8fd1-919ca716acb0" sns.boxplot(data=insight_df, y='charges') # + [markdown] id="J9iem3_1h09O" # Nhận xét: Biến charges có phân bố bị lệch trái, nhiều outlier # + colab={"base_uri": "https://localhost:8080/", "height": 270} id="zll5E5Z8hICD" outputId="e260974c-556d-4ba5-f989-42d2aae97452" sns.boxplot(data=insight_df, y='age') # + [markdown] id="J9r2cb2-kQzN" # Nhận xét: Biến age có phân bố chuẩn # + colab={"base_uri": "https://localhost:8080/", "height": 270} id="AhK48GskhtSO" outputId="4d6dce05-5c54-4c5f-d8a2-6f690ccb5763" sns.boxplot(data=insight_df, y='bmi') # + [markdown] id="9vxoAgpokYxJ" # Nhận xét: Biến bmi có phân bố chuẩn, tồn tại outlier # + colab={"base_uri": "https://localhost:8080/", "height": 352} id="KVPSqYjNd7DH" outputId="bafbcc2b-c894-45e9-e07a-3d9103354c9b" sns.distplot(insight_df['sex'], hist=True, kde=False, bins=20, color = 'blue', hist_kws={'edgecolor':'black'}) plt.xlabel('Sex') plt.ylabel('Frequency') # + [markdown] id="nUi62uoplLi-" # Nhận xét: Tỉ lệ nam nữ bằng nhau # + colab={"base_uri": "https://localhost:8080/", "height": 352} id="2oFG1VZ0gEbk" 
outputId="1f8b178c-75aa-4750-8126-7df9d0f1ad20" sns.distplot(insight_df['smoker'], hist=True, kde=False, bins=20, color = 'blue', hist_kws={'edgecolor':'black'}) plt.xlabel('Smoker') plt.ylabel('Frequency') # + [markdown] id="zHGJWRwRlf8Y" # Nhận xét: Tỉ lệ người không hút thuốc gấp 4 lần người hút thuốc # + colab={"base_uri": "https://localhost:8080/", "height": 352} id="SnKi5N1SfdIm" outputId="2020bd93-f7ab-4d3c-8f4e-3cdb93ada59b" sns.distplot(insight_df['children'], hist=True, kde=False, bins=20, color = 'blue', hist_kws={'edgecolor':'black'}) plt.xlabel('Children') plt.ylabel('Frequency') # + [markdown] id="MCQ8NK_EmYJG" # Nhận xét: Tỉ lệ người có càng nhiều con giảm dần # + [markdown] id="RmfAwXpLRZ_M" # ### Vẽ biểu đồ các biến tương quan và nhận xét # + [markdown] id="rQyCOrFCG9Gk" # Tính ma trận tương quan # + colab={"base_uri": "https://localhost:8080/", "height": 446} id="MzdAOhZ7GEAI" outputId="2edeff0b-5f06-4504-fa12-a237f3402bbd" corr = insight_df.corr() plt.figure(figsize=(7, 7)) sns.heatmap(corr,square=True, annot=True, cbar=False, fmt='.3f', cmap='Reds') # + [markdown] id="stC3P6xaH0NU" # Có thể thấy những thuộc tính như age (yếu), bmi (yếu), smoker (mạnh) có tương quan với thuộc tính charges # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="ITECGvaQJTHi" outputId="7f616018-98a2-435a-de63-1d958404d583" sns.boxplot(data=insight_df, x='smoker', y='charges') # + [markdown] id="120VZ1MwKsB9" # Biểu đồ trên cho ta thấy người hút thuốc thì có chi phí y tế cao hơn, cụ thể : # - hơn 75% người hút thuốc trả chi phí cao hơn hầu hết tất cả người không hút thuốc # - chi phí thấp nhất của người hút thuốc chỉ nhỉnh hơn một chút so với chi phí của 75% người không hút thuốc. 
# - nếu chi phí dưới 10k, xác suất cao là người đó không hút thuốc # - nếu chi phí trên 20k, xác suất cao là người đó hút thuốc # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="bKx-V6VcJfbR" outputId="475a3b75-ac06-4437-c52b-23fdeb90a44f" sns.scatterplot(data=insight_df, x='age', y='charges', hue='smoker') # + [markdown] id="wnCRI3hiM0gF" # Nhìn vào biểu đồ trên, ta thấy # - người càng cao tuổi thì số tiền chi cho y tế càng nhiều # - Nếu dưới 35 tuổi và không hút thuốc thì khả năng cao chi phí dưới 6k # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="JUMurDf3QY8V" outputId="e6a6641d-dce2-4028-c355-12665297adb0" sns.scatterplot(data=insight_df, x='bmi', y='charges', hue='smoker') # + [markdown] id="C-AT3_clQz3R" # - Người hút thuốc và có chỉ số BMI lớn hơn 30 thì chi phí tổi thiểu là khoảng 30k # + [markdown] id="Bl8A59xzlvMj" # ### VIF # + id="gGgIugVBlvoz" colab={"base_uri": "https://localhost:8080/"} outputId="09c46f40-cd5f-4d8b-8577-c54a8069585e" from statsmodels.stats.outliers_influence import variance_inflation_factor # VIF dataframe vif_data = pd.DataFrame() vif_data["feature"] = insight_X_no_object_df.columns # calculating VIF for each feature vif_data["VIF"] = [variance_inflation_factor(insight_X_no_object_df.values, i) for i in range(len(insight_X_no_object_df.columns))] print(vif_data) # + [markdown] id="z9gAkTQtaKN3" # 1 = Không tương quan # # Giữa 1 và 5 = Tương quan vừa # # Lớn hơn 5 = Tương quan mạnh # # (Tham khảo : [1]) # # # Ta thấy các biến `sex`, `smoker`, `children` tương quan vừa với các biến còn lại. # # `age` và `bmi` có sự tương quan mạnh với các biến còn lại # # Nên thu thập thêm data để giảm sự phụ thuộc giữa các biến # + [markdown] id="LyEI1TlyAjXW" # ### Insight: Sex có ảnh hưởng đến Smoker? 
# # + [markdown] id="f76h7MXBQHBx" # # ${H_0}$: sex và smoker độc lập nhau # # ${H_A}$: sex và smoker phụ thuộc nhau # # </br> # # Đặt: # # ${A =}$ sex, ${A_1 =}$ `male`, ${A_2}$ = `female` # # ${B =}$ smoker, ${B_1 =}$ `yes`, ${B_2 =}$ `no` # # </br> # # Ta có: # # ${H_0}$: ${P(A_i\cap B_j) = P(A_i)P(B_j)}$ # # ${H_A}$: ${P(A_i\cap B_j) \neq P(A_i)P(B_j)}$ # # </br> # # Phần dưới sẽ trình bày về mặt toán học lẫn sử dụng thư viện scipy.stats để tính toán # # </br> # + id="GkMOpM-5BCs-" from scipy.stats import chi2_contingency from scipy.stats import chi2 # + id="PLHoYJ9YBwv2" insight_df_1 = insight_df.copy() insight_df_1['sex'] = insight_df_1['sex'].replace(1,'male') insight_df_1['sex'] = insight_df_1['sex'].replace(0,'female') insight_df_1['smoker'] = insight_df_1['smoker'].replace(1,'yes') insight_df_1['smoker']= insight_df_1['smoker'].replace(0,'no') # + colab={"base_uri": "https://localhost:8080/", "height": 135} id="JD6hzEtKBVlB" outputId="999df379-8cf3-4b64-c8b9-ed584cce491c" contigency= pd.crosstab(insight_df_1['sex'], insight_df_1['smoker']) contigency # + id="GwBq9CmkCRbV" contigency.columns = ['no', 'yes'] # + colab={"base_uri": "https://localhost:8080/", "height": 135} id="3m4IZzJoC5vQ" outputId="176cab37-db2e-465c-f718-8d2c7c79cdf2" contigency['Pr(Ai)'] = contigency['no'] + contigency['yes'] sum_table = sum(contigency['Pr(Ai)']) contigency['Pr(Ai)'] = contigency['Pr(Ai)']/sum_table contigency # + colab={"base_uri": "https://localhost:8080/"} id="jPRGFXWxCYS9" outputId="d16d6cd4-86c2-445b-9466-7d311f6d798f" Pr_No = sum(contigency['no'])/sum_table Pr_Yes = sum(contigency['yes'])/sum_table PrBj = [Pr_No,Pr_Yes] print(f'Pr_No = {Pr_No}\nPr_Yes = {Pr_Yes}\n') print(sum(PrBj)) # + [markdown] id="Bo10kOnqDW22" # Ta đã tính được ${Pr(A_i)}$ như bảng trên và # # ${Pr(B_1)}$ = 0.2053838484546361 # # ${Pr(B_2)}$ = 0.7946161515453639 # # --- # # Đến đây ta có thể tính: # # Giá trị mong đợi ${E}$: # # \begin{equation} # \text{Do kỳ vọng A và B độc lập:}\\ # E_{ij} = 
Pr(A_i) \times Pr(B_j) \times N [2]\\ # \text{hay}\\ # E_{ij} = \frac{\text{(Tổng dòng} \times \text{Tổng cột)}}{\text{Tổng bảng}} [3] \\ # \text{với bảng là bảng contingency} # \end{equation} # # Giá trị ${\chi^2}$: # # \begin{equation} # \chi^2=\Sigma\frac{(O-E)^2}{E} [2][3]\\ # \text{với O là giá trị thực sự và E là giá trị mong đợi} # \end{equation} # # Giá trị dof: Degree of freedom # # dof cho ${\chi^2}$ độc lập: # # \begin{equation} # dof = v = rc - 1 - (r-1) - (c-1) = (r-1)(c-1) [2]\\ = 1 # \end{equation} # # Chọn mức ý nghĩa: # # \begin{equation} # \alpha = 0.05 # \end{equation} # # Tra bảng Chi Squared với ${\alpha = 0.05, dof = 1}$ ta được critical value ${ = 3.841459}$ # # Chấp nhận ${H_0}$ nếu # \begin{equation} # \chi^2_v <= 3.841459 # \end{equation} # + [markdown] id="7Q9W2lKGEgjt" # </br> # # Ta có thể sử dụng `chi2_contingency` của thư viện spicy để tính toán: # # + colab={"base_uri": "https://localhost:8080/"} id="OgiXX-NFEAyP" outputId="197e2e86-5ebd-4b45-cecb-488fe8c2a1bd" # [3]Source: https://towardsdatascience.com/gentle-introduction-to-chi-square-test-for-independence-7182a7414a95 chi, pval, dof, expected = chi2_contingency(contigency.drop(['Pr(Ai)'],1)) print('p-value là: ', pval) significance = 0.05 p = 1 - significance critical_value = chi2.ppf(p, dof) print('chi = %.6f, critical value = %.6f\n' % (chi, critical_value)) if chi > critical_value: print("""Với mức ý nghĩa %.2f, ta bác bỏ Ho và chấp nhận HA. Kết luận: sex và smoker không độc lập.""" % (significance)) else: print("""Với mức ý nghĩa %.2f, ta bác bỏ HA và chấp nhận H0. 
Kết luận: sex và smoker độc lập.""" % (significance)) # + colab={"base_uri": "https://localhost:8080/"} id="qalNI0NhJznO" outputId="cd950395-6a2f-409c-e8c2-32f40705946c" chi # + [markdown] id="0125b8h1EjfN" # Ta kiểm tra, không dùng thư viện: # # </br> # + colab={"base_uri": "https://localhost:8080/", "height": 164} id="-CNtXZw9IIlk" outputId="c566bd83-27c8-4f81-fb0c-63ae60b40f36" append_data = [] for sex in insight_df_1['sex'].unique(): append_data.append(insight_df_1['smoker'][insight_df_1['sex'] == f'{sex}'].value_counts().rename_axis('smoker').reset_index(name='count')) # items_df['Accidents'].value_counts().rename_axis('Accidents').reset_index(name='Frequency') contigency_1 = pd.concat(append_data) contigency_1.reset_index(inplace = True) contigency_1 contigency_1['sex'] = np.nan i = 0 pd.options.mode.chained_assignment = None # default='warn' for sex in insight_df_1['sex'].unique(): contigency_1['sex'][i] = f'{sex}' contigency_1['sex'][i+1] = f'{sex}' i = i + 2 contigency_1 = contigency_1.reindex(columns = ['smoker', 'sex', 'count']) contigency_1 # + colab={"base_uri": "https://localhost:8080/", "height": 182} id="y-wz8S7gIz2N" outputId="57435735-a1d1-4644-c640-51c0ac46d3e3" expectedValue = [] for Ai in contigency['Pr(Ai)'][::-1]: #female, male for Bj in PrBj: # no, yes expectedValue.append(Ai*Bj*sum_table) chiij = [] oij = np.array(contigency_1['count']) for ij in range(0, len(expectedValue)): chiij.append((oij[ij] - expectedValue[ij])**2/expectedValue[ij]) contigency_1['Expected value'] = expectedValue contigency_1['(O_ij - E_ij)^2/E_ij'] = chiij chi_square = sum(contigency_1['(O_ij - E_ij)^2/E_ij']) print('chi_square = ', chi_square) contigency_1.head() # + [markdown] id="fXrk6_v-OyY3" # Ta thấy: # # \begin{equation} # \chi^2_v = 2.997908815661011 < 3.841459 # \end{equation} # # </br> # # Vậy bác bỏ ${H_A}$ với mức ý nghĩa 0.05, chấp nhận ${H_0}$ # # </br> # # <center><font size ="5">Kết luận: sex và smoker độc lập</font></center> # # </br> # + [markdown] 
id="B-Jis1XW_yG0" # ### Insight: Trung bình của 'age', 'bmi', 'children' giữa người có hút thuốc và người không hút thuốc có bằng nhau # + id="M5cNUroyAlgA" numeric_features = ['age', 'bmi', 'children'] # + colab={"base_uri": "https://localhost:8080/"} id="j8jcia1lBTj-" outputId="fd57cc9d-bb8e-43fd-ac24-d6423b4c3c25" from statsmodels.stats.weightstats import ztest significance = 0.05 for i in range(len(numeric_features)): print(f'\nHo: trung bình `{numeric_features[i]}` của người có hút thuốc = trung bình `{numeric_features[i]}` của người không hút thuốc\n') print(f'\nHa: trung bình `{numeric_features[i]}` của người có hút thuốc khác trung bình `{numeric_features[i]}` của người không hút thuốc\n') stat, p = ztest(insight_df[numeric_features[i]][insight_df['smoker'] == 1],insight_df[numeric_features[i]][insight_df['smoker'] == 0]) print('stat=%.3f, p=%.3f' % (stat, p)) print('\nKẾT LUẬN: ') if p >= significance: print(f'Với mức ý nghĩa {significance}, ta chấp nhận Ho, bác bỏ Ha.\nTrung bình `{numeric_features[i]}` của người có hút thuốc = trung bình `{numeric_features[i]}` của người không hút thuốc\n---') else: print(f'Với mức ý nghĩa {significance}, ta chấp nhận Ha, bác bỏ Ho.\nTrung bình `{numeric_features[i]}` của người có hút thuốc khác trung bình `{numeric_features[i]}` của người không hút thuốc\n---') # + [markdown] id="zuFDiJWAaISu" # ### Insight: Sự phụ thuộc của `charges` vào `sex`, `smoker`, `age`, `bmi`, `children` # + id="j9VSvK_0aDFu" import statsmodels.api as sm import statsmodels.formula.api as smf # + colab={"base_uri": "https://localhost:8080/"} id="roOI6vQCajQp" outputId="e6b53b62-56c5-4c69-b106-20a5b6967021" results = sm.OLS(y, sm.add_constant(insight_X_no_object_df)).fit() print(results.summary()) # + [markdown] id="DSOAeoJMcMVj" # </br> # # Kết luận: # # - Biến `sex` không có ý nghĩa (có thể loại bỏ) # - Biến `smoker` có ý nghĩa đối với mô hình về mặt thống kê (với mức ý nghĩa (***) hay p-value = 0.000) # - Biến `age` có ý nghĩa đối với mô hình 
về mặt thống kê (với mức ý nghĩa (***) hay p-value = 0.000) # - Biến `bmi` có ý nghĩa đối với mô hình về mặt thống kê (với mức ý nghĩa (***) hay p-value = 0.000) # - Biến `children` không có ý nghĩa (có thể loại bỏ) # - Mô hình có thể giải thích được 74.3% sự thay đổi của biến `charges` # - Mô hình tương đối tốt (p-value = 1.78e-129) # + [markdown] id="Pgr_84L2x55M" # Ta huấn luyện lại mô hình dựa theo kết luận trên # + colab={"base_uri": "https://localhost:8080/"} id="Ox15ATn_x375" outputId="226069de-9ac7-4df7-b7a8-7cf7e1e2fef3" results = sm.OLS(y, sm.add_constant(insight_X_no_object_df.drop(['sex', 'children'],1))).fit() print(results.summary()) # + id="VxAaCyA4d4ma" colab={"base_uri": "https://localhost:8080/"} outputId="d92986c6-1dd4-4cdf-9802-d0afffcd60a5" print('Parameters: ', results.params) # + [markdown] id="ucZqigV5fX85" # Ta thấy: # # - Cứ tăng 1 tuổi thì chi phí y tế cá nhân tăng 262.144961, tăng 1 chỉ số bmi thì tăng 326.725200 chi phí y tế cá nhân # - Riêng với smoker, người có hút thuốc thì có chi phí y tế cá nhân cao hơn người không hút thuốc đến 23668.497446 # # + [markdown] id="nZU1f-7AQr_l" # ## TIỀN XỬ LÝ + MÔ HÌNH HÓA # # reference: [machinelearningmastery](https://machinelearningmastery.com/how-to-transform-target-variables-for-regression-with-scikit-learn/) # + [markdown] id="-kUD3F3ORuZL" # ### Sử dụng SVR với kernel non-linear: RBF (mặc định) # + [markdown] id="-eqRSkDqRzXi" # ### Chọn tham số cho mô hình # + [markdown] id="YP1e-_F_R6ky" # #### Hàm svr_r2 # # Hàm này để tính độ chính xác trung bình khi sử dụng phương pháp đánh giá lỗi K-fold của 1 bộ tham số C, gamma # + id="2ygnPlIc5SZQ" def svr_r2(X, y, kernel, C, gamma, cv): svr = SVR(kernel=kernel, C=C, gamma=gamma) pipeline = Pipeline(steps=[('Column Transformer', preprocessor), ('model', svr),]) model = TransformedTargetRegressor(regressor=pipeline, transformer=StandardScaler()) scores = cross_val_score(model, X, y, scoring='r2', cv=cv, n_jobs=-1) return np.mean(scores) # + [markdown] 
id="IwUUlnnYTPhd" # #### Hàm optimal_svr # # Để chọn ra chọn siêu tham số làm cho mô hình có độ chính xác cao nhất trên tập huấn luyện # + id="leEFM50NsDTO" def optimal_svr(X, y, kernel, c_range, gamma_range, train_r2=[]): max_score = 0 cv = KFold(n_splits=10, shuffle=True, random_state=1) for c in c_range: scores = [] for g in gamma_range: score = svr_r2(X, y, kernel, c, g, cv) scores.append(score) if score > max_score: max_score = score res = (c, g) train_r2.append(scores) return res # + id="YDIebvS93axZ" C_range = [0.05,0.1,1,10, 50] gamma_range = [0.01,0.05,0.1,1,10] kernel = 'rbf' train_r2 = [] best_c, best_g = optimal_svr(X, y, kernel, C_range, gamma_range, train_r2) # + [markdown] id="jIwZmtirTigg" # #### Trực quan kết quả # + id="se-R2pZhKL-9" train_r2 = pd.DataFrame(train_r2, columns=gamma_range, index=C_range) # + id="Pj1vvQ7KO803" colab={"base_uri": "https://localhost:8080/", "height": 458} outputId="c8c7bedb-bb84-45f8-c035-17ad00052910" plt.figure(figsize=(7, 7)) sns.heatmap(train_r2,square=True, annot=True, cbar=False, fmt='.3f', cmap='Reds') plt.xlabel('Gamma'); plt.ylabel('C');plt.title('R-square of training set') plt.show() # + [markdown] id="KU1K1jN6T5qI" # Vậy các tham số tìm được là: # + id="s5-IYbRFT-wx" colab={"base_uri": "https://localhost:8080/"} outputId="6e54e814-a7c9-4f96-99e5-ac95fae26419" print('C= %s và gamma= %s' %(best_c, best_g)) # + [markdown] id="xKjUZLXhTv9T" # ### Dùng các siêu tham số tìm được ở trên để huấn luyện mô hình # + id="rzySAbfd9D-K" df_test = pd.read_csv('test.csv') X_test, y_test = df_test.iloc[:, :-1], df_test.iloc[:, -1] # + id="VTfcHOXq8ZgO" colab={"base_uri": "https://localhost:8080/", "height": 196} outputId="bb69d6db-80e7-43f5-c0ab-bb3ba1c48d6d" svr = SVR(kernel= kernel, C=best_c, gamma=best_g) pipeline = Pipeline(steps=[('Column Transformer', preprocessor), ('model', svr),]) model = TransformedTargetRegressor(regressor=pipeline, transformer=StandardScaler()) model.fit(X,y) # + id="VePdQxu09rCV" preds = 
model.predict(X_test) # + id="ac3enITIVZv9" colab={"base_uri": "https://localhost:8080/"} outputId="3f5f1b65-03e7-49d2-f9dd-60ab295944cd" score = model.score(X_test,y_test) score # + [markdown] id="DHtB7YqmC70a" # ### Sử dụng SVR với kernel linear # + [markdown] id="HWquAIICGCWr" # #### Lựa chọn siêu tham số tốt nhất cho mô hình # + id="AawaX-6xC6-Z" kernel = 'linear' train_r2 = [] best_c, best_g = optimal_svr(X, y, kernel, C_range, gamma_range, train_r2) # + [markdown] id="6733oTE_GMXx" # #### Trực quan kết quả # + id="7kE_BTH4DL__" train_r2 = pd.DataFrame(train_r2, columns=gamma_range, index=C_range) # + colab={"base_uri": "https://localhost:8080/", "height": 458} id="fTB4fOBXDRBP" outputId="ceca8ee2-0968-48cd-9787-21df6dfe7f2e" plt.figure(figsize=(7, 7)) sns.heatmap(train_r2,square=True, annot=True, cbar=False, fmt='.3f', cmap='Reds') plt.xlabel('Gamma'); plt.ylabel('C');plt.title('R-square of training set') plt.show() # + [markdown] id="IBKBIrsHGSo5" # Vậy các siêu tham số tốt nhất tìm được là: # + colab={"base_uri": "https://localhost:8080/"} id="lPzE1vGEDbrU" outputId="d376b3fa-ff75-493e-94d4-cbf3fad41710" print('C= %s và gamma= %s' %(best_c, best_g)) # + [markdown] id="369rQOGgGYv9" # **Nhận xét:** # - Siêu tham số gamma chỉ có giá trị trong các mô hình phi tuyến (bao gồm: ‘rbf’, ‘poly’ và ‘sigmoid’) # + [markdown] id="2sZxazNkHLrr" # #### Dùng các siêu tham số tìm được để huấn luyện mô hình # + colab={"base_uri": "https://localhost:8080/", "height": 196} id="E_Xa0MMODbj-" outputId="c54b72b7-fc70-41c3-bb72-3b62b93f65b0" svr = SVR(kernel= kernel, C=best_c, gamma=best_g) pipeline = Pipeline(steps=[('Column Transformer', preprocessor), ('model', svr),]) model = TransformedTargetRegressor(regressor=pipeline, transformer=StandardScaler()) model.fit(X,y) # + id="xWFnVdhwDQ3P" preds = model.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="vO95KM06Dm3b" outputId="3d2baa92-4cc1-4a94-8209-6e8dcd80b856" score = model.score(X_test,y_test) score # + 
[markdown] id="ng6HwC17HUnP" # **Nhận xét:** # - Ta thấy vấn đề dự đoán chi phí y tế được giải quyết tốt hơn trên mô hình phi tuyến # - Thuật toán SVR ở kernel phi tuyến cho độ chính xác cao hơn rất nhiều so với kernel tuyến tính # - Độ chính xác của mô hình còn khá thấp, nên ta thử các các tối ưu khác # + [markdown] id="4P2p8OC5JSl-" # ### Sử dụng mô hình SVR với kernel non-linear trên dữ liệu được xóa outliers # + [markdown] id="Ck75cnNyPb1r" # Từ biểu đồ hộp ở phần phân tích dữ liệu, ta thấy ở các bệnh nhân không hút thuốc thì có sự biến động khá lớn trong chi phí điều trị bệnh. Có thể những người này mắc các bệnh bẩm sinh hoặc không liên quan đến thuốc lá. # # Giả sử các bệnh nhân không hút thuốc có chi phí điều trị bệnh cao bất thường là các outliers ta tiến hành xóa các bệnh nhân này ra khỏi tập huấn luyện # + [markdown] id="npLY0DmMQjum" # Theo biểu đồ hộp, thì khoảng lớn hơn 1.5 lần ipr là các outliers # + id="Vwv-K5UgJSWy" q25 = train_df[train_df.smoker == 'no']['charges'].agg(lower_quartile) q75 = train_df[train_df.smoker == 'no']['charges'].agg(upper_quartile) iqr = q75 - q25 cut_off = iqr * 1.5 lower, upper = q25 - cut_off, q75 + cut_off # + [markdown] id="yMTGfxIJQy6C" # #### Xóa các outliers # + id="fbIEV5QgK50m" removed_outliers = train_df[((train_df.smoker == 'no') & (train_df.charges > lower) & (train_df.charges < upper)) | (train_df.smoker == 'yes')] # + [markdown] id="ydlK1jBCQ5FX" # Ta xem lại biểu đồ để xác nhận không còn những điểm outliers # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="9CfSQXTvL8SJ" outputId="1e6547f1-bbb3-4ddf-9bd8-b1a473b3401d" sns.boxplot(data= removed_outliers, x='smoker', y='charges'); # + [markdown] id="QEehWtLFRFd-" # Tách tập train đã bỏ các outlier # + id="9jtO9GYpM5au" removed_outliers_X, removed_outliers_y = removed_outliers.iloc[:, :-1], removed_outliers.iloc[:, -1] # + [markdown] id="sfP__2SrRM3m" # #### Tìm các siêu tham số tốt nhất với dữ liệu hiện tại # # Các bước thực hiện tương tự như 
trên # + id="iN2S-dOSMfJr" kernel = 'rbf' train_r2 = [] best_c, best_g = optimal_svr(removed_outliers_X, removed_outliers_y, kernel, C_range, gamma_range, train_r2) # + id="BdivT_daMkLs" train_r2 = pd.DataFrame(train_r2, columns=gamma_range, index=C_range) # + colab={"base_uri": "https://localhost:8080/", "height": 458} id="JoIVEJ8WMkFP" outputId="67a9ca27-4e3c-47a1-e549-32afce9c36f2" plt.figure(figsize=(7, 7)) sns.heatmap(train_r2,square=True, annot=True, cbar=False, fmt='.3f', cmap='Reds') plt.xlabel('Gamma'); plt.ylabel('C');plt.title('R-square of training set') plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="pmGdKe7aMj-Y" outputId="1caa664a-aaf0-4311-9984-f42e26909e87" print('C= %s và gamma= %s' %(best_c, best_g)) # + colab={"base_uri": "https://localhost:8080/", "height": 196} id="IYxBlB2iMj1Q" outputId="d393f93e-5326-46f8-83dc-6a192d16f78a" svr = SVR(kernel= kernel, C=best_c, gamma=best_g) pipeline = Pipeline(steps=[('Column Transformer', preprocessor), ('model', svr),]) model = TransformedTargetRegressor(regressor=pipeline, transformer=StandardScaler()) model.fit(removed_outliers_X,removed_outliers_y) # + colab={"base_uri": "https://localhost:8080/"} id="SYsI7K0mNdmn" outputId="fa714332-b07a-4dd6-871e-cdc71fbbf972" preds = model.predict(X_test) score = model.score(X_test,y_test) score # + [markdown] id="hW-SkLtoRjyv" # **Nhận xét:** # - Khi xóa các dữ liệu bệnh nhân bất thường thì độ chính xác trên tập train tăng lên đến gần 90% # - Tuy nhiên khi dự đoán trên tập test thì độ chính xác của mô hình không tăng mà lại thấp đi một chút # - Có vẻ là lớp bệnh nhân này cũng có tỉ lệ cao trong thực tế nên nó không hẵn là các outlier mà có thể là các đối tượng khá đặc biệt mà cần quan sát thêm các đặc trưng khác để có thể dự đoán chính xác # + [markdown] id="ud407shnZDzx" # ### Dùng Simple Regression # + id="sGZLFHy3Uq00" colab={"base_uri": "https://localhost:8080/"} outputId="5c9ddc8c-3d5d-4213-e18b-3c0b57f7abac" from sklearn.linear_model import 
LinearRegression reg = LinearRegression() pipeline = Pipeline(steps=[('Column Transformer', preprocessor), ('model', reg),]) model = TransformedTargetRegressor(regressor=pipeline, transformer=StandardScaler()) model.fit(X,y) model.score(X, y) # + colab={"base_uri": "https://localhost:8080/", "height": 196} id="Na8K8yh0n8Vc" outputId="f08e1493-8dfd-45e0-a650-554bd4fd7e55" model # + id="97YUBOwBM775" preds = model.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="g592Uvk5Nf0t" outputId="feaad08b-27a9-4b77-a198-a96f2fd3122b" score = model.score(X_test,y_test) score # + [markdown] id="jmfhcJVGOWEb" # #### Giảm chiều và trực quan # + id="RA8ufxsR7G-N" Xpre = preprocessor.fit_transform(X) # + colab={"base_uri": "https://localhost:8080/", "height": 71} id="tY301DcL2dPU" outputId="71b11e00-e187-490c-d5c8-6cb21effd607" pls = PLSRegression(n_components=1) pls.fit(Xpre, y) # + id="cb7SJAj08-ZT" X_test_pre = preprocessor.transform(X_test) # + colab={"base_uri": "https://localhost:8080/", "height": 265} id="uyPDombE7gpa" outputId="54f1a44d-69bf-4f40-f42c-0352595f1a49" plt.scatter(x=pls.transform(X_test_pre), y= y_test, alpha = 0.5,color= 'r', label= 'True') plt.scatter(x=pls.transform(X_test_pre), y = pls.predict(X_test_pre), alpha= 0.5, label= 'Predictions') plt.legend() plt.show() # + [markdown] id="yj6BZMcA8Ahb" # ### Kết luận: # # - Đối với dữ liệu này, trong các mô hình được thực hiện thì mô hình SVR với kernel là RBF cho độ chính xác cao nhất là khoảng 86% trên tập test và lên đến 90% trên tập train khi xóa đi các bệnh nhân không hút thuốc nhưng chi phí y tế cao # - Đối tượng bệnh nhân không hút thuốc nhưng lại mất nhiều chi phí cho dịch vụ y tế cần được quan sát thêm các đặc trưng để dự đoán được chính xác hơn # + [markdown] id="poM8JF5rDkWu" # ## Tham khảo # # [1]. [Stephanie - Variance Inflation Factor - Statisticshowto.com](https://www.statisticshowto.com/variance-inflation-factor/) # # [2]. https://www3.nd.edu/~rwilliam/stats1/x51.pdf # # [3]. 
https://towardsdatascience.com/gentle-introduction-to-chi-square-test-for-independence-7182a7414a95 # + id="BZIN_f9uwR-C"
Main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.7 64-bit (''PythonData'': conda)' # name: python37764bitpythondatacondaf52d3ce673e8414fae95a6e5f2defcbe # --- # + import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import gmaps import os # variable g_key = "<KEY>" CityWeather = pd.read_csv("/Users/sarahgrant/Desktop/BCHomeworks/pythonAPIs-challenge/output/cities.csv") CityWeather.head() # + #setting city location, creating new dataframe only containing city lat/lng coords citylocation = CityWeather[["Lat", "Lng"]] #creating var containg humidity values humidity = CityWeather["Humidity"] #configuring google api key gmaps.configure(api_key=g_key) #Creating layout list to use later layout = { 'width': '1000px', 'height': '600px', 'border': '1px solid black', 'padding': '1px'} #creating gmaps figure fig = gmaps.figure(map_type="TERRAIN", zoom_level=1.5, center=(0,0), layout=layout) #creating heatmap layer to show humidity in dataframe locations Humidity_layer = gmaps.heatmap_layer(citylocation, weights=humidity, dissipating=False, max_intensity=100, point_radius= 1.5) #adding heatmap layer to the figure fig.add_layer(Humidity_layer) fig # + tags=[] #setting filters for ideal weather vacation spots Filtered_data = CityWeather.loc[CityWeather['Humidity']<=50] Filtered_data = Filtered_data.loc[Filtered_data["Max Temp"] >= 60] Filtered_data = Filtered_data.loc[Filtered_data['Max Temp'] <= 70] Filtered_data = Filtered_data.loc[Filtered_data['Wind Speed'] <= 10] print(Filtered_data.count()) #showing filtered data Filtered_data.head() # - # Create Hotel Map # + hotel_df = Filtered_data #adding new empty columns to dataframe to create areas for new data from api to be added hotel_df['Hotel Name'] = "" hotel_df['Hotel Lat'] = "" hotel_df['Hotel Lng'] = "" hotel_df['Hotel Rating'] = "" hotel_df # + tags=[] # Set 
parameters for hotel search params = { "radius": 5000, "types": "lodging", "key": g_key } # Iterate through for index, row in hotel_df.iterrows(): #specifing latitude values var lat = row["Lat"] #specifing longidude values var lng = row["Lng"] #specifing city name value var(only created for print output) city = row["City Name"] #creating new parameter for the city location coords params["Location"] = f'{lat},{lng}' #setting the url for the api search apiurl = "https://maps.googleapis.com/maps/api/place/nearbysearch/json" #api request hotel_api = requests.get(apiurl, params=params) #making api request info in json format hotel_api = hotel_api.json() #exception to let know if hotel information for city list is not found, if found, hotel info added to dataframe(name, exact coords, and rating) try: hotel_df.loc[index, "Hotel Name"] = hotel_api["results"][0]["name"] hotel_df.loc[index, "Hotel Lat"] = hotel_api["results"][0]["geometry"]["location"]['lat'] hotel_df.loc[index, "Hotel Lng"] = hotel_api["results"][0]["geometry"]["location"]['lng'] hotel_df.loc[index, "Hotel Rating"] = hotel_api["results"][0]["rating"] except: print(f"Missing results for {city}, skipping entry") hotel_df # + #add the hotel marks to the heatmap info_box_template = """ <dl> <dt>Name</dt><dd>{Hotel Name}</dd> <dt>City</dt><dd>{City Name}</dd> <dt>Country</dt><dd>{Country}</dd> </dl> """ # Store the DataFrame Row hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()] locations = hotel_df[["Lat", "Lng"]] # + # Add marker layer ontop of heat map markers = gmaps.marker_layer(locations, info_box_content=hotel_info) fig.add_layer(markers) # Display figure fig # -
VacationPy/VacationPy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1. Pytorch basics # - Used as part of INFO8010 Deep Learning (<NAME>, 2018-2019). # - Originally adapted from [Pytorch tutorial for Deep Learning researchers](https://github.com/yunjey/pytorch-tutorial) (<NAME>, 2018). # # --- # + # Imports import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import torch import torchvision import torch.nn as nn import torch.utils.data as data import torchvision.transforms as transforms import torchvision.datasets as dsets # - # # Basic autograd example 1 # Create tensors x = torch.tensor(1.) w = torch.tensor(2., requires_grad=True) b = torch.tensor(3., requires_grad=True) # + # Build a computational graph y = w * x + b # y = 2 * x + 3 # Compute gradients y.backward() # Print out the gradients print(x.grad) # x.grad = 2 print(w.grad) # w.grad = 1 print(b.grad) # b.grad = 1 # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>Define a polynomial model <code>y = w1*x + w2*x^2 + b</code>. # <li>Compute the value of <code>y</code> at <code>x=2</code>, <code>w1=1.5</code>, <code>w2=-1.0</code> and <code>b=3</code>. # <li>Evaluate the derivate of <code>y</code> with respect to <code>w2</code> at these values. 
# </ul> # # </div> # # Basic autograd example 2 # Create tensors x = torch.randn(5, 3) y = torch.randn(5, 2) y # Build a linear layer linear = nn.Linear(3, 2) print('w: ', linear.weight) print('b: ', linear.bias) for p in linear.parameters(): print(p, p.numel()) # Forward propagation pred = linear(x) print(pred) # + # Build Loss and Optimizer criterion = nn.MSELoss() optimizer = torch.optim.SGD(linear.parameters(), lr=0.01) # Compute loss loss = criterion(pred, y) print('loss: ', loss) # - # Backpropagation loss.backward() # Print out the gradients print ('dL/dw: ', linear.weight.grad) print ('dL/db: ', linear.bias.grad) # + # 1-step Optimization (gradient descent) optimizer.step() # You can also do optimization at the low level as shown below. # linear.weight.data.sub_(0.01 * linear.weight.grad.data) # linear.bias.data.sub_(0.01 * linear.bias.grad.data) # Print out the loss after optimization pred = linear(x) loss = criterion(pred, y) print('loss after 1 step optimization: ', loss.item()) # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # Write the code above within a for loop that trains the linear models for 100 steps. Check that your loss is decreasing. 
#
# </div>
# # Load data from numpy
a = np.array([[1,2], [3,4]])
b = torch.from_numpy(a)  # convert numpy array to torch tensor
c = b.numpy()            # convert torch tensor to numpy array
# # Implementing the input pipeline
# Download and construct dataset
train_dataset = dsets.CIFAR10(root='./data/',
                              train=True,
                              transform=transforms.ToTensor(),
                              download=True)
# Select one data pair (read data from disk)
image, label = train_dataset[7]
print(image.size())
print(label)
# NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2 — on newer
# environments use torchvision.transforms.ToPILImage instead.
from scipy.misc import toimage
toimage(image)
# +
# Data Loader (this provides queue and thread in a very simple way)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=100,
                                           shuffle=True,
                                           num_workers=2)
# When iteration starts, queue and thread start to load dataset from files
data_iter = iter(train_loader)
# Mini-batch images and labels.
# Fix: Python 3 iterators have no .next() method — use the builtin next().
images, labels = next(data_iter)
# Actual usage of data loader is as below
for images, labels in train_loader:
    # XXX: your training code will be written here
    pass
# -
# # Input pipeline for custom dataset
#
# You should build custom dataset as below:
# +
class CustomDataset(data.Dataset):
    """Skeleton dataset: fill in file loading, preprocessing, and length."""
    def __init__(self):
        # TODO
        # 1. Initialize file path or list of file names
        pass
    def __getitem__(self, index):
        # TODO
        # 1. Read one data from file (e.g. using numpy.fromfile, PIL.Image.open)
        # 2. Preprocess the data (e.g. torchvision.Transform)
        # 3. Return a data pair (e.g. image and label)
        pass
    def __len__(self):
        # You should change 0 to the total size of your dataset
        return 0

# Then, you can just use prebuilt torch's data loader
custom_dataset = CustomDataset()
train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                           batch_size=100,
                                           shuffle=True,
                                           num_workers=2)
# -
# # Save and load model
# +
# Save and load the entire model
torch.save(linear, 'model.pkl')
model = torch.load('model.pkl')
# Save and load only the model parameters(recommended)
torch.save(linear.state_dict(), 'params.pkl')
linear.load_state_dict(torch.load('params.pkl'))
tutorials/lecture1-basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # You'll probably want to set our data rate higher for this notebook. # follow: http://stackoverflow.com/questions/43288550/iopub-data-rate-exceeded-when-viewing-image-in-jupyter-notebook # - # # Setup # Let's setup our environment. We'll pull in the the usual gis suspects and setup a leaflet map, read our API keys from a json file, and setup our Planet client # %load_ext autoreload # %autoreload 2 from __future__ import print_function # + # See requirements.txt to set up your dev environment. import sys import os import json import scipy import urllib import datetime import urllib3 import rasterio import subprocess import numpy as np import pandas as pd import seaborn as sns from osgeo import gdal from planet import api from planet.api import filters from traitlets import link # import rasterio.tools.mask as rio_mask from shapely.geometry import mapping, shape from IPython.display import display, Image, HTML import matplotlib.pyplot as plt import matplotlib.image as mpimg urllib3.disable_warnings() from ipyleaflet import ( Map, Marker, TileLayer, ImageOverlay, Polyline, Polygon, Rectangle, Circle, CircleMarker, GeoJSON, DrawControl ) # %matplotlib inline # will pick up api_key via environment variable PL_API_KEY # but can be specified using `api_key` named argument my_key = '81c0dedd35fd4a398da77753b90d62ea' # songcoco15 # my_key = '7d8af35b6e944f33bb5e33ada32ab4a0' #hayleysong15 # Set up the Planet API client - method1 # os.environ['PL_API_KEY'] = my_key client = api.ClientV1(api_key=my_key) # - # # Make a slippy map to get GeoJSON # # * The planet API allows you to query using a [geojson](https://en.wikipedia.org/wiki/GeoJSON) which is a special flavor of json. 
# * We are going to create a slippy map using leaflet and apply the Planet 2017 Q1 mosaic as the basemap. This requires our api key. # * We are going to add a special draw handler that shoves a draw region into a object so we get the geojson. # * If you don't want to do this, or need a fixed query try [geojson.io](http://geojson.io/#map=2/20.0/0.0) # * To install and run: # ``` # $ pip install ipyleaflet # $ jupyter nbextension enable --py --sys-prefix ipyleaflet # $ jupyter nbextension enable --py --sys-prefix widgetsnbextension # ``` # * [More information](https://github.com/ellisonbg/ipyleaflet) # + # Basemap Mosaic (v1 API) mosaicsSeries = 'global_quarterly_2017q1_mosaic' # Planet tile server base URL (Planet Explorer Mosaics Tiles) mosaicsTilesURL_base = 'https://tiles0.planet.com/experimental/mosaics/planet-tiles/' + mosaicsSeries + '/gmap/{z}/{x}/{y}.png' # Planet tile server url mosaicsTilesURL = mosaicsTilesURL_base + '?api_key=' + my_key # Map Settings # Define colors colors = {'blue': "#009da5"} # Define initial map center lat/long center = [45.5231, -122.6765] # Define initial map zoom level zoom = 11 # Set Map Tiles URL planetMapTiles = TileLayer(url= mosaicsTilesURL) # Create the map m = Map( center=center, zoom=zoom, # default_tiles = planetMapTiles # Uncomment to use Planet.com basemap ) # Define the draw tool type options polygon = {'shapeOptions': {'color': colors['blue']}} rectangle = {'shapeOptions': {'color': colors['blue']}} # Create the draw controls # @see https://github.com/ellisonbg/ipyleaflet/blob/master/ipyleaflet/leaflet.py#L293 dc = DrawControl( polygon = polygon, rectangle = rectangle ) # Initialize an action counter variable actionCount = 0 AOIs = {} # Register the draw controls handler def handle_draw(self, action, geo_json): # Increment the action counter global actionCount actionCount += 1 # Remove the `style` property from the GeoJSON geo_json['properties'] = {} # Convert geo_json output to a string and prettify (indent & replace 
' with ") geojsonStr = json.dumps(geo_json, indent=2).replace("'", '"') AOIs[actionCount] = json.loads(geojsonStr) # Attach the draw handler to the draw controls `on_draw` event dc.on_draw(handle_draw) m.add_control(dc) m # - # # Querying the Planet API. # * First we'll grab our geojson area of interest (AOI) and use it to construct a query. # * We'll then build a search to search that area looking for PSScene3Band # * We have lots of products: RapidEye, PlanetScope (PS) 3 and 4 band, LandSat, and Sentinel are all possible. # * Once we have our query, we'll do the search. We will then iterate over the results, slurp up the data, and put them in a pandas data frame for easy sorting. # * We'll print the first few so we're sure it works. # + print(AOIs[1]) myAOI = AOIs[1]["geometry"] # build a query using the AOI and # a cloud_cover filter that excludes 'cloud free' scenes old = datetime.datetime(year=2013,month=1,day=1) query = filters.and_filter( filters.geom_filter(myAOI), filters.range_filter('cloud_cover', lt=50), filters.date_range('acquired', gt=old) ) # build a request for only PlanetScope imagery request = filters.build_search_request( query, item_types=['PSScene3Band'] ) # if you don't have an API key configured, this will raise an exception result = client.quick_search(request) scenes = [] planet_map = {} for item in result.items_iter(limit=500): planet_map[item['id']]=item props = item['properties'] props["id"] = item['id'] props["geometry"] = item["geometry"] props["thumbnail"] = item["_links"]["thumbnail"] scenes.append(props) scenes = pd.DataFrame(data=scenes) display(scenes) print(len(scenes)) # - # # Cleanup # * The data we got back is good, but we need some more information # * We got back big scenes, but we only care about our area of interest. The scene may not cover the whole area of interest. 
# * We can use the [Shapely](http://toblerity.org/shapely/manual.html) library to quickly figure out how much each scene overlaps our AOI # * We will convert our AOI and the geometry of each scene to calculate overlap using a shapely call. # * The returned acquisition, publish, and update times are strings, we'll convert them to datatime objects so we wan search. # now let's clean up the datetime stuff # make a shapely shape from our aoi portland = shape(myAOI) footprints = [] overlaps = [] # go through the geometry from our api call, convert to a shape and calculate overlap area. # also save the shape for safe keeping for footprint in scenes["geometry"].tolist(): s = shape(footprint) footprints.append(s) overlap = 100.0*(portland.intersection(s).area / portland.area) overlaps.append(overlap) # take our lists and add them back to our dataframe scenes['overlap'] = pd.Series(overlaps, index=scenes.index) scenes['footprint'] = pd.Series(footprints, index=scenes.index) # now make sure pandas knows about our date/time columns. scenes["acquired"] = pd.to_datetime(scenes["acquired"]) scenes["published"] = pd.to_datetime(scenes["published"]) scenes["updated"] = pd.to_datetime(scenes["updated"]) scenes.head() # # Filtering our search using pandas. # * Using our dataframe we will filter the scenes to just what we want. # * First we want scenes with less than 10% clouds. # * Second we want standard quality images. Test images may not be high quality. # * Third well only look for scenes since January. # * Finally we will create a new data frame with our queries and print the results. 
# + # Now let's get it down to just good, recent, clear scenes clear = scenes['cloud_cover']<0.1 good = scenes['quality_category']=="standard" recent = scenes["acquired"] > datetime.date(year=2017,month=1,day=1) partial_coverage = scenes["overlap"] > 30 good_scenes = scenes[(good&clear&recent&partial_coverage)] display(good_scenes) print(len(good_scenes)) # Now let's get it down to just good, recent, clear scenes clear = scenes['cloud_cover']<0.5 good = scenes['quality_category']=="standard" all_time = scenes["acquired"] > datetime.date(year=2014,month=1,day=1) full_coverage = scenes["overlap"] >= 60 all_scenes = scenes[(good&clear&all_time&full_coverage)] display(all_scenes) print(len(all_scenes)) # - # # Visualizing scene foot prints overlap with our AOI # * We know these scenes intersect with our AOI, but we aren't quite sure about the geometry. # * We are going to plot our scene footprints and original AOI on our slippy map. # * To do this we create GeoJson objects with properties. # first create a list of colors colors = ["#ff0000","#00ff00","#0000ff","#ffff00","#ff00ff","#00ffff"] # grab our scenes from the geometry/footprint geojson footprints = good_scenes["geometry"].tolist() # for each footprint/color combo for footprint,color in zip(footprints,colors): # create the leaflet object feat = {'geometry':footprint,"properties":{ 'style':{'color': color,'fillColor': color,'fillOpacity': 0.2,'weight': 1}}, 'type':u"Feature"} # convert to geojson gjson = GeoJSON(data=feat) # add it our map m.add_layer(gjson) # now we will draw our original AOI on top feat = {'geometry':myAOI,"properties":{ 'style':{'color': "#FFFFFF",'fillColor': "#FFFFFF",'fillOpacity': 0.5,'weight': 1}}, 'type':u"Feature"} gjson = GeoJSON(data=feat) m.add_layer(gjson) m # # Let's see what we got. # * The API returns a handy thumbnail link. # * Let's tell jupyter to show it. # * You may need to login to planet explorer to have auth. 
# * If this is the case just print the urls and paste them into your browser. imgs = [] # loop through our thumbnails and add display them for img in good_scenes["thumbnail"].tolist(): imgs.append(Image(url=img)) print img display(*imgs) # # Product Activation and Downloading # * There are two things we need to know, the satellite type (asset) and image type (product). # * Full resolution uncompressed satellite images are *big* and there are lots of ways to view them. # * For this reason Planet generally keeps images in their native format and only processes them on customer requests. There is some caching of processed scenes, but this is the exception not the rule. # * All images must be activated prior to downloading and this can take some time based on demand. # * Additionally we need to determine what sort of product we want to download. Generally speaking there are three kinds of scenes: # * Analytic - multi-band full resolution images that have not been processed. These are like raw files for DSLR camers. # * Visual - these are color corrected rectified tifs. If you are just starting out this is your best call. # * UDM - Usable data mask. This mask can be used to find bad pixels and columns and to mask out areas with clouds. # # + def get_products(client, scene_id, asset_type='PSScene3Band'): """ Ask the client to return the available products for a given scene and asset type. Returns a list of product strings """ out = client.get_assets_by_id(asset_type,scene_id) temp = out.get() return temp.keys() def activate_product(client, scene_id, asset_type="PSScene3Band",product="analytic"): """ Activate a product given a scene, an asset type, and a product. 
On success return the return value of the API call and an activation object """ temp = client.get_assets_by_id(asset_type,scene_id) products = temp.get() if( product in products.keys() ): return client.activate(products[product]),products[product] else: return None def download_and_save(client,product): """ Given a client and a product activation object download the asset. This will save the tiff file in the local directory and return its file name. """ out = client.download(product) fp = out.get_body() fp.write() return fp.name def scenes_are_active(scene_list): """ Check if all of the resources in a given list of scene activation objects is read for downloading. """ retVal = True for scene in scene_list: if scene["status"] != "active": print "{} is not ready.".format(scene) return False return True # - # # Scenes ACTIVATE! # * Given our good scenes list we will convert the data frame "id" column into a list and activate every item in that list. # * For this example we are going to default to using a 3Band visual product but I have included some four band methods to help you out. # * Activation usually takes about 5-15 minutes so get some coffee. to_get = good_scenes["id"].tolist() activated = [] # for each scene to get for scene in to_get: # get the product product_types = get_products(client,scene) for p in product_types: # if there is a visual product if p == "visual": # p == "basic_analytic_dn" print "Activating {0} for scene {1}".format(p,scene) # activate the product _,product = activate_product(client,scene,product=p) activated.append(product) # # Download Scenes # * In this section we will see if our scenes have been activated. # * If they are activated the client object will have its status flag set to active. # * Once that is done we will then save the scenes to the local directory. 
# * A smart engineer would set a path variable to store these files and check if the asset has already been downloaded prior to downloading
# +
tiff_files = []
asset_type = "_3B_Visual"
# check if our scenes have been activated
if True: #scenes_are_active(activated):
    for to_download,name in zip(activated,to_get):
        # create the product name
        name = name + asset_type + ".tif"
        # if the product exists locally
        if os.path.isfile(name):
            # do nothing — already downloaded
            print("We have scene {0} already, skipping...".format(name))
            tiff_files.append(name)
        elif to_download["status"] == "active":
            # otherwise download the product
            print("Downloading {0}....".format(name))
            fname = download_and_save(client,to_download)
            tiff_files.append(fname)
            print("Download done.")
        else:
            print("Could not download, still activating")
else:
    print("Scenes aren't ready yet")
print(tiff_files)
# -
# # Loading Images
# * There are a variety of ways to load tif data including Rasterio, GDAL, OpenCV, SKImage.
# * Today we are going to use rasterio and load each channel into a numpy array.
# * Since the visual 3Band products are rotated we can also open a mask layer for processing.
# + def load_image4(filename): """Return a 4D (r, g, b, nir) numpy array with the data in the specified TIFF filename.""" path = os.path.abspath(os.path.join('./', filename)) if os.path.exists(path): with rasterio.open(path) as src: b, g, r, nir = src.read() return np.dstack([r, g, b, nir]) def load_image3(filename): """Return a 3D (r, g, b) numpy array with the data in the specified TIFF filename.""" path = os.path.abspath(os.path.join('./', filename)) if os.path.exists(path): with rasterio.open(path) as src: b,g,r,mask = src.read() return np.dstack([b, g, r]) def get_mask(filename): """Return a 1D mask numpy array with the data in the specified TIFF filename.""" path = os.path.abspath(os.path.join('./', filename)) if os.path.exists(path): with rasterio.open(path) as src: b,g,r,mask = src.read() return np.dstack([mask]) def rgbir_to_rgb(img_4band): """Convert an RGBIR image to RGB""" return img_4band[:,:,:3] # - # # Read Images and Use Matplotlib to show them. img_files = [] masks = [] # load the images and masks for fname in tiff_files[0:2]: img_files.append(load_image3(fname)) masks.append(get_mask(fname)) i = 0 # use matplotlib to display the map for img,name in zip(img_files,tiff_files): plt.figure(i,figsize=(18,36)) plt.imshow(img) plt.title(name) i+=1 # # Quick Histogram # * Next up we'll plot the histogram of the image. # * A histogram is just a plot of the number of pixels with a specific intensity for a given color. 
# + import numpy.ma as ma def plot_hist4(img_4band,title=""): # Plot a four band histogram r, g, b, nir = img_4band[:, :, 0], img_4band[:, :, 1], img_4band[:, :, 2], img_4band[:, :, 3] for slice_, name, color in ((r,'r', 'red'),(g,'g', 'green'),(b,'b', 'blue'), (nir, 'nir', 'magenta')): plt.hist(slice_.ravel(), bins=100, range=[0,img_4band.max()], label=name, color=color, histtype='step') plt.title(title) plt.legend() def plot_hist3(img_3band,mask,title=""): # plot a three band histogramwaiter = [] r, g, b = img_3band[:, :, 0], img_3band[:, :, 1], img_3band[:, :, 2] r = ma.masked_array(r,mask=mask) g = ma.masked_array(g,mask=mask) b = ma.masked_array(b,mask=mask) for slice_, name, color in ((r,'r', 'red'),(g,'g', 'green'),(b,'b', 'blue')): plt.hist(slice_.ravel(), bins=25, range=[0,img_3band.max()], label=name, color=color, histtype='step') plt.title(title) plt.legend() # - i = 0 for img,name,mask in zip(img_files,tiff_files,masks): plt.figure(i,figsize=(9,18)) plot_hist3(img,mask=mask,title=name) i+=1 # # Decomposing Channels # * We can also decompose the channels of the image. # * Sometimes it is useful to work just in a single channel. # * Other times channels can be used to do useful things, like filter out clouds. 
# # + def plot_bands4(img,title="",i=0): fig = plt.figure(i) fig.set_size_inches(24, 3) r, g, b, nir = img[:, :, 0], img[:, :, 1], img[:, :, 2], img[:, :, 3] fig.suptitle(title) for i, (x, c) in enumerate(((r, 'r'), (g, 'g'), (b, 'b'), (nir, 'near-ir'))): a = fig.add_subplot(1, 4, i+1) a.set_title(c) plt.imshow(x) def plot_bands3(img,title="",i=0): fig = plt.figure(i) fig.set_size_inches(24, 5) r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2] fig.suptitle(title) for i, (x, c) in enumerate(((r, 'r'), (g, 'g'), (b, 'b'))): a = fig.add_subplot(1, 4, i+1) a.set_title(c) plt.imshow(x) # - plot_bands3(img_files[0],title=tiff_files[0],i=0) # # But all of these scenes are big, and we want downtown Portland # * We can clip all of the scenes to the AOI we selected at the start of the notebook # * First we'll dump the geojson to a file. # * Since geospatial data is "big" we often work with files and get stuff out of memory ASAP. # * For each of our scenes we'll create a 'clip' file. # * We will use a tool called GDAL to clip the scene to our AOI # * GDAL stands for [Geospatial Data Abstraction Library](http://www.gdal.org/) # * GDAL is a C++ library that is often run from the command line, but it does have SWIG bindings. # + aoi_file ="portland.geojson" # write our input AOI to a geojson file. with open(aoi_file,"w") as f: f.write(json.dumps(myAOI)) # create our full input and output names clip_names = [os.path.abspath(tiff[:-4]+"_clip"+".tif") for tiff in tiff_files] full_tif_files = [os.path.abspath("./"+tiff) for tiff in tiff_files] for in_file,out_file in zip(tiff_files,clip_names): commands = ["gdalwarp", # t "-t_srs","EPSG:3857", "-cutline",aoi_file, "-crop_to_cutline", "-tap", "-tr", "3", "3" "-overwrite"] subprocess.call(["rm",out_file]) commands.append(in_file) commands.append(out_file) print " ".join(commands) subprocess.call(commands) # - # # Awesome, Let's take a look at what we got. 
# + clip_img_files = [load_image3(fname) for fname in clip_names] i = 0 for img,name in zip(clip_img_files,clip_names): plt.figure(i,figsize=(6,12)) plt.imshow(img) plt.title(name) i+=1 # - # # Hrm... that's not right. # * You'll notice that a lot of these scenes don't fill our AOI. # * A lot of theses images were taken roughly at the same time. # * We should try to merge these scenes together to make one big scene. # * This process is called mosaicking, and GDAL can help. # * We will call GDAL from the command line using subprocess to do this for us. # subprocess.call(["rm","merged.tif"]) commands = ["gdalwarp", # t "-t_srs","EPSG:3857", "-cutline",aoi_file, "-crop_to_cutline", "-tap", "-tr", "3", "3" "-overwrite"] output_mosaic = "merged.tif" for tiff in tiff_files[0:2]: commands.append(tiff) commands.append(output_mosaic) print " ".join(commands) subprocess.call(commands) # # Let's take a look.... looks much better merged = load_image3("./merged.tif") plt.figure(i,figsize=(6,12)) plt.imshow(merged) plt.title("merged") # # Now let's pull it all together to do something interesting. # * First we'll download and activate all of our targe scenes. # * Then we'll clip them using GDAL to the small AOI we selected above. # * Finally we'll export them and use that data to make a mosaic. # * We'll use [ImageMagick](https://www.imagemagick.org/script/index.php) to convert our tifs to gifs, and our multiple gifs to an animated gif. 
# + # Activate to_get = all_scenes["id"].tolist() activated = [] for scene in to_get: product_types = get_products(client,scene) for p in product_types: if p == "visual": # p == "basic_analytic_dn" print "Activating {0} for scene {1}".format(p,scene) _,product = activate_product(client,scene,product=p) activated.append(product) # Download tiff_files = [] asset_type = "_3B_Visual" if True: #scenes_are_active(activated): for to_download,name in zip(activated,to_get): name = name + asset_type + ".tif" if( os.path.isfile(name) ): print "We have scene {0} already, skipping...".format(name) tiff_files.append(name) elif to_download["status"] == "active": print "Downloading {0}....".format(name) fname = download_and_save(client,to_download) tiff_files.append(fname) print "Download done." else: print "Could not download, still activating" else: print "Scenes aren't ready yet" # - # # Finally let's process the scenes we just downloaded and make a gif. # + tiff_files = sorted(tiff_files) # Create a list of tif file names. for tiff in tiff_files: clip_names.append(os.path.abspath(tiff[:-4]+"_clip"+".tif")) full_tif_files = [] for tiff in tiff_files: full_tif_files.append(os.path.abspath("./"+tiff)) # Run GDAL to crop our file down. for in_file,out_file in zip(tiff_files,clip_names): commands = ["gdalwarp", # t "-t_srs","EPSG:3857", "-cutline",aoi_file, "-crop_to_cutline", "-tap", "-tr", "3", "3" "-overwrite"] subprocess.call(["rm",out_file]) commands.append(in_file) commands.append(out_file) print " ".join(commands) subprocess.call(commands) temp_names = [] i = 0 # use image magic convert to for in_file in clip_names: temp_name = "img{0}.gif".format(i) command = ["convert", in_file, "-sample", "30x30%",temp_name] temp_names.append(temp_name) i += 1 subprocess.call(command) magic = "portland.gif" last_call = ["convert","-delay", "40","-loop","0", "img*.gif",magic] subprocess.call(last_call) print "done!" # - # <img src="./XXX.gif">
TheBasics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/kdmoreira/data-science-alura/blob/master/starting-with-data-science/1_first_steps_5_refining_visualization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="9VC7UPo4KJsu" # # Refining Visualization # + colab={"base_uri": "https://localhost:8080/", "height": 401} id="Oe7F9YkKJ58z" outputId="c52bf4b7-cd67-4be7-93a6-5aea8014ec34" import pandas as pd import seaborn as sns import matplotlib.pyplot as plt tmdb = pd.read_csv("tmdb_5000_movies.csv") total_por_lingua = tmdb["original_language"].value_counts() total_geral = total_por_lingua.sum() total_de_ingles = total_por_lingua.loc["en"] total_do_resto = total_geral - total_de_ingles dados = {"lingua":["ingles", "outros"], "total":[total_de_ingles, total_do_resto]} dados = pd.DataFrame(dados) total_por_lingua_de_outros_filmes = tmdb.query( "original_language != 'en'").original_language.value_counts() filmes_sem_lingua_original_em_ingles = tmdb.query("original_language != 'en'") sns.catplot(x = "original_language", kind = "count", data = filmes_sem_lingua_original_em_ingles, aspect = 2, order = total_por_lingua_de_outros_filmes.index, palette = "GnBu_d") # + colab={"base_uri": "https://localhost:8080/", "height": 615} id="colBImDMO1Ss" outputId="b7939ed0-cd12-45e3-cbd2-b5c015427cf5" sns.set(style="ticks") df = sns.load_dataset("anscombe") sns.lmplot(x="x", y="y", col="dataset", hue="dataset", data=df, col_wrap=2, ci=None, palette="muted", height=4, scatter_kws={"s": 50, "alpha": 1})
starting-with-data-science/1_first_steps_5_refining_visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ee225d # language: python # name: ee225d # --- # + import os import sys sys.path.append("../") # - # %load_ext autoreload # %autoreload 2 # %env DATASET_PATH=/shared/g-luo/vctk # + from hyper_params import * from train import load_data from data_utils import VCTK, Collate from models.tacotron2 import Tacotron2, Tacotron2Loss from models.wav2vec_asr import Wav2VecASR, Wav2VecASRLoss from models.wav2vec_id import Wav2VecID, Wav2VecIDLoss from multitask import AccentedMultiTaskNetwork, Task import pytorch_lightning as pl from pytorch_lightning import loggers as pl_loggers import torch from torch.utils.data import DataLoader, random_split from metrics import SoftmaxAccuracy # - tp = TrainingParams(val_size=0.1, batch_size=4) dp = DataParams(filter_length=800, sample_rate=16000, win_length=800, hop_length=200) mp = MultiTaskParams(hidden_dim=[13], in_dim=1024) tacotron = Tacotron2(TacotronParams()) tacotron_loss = Tacotron2Loss() tts_task = Task(model=tacotron, loss=tacotron_loss, learning_rate=1e-3, weight_decay=1e-6, name='TTS', loss_weight=0.5, metrics=[]) asr = Wav2VecASR(Wav2VecASRParams()) asr_loss = Wav2VecASRLoss() asr_task = Task(model=asr, loss=asr_loss, learning_rate=1e-5, weight_decay=0, name='ASR', loss_weight=1, metrics=[]) accent_id = Wav2VecID(Wav2VecIDParams()) accent_id_loss = Wav2VecIDLoss() accent_id_task = Task(model=accent_id, loss=accent_id_loss, learning_rate=1e-5, weight_decay=1e-6, name='ID', loss_weight=1, metrics=[SoftmaxAccuracy()]) AccentedMultiTaskNetwork(mp, [accent_id_task]) model = AccentedMultiTaskNetwork.load_from_checkpoint("../runs/freeze_feat_extractor.ckpt", params=mp, tasks=[accent_id_task]) train_loader, val_loader = load_data(tp, dp) # + logger = pl_loggers.CSVLogger("./eval/freeze_feat_extract") trainer = pl.Trainer(gradient_clip_val=tp.grad_clip_thresh, 
max_epochs=30, gpus=1, logger=logger, accumulate_grad_batches=16, log_every_n_steps=5, accelerator="gpu", devices=6) # - # %env CUDA_VISIBLE_DEVICES=6 trainer.validate(model=model, dataloaders=val_loader) # + # share
notebooks/test-id-only.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Day-5
#
# # Assignment-4
#
# # Question -1
#
# # Write a program to identify sub list [1,1,5] is there in the given list in the same order, if yes print "it's a Match" if no then print "it's Gone" in function.

# +
def is_sublist(sub, lst):
    """Return True if `sub` occurs in `lst` as a contiguous run in the same order.

    An empty `sub` is trivially contained.
    """
    m = len(sub)
    # BUG FIX: the original used set(y).issubset(set(x)), which ignores both
    # order and duplicates, so [1, 1, 5] wrongly "matched". We instead slide a
    # window of len(sub) across lst and compare slices.
    return any(lst[i:i + m] == sub for i in range(len(lst) - m + 1))

x = [1,5,6,5,1,2,3,6]
y = [1,1,5]
print("x:" + str(x))
print("y:" + str(y))
if is_sublist(y, x):
    print("Its a Match")
else:
    print("Its Gone")
# -

# # Day-5
#
# Question 2
#
# Make a function for Prime Numbers and use filter to filter
# out all the prime numbers from 1-2500

def prime(num):
    """Return True when `num` is a prime number.

    BUG FIX: the original returned True for 1 and False for 2 — both wrong
    (1 is not prime, 2 is). Trial division up to sqrt(num) is also far
    cheaper than testing every divisor below num.
    """
    if num < 2:
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True

prime(3)

prime(1)

prime(4)

# BUG FIX: range(1, 2500) stopped at 2499; the task asks for 1-2500 inclusive.
lst = list(range(1, 2501))
print(lst)

list_prime_num = filter(prime, lst)
print(list(list_prime_num))

# # Day 5
#
# Question 3
#
# Make a Lambda Function for Capitalizing the Whole sentence passed using arguments
# And Map all the sentences in the List, with the Lambda Function
#
# ["hey this is sai", i am in mumbai",....]
#
# o/p-
# ["<NAME>", I Am In Mumbai",....]

# BUG FIX: a missing comma made Python concatenate the two adjacent string
# literals into a single element; the expected output has two sentences.
lst = ["hey this is sai , ", "i am in mumbai,...."]
lst

# str.title() capitalizes the first letter of every word
lst_new = map(lambda x: x.title(), lst)
list(lst_new)
Day-5 Assignment (1).ipynb
# coding: utf-8 # # 📃 Solution for Exercise M5.01 # # In the previous notebook, we showed how a tree with a depth of 1 level was # working. The aim of this exercise is to repeat part of the previous # experiment for a depth with 2 levels to show how the process of partitioning # is repeated over time. # # Before to start, we will: # # * load the dataset; # * split the dataset into training and testing dataset; # * define the function to show the classification decision function. # + import pandas as pd penguins = pd.read_csv("../datasets/penguins_classification.csv") culmen_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"] target_column = "Species" # - # <div class="admonition note alert alert-info"> # <p class="first admonition-title" style="font-weight: bold;">Note</p> # <p class="last">If you want a deeper overview regarding this dataset, you can refer to the # Appendix - Datasets description section at the end of this MOOC.</p> # </div> # + from sklearn.model_selection import train_test_split data, target = penguins[culmen_columns], penguins[target_column] data_train, data_test, target_train, target_test = train_test_split( data, target, random_state=0 ) range_features = { feature_name: (data[feature_name].min() - 1, data[feature_name].max() + 1) for feature_name in data.columns } # + import numpy as np import matplotlib.pyplot as plt def plot_decision_function(fitted_classifier, range_features, ax=None): """Plot the boundary of the decision function of a classifier.""" from sklearn.preprocessing import LabelEncoder feature_names = list(range_features.keys()) # create a grid to evaluate all possible samples plot_step = 0.02 xx, yy = np.meshgrid( np.arange(*range_features[feature_names[0]], plot_step), np.arange(*range_features[feature_names[1]], plot_step), ) # compute the associated prediction Z = fitted_classifier.predict(np.c_[xx.ravel(), yy.ravel()]) Z = LabelEncoder().fit_transform(Z) Z = Z.reshape(xx.shape) # make the plot of the boundary and the data 
samples if ax is None: _, ax = plt.subplots() ax.contourf(xx, yy, Z, alpha=0.4, cmap="RdBu") return ax # - # Create a decision tree classifier with a maximum depth of 2 levels and fit # the training data. Once this classifier trained, plot the data and the # decision boundary to see the benefit of increasing the depth. # + from sklearn.tree import DecisionTreeClassifier tree = DecisionTreeClassifier(max_depth=2) tree.fit(data_train, target_train) # + import seaborn as sns palette = ["tab:red", "tab:blue", "black"] ax = sns.scatterplot(data=penguins, x=culmen_columns[0], y=culmen_columns[1], hue=target_column, palette=palette) plot_decision_function(tree, range_features, ax=ax) plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left') _ = plt.title("Decision boundary using a decision tree") # - # Did we make use of the feature "Culmen Length"? # Plot the tree using the function `sklearn.tree.plot_tree` to find out! # + from sklearn.tree import plot_tree _, ax = plt.subplots(figsize=(16, 12)) _ = plot_tree(tree, feature_names=culmen_columns, class_names=tree.classes_, impurity=False, ax=ax) # - # We see that the second tree level used the "Culmen Length" to make # two new decisions. Qualitatively, we saw that such a simple tree was enough # to classify the penguins' species. # # Compute the accuracy of the decision tree on the testing data. test_score = tree.fit(data_train, target_train).score(data_test, target_test) print(f"Accuracy of the DecisionTreeClassifier: {test_score:.2f}") # At this stage, we have the intuition that a decision tree is built by # successively partitioning the feature space, considering one feature at a # time. # # We predict an Adelie penguin if the feature value is below the threshold, # which is not surprising since this partition was almost pure. If the feature # value is above the threshold, we predict the Gentoo penguin, the class that # is most probable.
notebooks/trees_sol_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="zyJ25uz0kSaw" # # Natural Language Processing # # ## <NAME> # # ### NB and LSTM based classifiers # + [markdown] id="Ao1nhg9RknmF" # The central idea of this tutorial is to use Naive Bayes classifier and LSTM based classifier and compare the models by accuracy on IMDB dataset. # # # + id="ElRkQElWUMjG" import pandas as pd import numpy as np import nltk, keras, string, re, html, math from nltk.tokenize import word_tokenize, sent_tokenize from nltk.corpus import stopwords, wordnet from nltk.stem import WordNetLemmatizer from sklearn.preprocessing import LabelEncoder from sklearn.feature_extraction.text import CountVectorizer from collections import Counter, defaultdict from sklearn.model_selection import train_test_split from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from sklearn.metrics import accuracy_score, classification_report # + id="fhHRim2AUm4z" #Load the IMDB dataset. We load it using pandas as dataframe data = pd.read_csv('/Users/ruthu/Desktop/IMDB Dataset.csv') print("Data shape - ", data.shape, "\n") #prints the number of rows and columns for col in data.columns: print("The number of null values - ", col, data[col].isnull().sum()) #prints the number of null values in each column data["review"]= data["review"].str.lower() data["sentiment"]= data["sentiment"].str.lower() #converts every value in the column to lowercase data.head() # + [markdown] id="lK_Hn2f6VMP7" # # Preprocessing # Pre-precessing that needs to be done on lower cased corpus - # # 1. Removal of html tags # 2. Removal of URLS # 3. Removal of non alphanumeric character # 4. Removal of Stopwords # 5. Performing stemming and lemmatization # # We use regex from re. 
# + def cleaning(data): clean = re.sub('<.*?>', ' ', str(data)) #removes HTML tags clean = re.sub('\'.*?\s',' ', clean) #removes all hanging letters afer apostrophes (s in it's) clean = re.sub(r'http\S+',' ', clean) #removes URLs clean = re.sub('\W+',' ', clean) #replacing the non alphanumeric characters return html.unescape(clean) data['cleaned'] = data['review'].apply(cleaning) def tokenizing(data): review = data['cleaned'] #tokenizing is done tokens = nltk.word_tokenize(review) return tokens data['tokens'] = data.apply(tokenizing, axis=1) stop_words = set(stopwords.words('english')) def remove_stops(data): my_list = data['tokens'] meaningful_words = [w for w in my_list if not w in stop_words] #stopwords are removed from the tokenized data return (meaningful_words) data['tokens'] = data.apply(remove_stops, axis=1) lemmatizer = WordNetLemmatizer() def lemmatizing(data): my_list = data['tokens'] lemmatized_list = [lemmatizer.lemmatize(word) for word in my_list] #lemmatizing is performed. It's more efficient and better than stemming. return (lemmatized_list) data['tokens'] = data.apply(lemmatizing, axis=1) def rejoin_words(data): my_list = data['tokens'] joined_words = ( " ".join(my_list)) #rejoins all stemmed words return joined_words data['cleaned'] = data.apply(rejoin_words, axis=1) data.head() # + id="DyaSkfcvYGXk" # Prints statistics of Data like avg length of sentence , proportion of data w.r.t class labels def sents(data): clean = re.sub('<.*?>', ' ', str(data)) #removes HTML tags clean = re.sub('\'.*?\s',' ', clean) #removes all hanging letters afer apostrophes (s in it's) clean = re.sub(r'http\S+',' ', clean) #removes URLs clean = re.sub('[^a-zA-Z0-9\.]+',' ', clean) #removes all non-alphanumeric characters except periods. 
tokens = nltk.sent_tokenize(clean) #sentence tokenizing is done return tokens sents = data['review'].apply(sents) length_s = 0 for i in range(data.shape[0]): length_s+= len(sents[i]) print("The number of sentences is - ", length_s) #prints the number of sentences length_t = 0 for i in range(data.shape[0]): length_t+= len(data['tokens'][i]) print("\nThe number of tokens is - ", length_t) #prints the number of tokens average_tokens = round(length_t/length_s) print("\nThe average number of tokens per sentence is - ", average_tokens) #prints the average number of tokens per sentence positive = negative = 0 for i in range(data.shape[0]): if (data['sentiment'][i]=='positive'): positive += 1 #finds the proprtion of positive and negative sentiments else: negative += 1 print("\nThe number of positive examples are - ", positive) print("\nThe number of negative examples are - ", negative) print("\nThe proportion of positive sentiments to negative ones are - ", positive/negative) # + [markdown] id="_FkJ-e2pUwun" # # Naive Bayes classifier # + id="eVq-mN28U_J4" # gets reviews column from df reviews = data['cleaned'].values # gets labels column from df labels = data['sentiment'].values # + id="Ljo5NquhXTXr" # Uses label encoder to encode labels. Convert to 0/1 encoder = LabelEncoder() encoded_labels = encoder.fit_transform(labels) data['encoded']= encoded_labels print(data['encoded'].head()) # prints(enc.classes_) encoder_mapping = dict(zip(encoder.classes_, encoder.transform(encoder.classes_))) print("\nThe encoded classes are - ", encoder_mapping) labels = data['encoded'] # + id="wzG-C_EVWWET" # Splits the data into train and test (80% - 20%). # Uses stratify in train_test_split so that both train and test have similar ratio of positive and negative samples. 
train_sentences, test_sentences, train_labels, test_labels = train_test_split(reviews, labels, test_size=0.2, random_state=42, stratify=labels) # train_sentences, test_sentences, train_labels, test_labels print("The training sentences are -",train_sentences, sep='\n\n') print("\nThe test sentences are -",test_sentences, sep='\n\n') print("\nThe training labels are -",train_labels, sep='\n\n') print("\nThe test labels are -",test_labels, sep='\n\n') # + [markdown] id="Bz1YdsSkiWCX" # There are two approaches possible for building vocabulary for the Naive Bayes classifier. # 1. We take the whole data (train + test) to build the vocab. In this way while testing there is no word which will be out of vocabulary. # 2. We take the train data to build vocab. In this case, some words from the test set may not be in vocab and hence one needs to perform smoothing so that one of the probability terms are not zero. # # We use the 2nd approach. # # Also, building vocab by taking all words in the train set is memory intensive, hence we build the vocab by choosing the top 2000 - 3000 frequent words in the training corpus. # # > $ P(x_i | w_j) = \frac{ N_{x_i,w_j}\, +\, \alpha }{ N_{w_j}\, +\, \alpha*d} $ # # # $N_{x_i,w_j}$ : Number of times feature $x_i$ appears in samples of class $w_j$ # # $N_{w_j}$ : Total count of features in class $w_j$ # # $\alpha$ : Parameter for additive smoothing. Here consider $\alpha$ = 1 # # $d$ : Dimentionality of the feature vector $x = [x_1,x_2,...,x_d]$. In our case its the vocab size. # + id="1cllNfGmUr77" # Uses Count vectorizer to get frequency of the words vectorizer = CountVectorizer(max_features = 3000) sents_encoded = vectorizer.fit_transform(train_sentences) #encodes all training sentences counts = sents_encoded.sum(axis=0).A1 vocab = list(vectorizer.get_feature_names()) # + id="iE7pxWIYW1z0" # Builds the model. 
# Uses laplace smoothing for words in test set not present in vocab of train set class MultinomialNaiveBayes: def __init__(self, classes, tokenizer): #self.tokenizer = tokenizer self.classes = classes def group_by_class(self, X, y): data = dict() for c in self.classes: #grouping by positive and negative sentiments data[c] = X[np.where(y == c)] return data def fit(self, X, y): self.n_class_items = {} self.log_class_priors = {} self.word_counts = {} self.vocab = vocab #using the pre-made vocabulary of 3000 most frequent training words n = len(X) grouped_data = self.group_by_class(X, y) for c, data in grouped_data.items(): self.n_class_items[c] = len(data) self.log_class_priors[c] = math.log(self.n_class_items[c] / n) #taking log for easier calculation self.word_counts[c] = defaultdict(lambda: 0) for text in data: counts = Counter(nltk.word_tokenize(text)) for word, count in counts.items(): self.word_counts[c][word] += count return self def laplace_smoothing(self, word, text_class): #smoothing num = self.word_counts[text_class][word] + 1 denom = self.n_class_items[text_class] + len(self.vocab) return math.log(num / denom) def predict(self, X): result = [] for text in X: class_scores = {c: self.log_class_priors[c] for c in self.classes} words = set(nltk.word_tokenize(text)) for word in words: if word not in self.vocab: continue for c in self.classes: log_w_given_c = self.laplace_smoothing(word, c) class_scores[c] += log_w_given_c result.append(max(class_scores, key=class_scores.get)) return result # + id="AtQSl1zvW4DD" MNB = MultinomialNaiveBayes( classes=np.unique(labels), tokenizer=Tokenizer() ).fit(train_sentences, train_labels) # Tests the model on test set and reports the Accuracy predicted_labels = MNB.predict(test_sentences) print("The accuracy of the MNB classifier is ", accuracy_score(test_labels, predicted_labels)) print("\nThe classification report with metrics - \n", classification_report(test_labels, predicted_labels)) # + [markdown] id="WlNql0acU7sa" # # 
LSTM based Classifier # # We use the above train and test splits. # + id="SkqnvbUOXoN0" # Hyperparameters of the model oov_tok = '<OOK>' embedding_dim = 100 max_length = 150 padding_type='post' trunc_type='post' # tokenizes sentences tokenizer = Tokenizer() tokenizer.fit_on_texts(train_sentences) # vocabulary size word_index = tokenizer.word_index vocab_size = len(tokenizer.word_index) + 1 # converts train dataset to sequence and pads sequences train_sequences = tokenizer.texts_to_sequences(train_sentences) train_padded = pad_sequences(train_sequences, padding='post', maxlen=max_length) # converts Test dataset to sequence and pads sequences test_sequences = tokenizer.texts_to_sequences(test_sentences) test_padded = pad_sequences(test_sequences, padding='post', maxlen=max_length) # + id="Mtw3w895ZP39" # model initialization model = keras.Sequential([ keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length), keras.layers.Bidirectional(keras.layers.LSTM(64)), keras.layers.Dense(24, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ]) # compiles model model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # model summary model.summary() # + id="skmaDJMnZTzc" #training the model num_epochs = 5 history = model.fit(train_padded, train_labels, epochs=num_epochs, verbose=1, validation_split=0.1) # + id="TjEhWEr5Zq7M" # Gets probabilities prediction = model.predict(test_padded) print("The probabilities are - ", prediction, sep='\n') # Gets labels based on probability 1 if p>= 0.5 else 0 for each in prediction: if each[0] >=0.5: each[0] = 1 else: each[0] = 0 prediction = prediction.astype('int32') print("\nThe labels are - ", prediction, sep='\n') # Calculates accuracy on Test data print("\nThe accuracy of the model is ", accuracy_score(test_labels, prediction)) print("\nThe accuracy and other metrics are \n", classification_report(test_labels, prediction, labels=[0, 1]),sep='\n') # + [markdown] id="TIICV-ySOYL0" # ## 
To get predictions for random examples # + id="m2RmfNL3OYL0" # reviews on which we need to predict sentence = ["The movie was very touching and heart whelming", "I have never seen a terrible movie like this", "the movie plot is terrible but it had good acting"] # converts to a sequence test_sequences = tokenizer.texts_to_sequences(sentence) # pads the sequence test_padded = pad_sequences(test_sequences, padding='post', maxlen=max_length) # Gets probabilities prediction = model.predict(test_padded) print("The probabilities are - ", prediction, sep='\n') # Gets labels based on probability 1 if p>= 0.5 else 0 for each in prediction: if each[0] >=0.5: each[0] = 1 else: each[0] = 0 prediction = prediction.astype('int32') print("\nThe labels are - ", prediction, sep='\n') # - # ### We see that the MNB classifier has an accuracy of around 85%, while the LSTM classifier has an accuracy of around 87%, and is hence the better classifier.
classifier-models/NB and LSTM Based Classifiers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline import numpy as np import matplotlib.pyplot as plt # # Support Vector Machines # + from sklearn.datasets import load_digits from sklearn.cross_validation import train_test_split digits = load_digits() X_train, X_test, y_train, y_test = train_test_split(digits.data / 16., digits.target % 2, random_state=2) # - from sklearn.svm import LinearSVC, SVC linear_svc = LinearSVC(loss="hinge").fit(X_train, y_train) svc = SVC(kernel="linear").fit(X_train, y_train) np.mean(linear_svc.predict(X_test) == svc.predict(X_test)) # ## Kernel SVMs # # # Predictions in a kernel-SVM are made using the formular # # $$ # \hat{y} = \alpha_0 + \alpha_1 y_1 k(\mathbf{x^{(1)}}, \mathbf{x}) + ... + \alpha_n y_n k(\mathbf{x^{(n)}}, \mathbf{x})> 0 # $$ # # $$ # 0 \leq \alpha_i \leq C # $$ # # # Radial basis function (Gaussian) kernel: # $$k(\mathbf{x}, \mathbf{x'}) = \exp(-\gamma ||\mathbf{x} - \mathbf{x'}||^2)$$ from sklearn.metrics.pairwise import rbf_kernel line = np.linspace(-3, 3, 100)[:, np.newaxis] kernel_value = rbf_kernel([[0]], line, gamma=1) plt.plot(line, kernel_value.T) from figures import plot_svm_interactive plot_svm_interactive() svc = SVC().fit(X_train, y_train) svc.score(X_test, y_test) # + Cs = [0.001, 0.01, 0.1, 1, 10, 100] gammas = [0.001, 0.01, 0.1, 1, 10, 100] from sklearn.grid_search import GridSearchCV param_grid = {'C': Cs, 'gamma' : gammas} grid_search = GridSearchCV(SVC(), param_grid, cv=5) grid_search.fit(X_train, y_train) # - grid_search.score(X_test, y_test) # + # We extract just the scores scores = [x[1] for x in grid_search.grid_scores_] scores = np.array(scores).reshape(6, 6) plt.matshow(scores) plt.xlabel('gamma') plt.ylabel('C') plt.colorbar() plt.xticks(np.arange(6), param_grid['gamma']) plt.yticks(np.arange(6), 
param_grid['C']); # -
Chapter 2/Support Vector Machines.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Probability theory is a cornerstone for machine learning. We can think of quantum states as probability distributions with certain properties that make them different from our classical notion of probabilities. Contrasting these properties is an easy and straightforward introduction to the most basic concepts we need in quantum computing. # # Apart from probability theory, linear algebra is also critical for many learning protocols. As we will see, geometry and probabilities are intrinsically linked in quantum computing, but geometric notions are also familiar in dealing with classical probability distributions. This notebook first talks about classical probabilities and stochastic vectors, and introduces quantum states as a natural generalization. # # Throughout this course, we will assume finite probability distributions and finite dimensional spaces. This significantly simplifies notation and most quantum computers operate over finite dimensional spaces, so we do not lose much in generality. # # # # Classical probability distributions # # Let us toss a biased coin. Without getting too technical, we can associate a random variable $X$ with the output: it takes the value 0 for heads and the value 1 for tails. We get heads with probability $P(X=0) = p_0$ and tails with $P(X=1) = p_1$ for each toss of the coin. In classical, Kolmogorovian probability theory, $p_i\geq 0$ for all $i$, and the probabilities sum to one: $\sum_i p_i = 1$. 
Let's sample this distribution import numpy as np n_samples = 100 p_1 = 0.2 x_data = np.random.binomial(1, p_1, (n_samples,)) print(x_data) # We naturally expect that the empirically observed frequencies also sum to one: frequency_of_zeros, frequency_of_ones = 0, 0 for x in x_data: if x: frequency_of_ones += 1/n_samples else: frequency_of_zeros += 1/n_samples print(frequency_of_ones+frequency_of_zeros) # Since $p_0$ and $p_1$ must be non-negative, all possible probability distributions are restricted to the positive orthant. The normalization constraint puts every possible distribution on a straight line. This plot describes all possible probability distributions by biased and unbiased coins. import matplotlib.pyplot as plt # %matplotlib inline p_0 = np.linspace(0, 1, 100) p_1 = 1-p_0 fig, ax = plt.subplots() ax.set_xlim(-1.2, 1.2) ax.set_ylim(-1.2, 1.2) ax.spines['left'].set_position('center') ax.spines['bottom'].set_position('center') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.set_xlabel("$p_0$") ax.xaxis.set_label_coords(1.0, 0.5) ax.set_ylabel("$p_1$") ax.yaxis.set_label_coords(0.5, 1.0) plt.plot(p_0, p_1) # We may also arrange the probabilities in a vector $\vec{p} = \begin{bmatrix} p_0 \\ p_1 \end{bmatrix}$. Here, for notational convenience, we put an arrow above the variable representing the vector, to distinguish it from scalars. You will see that quantum states also have a standard notation that provides convenience, but goes much further in usefulness than the humble arrow here. # # A vector representing a probability distribution is called a *stochastic vector*. The normalization constraint essentially says that the norm of the vector is restricted to one in the $l_1$ norm. In other words, $||\vec{p}||_1 = \sum_i |p_i| = 1$. This would be the unit circle in the $l_1$ norm, but since $p_i\geq 0$, we are restricted to a quarter of the unit circle, just as we plotted above. 
We can easily verify this with numpy's norm function: p = np.array([[0.8], [0.2]]) np.linalg.norm(p, ord=1) # We know that the probability of heads is just the first element in the $\vec{p}$, but since it is a vector, we could use linear algebra to extract it. Geometrically, it means that we project the vector to the first axis. This projection is described by the matrix $\begin{bmatrix} 1 & 0\\0 & 0\end{bmatrix}$. The length in the $l_1$ norm gives the sought probability: Π_0 = np.array([[1, 0], [0, 0]]) np.linalg.norm(Π_0.dot(p), ord=1) # We can repeat the process to get the probability of tails: Π_1 = np.array([[0, 0], [0, 1]]) np.linalg.norm(Π_1.dot(p), ord=1) # The two projections play an equivalent role to the values 0 and 1 when we defined the probability distribution. In fact, we could define a new random variable called $\Pi$ that can take the projections $\Pi_0$ and $\Pi_1$ as values and we would end up with an identical probability distribution. This may sound convoluted and unnatural, but the measurement in quantum mechanics is essentially a random variable that takes operator values, such as projections. # What happens when we want to transform a probability distribution to another one? For instance, to change the bias of a coin, or to describe the transition of a Markov chain. Since the probability distribution is also a stochastic vector, we can apply a matrix on the vector, where the matrix has to fulfill certain conditions. A left *stochastic matrix* will map stochastic vectors to stochastic vectors when multiplied from the left: its columns add up to one. In other words, it maps probability distributions to probability distributions. For example, starting with a unbiased coin, the map $M$ will transform the distribution to a biased coin: p = np.array([[.5], [.5]]) M = np.array([[0.7, 0.6], [0.3, 0.4]]) np.linalg.norm(M.dot(p), ord=1) # One last concept that will come handy is entropy. 
A probability distribution's entropy is defined as $H(p) = - \sum_i p_i \log_2 p_i$. Let us plot it over all possible probability distributions of coin tosses: ϵ = 10e-10 p_0 = np.linspace(ϵ, 1-ϵ, 100) p_1 = 1-p_0 H = -(p_0*np.log2(p_0) + p_1*np.log2(p_1)) fig, ax = plt.subplots() ax.set_xlim(0, 1) ax.set_ylim(0, -np.log2(0.5)) ax.set_xlabel("$p_0$") ax.set_ylabel("$H$") plt.plot(p_0, H) plt.axvline(x=0.5, color='k', linestyle='--') # Here we can see that the entropy is maximal for the unbiased coin. This is true in general: the entropy peaks for the uniform distribution. In a sense, this is the most unpredictable distribution: if we get heads with probability 0.2, betting tails is a great idea. On the other hand, if the coin is unbiased, then a deterministic strategy is of little help in winning. Entropy quantifies this notion of surprise and unpredictability. # # Quantum states # # A classical coin is a two-level system: it is either heads or tails. At a first look a quantum state is a probability distribution, and the simplest case is a two-level state, which we call a qubit. Just like the way we can write the probability distribution as a column vector, we can write a quantum state as a column vector. For notational convenience that will become apparent later, we write the label of a quantum state in what is called a ket in the Dirac notation. So for instance, for some qubit, we can write # # $$ # |\psi\rangle = \begin{bmatrix} # a_0 \\ # a_1 \\ # \end{bmatrix}. # $$ # # In other words, a ket is just a column vector, exactly like the stochastic vector in the classical case. Instead of putting an arrow over the name of the variable to express that it is a vector, we use the ket to say that it is a column vector that represents a quantum state. There's more to this notation, as we will see. # # The key difference to classical probability distributions and stochastic vectors is the normalization constraint. 
The square sum of their absolute values adds up to 1: # # $$ # \sqrt{|a_0|^2+|a_1|^2}=1, # $$ # # where $a_0, a_1\in \mathbb{C}$. In other words, we are normalizing in the $l_2$ norm instead of the $l_1$ norm. Furthermore, we are no longer restricted to the positive orthant: the components of the quantum state vector, which we call *probability amplitudes*, are complex valued. # # Let us introduce two special qubits, corresponding to the canonical basis vectors in two dimensions: $|0\rangle$ and $|1\rangle$. # # $$ # |0\rangle = \begin{bmatrix} # 1 \\ # 0 \\ # \end{bmatrix}, \,\,\, |1\rangle = \begin{bmatrix} # 0 \\ # 1 \\ # \end{bmatrix}. # $$ # # This basis is also called the computational basis in quantum computing. # # We can expand an arbitrary qubit state in this basis: # # $$ # |\psi\rangle = \begin{bmatrix} # a_0 \\ # a_1 \\ # \end{bmatrix}=a_0\begin{bmatrix} # 1 \\ # 0 \\ # \end{bmatrix} + # a_1\begin{bmatrix} # 0 \\ # 1 \\ # \end{bmatrix}= # a_0|0\rangle+a_1|1\rangle. # $$ # # This expansion in a basis is called a superposition. If we sample the qubit state, we obtain the outcome 0 with probability $|a_0|^2$, and 1 with probability $|a_1|^2$. This is known as the Born rule; you will learn more about measurements and this rule in a subsequent notebook. # # For now, let's take a look at how we can simulate classical coin tossing on a quantum computer. Let's start with a completely biased case where we get heads with probability 1. This means that our qubit $|\psi\rangle=|0\rangle$. We create a circuit of a single qubit and a single classical register where the results of the sampling (measurements) go. 
# + from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister from qiskit import execute from qiskit import BasicAer from qiskit.tools.visualization import plot_histogram, plot_bloch_multivector import numpy as np π = np.pi backend = BasicAer.get_backend('qasm_simulator') q = QuantumRegister(1) c = ClassicalRegister(1) circuit = QuantumCircuit(q, c) # - # Any qubit is initialized in $|0\rangle$, so if we measure it rightaway, we should get our maximally biased coin. circuit.measure(q, c) # Let us execute it a hundred times and study the result job = execute(circuit, backend, shots=100) result = job.result() result.get_counts(circuit) # As expected, all of our outcomes are 0. To understand the possible quantum states, we use the Bloch sphere visualization. Since the probability amplitudes are complex and there are two of them for a single qubit, this would require a four-dimensional space. Now since the vectors are normalized, this removes a degree of freedom, allowing a three-dimensional representation with an appropriate embedding. This embedding is the Bloch sphere. It is slightly different than an ordinary sphere in three dimensions: we identify the north pole with the state $|0\rangle$, and the south pole with $|1\rangle$. In other words, two orthogonal vectors appear as if they were on the same axis -- the axis Z. The computational basis is just one basis: the axes X and Y represent two other bases. Any point on the surface of this sphere is a valid quantum state. This is also true the other way around: every pure quantum state is a point on the Bloch sphere. Here it 'pure' is an important technical term and it essentially means that the state is described by a ket (column vector). Later in the course we will see other states called mix states that are not described by a ket (you will see later that these are inside the Bloch sphere). 
# # To make it less abstract, let's plot our $|0\rangle$ on the Bloch sphere: backend_statevector = BasicAer.get_backend('statevector_simulator') circuit = QuantumCircuit(q, c) circuit.iden(q[0]) job = execute(circuit, backend_statevector) plot_bloch_multivector(job.result().get_statevector(circuit)) # Compare this sphere with the straight line in the positive orthant that describes all classical probability distributions of coin tosses. You can already see that there is a much richer structure in the quantum probability space. # # Let us pick another point on the Bloch sphere, that is, another distribution. Let's transform the state $|0\rangle$ to $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$. This corresponds to the unbiased coin, since we will get 0 with probability $|\frac{1}{\sqrt{2}}|^2=1/2$, and the other way around. There are many ways to do this transformation. We pick a rotation around the Y axis by $\pi/2$, which corresponds to the matrix $\frac{1}{\sqrt{2}}\begin{bmatrix} 1 & -1\\1 & 1\end{bmatrix}$. circuit = QuantumCircuit(q, c) circuit.ry(π/2, q[0]) circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) # To get an intuition why it is called a rotation around the Y axis, let's plot it on the Bloch sphere: circuit = QuantumCircuit(q, c) circuit.ry(π/2, q[0]) job = execute(circuit, backend_statevector) plot_bloch_multivector(job.result().get_statevector(circuit)) # It does exactly what it says: it rotates from the north pole of the Bloch sphere. # # Why is interesting to have complex probability amplitudes instead of non-negative real numbers? To get some insight, take a look what happens if we apply the same rotation to $|1\rangle$. To achieve this, first we flip $|0\rangle$ to $|1\rangle$ by applying a NOT gate (denoted by X in quantum computing) and then the rotation. 
circuit = QuantumCircuit(q, c) circuit.x(q[0]) circuit.ry(π/2, q[0]) job = execute(circuit, backend_statevector) plot_bloch_multivector(job.result().get_statevector(circuit)) # You can verify that the result is $\frac{1}{\sqrt{2}}(-|0\rangle + |1\rangle)$. That is, the exact same state as before, except that the first term got a minus sign: it is a negative probability amplitude. Note that the difference cannot be observed from the statistics: circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) # It still looks like an approximately biased coin. Yet, that negative sign -- or any complex value -- is what models *interference*, a critically important phenomenon where probability amplitudes can interact in a constructive or a destructive way. To see this, if we apply the rotation twice in a row on $|0\rangle$, we get another deterministic output, $|1\rangle$, although in between the two, it was some superposition. circuit = QuantumCircuit(q, c) circuit.ry(π/2, q[0]) circuit.ry(π/2, q[0]) circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) # Many quantum algorithms exploit interference, for instance, the seminal [Deutsch-Josza algorithm](https://en.wikipedia.org/wiki/Deutsch–Jozsa_algorithm), which is among the simplest to understand its significance. # # More qubits and entanglement # # We have already seen that quantum states are probability distributions normed to 1 in the $l_2$ norm and we got a first peek at interference. If we introduce more qubits, we see another crucial quantum effect emerging. To do that, we first have to define how we write down the column vector for describing two qubits. We use a tensor product, which, in the case of qubits, is equivalent to the Kronecker product. 
Given two qubits, $|\psi\rangle=\begin{bmatrix}a_0\\a_1\end{bmatrix}$ and $|\psi'\rangle=\begin{bmatrix}b_0\\b_1\end{bmatrix}$, their product is $|\psi\rangle\otimes|\psi'\rangle=\begin{bmatrix}a_0b_0\\ a_0b_1\\ a_1b_0\\ a_1b_1\end{bmatrix}$. Imagine that you have two registers $q_0$ and $q_1$, each can hold a qubit, and both qubits are in the state $|0\rangle$. Then this composite state would be described by according to this product rule as follows: q0 = np.array([[1], [0]]) q1 = np.array([[1], [0]]) np.kron(q0, q1) # This is the $|0\rangle\otimes|0\rangle$ state, which we often abbreviate as $|00\rangle$. The states $|01\rangle$, $|10\rangle$, and $|11\rangle$ are defined analogously, and the four of them give the canonical basis of the four dimensional complex space, $\mathbb{C}^2\otimes\mathbb{C}^2$. # # Now comes the interesting and counter-intuitive part. In machine learning, we also work we high-dimensional spaces, but we never construct it as a tensor product: it is typically $\mathbb{R}^d$ for some dimension $d$. The interesting part of writing the high-dimensional space as a tensor product is that not all vectors in can be written as a product of vectors in the component space. # # Take the following state: $|\phi^+\rangle = \frac{1}{\sqrt{2}}(|00\rangle+|11\rangle)$. This vector is clearly in $\mathbb{C}^2\otimes\mathbb{C}^2$, since it is a linear combination of two of the basis vector in this space. Yet, it cannot be written as $|\psi\rangle\otimes|\psi'\rangle$ for some $|\psi\rangle$, $|\psi'\rangle\in\mathbb{C}^2$. # # To see this, assume that it can be written in this form. Then # # $$ # |\phi^+\rangle = \frac{1}{\sqrt{2}}(|00\rangle+|11\rangle) = \begin{bmatrix}a_0b_0\\ a_0b_1\\ a_1b_0\\ a_1b_1\end{bmatrix} = a_0b_0|00\rangle + a_0b_1|01\rangle + a_1b_0|10\rangle + a_1b_1|11\rangle. # $$ # # $|01\rangle$ and $|10\rangle$ do not appear on the left-hand side, so their coefficients must be zero: $a_1b_0=0$ and $a_0b_1=0$. 
This leads to a contradiction: $a_1b_1=\frac{1}{\sqrt{2}}\neq 0$ forces $a_1\neq 0$, so $b_0$ must be zero; but $a_0b_0=\frac{1}{\sqrt{2}}\neq 0$ requires $b_0\neq 0$.
qiskit_version/01_Classical_and_Quantum_Probability_Distributions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # DATA 531 Lab 1: Python Functions, Lists, and Dictionaries # **** # **Name**: FirstName LastName # # **Date**: September 8, 2020 # **** # ## Objectives # # 1. Create and call Python functions with parameters (including parameters with default values). # 2. Use of Python built-in functions including math functions and functions in random package. # 3. Mastery of list operations including traversing a list with a for loop, list indexing, and operations such as append. # 4. Experience with use of dictionaries as a key-value data store. # ## Practice Questions (Not for marks) # # If you need or would like extra practice with Python, I have added some [practice exercises on the main website](https://firas.moosvi.com/courses/data531/practice/intro.html). These are optional, not for marks. # # The Python exercises are courtesy of [<NAME> from his “Learn Python 3” series](https://github.com/jerry-git/learn-python3.git) and are reproduced here with permission, and under a CC-BY-SA 4.0 license. # ## Question #1 - Creating and Calling Functions (10 marks) # # Create a Python program that creates and calls a function for calculating the average of a list of values. Details: # # - Updater the header with your name and the date. (0.5 mark) # - Create function with name ``avglist`` that accepts three parameters: (1 mark) # - ``lst`` - list of numbers # - ``low`` - miminum value (not inclusive). Default = 0 # - ``high`` - maximum value (not inclusive). Default = 100 # - Function will calculate and return the average of all values in the range (``low``, ``high``). (3 marks) # - Function should have a docstring as shown in output. (1 mark) # - Call ``help(avglist)`` to display function information. 
(0.5 marks) # - Create a list with the numbers 1 to 10 and call ``avglist()`` with default range and print result. (1 mark) # - Create a list with the numbers 1 to 100 and call ``avglist()`` with range (``20``, ``80``) and print result. (1 mark) # - Create a list with 100 random numbers between 1 and 100 and call avglist() with range (``30``, ``100``) and print the result. Note: Will need to ``import random`` and use ``random.randint(1,100)``. To set seed, use ``random.seed(0)``. (2 marks) # ### Sample Output # # Help on function avglist in module __main__: # # avglist(lst, low=0, high=100) # Returns average of a list of numbers in given range (low, high) # # Args: # lst: list of numbers # low: minimum value (not inclusive) Default: 0 # high: maximum value (not inclusive) Default: 100 # # Return: # Average of list of numbers # # Average of list from 1 to 10 (default range): 5.5 # Average of list from 1 to 100 range (20,80): 50.0 # Average of random list of 100 numbers from 1 to 100 and range (30,100): 62.41095890410959 # ### Solution # # # # # + ### Your solution here # - # ## Question #2 - Using Lists (10 marks) # # Create a Python program that performs list operations based on user input. Details: # # - Create a list with the numbers ``2, 4, 6, 8, and 10``. (1 mark) # - Using a while loop, prompt the user for an operation to perform until ``X`` for exit is selected. (1 mark) # - If operation is ``A``, prompt for a number and append that number to the list. (1 mark) # - If operation is ``P``, print the list. (1 mark) # - If operation is ``G``, prompt for an index and print the number at that index from the list. Make sure it is a valid index otherwise print ``Error invalid index``. (3 marks) # - If operation is ``D``, prompt for a number and delete that number from the list. (1 mark) # - If operation is ``R``, prompt for two numbers ``startIdx`` and ``endIdx`` and return a range from the list ``[startIdx, endIdx]``. 
Make sure it is a valid range otherwise print ``Error invalid index``. (2 marks) # - **Bonus:** If operation is ``S``, sort the list in ascending order. (1 mark) # - **Bonus:** Write a method that takes a list and index as a parameter and returns true if index is valid. (1 mark) # ### Sample Output # # Enter an operation: P # [2, 4, 6, 8, 10] # Enter an operation: A # Enter number to add: 5 # Enter an operation: P # [2, 4, 6, 8, 10, 5] # Enter an operation: D # Enter number to remove: 4 # Enter an operation: P # [2, 6, 8, 10, 5] # Enter an operation: G # Enter index to get: 3 # Number at index 3 is 10 # Enter an operation: G # Enter index to get: 5 # Error invalid index # Enter an operation: R # Enter start index: 2 # Enter end index: 4 # List range: [8, 10] # Enter an operation: R # Enter start index: 0 # Enter end index: 5 # Error invalid index # Enter an operation: S # Enter an operation: P # [2, 5, 6, 8, 10] # Enter an operation: X # # ### Solution # + ### Your solution here # - # ## Question #3 - Letter Frequency Analysis using Dictionaries (5 marks) # # Create a Python program that takes a string text and calculates the frequency of each letter and punctuation. Data set (copy as string into Python code): # # text = """She should have died hereafter. # There would have been a time for such a word. # Tomorrow, and tomorrow, and tomorrow, # Creeps in this petty pace from day to day # To the last syllable of recorded time, # And all our yesterdays have lighted fools # The way to dusty death. Out, out, brief candle! # Life's but a walking shadow, a poor player # That struts and frets his hour upon the stage # And then is heard no more. It is a tale # Told by an idiot, full of sound and fury, # Signifying nothing.""" # Macbeth: Act 5, Scene 5, Page 2 # # Details: # - Copy data set above into Python code. (0.5 mark) # - Create an empty dictionary to store letter counts. Upper and lower case letters should be counted together as upper case (e.g. 'A' and 'a'). 
Punctuation '.' and '!' and spaces
5 1% # A 33 7% # B 5 1% # C 5 1% # D 25 5% # E 38 8% # F 12 3% # G 6 1% # H 21 4% # I 19 4% # K 1 0% # L 18 4% # M 7 1% # N 19 4% # O 36 8% # P 6 1% # R 26 5% # S 22 5% # T 36 8% # U 14 3% # V 3 1% # W 8 2% # Y 12 3% # ### Solution # + ### Your solution here
labs/lab1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory analysis and OLS regression on the Boston housing data set
# (datasets/HousingData.csv). MEDV (median house value) is the target.

# +
import scipy
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt

# +
# Load the raw housing data.
house_data = pd.read_csv('datasets/HousingData.csv')
house_data.head(5)
# -

house_data.shape

house_data.columns

# +
# Drop columns not used in this analysis.
house_data = house_data.drop(['CRIM', 'B'], axis=1)
house_data.head()
# -

house_data.isnull().sum()

# Drop rows with any missing value.
house_data.dropna(inplace=True, axis=0)

house_data.shape

# +
# FIX: scipy.median was a deprecated alias of NumPy's median and has been
# removed from SciPy; call np.median directly.
median_price = np.median(house_data['MEDV'])
median_price

# +
# Binary label: 1 when the house value is above the data-set median.
house_data['above_median'] = np.where(house_data['MEDV'] > median_price, 1, 0)
house_data.sample(10)
# -

house_data.to_csv('datasets/house_data_processed.csv', index=False)

# !ls datasets/

# +
house_data_selected = house_data[['MEDV', 'RM', 'DIS', 'AGE']]
house_data_selected.head(10)

# +
plt.figure(figsize=(12, 8))
sns.scatterplot(x='AGE', y='MEDV', s=80, data=house_data_selected)
plt.title('House Data')

# +
plt.figure(figsize=(12, 8))
sns.scatterplot(x='RM', y='MEDV', s=80, data=house_data_selected)
plt.title('House Data')

# +
sns.pairplot(house_data_selected)
plt.show()
# -

with sns.axes_style('white'):
    sns.jointplot(x='RM', y='MEDV', data=house_data_selected, kind='hex')
plt.show()

# +
sns.jointplot(x='AGE', y='MEDV', data=house_data_selected, kind='kde')
plt.show()

# +
# Covariance / correlation between the selected columns (variables as rows).
house_data_selected_cov = np.cov(house_data_selected.T)
house_data_selected_cov

# +
house_data_selected_corr = np.corrcoef(house_data_selected.T)
house_data_selected_corr

# +
plt.figure(figsize=(12, 8))
sns.heatmap(house_data_selected_corr,
            xticklabels=house_data_selected.columns,
            yticklabels=house_data_selected.columns,
            annot=True)
plt.show()

# +
plt.figure(figsize=(12, 8))
sns.lmplot(x='RM', y='MEDV', data=house_data)
# NOTE(review): title 'Salary' looks copied from another exercise — probably
# intended to be 'House Data'; kept as-is to preserve output.
plt.title('Salary')
# -

# Simple linear regression of MEDV on RM (average rooms per dwelling).
# (Removed a stray comma in the unpacking target list.)
slope, intercept, r_value, _, _ = stats.linregress(house_data['RM'],
                                                   house_data['MEDV'])

print('R-square value', r_value**2)
print('Slope', slope)
print('Intercept', intercept)

# +
plt.figure(figsize=(12, 8))
sns.scatterplot(x='RM', y='MEDV', s=100, data=house_data, label='Original')
sns.lineplot(x=house_data['RM'], y=(slope * house_data['RM'] + intercept),
             color='r', label='Fitted line')
plt.title('Salary')

# +
plt.figure(figsize=(12, 8))
sns.scatterplot(x='AGE', y='MEDV', s=80, hue='RAD', data=house_data)
plt.title('House Data')

# +
plt.figure(figsize=(12, 8))
sns.scatterplot(x='RM', y='MEDV', s=80, hue='RAD', data=house_data)
plt.title('House Data')

# +
# OLS of MEDV on all remaining predictors.
# NOTE(review): no constant is added (sm.add_constant), so the model is
# fitted without an intercept — confirm this is intentional.
X = house_data.drop(['MEDV', 'above_median'], axis=1)
y = house_data['MEDV']
# -

X.head()

# +
reg_model = sm.OLS(y, X).fit()
reg_model.params
# -

reg_model.summary()
book/_build/.jupyter_cache/executed/bb8dfa82f48676bbc3cfa3af2f874acf/base.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.display import HTML import saspy #sas = saspy.SASsession(cfgname='winlocal', encoding='cp1252') sas = saspy.SASsession(cfgname='winiomj', encoding='cp1252', iomport=8591) sas # + ll = sas.submit(''' data NyTest ; Length t1 t2 t3 8. t4 t5 $100. t6 8.; do i=1 to 40000; t1=1; t2=.; t3=3; t4='ÆØÅ -678'; t5='ÆØÅ-678'; t6=.; t7='ÆØÅ- 678 '; output; end; run; ''') # - print(ll['LOG']) ds = sas.sasdata('NyTest ') dsh = sas.sasdata('NyTest ', results='html') ds.head(30) dsh.tail(100) df = ds.to_df_CSV() print(sas.saslog()) df.head(30) df df2 = ds.to_df() df2 sas
Issue_examples/Issue147.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Tutorial: concatenating, merging and joining pandas DataFrames.

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Wisawasi/100-Days-Of-ML-Code/blob/master/Concat%2C_Merge%2C_Join_DataFrame_in_Pandas_By_INVESTIC.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
import pandas as pd

# ## Create DataFrames

# +
dummy_data1 = {
    'id': ['1', '2', '3', '4', '5'],
    'Feature1': ['A', 'C', 'E', 'G', 'I'],
    'Feature2': ['B', 'D', 'F', 'H', 'J']}

df1 = pd.DataFrame(dummy_data1, columns=['id', 'Feature1', 'Feature2'])
df1

# +
dummy_data2 = {
    'id': ['1', '2', '6', '7', '8'],
    'Feature1': ['K', 'M', 'O', 'Q', 'S'],
    'Feature2': ['L', 'N', 'P', 'R', 'T']}

df2 = pd.DataFrame(dummy_data2, columns=['id', 'Feature1', 'Feature2'])
df2

# +
dummy_data3 = {
    'id': ['1', '2', '3', '4', '5', '7', '8', '9', '10', '11'],
    'Feature3': [12, 13, 14, 15, 16, 17, 15, 12, 13, 23]}

df3 = pd.DataFrame(dummy_data3, columns=['id', 'Feature3'])
df3

# ## The simple concatenate

# +
# Stack df1 on top of df2; the original row indices are kept (0-4 twice).
df_row = pd.concat([df1, df2])
df_row

# +
# Same, but with a fresh 0..n-1 index.
df_row_reindex = pd.concat([df1, df2], ignore_index=True)
df_row_reindex

# +
# keys= builds a hierarchical index so each source frame stays addressable.
frames = [df1, df2]
df_keys = pd.concat(frames, keys=['x', 'y'])
df_keys

# +
df_keys.loc['y']

# +
# A dict of frames gives the same result, with dict keys as the outer level.
pieces = {'x': df1, 'y': df2}
df_piece = pd.concat(pieces)
df_piece

# +
# axis=1 concatenates side by side (columns), aligning on the row index.
df_col = pd.concat([df1, df2], axis=1)
df_col

# # Merge DataFrames

# +
# Inner merge on the shared 'id' column.
df_merge_col = pd.merge(df_row, df3, on='id')
df_merge_col

# +
# Equivalent merge spelling out the key on each side.
df_merge_difkey = pd.merge(df_row, df3, left_on='id', right_on='id')
df_merge_difkey

# +
add_row = pd.Series(['10', 'X1', 'X2', 'X3'],
                    index=['id', 'Feature1', 'Feature2', 'Feature3'])

# FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0.
# Concatenating a one-row frame gives the same result (row appended with a
# fresh index).
df_add_row = pd.concat([df_merge_col, add_row.to_frame().T],
                       ignore_index=True)
df_add_row

# # Join DataFrames

# +
df4 = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
                    'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
df4

# +
df5 = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
                    'B': ['B0', 'B1', 'B2']})
df5

# +
# join() works on the index by default; suffixes disambiguate 'key'.
df4.join(df5, lsuffix='_caller', rsuffix='_other')

# +
# Join on 'key' by making it the index on both sides.
df4.set_index('key').join(df5.set_index('key'))

# +
# Or keep df4 as-is and join df5's index against df4's 'key' column.
df4.join(df5.set_index('key'), on='key')

# ## Full Outer Join

# +
df_outer = pd.merge(df1, df2, on='id', how='outer')
df_outer

# +
df_suffix = pd.merge(df1, df2, left_on='id', right_on='id', how='outer',
                     suffixes=('_left', '_right'))
df_suffix

# ## Inner Join

# +
df_inner = pd.merge(df1, df2, on='id', how='inner')
df_inner

# ## Right Join

# +
df_right = pd.merge(df1, df2, on='id', how='right')
df_right

# ## Left Join

# +
df_left = pd.merge(df1, df2, on='id', how='left')
df_left

# ## Joining on Index

# +
df_index = pd.merge(df1, df2, right_index=True, left_index=True)
df_index
Concat,_Merge,_Join_DataFrame_in_Pandas_By_INVESTIC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Bquery/Bcolz Taxi Set Performance
# Based on the great work by <NAME>, see http://matthewrocklin.com/blog/work/2016/02/22/dask-distributed-part-2
#
# NB: The auto-caching features will make the second (and subsequent) runs
# faster for multi-column groupings, which is reflected in the scores below.

# +
import os
import urllib
import glob
import pandas as pd
from bquery import ctable
import bquery
import bcolz
from multiprocessing import Pool, cpu_count
from collections import OrderedDict
import contextlib
import time

# do not forget to install numexpr
# os.environ["BLOSC_NOLOCK"] = "1"
# Single-threaded bcolz: parallelism comes from the process pool below.
bcolz.set_nthreads(1)

workdir = '/home/carst/Documents/taxi/'

# +
# Wall-clock time of each benchmark, keyed by its message, in run order.
elapsed_times = OrderedDict()


@contextlib.contextmanager
def ctime(message=None):
    """Time the enclosed context, print the result and record it.

    Args:
        message: label printed alongside the timing and used as the key in
            the module-level ``elapsed_times`` dict; must not be None.
    """
    assert message is not None
    global elapsed_times  # documents intent; mutation alone would not need it
    t_elapsed = 0.0
    print('\n')
    t = time.time()
    yield
    if message:
        print(message + ": ")
        t_elapsed = time.time() - t
        print(round(t_elapsed, 4), "sec")
    elapsed_times[message] = t_elapsed


# +
def sub_query(input_args):
    """Worker: run one groupby against a single on-disk bcolz ctable.

    Args:
        input_args: dict with keys 'rootdir' (ctable directory),
            'group_cols' and 'measure_cols' (lists of column names).

    Returns:
        The groupby result as a msgpack-serialized pandas DataFrame, so it
        crosses the process boundary cheaply.
    """
    rootdir = input_args['rootdir']
    group_cols = input_args['group_cols']
    measure_cols = input_args['measure_cols']
    # mode='a' lets bquery write its factorization cache next to the data.
    ct = ctable(rootdir=rootdir, mode='a')
    result = ct.groupby(group_cols, measure_cols)
    result_df = result.todataframe()
    # NOTE(review): to_msgpack/read_msgpack were removed in pandas 1.0, so
    # this requires pandas < 1.0; returning the DataFrame and letting
    # multiprocessing pickle it would remove that constraint.
    return result_df.to_msgpack()


def execute_query(ct_list, group_cols, measure_cols):
    """Fan a groupby out over multiple ctable chunks and combine the results.

    Args:
        ct_list: list of ctable root directories (one worker task each).
        group_cols: columns to group by.
        measure_cols: measure columns to sum.

    Returns:
        pandas DataFrame of summed measures, indexed by group_cols.
    """
    p = Pool(cpu_count())
    query_list = [{
        'rootdir': rootdir,
        'group_cols': group_cols,
        'measure_cols': measure_cols} for rootdir in ct_list]
    # FIX: close the pool even if a worker raises (was leaked on error).
    try:
        result_list = p.map(sub_query, query_list)
    finally:
        p.close()
    result_list = [pd.read_msgpack(x) for x in result_list]
    result_df = pd.concat(result_list, ignore_index=True)
    # The per-chunk partial sums are summed again for the global aggregate.
    result_df = result_df.groupby(group_cols)[measure_cols].sum()
    return result_df
# -

# create workfiles if not available
ct_list = glob.glob(workdir + 'taxi_*')

# One-time data preparation (left disabled once the files exist on disk):
# import bquery.benchmarks.taxi.load as taxi_load
# taxi_load.download_data(workdir)
# taxi_load.create_bcolz(workdir)
# taxi_load.create_bcolz_chunks(workdir)

# NOTE(review): duplicate of the glob above; it only matters if the
# commented-out load steps are re-enabled between the two calls.
ct_list = glob.glob(workdir + 'taxi_*')

# Single non-chunked ctable used for the single-process benchmarks.
ct = ctable(rootdir=workdir + 'taxi', mode='a')

# Measure columns summed in the "All Measures" benchmarks below.
measure_list = ['extra', 'fare_amount', 'improvement_surcharge', 'mta_tax',
                'nr_rides', 'passenger_count', 'tip_amount', 'tolls_amount',
                'total_amount', 'trip_distance']

# ## Single Process

# +
with ctime(message='CT payment_type nr_rides sum, single process'):
    ct.groupby(['payment_type'], ['nr_rides'])

with ctime(message='CT yearmonth nr_rides sum, single process'):
    ct.groupby(['pickup_yearmonth'], ['nr_rides'])

with ctime(message='CT yearmonth + payment_type nr_rides sum, single process'):
    ct.groupby(['pickup_yearmonth', 'payment_type'], ['nr_rides'])
# -

# ## Multi Process

# +
with ctime(message='CT payment_type nr_rides sum, ' + str(cpu_count()) + ' processors'):
    execute_query(ct_list, ['payment_type'], ['nr_rides'])

with ctime(message='CT yearmonth nr_rides sum, ' + str(cpu_count()) + ' processors'):
    execute_query(ct_list, ['pickup_yearmonth'], ['nr_rides'])

with ctime(message='CT yearmonth + payment_type nr_rides sum, ' + str(cpu_count()) + ' processors'):
    execute_query(ct_list, ['pickup_yearmonth', 'payment_type'], ['nr_rides'])
# -

# ## Single Process, All Measures

# +
with ctime(message='CT payment_type all measure sum, single process'):
    ct.groupby(['payment_type'], measure_list)

with ctime(message='CT yearmonth all measure sum, single process'):
    ct.groupby(['pickup_yearmonth'], measure_list)

with ctime(message='CT yearmonth + payment_type all measure sum, single process'):
    ct.groupby(['pickup_yearmonth', 'payment_type'], measure_list)
# -

# ## Multi Process, All Measures

# +
with ctime(message='CT payment_type all measure sum, ' + str(cpu_count()) + ' processors'):
    execute_query(ct_list, ['payment_type'], measure_list)

with ctime(message='CT yearmonth all measure sum, ' + str(cpu_count()) + ' processors'):
    execute_query(ct_list, ['pickup_yearmonth'], measure_list)

with ctime(message='CT yearmonth + payment_type all measure sum, ' + str(cpu_count()) + ' processors'):
    execute_query(ct_list, ['pickup_yearmonth', 'payment_type'], measure_list)
# -
bquery/benchmarks/taxi/Taxi Set.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Visualization of Stock Trading

# import libraries
import pandas as pd
import numpy as np
import altair as alt
import seaborn as sns

# ## Set relevant parameters
train_stock = 'data/GOOGL.csv'
val_stock = 'data/BTC-USD_Val.csv'
window_size = 10
batch_size = 16
ep_count = 10
model_name = 'model_GOOGL'
pretrained = False
debug = False

# ## Load Dataset into pandas DataFrame

# +
# read csv into dataframe
df = pd.read_csv(val_stock)
# filter out the desired features
df = df[['Date', 'Adj Close']]
# rename feature column names
df = df.rename(columns={'Adj Close': 'actual', 'Date': 'date'})
df.head()
# -

# convert dates from object to DateTime type
dates = df['date']
dates = pd.to_datetime(dates, infer_datetime_format=True)
df['date'] = dates
df.info()

df.head()


# ## Visualization of actions taken by Trading Bot

def visualize(df, history):
    """Overlay the agent's positions and its BUY/SELL actions on a chart.

    `df` must have 'date' and 'actual' columns; `history` appears to be a
    sequence of (position, action) pairs -- position is x[0], action label
    is x[1].  NOTE: mutates `df` in place by adding 'position' and 'action'
    columns.  Returns a layered Altair chart.
    """
    # add history to dataframe; a leading 'HOLD' entry pads history to the
    # same length as df
    position = [history[0][0]] + [x[0] for x in history]
    actions = ['HOLD'] + [x[1] for x in history]
    df['position'] = position
    df['action'] = actions

    # specify y-axis scale for stock prices (+/- 50 padding around the
    # combined range of actual prices and positions)
    scale = alt.Scale(domain=(min(min(df['actual']), min(df['position'])) - 50,
                              max(max(df['actual']), max(df['position'])) + 50),
                      clamp=True)

    # plot a line chart for stock positions
    # NOTE(review): this chart is named `actual` but encodes the 'position'
    # column -- confirm whether y='actual' was intended here.
    actual = alt.Chart(df).mark_line(
        color='green',
        opacity=0.5
    ).encode(
        x='date:T',
        y=alt.Y('position', axis=alt.Axis(format='$.2f', title='Price'), scale=scale)
    ).interactive(
        bind_y=False
    )

    # plot the BUY and SELL actions as points (HOLD rows are filtered out)
    points = alt.Chart(df).transform_filter(
        alt.datum.action != 'HOLD'
    ).mark_point(
        filled=True
    ).encode(
        x=alt.X('date:T', axis=alt.Axis(title='Date')),
        y=alt.Y('position', axis=alt.Axis(format='$.2f', title='Price'), scale=scale),
        color='action'
        #color=alt.Color('action', scale=alt.Scale(range=['blue', 'red']))
    ).interactive(bind_y=False)

    # merge the two charts
    chart = alt.layer(actual, points, title=val_stock).properties(height=300, width=1000)
    return chart


# ## Training the model

# +
import logging
import coloredlogs

from train import *
from evaluate import show_eval_result

coloredlogs.install(level='DEBUG')
switch_k_backend_device()

agent = Agent(window_size, pretrained=pretrained, model_name=model_name)
train_data = get_stock_data(train_stock)
val_data = get_stock_data(val_stock)
# Baseline: profit from simply holding between the first two ticks.
initial_offset = val_data[1] - val_data[0]

for i in range(1, ep_count + 1):
    train_result = train_model(agent, i, train_data, ep_count=ep_count,
                               batch_size=batch_size, window_size=window_size)
    val_result, _ = evaluate_model(agent, val_data, window_size, debug)
    show_train_result(train_result, val_result, initial_offset)
# -

# ## Plotting the evaluation actions

val_result, history = evaluate_model(agent, val_data, window_size, debug)
show_eval_result(model_name, val_result, initial_offset)

chart = visualize(df, history)
chart
visualize.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Transcribe the audio track of a Zoom recording to text with the Google
# Web Speech API, processing one 60-second chunk at a time.

# !pip install SpeechRecognition pydub
# !pip install ffmpeg
# !pip install moviepy

import wave, math, contextlib
import speech_recognition as sr
from moviepy.editor import AudioFileClip

transcribed_audio_file_name = "transcribed_speech.wav"
zoom_video_file_name = "zoom_Faiza.mp4"

# Extract the audio track of the video into a WAV file.
audioclip = AudioFileClip(zoom_video_file_name)
audioclip.write_audiofile(transcribed_audio_file_name)

# Read the clip duration (in whole minutes, rounded up) from the WAV header.
with contextlib.closing(wave.open(transcribed_audio_file_name, 'r')) as f:
    frames = f.getnframes()
    rate = f.getframerate()
    duration = frames / float(rate)
total_duration = math.ceil(duration / 60)

r = sr.Recognizer()
# FIX: open the output file once via a context manager instead of
# re-opening and manually closing it on every iteration -- the original
# leaked the handle whenever recognize_google() raised mid-loop.
# Append mode is preserved, so the written output is identical.
with open("transcription.txt", "a") as transcript:
    for i in range(0, total_duration):
        # Re-open the source each pass; `offset` skips the minutes already
        # transcribed and `duration` caps the chunk at one minute.
        with sr.AudioFile(transcribed_audio_file_name) as source:
            audio = r.record(source, offset=i * 60, duration=60)
        # NOTE(review): recognize_google raises UnknownValueError on silent
        # chunks and RequestError on network failure; the script aborts on
        # either, as before.
        transcript.write(r.recognize_google(audio))
        transcript.write(" ")
Vision/Speech_to_Text/Speech to text.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="xzfJd9DSIuu0" colab_type="code" colab={} cellView="form"
# ============================= FORM ============================= #
#@markdown <h3>📝 Fill in the token and folder fields</h3>
# ================================================================ #
# Colab form cell: starts an aria2c RPC daemon in a screen session and
# exposes its RPC port through an ngrok tunnel; rclone is configured via
# environment variables to sync downloads to Google Drive.
import os, time, urllib.request, json, string, psutil, uuid;
from IPython.display import clear_output
import ipywidgets as widgets
from IPython.display import HTML, clear_output
from google.colab import output

Ngrok_Token = "" #@param {type:"string"}
RPC_token = "" #@param {type:"string"}
Remote_folder = "" #@param {type:"string"}
Remote_token = "" #@param {type:"string"}

# rclone reads its remote definition straight from these env vars.
os.environ["RCLONE_CONFIG_MYDRIVE_TYPE"] = "drive"
os.environ["RCLONE_CONFIG_MYDRIVE_SCOPE"] = "drive"
os.environ["RCLONE_CONFIG_MYDRIVE_ROOT_FOLDER_ID"] = Remote_folder
os.environ["RCLONE_CONFIG_MYDRIVE_TOKEN"] = Remote_token

# Status buttons shown at the end of the cell run.
SuccessRun = widgets.Button(
    description='✔ Successfully',
    disabled=True,
    button_style='success'
)
UnsuccessfullyRun = widgets.Button(
    description='✘ Unsuccessfully',
    disabled=True,
    button_style='danger'
)


class MakeButton(object):
    # Renders an HTML button whose click invokes a registered Python
    # callback through the Colab kernel bridge.
    def __init__(self, title, callback):
        self._title = title
        self._callback = callback

    def _repr_html_(self):
        callback_id = 'button-' + str(uuid.uuid4())
        output.register_callback(callback_id, self._callback)
        template = """<button class="p-Widget jupyter-widgets jupyter-button widget-button mod-info" id="{callback_id}">{title}</button> <script> document.querySelector("#{callback_id}").onclick = (e) => {{ google.colab.kernel.invokeFunction('{callback_id}', [], {{}}) e.preventDefault(); }}; </script>"""
        html = template.format(title=self._title, callback_id=callback_id)
        return html


def CheckProcess(processName):
    # True if any running process name contains `processName`
    # (case-insensitive); races with exiting processes are ignored.
    for proc in psutil.process_iter():
        try:
            if processName.lower() in proc.name().lower():
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            pass
    return False;


def Start_Server():
    # Launch aria2c inside a detached screen session unless both are
    # already running.  (The body is Colab shell magic, so this cell is
    # only executable inside the notebook runtime.)
    if CheckProcess("screen") == False or CheckProcess("aria2c main") == False:
        # %cd /content/aria2_home
        # !/usr/bin/screen -d -m -fa -S aria2c /usr/bin/aria2c --conf-path="/content/aria2_home/aria2.conf" --rpc-secret={RPC_token} --daemon=false


def init_ngrok():
    # Install ngrok on first use, otherwise kill any running tunnel, then
    # open an HTTP tunnel to the aria2 RPC port (6800) in the background.
    if os.path.isfile("/usr/local/bin/ngrok") == False:
        # !wget -q -c -nc https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
        # !unzip -qq -n ngrok-stable-linux-amd64.zip
        # !mv ngrok /usr/local/bin/ngrok
        # !rm -f /content/ngrok-stable-linux-amd64.zip
    else:
        # !pkill ngrok
    get_ipython().system_raw('/usr/local/bin/ngrok authtoken $Ngrok_Token && /usr/local/bin/ngrok http 6800 &')


def get_url():
    # Wait for ngrok to come up, then read the public tunnel URL from its
    # local inspection API and render the connection details.
    time.sleep(10)
    with urllib.request.urlopen('http://localhost:4040/api/tunnels') as response:
        data = json.loads(response.read().decode())
    (url) = data['tunnels'][0]['public_url']
    #print(f'{url}')
    # NOTE(review): str.strip('https://') strips *characters*, not a
    # prefix -- it also eats leading/trailing h/t/p/s/:/ characters of the
    # host; presumably removeprefix was intended.
    display(HTML("<h2 style=\"font-family:Trebuchet MS;color:#446785;\">aria2box</h2><h4 style=\"font-family:Trebuchet MS;color:#446785;\">" \
                 "HOST: "+ url.strip('https://') + "<br>PORT: 443<br>TOKEN: "+RPC_token+"</h4><h2>https://token:"+RPC_token+"@"+ url.strip('https://') +":443/jsonrpc</h2><br>"))


try:
    # First run only: install system packages, rclone, and the aria2
    # helper scripts.
    if os.path.isfile("/usr/bin/aria2c") == False:
        # !rm -rf /content/sample_data/
        # !apt update -qq -o=Dpkg::Use-Pty=0 > /dev/null
        # !apt install -qq -o=Dpkg::Use-Pty=0 screen autossh aria2 > /dev/null
        # !wget https://github.com/rclone/rclone/releases/download/v1.49.3/rclone-v1.49.3-linux-amd64.deb && dpkg -i rclone-v1.49.3-linux-amd64.deb && rm rclone-v1.49.3-linux-amd64.deb
        # !git clone https://gitlab.com/mega036/aria2cloud-colab.git /content/aria2_home
        # !chmod +x /content/aria2_home/.aria2/*.sh
    clear_output()
    Start_Server()
    init_ngrok()
    get_url()
    display(SuccessRun)
except:
    # NOTE(review): bare except swallows every failure (including typos and
    # KeyboardInterrupt) and only shows the red button -- consider
    # `except Exception:` plus logging the traceback.
    clear_output()
    display(UnsuccessfullyRun)

# + id="3nH0lvZ-J90J" colab_type="code" colab={} cellView="form"
# ============================= FORM ============================= #
#@markdown <h3>Test rclone setup</h3>
# ================================================================ #
# Standalone sanity-check cell: configure the same rclone remote and list
# the top-level directories of the Drive folder.
import os
from google.colab import output

Remote_folder = "" #@param {type:"string"}
Remote_token = "" #@param {type:"string"}

os.environ["RCLONE_CONFIG_MYDRIVE_TYPE"] = "drive"
os.environ["RCLONE_CONFIG_MYDRIVE_SCOPE"] = "drive"
os.environ["RCLONE_CONFIG_MYDRIVE_ROOT_FOLDER_ID"] = Remote_folder
os.environ["RCLONE_CONFIG_MYDRIVE_TOKEN"] = Remote_token

if os.path.isfile("/usr/bin/rclone") == False:
    # !wget https://github.com/rclone/rclone/releases/download/v1.49.3/rclone-v1.49.3-linux-amd64.deb && dpkg -i rclone-v1.49.3-linux-amd64.deb && rm rclone-v1.49.3-linux-amd64.deb

# !rclone lsd mydrive:
notebooks/aria2cloud_ngrok.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/yoyoyo-yo/DeepLearningMugenKnock/blob/master/notes_pytorch/GAN/WGANGP_Cifar10_pytorch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="xa9STwNFx962" # # WGAN-GP (Wasserstein GAN Gradient Penalty) # # 元論文 >> Improved Training of Wasserstein GANs https://arxiv.org/abs/1704.00028 (2017) # # WGANのパラメータのクリッピングは最適化を難しくするというのがWGAN-GPの導入背景。 # Critic(DiscriminatorをWGANの論文ではCriticと呼ぶ)にBatch Normalizationが入っているとパラメータのクリッピング問題は弱めることができるけど、深いCriticでは収束しにくいらしい。 # # Criticのパラメータを -c <= param <= cにクリッピングするが、cの値を注意深く選ばないと勾配消失か勾配爆発になってしまう。しかし、WGANでWasserstain距離を用いた画像でLossを作るために1Lipschits制約を実現するために、このクリッピングが必要だった。 # # なので、**WGAN-GPでは勾配を1に近づける正則化項(= GP : Gradient Penalty) を導入することで、クリッピングを行わずに1Lipschits制約を実現する。** # # ただし、BatchNormalizationはバッチで正規化するけど、GPは入力毎に行うので、相性が悪い。CriticではBatchNormalizationの代わりにLayerNormalizationを入れた。(これで結果も良くなった) # # 以上がWGAN-GPの論文での主張 # # DCGAN, LSGAN, WGAN, WGAN-GPを比較するために、GeneratorとDiscriminatorにいろんな条件をつけて LSUNデータセットで試した。その結果がFigure.2。WGAN-GPがずば抜けていい画像を作っている。しかもRes101をGとDに使ってもモード崩壊に陥らないという。 # # ## アルゴリズム # # WGAN-GPのアルゴリズムは、イテレーション毎に以下のDiscriminatorとGeneratorの学習を交互に行っていく。 # - 最適化 : Adam (LearningRate: 0.0001, β1=0, β2=0.9) # - λ = 10 # # アルゴリズム # # ### Discriminatorの学習(以下をcriticの回数だけ繰り返す) # # 1. ノイズzをサンプリングし、 $G_z = G(z)$ を求める # 2. $\epsilon \in [0, 1]$ をランダムに選び、Realデータ$X$ と $G_z$ の重み付き平均 $\hat{x} = \epsilon X + (1 - \epsilon) G_z$ を計算する # 3. Loss $L^i = D(G_z) - D(X) + \lambda (\| \nabla_{\hat{x}} D(\hat{x}) \|_2 - 1)^2$ を計算し、SGD # # ### Generatorの学習 # # 1. 
# Sample noise z, compute the loss L = (1/|Minibatch|) * sum_i -D(G(z)), and apply SGD.
#
# The Generator and Discriminator architectures are shown below; WGAN-GP
# introduces ResBlock structures to build deeper networks.
#
# ## Generator
#
# The ResNet activation function is ReLU
#
# <img src='images/WGANGP_G.png' width=600>
#
# ## Discriminator
#
# The ResNet activation function is LeakyReLU(0.2)
#
# <img src='images/WGANGP_D.png' width=600>
#
# In each network the ResBlock has the following structure
#
# #### ResBlock
#
# <img src='images/WGANGP_resblock.png' width=500>

# + [markdown] id="3u9ehbSkx963"
# # Import and Config

# + id="tDVr-eTxrXhe"
# !pip install -q --upgrade albumentations==0.5.1

# + id="fx5MA7lRm4tz"
import os
import time
from tqdm.notebook import tqdm
import numpy as np
import cv2
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.model_selection import KFold, GroupKFold, StratifiedKFold

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

import albumentations as A
import albumentations.pytorch as Ap

import torchvision
import torchvision.models as models

# + id="ZxI9fnf1nRS5"
IN_HEIGHT, IN_WIDTH = 32, 32
RESOURCE = "GPU"
FOLD = "StratifiedKFold" # KFold, GroupKFold, StratifiedKFold, StratifiedGroupXX
FOLD_N = 2 # fold num
RANDOM_SEED = 42

if RESOURCE == "CPU":
    device = torch.device("cpu")
elif RESOURCE == "GPU":
    device = torch.device("cuda")


# + [markdown] id="UL99hDa-x967"
# # Define Model

# + id="GxdY2y83x967"
class Flatten(nn.Module):
    # Collapse (N, C, H, W) -> (N, C*H*W).
    def forward(self, x):
        return x.view(x.size()[0], -1)


class Reshape(nn.Module):
    # Inverse of Flatten: (N, c*h*w) -> (N, c, h, w).
    def __init__(self, c, h, w):
        super(Reshape, self).__init__()
        self.c = c
        self.h = h
        self.w = w

    def forward(self, x):
        x = x.view(x.size()[0], self.c, self.h, self.w)
        return x


# ResNet block
class ResBlock(nn.Module):
    # Pre-activation residual block.  NOTE(review): `skip_conv` is assigned
    # in both branches, so the hasattr() check in forward() is always true
    # and the shortcut is always convolved.
    def __init__(self, dim_first=None, dim=64, activation_fn=nn.ReLU(), batch_norm=False):
        super(ResBlock, self).__init__()
        if dim_first is None:
            dim_first = dim
        #else:
        if batch_norm:
            self.skip_conv = nn.Sequential(
                nn.BatchNorm2d(dim_first),
                activation_fn,
                nn.Conv2d(dim_first, dim, kernel_size=3, padding=1, stride=1)
            )
        else:
            self.skip_conv = nn.Sequential(
                activation_fn,
                nn.Conv2d(dim_first, dim, kernel_size=3, padding=1, stride=1)
            )
        if batch_norm:
            self.block = nn.Sequential(
                nn.BatchNorm2d(dim_first),
                activation_fn,
                nn.Conv2d(dim_first, dim, kernel_size=3, padding=1, stride=1),
                nn.BatchNorm2d(dim),
                activation_fn,
                nn.Conv2d(dim, dim, kernel_size=3, padding=1, stride=1)
            )
        else:
            self.block = nn.Sequential(
                activation_fn,
                nn.Conv2d(dim_first, dim, kernel_size=3, padding=1, stride=1),
                activation_fn,
                nn.Conv2d(dim, dim, kernel_size=3, padding=1, stride=1)
            )

    def forward(self, x):
        res_x = self.block(x)
        if hasattr(self, 'skip_conv'):
            x = self.skip_conv(x)
        return F.relu(res_x + x)


class Generator(nn.Module):
    # Maps a 100-dim noise vector to an in_channel x in_height x in_width
    # image in [-1, 1] (Tanh output), upsampling 3x by a factor of 2.
    def __init__(self, class_num=10, dim=32, in_height=IN_HEIGHT, in_width=IN_WIDTH, in_channel=3):
        super(Generator, self).__init__()
        self.module = nn.Sequential(
            nn.Linear(100, (in_height // 8) * (in_width // 8) * dim, bias=False),
            Reshape(dim, in_height // 8, in_width // 8),
            nn.BatchNorm2d(dim),
            nn.ReLU(),
            ResBlock(dim=dim, batch_norm=True),
            nn.UpsamplingBilinear2d(scale_factor=2),
            ResBlock(dim=dim, batch_norm=True),
            nn.UpsamplingBilinear2d(scale_factor=2),
            ResBlock(dim=dim, batch_norm=True),
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.Conv2d(dim, in_channel, kernel_size=3, stride=1, padding=1),
            nn.Tanh(),
        )

    def forward(self, x):
        return self.module(x)


class Discriminator(torch.nn.Module):
    # Critic network.  NOTE(review): a WGAN-GP critic normally outputs an
    # unbounded scalar score -- the trailing Sigmoid (and the unused
    # BCELoss in train()) deviate from the paper; confirm intended.
    def __init__(self, class_num=10, dim=32, in_height=IN_HEIGHT, in_width=IN_WIDTH, in_channel=3):
        super(Discriminator, self).__init__()
        self.module = nn.Sequential(
            ResBlock(dim_first=in_channel, dim=dim, activation_fn=nn.LeakyReLU(0.2), batch_norm=False),
            nn.AvgPool2d(2, stride=2),
            ResBlock(dim=dim, activation_fn=nn.LeakyReLU(0.2), batch_norm=False),
            nn.AvgPool2d(2, stride=2),
            ResBlock(dim=dim, activation_fn=nn.LeakyReLU(0.2), batch_norm=False),
            ResBlock(dim=dim, activation_fn=nn.LeakyReLU(0.2), batch_norm=False),
            nn.ReLU(),
            nn.Conv2d(dim, 1, kernel_size=[in_height // 4, in_width // 4], padding=0, stride=1),
            Flatten(),
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.module(x)


# + [markdown] id="hm-tUIRkppbl"
# # Dataset

# + id="Hl6fXlMTpqij"
class Cifar10Dataset(Dataset):
    def __init__(self, xs, ys, transforms=None):
        self.xs = xs
        self.ys = ys
        self.transforms=transforms
        self.data_num = len(xs)

    def __len__(self):
        return self.data_num

    def __getitem__(self, idx):
        # Scale pixels from [0, 255] to [-1, 1] to match the generator's
        # Tanh output range.
        x = self.xs[idx] / 127.5 - 1
        y = self.ys[idx]
        if self.transforms:
            transformed = self.transforms(image=x)
            x = transformed["image"]
        return x, y


# + id="B2l9tCkerR7v"
transforms_train = A.Compose([
    # A.HorizontalFlip(p=0.5),
    # A.VerticalFlip(p=0.5),
    # A.ShiftScaleRotate(p=0.5),
    # A.RandomRotate90(p=0.5),
    # A.Resize(IN_HEIGHT, IN_WIDTH),
    # A.Normalize(max_pixel_value=255.0, p=1.0),
    Ap.ToTensorV2(p=1.0),
])

transforms_val = A.Compose([
    # A.Resize(IN_HEIGHT, IN_WIDTH),
    # A.Normalize(max_pixel_value=255.0, p=1.0),
    Ap.ToTensorV2(p=1.0),
])


# + [markdown] id="DsNa1zJ1pRfC"
# # Train

# + id="pzKwyS264N18"
def show_sample(Xs, show_num=10, name="input"):
    # Display the first `show_num` images of a batch; undoes the [-1, 1]
    # scaling applied by Cifar10Dataset / the generator's Tanh.
    Xs = Xs.detach().cpu().numpy().transpose(0, 2, 3, 1)
    Xs = (Xs * 127.5 + 127.5).astype(np.uint8)
    plt.figure(figsize=(12, 1))
    for i in range(show_num):
        # show input
        x = Xs[i]
        plt.subplot(1, show_num, i + 1)
        plt.imshow(x, cmap="gray")
        plt.title(name)
        plt.axis('off')
    plt.show()


# + id="pKRGTQIMpS5r"
def train():
    # WGAN-GP training loop on CIFAR-10: Critic_time critic updates per
    # generator update, gradient penalty with lambda=10.
    train_models = []
    train_model_paths = []
    EPOCH = 50
    train_losses_d = []
    train_losses_g = []
    #---
    # datasert
    #---
    batch_size = 256
    train_ds = torchvision.datasets.CIFAR10(root="./", train=True, download=True, transform=None)
    train_Xs = train_ds.data.astype(np.float32)
    train_ys = np.array(train_ds.targets)
    dataset_train = Cifar10Dataset(train_Xs, train_ys, transforms=transforms_train)
    dataloader_train = DataLoader(dataset_train, batch_size=batch_size, num_workers=4, shuffle=True, pin_memory=True)
    train_n = len(dataloader_train)
    #---
    # model
    #---
    gen = Generator().to(device)
    dis = Discriminator().to(device)
    # NOTE(review): criterion is defined but never used below.
    criterion = nn.BCELoss()
    # NOTE(review): the WGAN-GP paper uses betas=(0, 0.9); (0.5, 0.9) here.
    opt_d = torch.optim.Adam(dis.parameters(), lr=0.0001, betas=(0.5, 0.9))
    opt_g = torch.optim.Adam(gen.parameters(), lr=0.0001, betas=(0.5, 0.9))
    # opt_d = torch.optim.RMSprop(dis.parameters(), lr=0.0005)
    # opt_g = torch.optim.RMSprop(gen.parameters(), lr=0.0005)
    #---
    # WGAN hyper params
    #---
    # Clip_value is a WGAN leftover; WGAN-GP does not clip weights.
    Clip_value = 0.01
    Critic_time = 5
    #---
    # epoch
    #---
    for epoch in range(EPOCH):
        gen.train()
        dis.train()
        tr_loss_d = 0
        tr_loss_g = 0
        total = 0
        #---
        # train
        #---
        train_time_start = time.time()
        for step, batch in enumerate(dataloader_train):
            opt_g.zero_grad()
            xs = batch[0].to(device)
            ys = batch[1]
            _batch_size = len(xs)
            ones = torch.ones([_batch_size, 1]).to(device)
            zeros = torch.zeros([_batch_size, 1]).to(device)
            #---
            # update discriminator
            #---
            for c_time in range(Critic_time):
                opt_d.zero_grad()
                # real x
                y_real = dis(xs)
                loss_real = torch.mean(y_real)
                # fake x
                zs = np.random.uniform(-1, 1, size=(_batch_size, 100))
                zs = torch.tensor(zs, dtype=torch.float).to(device)
                x_fake = gen(zs)
                y_fake = dis(x_fake)
                loss_fake = torch.mean(y_fake)
                #---
                # Gradient Penalty
                #---
                # sample epsilon from [0, 1]
                # NOTE(review): the paper samples epsilon per sample; here a
                # single scalar is shared by the whole batch.
                epsilon = np.random.random()
                # sample x_hat
                x_hat = (epsilon * xs + (1 - epsilon) * x_fake).requires_grad_(True)
                Dx_hat = dis(x_hat)
                musk = torch.ones_like(Dx_hat)
                gradients = torch.autograd.grad(Dx_hat, x_hat, grad_outputs=musk, retain_graph=True, create_graph=True, allow_unused=True)[0]
                # NOTE(review): reshape(-1, 1) makes norm(dim=1) a
                # per-element |g|, not the per-sample gradient norm the
                # paper penalizes -- confirm reshape(_batch_size, -1) was
                # intended.
                gradients = gradients.reshape(-1, 1)
                gradient_penalty = 10 * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
                #---
                # total loss
                #---
                # NOTE(review): sign convention is inverted relative to the
                # usual E[D(fake)] - E[D(real)] critic loss, matched by the
                # generator minimizing +E[D(fake)] below.
                loss_d = loss_real - loss_fake + gradient_penalty
                loss_d.backward()
                opt_d.step()
                tr_loss_d += loss_d.item() / train_n / Critic_time
            #---
            # update generator
            #---
            opt_d.zero_grad()
            opt_g.zero_grad()
            zs = np.random.uniform(-1, 1, size=(_batch_size, 100))
            zs = torch.tensor(zs, dtype=torch.float).to(device)
            x_fake = gen(zs)
            y_fake = dis(x_fake)
            loss_g = y_fake.mean(0).view(1)
            loss_g.backward()
            opt_g.step()
            tr_loss_g += loss_g.item() / train_n
        train_losses_d.append(tr_loss_d)
        train_losses_g.append(tr_loss_g)
        train_time_end = time.time()
        train_time_total = train_time_end - train_time_start
        print(f"epoch:{epoch + 1}/{EPOCH} [tra]loss-d:{tr_loss_d:.4f} loss-g:{tr_loss_g:.4f} [time]tra:{train_time_total:.2f}sec")
        # NOTE(review): dead branch -- EPOCH is 50, so (epoch+1) % 100
        # never hits; only the final save below runs.
        if (epoch + 1) % 100 == 0:
            savename = f"model_epoch{epoch + 1}_{EPOCH}.pth"
            torch.save(gen.state_dict(), savename)
            print(f"model saved to >> {savename}")
        if (epoch + 1) % 5 == 0:
            show_sample(x_fake, name="output")
    #---
    # save model
    #---
    savename = f"model_epoch{EPOCH}.pth"
    torch.save(gen.state_dict(), savename)
    print(f"model saved to >> {savename}")
    print()
    train_models.append(gen)
    train_model_paths.append(savename)
    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    ax1.grid()
    ax1.plot(train_losses_d, marker=".", markersize=6, color="red", label="train loss d")
    ax1.plot(train_losses_g, marker=".", markersize=6, color="blue", label="train loss g")
    h1, l1 = ax1.get_legend_handles_labels()
    h2, l2 = ax2.get_legend_handles_labels()
    ax1.legend(h1+h2, l1+l2, loc="upper right")
    ax1.set(xlabel="Epoch", ylabel="Loss")
    plt.show()
    return train_models, train_model_paths


# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="kWJlRDdHrxNG" outputId="ec2e2700-d703-48fc-b364-934b971d3ab8"
train_models, train_model_paths = train()

# + [markdown] id="ODVD2zTZDYAZ"
# # Test

# + colab={"base_uri": "https://localhost:8080/", "height": 185} id="0MvosjsLDXvQ" outputId="abbd09b5-bb06-498f-9777-b32de6d452c9"
# test
def test(train_models):
    # Draw two batches of 10 samples from the first trained generator and
    # display them (no gradients needed at inference time).
    for model in train_models:
        model.eval()
    model_num = len(train_models)
    with torch.no_grad():
        model = train_models[0]
        for _ in range(2):
            zs = np.random.uniform(-1, 1, size=(10, 100))
            zs = torch.tensor(zs, dtype=torch.float).to(device)
            xs = model(zs)
            show_sample(xs, name="output")

test(train_models)

# + id="u9Qjg9HZ6DwY"
notes_pytorch/GAN/WGANGP_Cifar10_pytorch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Development notebook for NSSDE (a self-adaptive Differential Evolution
# with a local-search step), benchmarked against pygmo's Ackley problem.
import numpy as np
import pygmo as pg

a =np.array([1, 2, 3])
np.sum(a**2), a**2


def Sphere(array1D):
    # Sphere benchmark: sum of squares, global minimum 0 at the origin.
    return np.sum(array1D**2)


function = pg.problem(pg.ackley( dim=30))


def Ackley(array1D):
    # Wraps the pygmo Ackley problem as a plain fitness callable.
    return function.fitness(array1D)


# +
def gen_rand(n_size=1):
    '''
    This function return a n_size-dimensional random vector.
    '''
    return np.random.random(n_size)


class NSSDE(object):
    # Differential Evolution with self-adapted F/Cr and a per-generation
    # local-search move.
    # NOTE(review): the default fitness `test_fitness` is undefined in this
    # notebook, and `bounds=[-15, 30]` is a mutable default argument -- both
    # only matter if the defaults are actually used.
    def __init__(self, n_gens=10000, n_pop=100, n_dim=30, F=0.8, Cr=0.9, bounds=[-15, 30], scheme='rand/p/bin', p=1, global_max= 200., MaxEF=None, fitness=test_fitness):
        self.n_gens=n_gens
        self.n_pop=n_pop
        self.n_dim=n_dim
        self.F=F
        self.Cr=Cr
        self.bounds=bounds
        self.scheme=scheme
        self.p=p
        self.population=NSSDE.init_population(self, pop_size=self.n_pop, dim=self.n_dim, bounds=self.bounds)
        self.fitness=fitness
        if MaxEF==None:
            # Default evaluation budget scales with dimensionality.
            self.MaxEF= 10000*self.n_dim
        else:
            self.MaxEF=MaxEF
        self.F_evals = 0
        self.global_max = global_max

    def get_F(self):
        return self.F

    def get_Cr(self):
        return self.Cr

    def get_p(self):
        return self.p

    def get_fitness(self):
        return self.fitness

    def get_scheme(self):
        return self.scheme

    def get_n_gens(self):
        return self.n_gens

    def get_bounds(self):
        return self.bounds

    def get_population(self):
        return self.population

    def init_population(self, pop_size, dim, bounds):
        '''
        This function initialize the population to be use in DE

        Arguments:
        pop_size - Number of individuals (there is no default value to this yet.).
        dim - dimension of the search space (default is 1).
        bounds - The inferior and superior limits respectively (default is [-100, 100]).
        '''
        return np.random.uniform(low=bounds[0], high=bounds[1], size=(pop_size, dim))

    def keep_bounds(self, pop, bounds):
        '''
        This function keep the population in the seach space

        Arguments:
        pop - Population;
        bounds - The inferior and superior limits respectively
        '''
        pop[pop<bounds[0]] = bounds[0]; pop[pop>bounds[1]] = bounds[1]
        return pop

    def evolution(self):
        # Main loop: rand/p mutation + binomial crossover + greedy
        # selection, then one local-search move per generation.  Returns a
        # dict with the final population, champion, and a per-evaluation log.
        r_info = {}
        # ============ Evaluate the initial population ============
        pop_fitness = np.zeros(self.population.shape[0])
        for ind in range(self.population.shape[0]):
            pop_fitness[ind] = self.fitness(self.population[ind])
        best_idx = np.argmin(pop_fitness)
        r_info['log'] = []
        r_info['log'].append((self.F_evals, pop_fitness[best_idx], np.mean(pop_fitness), np.std(pop_fitness), np.max(pop_fitness), np.median(pop_fitness),self.F, self.Cr))
        while self.F_evals < self.MaxEF:
            mutant = np.zeros_like(self.population)
            trial_pop = np.copy(self.population)
            trial_fitness = np.zeros(trial_pop.shape[0])
            for ind in range(self.population.shape[0]):
                # ============ Adapt F and Cr ============
                # With probability 0.1, resample F in [0.2, 0.4] and Cr in
                # [0.8, 1.0]; the new values stick only on improvement.
                NF = np.copy(self.F)
                NCr = np.copy(self.Cr)
                if gen_rand() < 0.1:
                    NF = 0.2 +0.2*gen_rand()
                    NCr = 0.8 +0.2*gen_rand()
                # ============ Mutation Step ============
                tmp_pop = np.delete(self.population, ind, axis=0)
                choices = np.random.choice(tmp_pop.shape[0], 1+2*self.p, replace=False)
                diffs = 0
                for idiff in range(1, len(choices), 2):
                    diffs += NF*((tmp_pop[choices[idiff]]-tmp_pop[choices[idiff+1]]))
                mutant[ind] = tmp_pop[choices[0]] + diffs
                # keep the bounds
                mutant = NSSDE.keep_bounds(self, mutant, bounds=self.bounds)
                # ============ Crossover Step ============
                # K guarantees at least one mutant coordinate is inherited.
                K = np.random.choice(trial_pop.shape[1])
                for jnd in range(trial_pop.shape[1]):
                    if jnd == K or gen_rand()<NCr:
                        trial_pop[ind][jnd] = mutant[ind][jnd]
                # keep the bounds
                trial_pop = NSSDE.keep_bounds(self, trial_pop, bounds=self.bounds)
                trial_fitness[ind] = self.fitness(trial_pop[ind])
                self.F_evals += 1
                if self.F_evals > self.MaxEF-1:
                    # Budget exhausted mid-generation: return immediately.
                    r_info['Population'] = self.population
                    r_info['Fitness'] = pop_fitness
                    r_info['Champion'] = pop_fitness[best_idx]
                    r_info['Champion Index'] = best_idx
                    r_info['Function Evals'] = self.F_evals
                    return r_info
                # ============ Selection ============
                if trial_fitness[ind] < pop_fitness[ind]:
                    self.population[ind] = trial_pop[ind]
                    pop_fitness[ind] = trial_fitness[ind]
                    self.F = NF
                    self.Cr = NCr
                    if trial_fitness[ind] < pop_fitness[best_idx]:
                        best_idx = ind
                # Save Log
                r_info['log'].append((self.F_evals, pop_fitness[best_idx], np.mean(pop_fitness), np.std(pop_fitness), np.max(pop_fitness), np.median(pop_fitness),self.F, self.Cr))
            # ========== Local Search =============
            # Move a random individual toward a random convex-ish combination
            # of itself, the current best, and one difference vector.
            a_1 = gen_rand(); a_2 = gen_rand()
            a_3 = 1.0 - a_1 - a_2
            k, r1, r2 = np.random.choice(self.population.shape[0], size=3)
            V = np.zeros_like(self.population[k])
            for jdim in range(self.population.shape[1]):
                V[jdim] = a_1*self.population[k][jdim] + a_2*self.population[best_idx][jdim] + a_3*(self.population[r1][jdim] - self.population[r2][jdim])
            V = NSSDE.keep_bounds(self, V, bounds=self.bounds)
            self.F_evals += 1
            F_V = self.fitness(V)
            if F_V < pop_fitness[k]:
                self.population[k] = V
                pop_fitness[k] = F_V
                if F_V < pop_fitness[best_idx]:
                    best_idx = k
            # Save Log
            r_info['log'].append((self.F_evals, pop_fitness[best_idx], np.mean(pop_fitness), np.std(pop_fitness), np.max(pop_fitness), np.median(pop_fitness),self.F, self.Cr))
            # Check the stop criteria
            # NOTE(review): despite the name, `global_max` is compared
            # against the *minimum* fitness -- it is the known optimum value.
            if np.abs(pop_fitness[best_idx] - self.global_max)<1e-6:
                print('Stop criteria... ')
                r_info['Population'] = self.population
                r_info['Fitness'] = pop_fitness
                r_info['Champion'] = pop_fitness[best_idx]
                r_info['Champion Index'] = best_idx
                r_info['Function Evals'] = self.F_evals
                return r_info
        r_info['Population'] = self.population
        r_info['Fitness'] = pop_fitness
        r_info['Champion'] = pop_fitness[best_idx]
        r_info['Champion Index'] = best_idx
        r_info['Function Evals'] = self.F_evals
        return r_info
# -

max_ef = int(1e5)
DE = NSSDE(bounds=[-32, 32], global_max=0.0, MaxEF=max_ef, fitness=Ackley)
ev = DE.evolution()

ev.keys()

ev['log'][-1]

# NOTE(review): `tm` is undefined and NSSDE has no binomial_crossover
# method -- leftover scratch cell.
DE.binomial_crossover(tm,Cr=0.2, p=2, F=0.5)

# +
# Scratch cell: element-wise minimum via a boolean mask.
arra1 = np.array([0.1, .2, .3])
arra2 = np.array([0.02, .1, .6])
onde_ = np.where(arra2<arra1)
final_array = arra1
print(final_array)
final_array[onde_] = arra2[onde_]
print(final_array)
# -

DE.gen_rand()

DE.get_Cr(), DE.get_bounds()


# +
# Free-function prototypes of the DE operators (earlier drafts of the
# class methods above).
def gen_rand(n_size=1):
    '''
    This function return a n_size-dimensional random vector.
    '''
    return np.random.random(n_size)


def init_population(pop_size, dim=1, bounds=[-100,100]):
    '''
    This function initialize the population to be use in DE

    Arguments:
    pop_size - Number of individuals (there is no default value to this yet.).
    dim - dimension of the search space (default is 1).
    bounds - The inferior and superior limits respectively (default is [-100, 100]).
    '''
    return np.random.uniform(low=bounds[0], high=bounds[1], size=(pop_size, dim))


def keep_bounds(pop, bounds=[-10, 10]):
    '''
    This function keep the population in the seach space

    Arguments:
    pop - Population;
    bounds - The inferior and superior limits respectively
    '''
    pop[pop<bounds[0]] = bounds[0]; pop[pop>bounds[1]] = bounds[1]
    return pop


def rand_p(pop, p=1, F=0.7):
    '''
    This function is the rand/p mutation scheme, this is a generalization of rand/1 mutation scheme from the first DE paper (Storn and Price).

    Arguments:
    pop - Population;
    p - Number of diferences to be used;
    F - The F scale factor for the diferences (default is 0.7);
    '''
    choices = np.random.choice(pop.shape[0], 1+2*p, replace=False)
    diffs = 0
    for idiff in range(1, len(choices), 2):
        diffs += F*((pop[choices[idiff]]-pop[choices[idiff+1]]))
    return pop[choices[0]] + diffs


def binomial_crossover(pop, Cr=0.5, mutation_type=rand_p, **kwargs):
    '''
    This function make the binomial crossover.

    Arguments:
    pop - Population;
    mutation_type - mutation scheme (default is ran_p);
    **kwargs - This is relative to the mutation scheme ex. rand_p needs p and F.
    '''
    # NOTE(review): writes into the *global* `tmp` rather than `pop`, and
    # then returns the untouched `pop` -- this draft relies on notebook
    # globals and is superseded by the in-class crossover.
    K = np.random.choice(pop.shape[1])
    for ind in range(tmp.shape[0]):
        mutant = mutation_type(pop, **kwargs)
        for jnd in range(tmp.shape[1]):
            if jnd == K or gen_rand()<Cr:
                tmp[ind][jnd] = mutant[jnd]
    return pop
# -

tmp = init_population(10, dim=5, bounds=[-5, 5])

tmp

binomial_crossover(tmp,Cr=0.2, mutation_type=rand_p, p=2, F=0.5)

rand_p(pop=tmp, p=1, F=0.2)

K = np.random.choice(tmp.shape[1])

K

print(tmp)
for ind in range(tmp.shape[0]):
    mutant = rand_p(tmp, p=1, F=0.5)
    for jnd in range(tmp.shape[1]):
        if jnd == K or gen_rand()<0.6:
            #print(tmp[ind][jnd])
            tmp[ind][jnd] = mutant[jnd]

tmp

# NOTE(review): the next statement is garbled in the source ("of ..." is
# not valid Python) and `trials` is never defined -- broken scratch cell
# kept verbatim.
of trials[trials==1] = rand_p(tmp, p=2, F=0.5)

trials

tmp[choices[0]]

for odd in range(1, len(choices), 2):
    print(odd)

d = 0
for idiff in range(1, len(choices), 2):
    d += 0.1*(tmp[choices[idiff]]-tmp[choices[idiff+1]])
d

tmp[choices[1],:]-tmp[choices[2],:], 0.1*(tmp[choices[1],:]-tmp[choices[2],:])

tmp[choices[0],:] + 0.1*(tmp[choices[1],:]-tmp[choices[2],:])

# NOTE(review): `pop` is undefined at notebook top level here, and
# `idxs.tolist` is a bound method used as an index -- broken scratch cells
# kept verbatim.
idxs = np.ma.masked_where(pop < -10., pop)
idxs

pop[[idxs.tolist]] = -5.
pop
dev_notebooks/dev_NSSDE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Welcome to the Simple ReproNim Tutorial # In this tutorial we will do some simple, yet powerful things. Once you see what can be accomplished, we can look more closely at how this is done, and how you, too, can do it! # ### Get some data... # First, let's get a publicly available dataset. How about the structural T1-weighted scan from subject 01 of the OpenNeuro dataset DS000114 # ! datalad install ///openfmri/ds000114 # ! cd ds000114 # ! datalad get ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz # Let's view that image, just to be sure we have it... # ! fslview ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz # Cool. Now a question, "How old is this subject?" # If this were just the \*.nii.gz on its own, you really wouldn't know, unless the original source of the data gave you some more information. In this case, this image happens to come from a dataset that is represented using BIDS. So, we should be able to look at the 'participants.tsv' in the top level of the BIDS directory. # ! more ds000114/dataset_description.json # ! ds000114/participants.tsv # Ah, bummer, this dataset doesn't seem to have participants information, making it hard to know much about these subjects :-( # ### Do something to the data # OK, we have an image, lets do something simple, FSL's Brain Extraction Tool (BET). # # We could do it this way: # ! bet ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz brain -R -m -v # Lets have a look: # ! fslview ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz brain_mask.nii.gz # Hmmm, not so good... In many cases, now you would tweak the 'f' and 'g' values until it looked better (and hope you remembered the settings...). 
# # An alternative to the above is to run: datalad run ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz brain_mask.nii.gz # ### Get two BIDS cases (that have age), figure out if the age units are compatible # # This will hopefully be a simple motivator for the importance of semantic markup, even of BIDS files, since we can find an example that is ambiguous regarding age units. # ### Get two ReproBIDS files and show how 'easy' it is to determine the compatibility of the age (or other) metadata. # ### Make a container for FSL # Since OS (and lots of H/W configuration details) matters, it can be a good idea to encapsulate your processing in a 'container'. Above, we ran BET (from the FSL toolkit) natively on our computer. If I wanted you to run version 5.1 instead, how would you do that? NeuroDocker. # #### Run BET, like above, but through the container # #### Run BET, like above, but with 'datalad container run'
SimpleReproNimTutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/khajaowais/ColabPrimerSS2021/blob/main/Classify_text_with_bert.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Cb4espuLKJiA" # ##### Copyright 2020 The TensorFlow Hub Authors. # # + cellView="form" id="jM3hCI1UUzar" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# + [markdown] id="4_NEJlxKKjyI" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/text/tutorials/classify_text_with_bert"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/text/blob/master/docs/tutorials/classify_text_with_bert.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/text/blob/master/docs/tutorials/classify_text_with_bert.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/text/docs/tutorials/classify_text_with_bert.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # <td> # <a href="https://tfhub.dev/google/collections/bert/1"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> # </td> # </table> # + [markdown] id="IZ6SNYq_tVVC" # # Classify text with BERT # # This tutorial contains complete code to fine-tune BERT to perform sentiment analysis on a dataset of plain-text IMDB movie reviews. # In addition to training a model, you will learn how to preprocess text into an appropriate format. # # In this notebook, you will: # # - Load the IMDB dataset # - Load a BERT model from TensorFlow Hub # - Build your own model by combining BERT with a classifier # - Train your own model, fine-tuning BERT as part of that # - Save your model and use it to classify sentences # # If you're new to working with the IMDB dataset, please see [Basic text classification](https://www.tensorflow.org/tutorials/keras/text_classification) for more details. 
# + [markdown] id="2PHBpLPuQdmK" # ## About BERT # # [BERT](https://arxiv.org/abs/1810.04805) and other Transformer encoder architectures have been wildly successful on a variety of tasks in NLP (natural language processing). They compute vector-space representations of natural language that are suitable for use in deep learning models. The BERT family of models uses the Transformer encoder architecture to process each token of input text in the full context of all tokens before and after, hence the name: Bidirectional Encoder Representations from Transformers. # # BERT models are usually pre-trained on a large corpus of text, then fine-tuned for specific tasks. # # + [markdown] id="SCjmX4zTCkRK" # ## Setup # # + [markdown] id="rbeobv2NIhxn" # #TensorFlow Text # "TensorFlow Text provides a collection of text related classes and ops ready to use with TensorFlow 2.0. The library can perform the preprocessing regularly required by text-based models, and includes other features useful for sequence modeling not provided by core TensorFlow" (https://www.tensorflow.org/text/guide/tf_text_intro) # # + id="q-YbjCkzw0yU" colab={"base_uri": "https://localhost:8080/"} outputId="9c76fb53-b69c-4d20-f754-e82c95bbb437" # A dependency of the preprocessing for BERT inputs # !pip install -q -U tensorflow-text # + [markdown] id="5w_XlxN1IsRJ" # We will use the AdamW optimizer from [tensorflow/models](https://github.com/tensorflow/models). "TensorFlow Model Garden is a repository with a number of different implementations of state-of-the-art (SOTA) models and modeling solutions for TensorFlow users." 
# # + id="b-P1ZOA0FkVJ" colab={"base_uri": "https://localhost:8080/"} outputId="eba362d9-246b-49af-b413-e1ff84017a6b" # !pip install -q tf-models-official # + colab={"base_uri": "https://localhost:8080/"} id="Jq1ke17Tcgcx" outputId="64b3100b-958c-418f-8e19-b2fceb9608fe" # %tensorflow_version 2.x import tensorflow as tf device_name = tf.test.gpu_device_name() if device_name != '/device:GPU:0': raise SystemError('GPU device not found') print('Found GPU at: {}'.format(device_name)) # + id="_XgTpm9ZxoN9" import os import shutil import tensorflow_hub as hub # TFHub is a repository of trained machine learning models (https://www.tensorflow.org/hub) import tensorflow_text as text from official.nlp import optimization # to create AdamW optimizer import matplotlib.pyplot as plt tf.get_logger().setLevel('ERROR') # + [markdown] id="q6MugfEgDRpY" # ## Sentiment analysis # # This notebook trains a sentiment analysis model to classify movie reviews as *positive* or *negative*, based on the text of the review. # # You'll use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment/) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). # + [markdown] id="vck1-GsuQ2ys" # #Keras # "Keras is the high-level API of TensorFlow 2: an approachable, highly-productive interface for solving machine learning problems, with a focus on modern deep learning. It provides essential abstractions and building blocks for developing and shipping machine learning solutions with high iteration velocity" (https://keras.io/about/). A useful hands-on colab detailing keras abstractions can be found [here](https://jaredwinick.github.io/what_is_tf_keras/). Additional details can be found [here](https://machinelearningmastery.com/tensorflow-tutorial-deep-learning-with-tf-keras/). # + [markdown] id="Vnvd4mrtPHHV" # ### Download the IMDB dataset # # Let's download and extract the dataset, then explore the directory structure. 
# # + id="pOdqCMoQDRJL" colab={"base_uri": "https://localhost:8080/"} outputId="1ae2ea30-8245-4988-8062-ca829c9ea8b1" url = 'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz' dataset = tf.keras.utils.get_file('aclImdb_v1.tar.gz', url, untar=True, cache_dir='.', cache_subdir='') dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb') print(dataset_dir) # !ls ./aclImdb/train train_dir = os.path.join(dataset_dir, 'train') # remove unused folders to make it easier to load the data remove_dir = os.path.join(train_dir, 'unsup') shutil.rmtree(remove_dir) # + [markdown] id="lN9lWCYfPo7b" # Next, you will use the `text_dataset_from_directory` utility to create a labeled `tf.data.Dataset`. The `tf.data.Dataset` API supports writing descriptive and efficient input pipelines. Dataset usage follows a common pattern: # - Create a source dataset from your input data. # - Apply dataset transformations to preprocess the data. # - Iterate over the dataset and process the elements. # More details can be found [here](https://www.tensorflow.org/api_docs/python/tf/data/Dataset). # # The IMDB dataset has already been divided into train and test, but it lacks a validation set. Let's create a validation set using an 80:20 split of the training data by using the `validation_split` argument below. # # Note: When using the `validation_split` and `subset` arguments, make sure to either specify a random seed, or to pass `shuffle=False`, so that the validation and training splits have no overlap. # + [markdown] id="IC7rtNp-jBJP" # #Prefetching # Prefetching overlaps the preprocessing and model execution of a training step. While the model is executing training step s, the input pipeline is reading the data for step s+1. Doing so reduces the step time to the maximum (as opposed to the sum) of the training and the time it takes to extract the data. # # The tf.data API provides the tf.data.Dataset.prefetch transformation. 
It can be used to decouple the time when data is produced from the time when data is consumed. In particular, the transformation uses a background thread and an internal buffer to prefetch elements from the input dataset ahead of the time they are requested. The number of elements to prefetch should be equal to (or possibly greater than) the number of batches consumed by a single training step. You could either manually tune this value, or set it to tf.data.AUTOTUNE, which will prompt the tf.data runtime to tune the value dynamically at runtime. [Source](https://www.tensorflow.org/guide/data_performance#prefetching) # # + id="6IwI_2bcIeX8" colab={"base_uri": "https://localhost:8080/"} outputId="43b33435-1b0e-4230-be92-1dd12783f5b8" AUTOTUNE = tf.data.AUTOTUNE batch_size = 32 seed = 42 raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory( 'aclImdb/train', batch_size=batch_size, validation_split=0.2, subset='training', seed=seed) class_names = raw_train_ds.class_names print(class_names) train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE) val_ds = tf.keras.preprocessing.text_dataset_from_directory( 'aclImdb/train', batch_size=batch_size, validation_split=0.2, subset='validation', seed=seed) val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE) test_ds = tf.keras.preprocessing.text_dataset_from_directory( 'aclImdb/test', batch_size=batch_size) test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE) # + [markdown] id="HGm10A5HRGXp" # Let's take a look at a few reviews. # + id="JuxDkcvVIoev" colab={"base_uri": "https://localhost:8080/"} outputId="9993d04e-c405-4ce5-91a3-d16e454b58ec" for text_batch, label_batch in train_ds.take(1): for i in range(3): print(f'Review: {text_batch.numpy()[i]}') label = label_batch.numpy()[i] print(f'Label : {label} ({class_names[label]})') # + [markdown] id="dX8FtlpGJRE6" # ## Loading models from TensorFlow Hub # # Here you can choose which BERT model you will load from TensorFlow Hub and fine-tune. 
There are multiple BERT models available. # # - [BERT-Base](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3), [Uncased](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3) and [seven more models](https://tfhub.dev/google/collections/bert/1) with trained weights released by the original BERT authors. # - [Small BERTs](https://tfhub.dev/google/collections/bert/1) have the same general architecture but fewer and/or smaller Transformer blocks, which lets you explore tradeoffs between speed, size and quality. # - [ALBERT](https://tfhub.dev/google/collections/albert/1): four different sizes of "A Lite BERT" that reduces model size (but not computation time) by sharing parameters between layers. # # The model documentation on TensorFlow Hub has more details and references to the # research literature. Follow the links above, or click on the [`tfhub.dev`](http://tfhub.dev) URL # printed after the next cell execution. # # The suggestion is to start with a Small BERT (with fewer parameters) since they are faster to fine-tune. If you like a small model but with higher accuracy, ALBERT might be your next option. If you want even better accuracy, choose # one of the classic BERT sizes or their recent refinements like Electra, Talking Heads, or a BERT Expert. # # Aside from the models available below, there are [multiple versions](https://tfhub.dev/google/collections/transformer_encoders_text/1) of the models that are larger and can yield even better accuracy, but they are too big to be fine-tuned on a single GPU. You will be able to do that on the [Solve GLUE tasks using BERT on a TPU colab](https://www.tensorflow.org/text/tutorials/bert_glue). # # You'll see in the code below that switching the tfhub.dev URL is enough to try any of these models, because all the differences between them are encapsulated in the SavedModels from TF Hub. 
# + id="y8_ctG55-uTX" cellView="form" colab={"base_uri": "https://localhost:8080/"} outputId="262ff58f-7c20-41c8-e10f-623cf155808d" #@title Choose a BERT model to fine-tune bert_model_name = "small_bert/bert_en_uncased_L-4_H-512_A-8" #@param ["bert_en_uncased_L-12_H-768_A-12", "bert_en_cased_L-12_H-768_A-12", "bert_multi_cased_L-12_H-768_A-12", "small_bert/bert_en_uncased_L-2_H-128_A-2", "small_bert/bert_en_uncased_L-2_H-256_A-4", "small_bert/bert_en_uncased_L-2_H-512_A-8", "small_bert/bert_en_uncased_L-2_H-768_A-12", "small_bert/bert_en_uncased_L-4_H-128_A-2", "small_bert/bert_en_uncased_L-4_H-256_A-4", "small_bert/bert_en_uncased_L-4_H-512_A-8", "small_bert/bert_en_uncased_L-4_H-768_A-12", "small_bert/bert_en_uncased_L-6_H-128_A-2", "small_bert/bert_en_uncased_L-6_H-256_A-4", "small_bert/bert_en_uncased_L-6_H-512_A-8", "small_bert/bert_en_uncased_L-6_H-768_A-12", "small_bert/bert_en_uncased_L-8_H-128_A-2", "small_bert/bert_en_uncased_L-8_H-256_A-4", "small_bert/bert_en_uncased_L-8_H-512_A-8", "small_bert/bert_en_uncased_L-8_H-768_A-12", "small_bert/bert_en_uncased_L-10_H-128_A-2", "small_bert/bert_en_uncased_L-10_H-256_A-4", "small_bert/bert_en_uncased_L-10_H-512_A-8", "small_bert/bert_en_uncased_L-10_H-768_A-12", "small_bert/bert_en_uncased_L-12_H-128_A-2", "small_bert/bert_en_uncased_L-12_H-256_A-4", "small_bert/bert_en_uncased_L-12_H-512_A-8", "small_bert/bert_en_uncased_L-12_H-768_A-12", "albert_en_base", "electra_small", "electra_base", "experts_pubmed", "experts_wiki_books", "talking-heads_base"] map_name_to_handle = { 'bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3', 'bert_en_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3', 'bert_multi_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3', 'small_bert/bert_en_uncased_L-2_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1', 
'small_bert/bert_en_uncased_L-2_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/1', 'small_bert/bert_en_uncased_L-2_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-512_A-8/1', 'small_bert/bert_en_uncased_L-2_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-768_A-12/1', 'small_bert/bert_en_uncased_L-4_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-128_A-2/1', 'small_bert/bert_en_uncased_L-4_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-256_A-4/1', 'small_bert/bert_en_uncased_L-4_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1', 'small_bert/bert_en_uncased_L-4_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-768_A-12/1', 'small_bert/bert_en_uncased_L-6_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-128_A-2/1', 'small_bert/bert_en_uncased_L-6_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-256_A-4/1', 'small_bert/bert_en_uncased_L-6_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-512_A-8/1', 'small_bert/bert_en_uncased_L-6_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-768_A-12/1', 'small_bert/bert_en_uncased_L-8_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-128_A-2/1', 'small_bert/bert_en_uncased_L-8_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-256_A-4/1', 'small_bert/bert_en_uncased_L-8_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-512_A-8/1', 'small_bert/bert_en_uncased_L-8_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-768_A-12/1', 'small_bert/bert_en_uncased_L-10_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-128_A-2/1', 'small_bert/bert_en_uncased_L-10_H-256_A-4': 
'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-256_A-4/1', 'small_bert/bert_en_uncased_L-10_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-512_A-8/1', 'small_bert/bert_en_uncased_L-10_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-768_A-12/1', 'small_bert/bert_en_uncased_L-12_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-128_A-2/1', 'small_bert/bert_en_uncased_L-12_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-256_A-4/1', 'small_bert/bert_en_uncased_L-12_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-512_A-8/1', 'small_bert/bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-768_A-12/1', 'albert_en_base': 'https://tfhub.dev/tensorflow/albert_en_base/2', 'electra_small': 'https://tfhub.dev/google/electra_small/2', 'electra_base': 'https://tfhub.dev/google/electra_base/2', 'experts_pubmed': 'https://tfhub.dev/google/experts/bert/pubmed/2', 'experts_wiki_books': 'https://tfhub.dev/google/experts/bert/wiki_books/2', 'talking-heads_base': 'https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_base/1', } map_model_to_preprocess = { 'bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'bert_en_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 
'small_bert/bert_en_uncased_L-4_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-768_A-12': 
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'bert_multi_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_multi_cased_preprocess/3', 'albert_en_base': 'https://tfhub.dev/tensorflow/albert_en_preprocess/3', 'electra_small': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'electra_base': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'experts_pubmed': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'experts_wiki_books': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'talking-heads_base': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', } tfhub_handle_encoder = map_name_to_handle[bert_model_name] tfhub_handle_preprocess = map_model_to_preprocess[bert_model_name] print(f'BERT model selected : {tfhub_handle_encoder}') print(f'Preprocess model auto-selected: {tfhub_handle_preprocess}') # + [markdown] id="7WrcxxTRDdHi" # ## The preprocessing model # # Text inputs need to be transformed to numeric token ids and arranged in several Tensors before being input to BERT. TensorFlow Hub provides a matching preprocessing model for each of the BERT models discussed above, which implements this transformation using TF ops from the TF.text library. It is not necessary to run pure Python code outside your TensorFlow model to preprocess text. # # The preprocessing model must be the one referenced by the documentation of the BERT model, which you can read at the URL printed above. For BERT models from the drop-down above, the preprocessing model is selected automatically. # # Note: You will load the preprocessing model into a [hub.KerasLayer](https://www.tensorflow.org/hub/api_docs/python/hub/KerasLayer) to compose your fine-tuned model. More information on Keras layers can be found [here](https://keras.io/api/layers/). "Layers are the basic building blocks of neural networks in Keras. 
A layer consists of a tensor-in tensor-out computation function (the layer's call method) and some state, held in TensorFlow variables (the layer's weights)." # # [hub.KerasLayer](https://www.tensorflow.org/hub/api_docs/python/hub/KerasLayer) is the preferred API to load a TF2-style [SavedModel](https://www.tensorflow.org/guide/saved_model) from TF Hub into a Keras model. # + id="0SQi-jWd_jzq" bert_preprocess_model = hub.KerasLayer(tfhub_handle_preprocess) # + [markdown] id="x4naBiEE_cZX" # Let's try the preprocessing model on some text and see the output: # + id="r9-zCzJpnuwS" colab={"base_uri": "https://localhost:8080/"} outputId="50ac66ad-1857-47ec-ad50-2472687c4ce0" text_test = ['this is such an amazing movie!'] text_preprocessed = bert_preprocess_model(text_test) print(f'Keys : {list(text_preprocessed.keys())}') print(f'Shape : {text_preprocessed["input_word_ids"].shape}') print(f'Word Ids : {text_preprocessed["input_word_ids"][0, :12]}') print(f'Input Mask : {text_preprocessed["input_mask"][0, :12]}') print(f'Type Ids : {text_preprocessed["input_type_ids"][0, :12]}') # + [markdown] id="EqL7ihkN_862" # As you can see, now you have the 3 outputs from the preprocessing that a BERT model would use (`input_words_id`, `input_mask` and `input_type_ids`). # # Some other important points: # - The input is truncated to 128 tokens. The number of tokens can be customized, and you can see more details on the [Solve GLUE tasks using BERT on a TPU colab](https://www.tensorflow.org/text/tutorials/bert_glue). # - The `input_type_ids` only have one value (0) because this is a single sentence input. For a multiple sentence input, it would have one number for each input. # # Since this text preprocessor is a TensorFlow model, It can be included in your model directly. # + [markdown] id="DKnLPSEmtp9i" # ## Using the BERT model # # Before putting BERT into your own model, let's take a look at its outputs. You will load it from TF Hub and see the returned values. 
# + id="tXxYpK8ixL34" bert_model = hub.KerasLayer(tfhub_handle_encoder) # + id="_OoF9mebuSZc" colab={"base_uri": "https://localhost:8080/"} outputId="f48a972b-3052-421b-c01e-d5db95194e14" bert_results = bert_model(text_preprocessed) print(f'Loaded BERT: {tfhub_handle_encoder}') print(f'Pooled Outputs Shape:{bert_results["pooled_output"].shape}') print(f'Pooled Outputs Values:{bert_results["pooled_output"][0, :12]}') print(f'Sequence Outputs Shape:{bert_results["sequence_output"].shape}') print(f'Sequence Outputs Values:{bert_results["sequence_output"][0, :12]}') # + [markdown] id="sm61jDrezAll" # The BERT models return a map with 3 important keys: `pooled_output`, `sequence_output`, `encoder_outputs`: # # - `pooled_output` represents each input sequence as a whole. The shape is `[batch_size, H]`. You can think of this as an embedding for the entire movie review. # - `sequence_output` represents each input token in the context. The shape is `[batch_size, seq_length, H]`. You can think of this as a contextual embedding for every token in the movie review. # - `encoder_outputs` are the intermediate activations of the `L` Transformer blocks. `outputs["encoder_outputs"][i]` is a Tensor of shape `[batch_size, seq_length, 1024]` with the outputs of the i-th Transformer block, for `0 <= i < L`. The last value of the list is equal to `sequence_output`. # # For the fine-tuning you are going to use the `pooled_output` array. # + [markdown] id="pDNKfAXbDnJH" # ## Define your model # # You will create a very simple fine-tuned model, with the preprocessing model, the selected BERT model, one Dense and a Dropout layer. # # Note: for more information about the base model's input and output you can follow the model's URL for documentation. Here specifically, you don't need to worry about it because the preprocessing model will take care of that for you. 
# # + id="aksj743St9ga" def build_classifier_model(): text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text') preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing') encoder_inputs = preprocessing_layer(text_input) encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder') outputs = encoder(encoder_inputs) net = outputs['pooled_output'] net = tf.keras.layers.Dropout(0.1)(net) net = tf.keras.layers.Dense(1, activation=None, name='classifier')(net) return tf.keras.Model(text_input, net) # + [markdown] id="Zs4yhFraBuGQ" # Let's check that the model runs with the output of the preprocessing model. # + id="mGMF8AZcB2Zy" colab={"base_uri": "https://localhost:8080/"} outputId="a111e0b3-026d-4eb9-de34-5a669bc5bbde" classifier_model = build_classifier_model() bert_raw_result = classifier_model(tf.constant(text_test)) print(tf.sigmoid(bert_raw_result)) # + [markdown] id="ZTUzNV2JE2G3" # The output is meaningless, of course, because the model has not been trained yet. # # Let's take a look at the model's structure. # + id="0EmzyHZXKIpm" colab={"base_uri": "https://localhost:8080/", "height": 466} outputId="8dd0fa29-d6f4-407d-877f-015045c34f25" tf.keras.utils.plot_model(classifier_model) # + [markdown] id="WbUWoZMwc302" # ## Model training # # You now have all the pieces to train a model, including the preprocessing module, BERT encoder, data, and classifier. # + [markdown] id="WpJ3xcwDT56v" # ### Loss function # # Since this is a binary classification problem and the model outputs a probability (a single-unit layer), you'll use `losses.BinaryCrossentropy` loss function. More information on the BinaryCrossentropy loss can be found [here](https://www.tensorflow.org/api_docs/python/tf/keras/losses/BinaryCrossentropy). 
# # + id="OWPOZE-L3AgE" loss = tf.keras.losses.BinaryCrossentropy(from_logits=True) metrics = tf.metrics.BinaryAccuracy() # + [markdown] id="77psrpfzbxtp" # ### Optimizer # # For fine-tuning, let's use the same optimizer that BERT was originally trained with: the "Adaptive Moments" (Adam). This optimizer minimizes the prediction loss and does regularization by weight decay (not using moments), which is also known as [AdamW](https://arxiv.org/abs/1711.05101). # # For the learning rate (`init_lr`), you will use the same schedule as BERT pre-training: linear decay of a notional initial learning rate, prefixed with a linear warm-up phase over the first 10% of training steps (`num_warmup_steps`). In line with the BERT paper, the initial learning rate is smaller for fine-tuning (best of 5e-5, 3e-5, 2e-5). # + id="P9eP2y9dbw32" epochs = 3 steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy() num_train_steps = steps_per_epoch * epochs num_warmup_steps = int(0.1*num_train_steps) init_lr = 3e-5 optimizer = optimization.create_optimizer(init_lr=init_lr, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, optimizer_type='adamw') # + [markdown] id="SqlarlpC_v0g" # ### Loading the BERT model and training # # Using the `classifier_model` you created earlier, you can compile the model with the loss, metric and optimizer. # + id="-7GPDhR98jsD" classifier_model.compile(optimizer=optimizer, loss=loss, metrics=metrics) # + [markdown] id="CpBuV5j2cS_b" # Note: training time will vary depending on the complexity of the BERT model you have selected. # + id="HtfDFAnN_Neu" colab={"base_uri": "https://localhost:8080/"} outputId="c45d45b7-5e0f-4b5c-8014-8025514f689d" print(f'Training model with {tfhub_handle_encoder}') history = classifier_model.fit(x=train_ds, validation_data=val_ds, epochs=epochs) # + [markdown] id="uBthMlTSV8kn" # ### Evaluate the model # # Let's see how the model performs. Two values will be returned. 
Loss (a number which represents the error, lower values are better), and accuracy. # + id="slqB-urBV9sP" colab={"base_uri": "https://localhost:8080/"} outputId="4459a3ae-bc7c-4460-92c3-b26e7702cad4" loss, accuracy = classifier_model.evaluate(test_ds) print(f'Loss: {loss}') print(f'Accuracy: {accuracy}') # + [markdown] id="uttWpgmSfzq9" # ### Plot the accuracy and loss over time # # Based on the `History` object returned by `model.fit()`. You can plot the training and validation loss for comparison, as well as the training and validation accuracy. More information on the History object can be found [here](https://machinelearningmastery.com/display-deep-learning-model-training-history-in-keras/). # + id="fiythcODf0xo" colab={"base_uri": "https://localhost:8080/", "height": 438} outputId="bda5953b-a431-49b1-f711-c1df34cd4203" history_dict = history.history print(history_dict.keys()) acc = history_dict['binary_accuracy'] val_acc = history_dict['val_binary_accuracy'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) fig = plt.figure(figsize=(10, 6)) fig.tight_layout() plt.subplot(2, 1, 1) # "bo" is for "blue dot" plt.plot(epochs, loss, 'r', label='Training loss') # b is for "solid blue line" plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') # plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.subplot(2, 1, 2) plt.plot(epochs, acc, 'r', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend(loc='lower right') # + [markdown] id="WzJZCo-cf-Jf" # In this plot, the red lines represent the training loss and accuracy, and the blue lines are the validation loss and accuracy. # + [markdown] id="Rtn7jewb6dg4" # ## Export for inference # # Now you just save your fine-tuned model for later use. 
# + id="ShcvqJAgVera" colab={"base_uri": "https://localhost:8080/"} outputId="9cb66a6f-d57f-47c8-b9c0-a9ad83d9a79f" dataset_name = 'imdb' saved_model_path = './{}_bert'.format(dataset_name.replace('/', '_')) classifier_model.save(saved_model_path, include_optimizer=False) # + [markdown] id="PbI25bS1vD7s" # Let's reload the model, so you can try it side by side with the model that is still in memory. # + id="gUEWVskZjEF0" reloaded_model = tf.saved_model.load(saved_model_path) # + [markdown] id="oyTappHTvNCz" # Here you can test your model on any sentence you want, just add to the examples variable below. # + id="VBWzH6exlCPS" colab={"base_uri": "https://localhost:8080/"} outputId="3b2a2f64-e8ea-4b80-fb25-28641997c6ca" def print_my_examples(inputs, results): result_for_printing = \ [f'input: {inputs[i]:<30} : score: {results[i][0]:.6f}' for i in range(len(inputs))] print(*result_for_printing, sep='\n') print() examples = [ 'this is such an amazing movie!', # this is the same sentence tried earlier 'The movie was great!', 'The movie was meh.', 'The movie was okish.', 'The movie was terrible...' ] reloaded_results = tf.sigmoid(reloaded_model(tf.constant(examples))) original_results = tf.sigmoid(classifier_model(tf.constant(examples))) print('Results from the saved model:') print_my_examples(examples, reloaded_results) print('Results from the model in memory:') print_my_examples(examples, original_results) # + id="moBT9_FGMgJq"
Classify_text_with_bert.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # COVID-19 Exploratory Data Analysis # ## Context and introductory notes # ### Reasons to give this tutorial and reasons to not # This tutorial's purpose is to introduce people to the [2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE](https://github.com/CSSEGISandData/COVID-19) and how to explore it using some foundational packages in the Scientific Python Data Science stack. # # It is not intended to encourage people to create & publish their own data visualizations. In fact, as [this thoughtful essay](https://medium.com/nightingale/ten-considerations-before-you-create-another-chart-about-covid-19-27d3bd691be8) makes clear, in many cases it is irresponsible to publish amateur visualizations, which at best will dilute those that experts with domain expertise are publishing. We won't be making any predictions or doing any statistical modelling. # # Firstly, why are we looking at the dataset from Johns Hopkins (JHU)? I recently asked twitter, which has wonderful epidemiology and data communities, "Which COVID-19 datasets are best to look at and why?" # # <blockquote class="twitter-tweet"><p lang="en" dir="ltr">data folk: which <a href="https://twitter.com/hashtag/Covid_19?src=hash&amp;ref_src=twsrc%5Etfw">#Covid_19</a> datasets should we be looking at (and directing people towards) and why?</p>&mdash; <NAME> (@hugobowne) <a href="https://twitter.com/hugobowne/status/1247013362988240896?ref_src=twsrc%5Etfw">April 6, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> # # There were many thoughtful responses and I encourage you to look through them. 
[<NAME>](https://scholar.harvard.edu/eleanormurray/home), an epidemiologist and Assistant Professor at the Boston University School of Public Health, responded # # <blockquote class="twitter-tweet"><p lang="en" dir="ltr">What purposes are you directing people towards data for? All the data we have are incomplete, so the goal of the data should inform which data to use</p>&mdash; <NAME> (@EpiEllie) <a href="https://twitter.com/EpiEllie/status/1247321239292706817?ref_src=twsrc%5Etfw">April 7, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> # # to which I replied # # <blockquote class="twitter-tweet"><p lang="en" dir="ltr">Great question. I&#39;m a data science educator interested in directing people more generally to the best resources &amp; ways to think about them so that they have a stronger sense of what&#39;s actually happening in such a confusing time, e.g. my tweet below 1/ <a href="https://t.co/6lGPbDAZL5">https://t.co/6lGPbDAZL5</a></p>&mdash; <NAME> (@hugobowne) <a href="https://twitter.com/hugobowne/status/1247322357863034880?ref_src=twsrc%5Etfw">April 7, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> # # This is the tweet I was referencing # # <blockquote class="twitter-tweet"><p lang="en" dir="ltr">even how to just think about the data generating process -- e.g. number of reported cases a function of number of tests, willingness of govts to reports, in addition to number of actual cases. there&#39;s also censoring, lag, &amp; much more. 
cc <a href="https://twitter.com/ericmjl?ref_src=twsrc%5Etfw">@ericmjl</a> <a href="https://twitter.com/jsbois?ref_src=twsrc%5Etfw">@jsbois</a></p>&mdash; <NAME> (@hugobowne) <a href="https://twitter.com/hugobowne/status/1247321297987620864?ref_src=twsrc%5Etfw">April 7, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> # # Ellie replied # # # <blockquote class="twitter-tweet"><p lang="en" dir="ltr">In that case, I agree with the other responses: the Johns Hopkins data is probably the best general purpose dataset for education.</p>&mdash; <NAME> (@EpiEllie) <a href="https://twitter.com/EpiEllie/status/1247322941039300608?ref_src=twsrc%5Etfw">April 7, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> # # Once again, check out out the thread for further details, but the main reasons cited for using the JHU data are # # - JHU is already a trusted and respected institution, # - They cite many sources, which are themselves reputable, # - The data is updated daily, and # - It is provided in an easily digestible format (.csv in a github repository). # # I also want to flag that, after all the responses that came in, I thought twice about whether to conduct this tutorial. The main reasons are summarized by Ellie's tweets here: # # <blockquote class="twitter-tweet"><p lang="en" dir="ltr">I definitely understand the desire of data-minded people to dig into learning about the covid data, but misunderstandings can add to the chaos &amp; complicate pandemic response. 1/2</p>&mdash; <NAME> (@EpiEllie) <a href="https://twitter.com/EpiEllie/status/1247324458479693824?ref_src=twsrc%5Etfw">April 7, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> # # <blockquote class="twitter-tweet"><p lang="en" dir="ltr">My advice to learners is to use data from a historic epidemic—maybe swine flu. 
Then you also have the chance to see how well your predictions actually match with the epidemic trajectory, and there are more likely many resources to help understand the data. 2/2</p>&mdash; <NAME> (@EpiEllie) <a href="https://twitter.com/EpiEllie/status/1247324778014412811?ref_src=twsrc%5Etfw">April 7, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> # # I love the idea of looking at historical datasets and hope to do this in the future. The reason I've decided to continue with this tutorial is that I feel that, as long as we take our results with **very, very many grains of salt**, it will help us # # - to interrogate the world as it currently is, # - to understand many of the biases in modern data collection, data analysis, and data reporting, # - to develop more of a shared language to discuss it, even as non-experts, and # - to learn about some of the contemporary tooling and data-analytic concepts that help us to work with data. # # That all having been said, please do listen to the domain experts, the epidemiologists. # *** # ### Biases in data collection, data analysis, and data reporting # # What type of biases am I talking about? A key example to keep in mind is when interpreting plots, numbers, and reports of the *known* number of cases of COVID-19, **know** that this is a function of many things that are *not* the total number of cases, such as **the number of available tests**. The limiting case is when there are zero tests, there are zero known cases; then if many tests become available, there'll seem to be a huge spike in number of cases, even if there hasn't been such a spike. # # <NAME>'s thread here gives a concrete example in Washington State: # # <blockquote class="twitter-tweet"><p lang="en" dir="ltr">THREAD:<br><br>Washington state is a good example of the importance of accounting for the number of tests when reporting COVID-19 case counts. 
Remember I mentioned a couple of days ago how their number of cases in WA had begun to stabilize? Well, guess what happened...</p>&mdash; <NAME> (@NateSilver538) <a href="https://twitter.com/NateSilver538/status/1241113755016138755?ref_src=twsrc%5Etfw">March 20, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> # # and he also has an interesting article linked to in this tweet here: # # <blockquote class="twitter-tweet"><p lang="en" dir="ltr">Hey y&#39;all. I have a deep dive today on how the number of *known coronavirus cases* isn&#39;t really a good way to know what&#39;s happening with the disease. Unless you know something about testing, anyway. Hope you&#39;ll check it out.<a href="https://t.co/VK7rCgBNMc">https://t.co/VK7rCgBNMc</a></p>&mdash; <NAME> (@NateSilver538) <a href="https://twitter.com/NateSilver538/status/1246487881297920001?ref_src=twsrc%5Etfw">April 4, 2020</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> # # The number of reported known cases is **also a function of any given government's willingness (or lack thereof) to report their actual findings**. We can think both of these in relation to NNT's conception of [Wittgenstein's ruler](https://twitter.com/DellAnnaLuca/status/1244555177807380480): # # > “Wittgenstein’s ruler: Unless you have confidence in the ruler’s reliability, if you use a ruler to measure a table you may also be using the table to measure the ruler.” # # # # *** # We'll be visualizing number of reported confirmed cases, deaths, and recoveries around the world. Note that there are interesting ways to report these numbers that aren't quite visualization per se. 
Ryan Struyck of CNN, for example, has been leveraging the affordances of tweets and twitter to display such data compellingly:
# I also want to remind people that exploratory data analysis and data visualization is about discovering things that exist and that are happening in the world. In this case, we are plotting data of **people** who are sick and dying **right now** so throughout the tutorial I'd encourage us all to have the patience and respect required in such a time. # Before starting to look at the data, let's look at [the repository containing it](https://github.com/CSSEGISandData/COVID-19) to get a feel for the context. # **Summary:** We've # - discussed reasons to give this tutorial and provided warnings about doing so # - discussed biases in data collection, data analyses, and data reporting, # - had a look at the github repository containing the JHU COVID-19 dataset. # ## Exploratory data analysis and visualization using Python # ### Imports and data # Let's import the necessary packages from the SciPy stack and get [the data](https://github.com/CSSEGISandData/COVID-19). # Import packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # Set style & figures inline sns.set() # %matplotlib inline # Data urls base_url = ___ confirmed_cases_data_url = base_url + 'time_series_covid19_confirmed_global.csv' death_cases_data_url = base_url + 'time_series_covid19_deaths_global.csv' recovery_cases_data_url = base_url+ 'time_series_covid19_recovered_global.csv' # Import datasets as pandas dataframes raw_data_confirmed = ___ raw_data_deaths = ___ raw_data_recovered = ___ # ### Confirmed cases of COVID-19 # We'll first check out the confirmed cases data by looking at the head of the dataframe: # ___ # **Discuss:** What do you see here? # We can also see a lot about the data by using the `.info()` and `.describe()` dataframe methods: ___ ___ # **Discuss:** What do the above tell us? 
# ### Number of confirmed cases by country # Look at the head (or tail) of our dataframe again and notice that each row is the data for a particular *province* or *state* of a given country: ___ # We want the numbers for each country, though. So the way to think about this is, for each country, we want to take all the rows (*regions/provinces*) that correspond to that country and add up the numbers for each. To put this in data-analytic-speak, we want to **group by** the country column and sum up all the values for the other columns. # # This is a common pattern in data analysis that we humans have been using for centuries. Interestingly, it was only formalized in 2011 by <NAME> in his seminal paper [The Split-Apply-Combine Strategy for Data # Analysis](https://www.jstatsoft.org/article/view/v040i01). The pattern we're discussing is now called Split-Apply-Combine and, in the case at hand, we # # - Split the data into new datasets for each country, # - Apply the function of "sum" for each new dataset (that is, we add/sum up the values for each column) to sum over territories/provinces/states for each country, and # - Combine these datasets into a new dataframe. # # The `pandas` API has the `groupby` method, which allows us to do this. # # **Side note:** For more on split-apply-combine and `pandas` check out [my post here](https://www.datacamp.com/community/tutorials/pandas-split-apply-combine-groupby). # Group by region (also drop 'Lat', 'Long' as it doesn't make sense to sum them here) confirmed_country = ___ confirmed_country.head() # So each row of our new dataframe `confirmed_country` is a time series of the number of confirmed cases for each country. Cool! # Now a dataframe has an associated object called an Index, which is essentially a set of unique indentifiers for each row. Let's check out the index of `confirmed_country`: ___ # It's indexed by `Country/Region`. 
That's all good **but** if we index by date **instead**, it will allow us to produce some visualizations almost immediately. This is a nice aspect of the `pandas` API: you can make basic visualizations with it and, if your index consists of DateTimes, it knows that you're plotting time series and plays nicely with them. # To make the index the set of dates, notice that the column names are the dates. To turn column names into the index, we essentially want to make the columns the rows (and the rows the columns). This corresponds to taking the transpose of the dataframe: confirmed_country = ___ confirmed_country.head() # Let's have a look at our index to see whether it actually consists of DateTimes: ___ # Note that `dtype='object'`which means that these are strings, not DateTimes. We can use `pandas` to turn it into a DateTimeIndex: # Set index as DateTimeIndex datetime_index = ___ ___ # Check out index ___ # Now we have a DateTimeIndex and Countries for columns, we can use the dataframe plotting method to visualize the time series of confirmed number of cases by country. As there are so many coutries, we'll plot a subset of them: # ### Plotting confirmed cases by country # Plot time series of several countries of interest poi = ['China', 'US', 'Italy', 'France', 'Spain', 'Australia'] ___ # Let's label our axes and give the figure a title. We'll also thin the line and add points for the data so that the sampling is evident in our plots: # Plot time series of several countries of interest ___ plt.xlabel('Date', fontsize=20); plt.ylabel('Reported Confirmed cases count', fontsize=20); plt.title('Reported Confirmed Cases Time Series', fontsize=20); # Let's do this again but make the y-axis logarithmic: # Plot time series of several countries of interest confirmed_country[poi].plot(figsize=(20,10), linewidth=2, marker='.', fontsize=20, ___) ___ ___ ___ # **Discuss:** Why do we plot with a log y-axis? How do we interpret the log plot? 
# **Key points:** # - If a variable takes on values over several orders of magnitude (e.g. in the 10s, 100s, and 1000s), we use a log axes so that the data is not all crammed into a small region of the visualization. # - If a curve is approximately linear on a log axis, then its approximately exponential growth and the gradient/slope of the line tells us about the exponent. # # # **ESSENTIAL POINT:** A logarithm scale is good for visualization **BUT** remember, in the thoughtful words of [<NAME>](http://bois.caltech.edu/), "on the ground, in the hospitals, we live with the linear scale. The flattening of the US curve, for example is more evident on the log scale, but the growth is still rapid on a linear scale, which is what we feel." # **Summary:** We've # - looked at the JHU data repository and imported the data, # - looked at the dataset containing the number of reported confirmed cases for each region, # - wrangled the data to look at the number of reported confirmed cases by country, # - plotted the number of reported confirmed cases by country (both log and semi-log), # - discussed why log plots are important for visualization and that we need to remember that we, as humans, families, communities, and society, experience COVID-19 linearly. # ### Number of reported deaths # As we did above for `raw_data_confirmed`, let's check out the head and the info of the `raw_data_deaths` dataframe: ___ ___ # It seems to be structured similarly to `raw_data_confirmed`. I have checked it out in detail and can confirm that it is! This is good data design as it means that users like can explore, munge, and visualize it in a fashion analogous to the above. Can you remember what we did? We # # - Split-Apply-Combined it (and dropped 'Lat'/'Long'), # - Transposed it, # - Made the index a DateTimeIndex, and # - Visualized it (linear and semi-log). 
# # Let's now do the first three steps here for `raw_data_deaths` and see how we go: # # # ### Number of reported deaths by country # + # Split-Apply-Combine deaths_country = ___ # Transpose deaths_country = ___ # Set index as DateTimeIndex datetime_index = ___ ___ # Check out head ___ # - # Check out the index ___ # ### Plotting number of reported deaths by country # Let's now visualize the number of reported deaths: # Plot time series of several countries of interest ___ plt.xlabel('Date', fontsize=20); plt.ylabel('Number of Reported Deaths', fontsize=20); plt.title('Reported Deaths Time Series', fontsize=20); # Now on a semi-log plot: # Plot time series of several countries of interest ___ ___ ___ ___ # ### Aligning growth curves to start with day of number of known deaths ≥ 25 # To compare what's happening in different countries, we can align each country's growth curves to all start on the day when the number of known deaths ≥ 25, such as reported in the first figure [here](https://www.nytimes.com/interactive/2020/03/21/upshot/coronavirus-deaths-by-country.html). # To achieve this, first off, let's set set all values less than 25 to NaN so that the associated data points don't get plotted at all when we visualize the data: # + # Loop over columns & set values < 25 to None for col in deaths_country.columns: ___ # Check out tail ___ # - # Now let's plot as above to make sure we see what we think we should see: # Plot time series of several countries of interest poi = ['China', 'US', 'Italy', 'France', 'Australia'] ___ ___ ___ ___ # The countries that have seen less than 25 total deaths will have columns of all NaNs now so let's drop these and then see how many columns we have left: # Drop columns that are all NaNs (i.e. countries that haven't yet reached 25 deaths) ___ deaths_country.info() # As we're going to align the countries from the day they first had at least 25 deaths, we won't need the DateTimeIndex. In fact, we won't need the date at all. 
So we can # - Reset the Index, which will give us an ordinal index (which turns the date into a regular column) and # - Drop the date column (which will be called 'index) after the reset. # sort index, drop date column deaths_country_drop = ___ deaths_country_drop.head() # Now it's time to shift each column so that the first entry is the first NaN value that it contains! To do this, we can use the `shift()` method on each column. How much do we shift each column, though? The magnitude of the shift is given by how many NaNs there are at the start of the column, which we can retrieve using the `first_valid_index()` method on the column **but** we want to shift **up**, which is negative in direction (by convention and perhaps intuition). SO let's do it. # shift for col in deaths_country_drop.columns: ___ # check out head ___ # **Side note:** instead of looping over columns, we could have applied a lambda function to the columns of the dataframe, as follows: # + # shift using lambda function #deaths_country = deaths_country.apply(lambda x: x.shift(-x.first_valid_index())) # - # Now we get to plot our time series, first with linear axes, then semi-log: # Plot time series ___ plt.xlabel('Days', fontsize=20); plt.ylabel('Number of Reported Deaths', fontsize=20); plt.title('Total reported coronavirus deaths for places with at least 25 deaths', fontsize=20); # Plot semi log time series ___ ___ ___ ___ # **Note:** although we have managed to plot what we wanted, the above plots are challenging to retrieve any meaningful information from. There are too many growth curves so that it's very crowded **and** too many colours look the same so it's difficult to tell which country is which from the legend. Below, we'll plot less curves and further down in the notebook we'll use the python package Altair to introduce interactivity into the plot in order to deal with this challenge. 
# Plot semi log time series ax = deaths_country_drop.plot(figsize=(20,10), linewidth=2, marker='.', fontsize=20, logy=True) ax.legend(ncol=3, loc='upper right') plt.xlabel('Days', fontsize=20); plt.ylabel('Deaths Patients count', fontsize=20); plt.title('Total reported coronavirus deaths for places with at least 25 deaths', fontsize=20); # **Summary:** We've # - looked at the dataset containing the number of reported deaths for each region, # - wrangled the data to look at the number of reported deaths by country, # - plotted the number of reported deaths by country (both log and semi-log), # - aligned growth curves to start with day of number of known deaths ≥ 25. # ### Plotting number of recovered people # The third dataset in the Hopkins repository is the number of recovered. We want to do similar data wrangling as in the two cases above so we *could* copy and paste our code again *but*, if you're writing the same code three times, it's likely time to write a function. # Function for grouping countries by region def group_by_country(raw_data): """Returns data for countries indexed by date""" # Group by data = ___ # Transpose data = ___ # Set index as DateTimeIndex datetime_index = ___ data.set_index(datetime_index, inplace=True) return data # Function to align growth curves def align_curves(data, min_val): """Align growth curves to start on the day when the number of known deaths = min_val""" # Loop over columns & set values < min_val to None for col in data.columns: ___ # Drop columns with all NaNs ___ # Reset index, drop date data = # Shift each column to begin with first valid index for col in data.columns: data[col] = ___ return data # Function to plot time series def plot_time_series(df, plot_title, x_label, y_label, logy=False): """Plot time series and make looks a bit nice""" ax = df.plot(figsize=(20,10), linewidth=2, marker='.', fontsize=20, logy=logy) ax.legend(ncol=3, loc='lower right') plt.xlabel(x_label, fontsize=20); plt.ylabel(y_label, 
fontsize=20); plt.title(plot_title, fontsize=20); # For a sanity check, let's see these functions at work on the 'number of deaths' data: ___ ___ ___ # Now let's check use our functions to group, wrangle, and plot the recovered patients data: # group by country and check out tail recovered_country = ___ ___ # align curves and check out head recovered_country_drop = ___ ___ # Plot time series: ___ ___ # **Note:** once again, the above plots are challenging to retrieve any meaningful information from. There are too many growth curves so that it's very crowded **and** too many colours look the same so it's difficult to tell which country is which from the legend. Let's plot less curves and in the next section we'll use the python package Altair to introduce interactivity into such a plot in order to deal with this challenge. plot_time_series(recovered_country_drop[poi], 'Recovered Patients Time Series', 'Days', 'Recovered Patients count', True) # **Summary:** We've # - looked at the dataset containing the number of reported recoveries for each region, # - written function for grouping, wrangling, and plotting the data, # - grouped, wrangled, and plotted the data for the number of reported recoveries. # ## Interactive plots with altair # We're now going to build some interactive data visualizations. I was recently inspired by [this one in the NYTimes](https://www.nytimes.com/interactive/2020/03/21/upshot/coronavirus-deaths-by-country.html), a chart of confirmed number of deaths by country for places with at least 25 deaths, similar to ours above, but with informative hover tools. [This one](https://www.nytimes.com/interactive/2020/us/coronavirus-us-cases.html) is also interesting. # We're going to use a tool called [Altair](https://altair-viz.github.io/). I like Altair for several reasons, including precisely what they state on their website: # # > With Altair, you can spend more time understanding your data and its meaning. 
Altair’s API is simple, friendly and consistent and built on top of the powerful [Vega-Lite](https://vega.github.io/vega-lite/) visualization grammar. This elegant simplicity produces beautiful and effective visualizations with a minimal amount of code. # # Before jumping into Altair, let's reshape our `deaths_country` dataset. Notice that it's currently in **wide data format**, with a column for each country and a row for each "day" (where day 1 is the first day with over 25 confirmed deaths). This worked with the `pandas` plotting API for reasons discussed above. # Look at head ___ # For Altair, we'll want to convert the data into **long data format**. What this will do essentially have a row for each country/day pair so our columns will be 'Day', 'Country', and number of 'Deaths'. We do this using the dataframe method `.melt()` as follows: # create long data for deaths deaths_long = ___ deaths_long.head() # We'll see the power of having long data when using Altair. Such transformations have been performed for a long time, however it wasn't until 2014 that <NAME> formalized the language in his paper [Tidy Data](https://www.researchgate.net/publication/215990669_Tidy_data). Note that Wickham prefers to avoid the terms long and wide because, in his words, 'they are imprecise'. I generally agree but for our purposes here of giving the flavour, they suffice. # # Now having transformed our data, let's import Altair and get a sense of its API. # + import altair as alt # altair plot ___ # - # It is nice to be able to build such an informative and elegant chart in four lines of code (which is also elegant). And, looking at the simplicity of the code we just wrote, we can see why it was great to have long data: a column for each variable allowed us to explicitly and easily tell Altair what we wanted on each axis and what we wanted for the colour. # # As the [Altair documentation (which is great, by the way!) 
states](https://altair-viz.github.io/getting_started/overview.html), # # > The key idea is that you are declaring links between *data columns* and *visual encoding channels*, such as the x-axis, y-axis, color, etc. The rest of the plot details are handled automatically. Building on this declarative plotting idea, a surprising range of simple to sophisticated plots and visualizations can be created using a relatively concise grammar. # We can now customize the code to thicken the line width, to alter the opacity, and to make the chart larger: # altair plot alt.Chart(deaths_long).mark_line(___, ___).encode( x='Day', y='Deaths', color='Country/Region' ).___ # We can also add a log y-axis. To do this, The long-form, we express the types using the long-form `alt.X('Day',...)`, which is, in the words of the [Altair documentation](https://altair-viz.github.io/user_guide/encoding.html) # > useful when doing more fine-tuned adjustments to the encoding, such as binning, axis and scale properties, or more. # # We'll also now add a hover tooltip so that, when we hover our cursor over any point on any of the lines, it will tell us the 'Country', the 'Day', and the number of 'Deaths'. # altair plot alt.Chart(deaths_long).mark_line(strokeWidth=4, opacity=0.7).encode( x=alt.X('Day'), y=alt.Y('Deaths', scale=alt.Scale(type='log')), color='Country/Region', ___ ).properties( width=800, height=650 ) # It's great that we could add that useful hover tooltip with one line of code `tooltip=['Country/Region', 'Day','Deaths']`, particularly as it adds such information rich interaction to the chart. # One useful aspect of the NYTimes chart was that, when you hovered over a particular curve, it made it stand out against the other. We're going to do something similar here: in the resulting chart, when you click on a curve, the others turn grey. 
# # **Note:** When first attempting to build this chart, I discovered [here](https://github.com/altair-viz/altair/issues/1552) that "multiple conditional values in one encoding are not allowed by the Vega-Lite spec," which is what Altair uses. For this reason, we build the chart, then an overlay, and then combine them. # + # Selection tool selection = ___ # Color change when clicked color = ___ # Base altair plot base = alt.Chart(deaths_long).mark_line(strokeWidth=4, opacity=0.7).encode( x=alt.X('Day'), y=alt.Y('Deaths', scale=alt.Scale(type='log')), color='Country/Region', tooltip=['Country/Region', 'Day','Deaths'] ).properties( width=800, height=650 ) # Chart chart = ___ # Overlay overlay = base.encode( color='Country/Region', opacity=alt.value(0.5), tooltip=['Country/Region:N', 'Name:N'] ).transform_filter( selection ) # Sum em up! ___ # - # It's not super easy to line up the legend with the curves on the chart so let's put the labels on the chart itself. Thanks to [<NAME>](http://vanderplas.com/) for this suggestion, and for the code. 
# + # drop NaNs deaths_long = deaths_long.dropna() # Selection tool selection = alt.selection_single(fields=['Country/Region']) # Color change when clicked color = alt.condition(selection, alt.Color('Country/Region:N'), alt.value('lightgray')) # Base altair plot base = alt.Chart(deaths_long).mark_line(strokeWidth=4, opacity=0.7).encode( x=alt.X('Day'), y=alt.Y('Deaths', scale=alt.Scale(type='log')), color=alt.Color('Country/Region', legend=None), ).properties( width=800, height=650 ) # Chart chart = base.encode( color=alt.condition(selection, 'Country/Region:N', alt.value('lightgray')) ).add_selection( selection ) # Overlay overlay = base.encode( color='Country/Region', opacity=alt.value(0.5), tooltip=['Country/Region:N', 'Name:N'] ).transform_filter( selection ) # Text labels text = base.mark_text( align='left', dx=5, size=10 ).encode( x=alt.X('Day', aggregate='max', axis=alt.Axis(title='Day')), y=alt.Y('Deaths', aggregate={'argmax': 'Day'}, axis=alt.Axis(title='Reported Deaths')), text='Country/Region', ).transform_filter( selection ) # Sum em up! chart + overlay + text # - # **Summary:** We've # - melted the data into long format, # - used Altair to make interactive plots of increasing richness, # - admired the elegance & simplicity of the Altair API and the visualizations produced. # That's all for the time being. I'd be interested to see how you all can make these charts more information rich and comprehensible. I encourage you to raise ideas in issues on the issue tracker in [this github repository](https://github.com/hugobowne/COVID-19-EDA-tutorial) and then to make pull requests. A couple of ideas are # - Adding lines to the above chart that show curves for deaths doubling each X days, as in the [first chart here](https://www.nytimes.com/interactive/2020/03/21/upshot/coronavirus-deaths-by-country.html), # - Figuring out a way to make the chart less crowded with names by perhaps only showing 10 of them.
notebooks/1-COVID-19-EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ale1167A/daa_2021_1/blob/master/Tarea_5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="uMRvwe-CIpgG" outputId="42b85e95-183f-452c-ab06-c40bef9337f2" colab={"base_uri": "https://localhost:8080/", "height": 384}
from time import time


def ejemplo1( n ):
    """Constant-time arithmetic: O(1).

    Algebraically, total = (n+1) + n*n - (n+1)*n = 1 for every n,
    so the printed total is always 1; only the wall-clock time is of interest.
    """
    start_time = time()
    c = n + 1
    d = c * n
    e = n * n
    total = c + e - d
    print(f"total={ total }")
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)


# run the experiment for n = 100, 200, ..., 1000
for entrada in range(100,1100,100):
    ejemplo1( entrada )

# + id="k9GncBhHIw0s" outputId="f3a9c446-c3c1-4668-c9ea-11d69aba315c" colab={"base_uri": "https://localhost:8080/", "height": 201}
from time import time


def ejemplo2( n ):
    """Quadratic loop nest: O(n^2). Returns the iteration count n*n."""
    start_time = time()
    contador = 0
    for i in range( n ) :
        for j in range( n ) :
            contador += 1
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return contador


for entrada in range(100,1100,100):
    ejemplo2( entrada )

# + id="gjtNUbvGIyh7" outputId="3060583a-0e88-4b24-f8bf-3803615dc7e2" colab={"base_uri": "https://localhost:8080/", "height": 201}
from time import time


def ejemplo3( n ):  # n=4
    """Fixed 100-iteration loop, independent of n: O(1).

    y = x - n = 2*n - n = n, so the function simply returns n.
    """
    start_time = time()
    x = n * 2  # x = 8
    y = 0      # y = 0
    for m in range( 100 ):  # 3
        y = x - n  # y = 4
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return y


for entrada in range(100,1100,100):
    ejemplo3( entrada )

# + id="wagWQWo0I2Lb" outputId="8a81a565-1be7-4748-d07b-d4225f947396" colab={"base_uri": "https://localhost:8080/", "height": 201}
from time import time


def ejemplo4( n ):
    """Three constant-time arithmetic statements: O(1). Returns z = x + y."""
    start_time = time()
    x = 3 * 3.1416 + n
    y = x + 3 * 3 - n
    z = x + y
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return z


for entrada in range(100,1100,100):
    ejemplo4( entrada )

# + id="p7Awe-b_I0HZ" outputId="55b332af-3498-4526-8374-0c2c3bbbf458" colab={"base_uri": "https://localhost:8080/", "height": 201}
from time import time


def ejemplo5( x ):
    """Single linear loop: O(x). Returns 10 + sum(0..x-1) = 10 + x*(x-1)/2."""
    start_time = time()
    n = 10
    for j in range( 0 , x , 1 ):
        n = j + n
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return n


for entrada in range(100,1100,100):
    ejemplo5( entrada )

# + id="LFRtz6QhI38T" outputId="26a5d2f9-5674-4cb3-a727-16168ea1316b" colab={"base_uri": "https://localhost:8080/", "height": 201}
from time import time


def ejemplo6( n ):
    """Cubic: O(n^3) in both time and memory.

    Builds an n x n x n list of ones and sums every entry, so it returns n**3.
    Note: at n=1000 this allocates ~10^9 Python ints — very slow and memory hungry.
    """
    start_time = time()
    data=[[[1 for x in range(n)] for x in range(n)] for x in range(n)]
    suma = 0
    for d in range(n):
        for r in range(n):
            for c in range(n):
                suma += data[d][r][c]
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return suma


for entrada in range(100,1100,100):
    ejemplo6( entrada )
Tarea_5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Oil and Gas Sector
#
# ---
#
# A look at oil and natural gas, according to weekly supply reports from the Energy Information Agency.

# +
import pandas as pd
import altair as alt
import numpy as np
import re
from time import sleep

from joblib import Memory

# Scrape the EIA weekly-report archive index for report dates.
url = 'https://www.eia.gov/petroleum/supply/weekly/archive/'
# res = !curl -L -s $url
# NOTE(review): `res` is produced by the IPython `!curl` magic above, which is
# currently commented out — this cell raises NameError unless it is re-enabled.
html = "".join(res)

# https://www.eia.gov/petroleum/supply/weekly/archive/2020/2020_02_20/wpsr_2020_02_20.php
links = re.findall('href="([^"]+\d.php)"', html)
# archive URLs embed the date as .../YYYY_MM_DD/... — extract and hyphenate it
dates = [l.split("/")[-2].replace("_", "-") for l in links]

# +
# %%time
baseUrl = "https://www.eia.gov/petroleum/supply/weekly/archive/{y}/{y}_{m:02d}_{d:02d}/csv/table1.csv"
# joblib disk cache so re-runs do not re-download every weekly CSV
memory = Memory('data/', verbose=0)

@memory.cache
def fetchEiaReport(year="2019", month="7", day="24"):
    """Fetch one weekly EIA Table 1 CSV and return its first 19 rows,
    indexed by STUB_1 and transposed to a single-row frame (or None on failure)."""
    #print(baseUrl.format(y=year, m=int(month), d=int(day)))
    try:
        df = pd.read_csv(baseUrl.format(y=year, m=int(month), d=int(day)), usecols=[c for c in range(8)])
    except UnicodeDecodeError:
        # some older reports are Latin-1 encoded
        df = pd.read_csv(baseUrl.format(y=year, m=int(month), d=int(day)), usecols=[c for c in range(8)], encoding="ISO-8859-1")
    except ValueError:
        print("Failed")
        return None
    df = df.iloc[:19, :2].set_index('STUB_1')
    return df.T

#fetchReport(year="2020", month="2", day="20").head()
dfs = []
for d in dates:
    dfs.append(fetchEiaReport(*(d.split("-"))))
    sleep(0.2)  # be polite to the EIA server
df_oil = pd.concat(dfs)
df_oil['dt'] = df_oil.index.map(lambda v: pd.to_datetime(v, format='%m/%d/%y'))
df_oil_ts = df_oil.set_index('dt') #.agg('mean').resample('1W').nearest()
# values arrive as strings with thousands separators — strip commas, make numeric
df_oil_ts = df_oil_ts.applymap(lambda v: str(v).replace(",", "")).applymap(pd.to_numeric)
df_oil_ts = df_oil_ts.groupby('dt').agg('mean').resample('1W').nearest()
# year-over-year percent change (52 weekly periods)
df_oil_yoy = df_oil_ts.pct_change(52).apply(lambda v: v*100)

# +
col = 'Crude Oil'

def chartFor(df, metric='Crude Oil', label='stock'):
    """Bar chart of the year-over-year % change for one metric column of `df`
    (indexed by 'dt'); negative bars dark red, positive royal blue."""
    return alt.Chart(df.reset_index()[['dt', metric]]).mark_bar(width=1).encode(
        alt.X('dt:T', axis=alt.Axis(title='')),
        alt.Y(f'{metric}:Q', axis=alt.Axis(title='Year-over-year Change [%]')),
        color=alt.condition(f"datum['{metric}'] < 0",
            alt.value('darkred'),
            alt.value('royalblue')
        ),
        tooltip=[alt.Tooltip('dt:T', title=''), alt.Tooltip(f'{metric}:Q', title='YoY % Change', format=',.02f')]
    ).properties(
        title=f'Year over Year change in {metric} {label}',
        width=750,
        height=400
    )

chartFor(df_oil_yoy, col)

# +
col = 'Total Motor Gasoline'
chartFor(df_oil_yoy, col)

# +
col = 'Fuel Ethanol'
chartFor(df_oil_yoy, col)

# +
def fetchPriceData(url):
    """Scrape a macrotrends.net chart page for its embedded `originalData`
    JSON and return a DataFrame of dates and closing prices.

    Uses the IPython `!` shell magic, so this only runs inside IPython/Jupyter.
    Columns are ['Date', 'Close-Adj', 'Close-Nominal'] when an
    inflation-adjusted series ("close1") is present, else ['Date', 'Close-Nominal'].
    """
    commandString = "curl -s '{0}' | grep 'originalData ='".format(url)
    res = !$commandString
    dataString = res[0].split('= ')[-1]
    dates = [pd.to_datetime(d) for d in re.findall('"date":"(\d+-\d+-\d+)"', dataString)]
    closes = [float(v) for v in re.findall('"close":"(\d+.\d+)"', dataString)]
    closes1 = [float(v) for v in re.findall('"close1":"(\d+.\d+)"', dataString)]
    recs = zip(dates, closes, closes1) if closes1 else zip(dates, closes)
    cols = ['Date', 'Close-Adj', 'Close-Nominal'] if closes1 else ['Date', 'Close-Nominal']
    return pd.DataFrame.from_records(recs, columns=cols)

# NOTE(review): this rebinds `df_oil` — it no longer holds the EIA inventory
# frame built above (that data lives on in df_oil_ts / df_oil_yoy).
df_oil = fetchPriceData('https://www.macrotrends.net/assets/php/chart_iframe_comp.php?id=1369&url=crude-oil-price-history-chart:1656')
# -

# ## How does the crude oil price vary with inventories?

# +
# overlay the last ~86 weeks of crude prices on the YoY inventory bars
oil_price = alt.Chart(df_oil[-86:]).mark_line(color='black', strokeDash=[4, 2]).encode(
    alt.X('Date:T'),
    alt.Y('Close-Nominal:Q', axis=alt.Axis(title='Oil Price (black) [USD/barrel]'))
).properties(
    width=750,
    height=400
)

c = (chartFor(df_oil_yoy, 'Crude Oil') + oil_price).resolve_scale(y='independent').properties(
    background='white'
)
c.save('oil-gas-inventories.png')
c.display()
# -

# # Natural Gas

# +
url = 'https://www.eia.gov/dnav/ng/xls/NG_STOR_WKLY_S1_W.xls'
df_ng = pd.read_excel(url, sheet_name='Data 1', skiprows=2)
df_ng.columns = ['Date', 'Lower48', 'East', 'Midwest', 'Mountain', 'Pacific', 'Central', 'SaltSouthCental', 'NonsaltSouthCental']
#df_ng.head()
df_ng['dt'] = df_ng.Date.map(pd.to_datetime)
#df_ng.head()
# year-over-year % change for every storage region
df_ng_yoy = df_ng.set_index('dt').iloc[:, 1:].pct_change(52).apply(lambda v: v * 100)
#df_ng_yoy.tail()
# -

chartFor(df_ng_yoy, 'Lower48', label='Natural Gas storage')

# +
df_natgas = fetchPriceData('https://www.macrotrends.net/assets/php/chart_iframe_comp.php?id=2478&url=natural-gas-prices-historical-chart')
#df_natgas.head()
# -

# ## How does the natural gas price vary with inventories?

# +
ng_price = alt.Chart(df_natgas[-110:]).mark_line(color='black', strokeDash=[4, 2]).encode(
    alt.X('Date:T'),
    alt.Y('Close-Nominal:Q', axis=alt.Axis(title='Natural Gas Price (black) [USD/MMBtu]'))
).properties(
    width=750,
    height=400
)

(chartFor(df_ng_yoy, 'Lower48', label='Natural Gas storage') + ng_price).resolve_scale(y='independent').properties(
    background='white'
)
analysis/oil-gas-inventories.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Layout Solution
# The following cell contains the layout solution for the two-stage amplifier. LVS should pass when you evaluate it.

# +
from bag.layout.routing import TrackID
from bag.layout.template import TemplateBase

from xbase_demo.demo_layout.core import AmpCS, AmpSFSoln


class AmpChain(TemplateBase):
    """Layout generator for the two-stage amplifier: a common-source stage
    (AmpCS) followed by a source-follower stage (AmpSFSoln), placed side by
    side and routed together."""

    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        super(AmpChain, self).__init__(temp_db, lib_name, params, used_names, **kwargs)
        self._sch_params = None  # filled in by draw_layout()

    @property
    def sch_params(self):
        # schematic parameters computed from the subcell masters; None until
        # draw_layout() has run
        return self._sch_params

    @classmethod
    def get_params_info(cls):
        """Return the parameter-name -> description dictionary for this template."""
        return dict(
            cs_params='common source amplifier parameters.',
            sf_params='source follower parameters.',
            show_pins='True to draw pin geometries.',
        )

    def draw_layout(self):
        """Draw the two-stage amplifier layout: instantiate the two subcells,
        connect VSS/VDD/vmid, size the template, and export pins.
        """
        cs_params = self.params['cs_params'].copy()
        sf_params = self.params['sf_params'].copy()
        show_pins = self.params['show_pins']

        # suppress subcell pins; only this top level draws pin geometry
        cs_params['show_pins'] = False
        sf_params['show_pins'] = False

        # create layout masters for subcells we will add later
        cs_master = self.new_template(params=cs_params, temp_cls=AmpCS)
        sf_master = self.new_template(params=sf_params, temp_cls=AmpSFSoln)

        # add subcell instances
        cs_inst = self.add_instance(cs_master, 'XCS')
        # add source follower to the right of common source
        x0 = cs_inst.bound_box.right_unit
        sf_inst = self.add_instance(sf_master, 'XSF', loc=(x0, 0), unit_mode=True)

        # get VSS wires from AmpCS/AmpSF
        cs_vss_warr = cs_inst.get_all_port_pins('VSS')[0]
        sf_vss_warrs = sf_inst.get_all_port_pins('VSS')
        # only connect bottom VSS wire of source follower
        if sf_vss_warrs[0].track_id.base_index < sf_vss_warrs[1].track_id.base_index:
            sf_vss_warr = sf_vss_warrs[0]
        else:
            sf_vss_warr = sf_vss_warrs[1]

        # connect VSS of the two blocks together
        vss = self.connect_wires([cs_vss_warr, sf_vss_warr])[0]

        # get layer IDs from VSS wire
        hm_layer = vss.layer_id
        vm_layer = hm_layer + 1
        top_layer = vm_layer + 1

        # calculate template size from the union of the two subcell bounding boxes
        tot_box = cs_inst.bound_box.merge(sf_inst.bound_box)
        self.set_size_from_bound_box(top_layer, tot_box, round_up=True)

        # get subcell ports as WireArrays so we can connect them
        vmid0 = cs_inst.get_all_port_pins('vout')[0]
        vmid1 = sf_inst.get_all_port_pins('vin')[0]
        vdd0 = cs_inst.get_all_port_pins('VDD')[0]
        vdd1 = sf_inst.get_all_port_pins('VDD')[0]

        # get vertical VDD TrackIDs nearest each subcell's VDD wire center
        vdd0_tid = TrackID(vm_layer, self.grid.coord_to_nearest_track(vm_layer, vdd0.middle))
        vdd1_tid = TrackID(vm_layer, self.grid.coord_to_nearest_track(vm_layer, vdd1.middle))

        # connect VDD of each block to vertical M5
        vdd0 = self.connect_to_tracks(vdd0, vdd0_tid)
        vdd1 = self.connect_to_tracks(vdd1, vdd1_tid)

        # connect M5 VDD to top M6 horizontal track (top-most track index)
        vdd_tidx = self.grid.get_num_tracks(self.size, top_layer) - 1
        vdd_tid = TrackID(top_layer, vdd_tidx)
        vdd = self.connect_to_tracks([vdd0, vdd1], vdd_tid)

        # connect vmid using vertical track in the middle of the two templates
        mid_tid = TrackID(vm_layer, self.grid.coord_to_nearest_track(vm_layer, x0, unit_mode=True))
        vmid = self.connect_to_tracks([vmid0, vmid1], mid_tid)

        # add pins on wires
        self.add_pin('vmid', vmid, show=show_pins)
        self.add_pin('VDD', vdd, show=show_pins)
        self.add_pin('VSS', vss, show=show_pins)

        # re-export pins on subcells.
        self.reexport(cs_inst.get_port('vin'), show=show_pins)
        self.reexport(cs_inst.get_port('vbias'), net_name='vb1', show=show_pins)
        self.reexport(sf_inst.get_port('vout'), show=show_pins)
        self.reexport(sf_inst.get_port('vbias'), net_name='vb2', show=show_pins)

        # compute schematic parameters.
        self._sch_params = dict(
            cs_params=cs_master.sch_params,
            sf_params=sf_master.sch_params,
        )


import os

# import bag package
import bag
from bag.io import read_yaml

# import BAG demo Python modules
import xbase_demo.core as demo_core

# load circuit specifications from file
spec_fname = os.path.join(os.environ['BAG_WORK_DIR'], 'specs_demo/demo.yaml')
top_specs = read_yaml(spec_fname)

# obtain BagProject instance; reuse one left over from an earlier cell if present
local_dict = locals()
if 'bprj' in local_dict:
    print('using existing BagProject')
    bprj = local_dict['bprj']
else:
    print('creating BagProject')
    bprj = bag.BagProject()

demo_core.run_flow(bprj, top_specs, 'amp_chain_soln', AmpChain, run_lvs=True, lvs_only=True)
# -

# ## AmpChain Schematic Generator Solution
# The AmpChain schematic generation solution is shown below, evaluate it to run through the flow. Note that it uses the `amp_chain_soln` schematic template instead of the `amp_chain` schematic template you are supposed to fill out. Change `amp_chain_soln` to `amp_chain` in the `yaml_file` class variable if you wish to debug your schematic template.

# +
import os

from bag.design import Module


# noinspection PyPep8Naming
class demo_templates__amp_chain(Module):
    """Module for library demo_templates cell amp_chain.

    Fill in high level description here.
    """

    # hard coded netlist file path to get jupyter notebook working.
    yaml_file = os.path.join(os.environ['BAG_WORK_DIR'], 'BAG_XBase_demo', 'BagModules', 'demo_templates', 'netlist_info', 'amp_chain_soln.yaml')

    def __init__(self, bag_config, parent=None, prj=None, **kwargs):
        Module.__init__(self, bag_config, self.yaml_file, parent=parent, prj=prj, **kwargs)

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        """Returns a dictionary from parameter names to descriptions.

        Returns
        -------
        param_info : Optional[Dict[str, str]]
            dictionary from parameter names to descriptions.
        """
        return dict(
            cs_params='common-source amplifier parameters dictionary.',
            sf_params='source-follwer amplifier parameters dictionary.',
        )

    def design(self, cs_params=None, sf_params=None):
        # delegate to the two subcell schematic generators
        self.instances['XCS'].design(**cs_params)
        self.instances['XSF'].design(**sf_params)


import os

# import bag package
import bag
from bag.io import read_yaml

# import BAG demo Python modules
import xbase_demo.core as demo_core
from xbase_demo.demo_layout.core import AmpChainSoln

# load circuit specifications from file
spec_fname = os.path.join(os.environ['BAG_WORK_DIR'], 'specs_demo/demo.yaml')
top_specs = read_yaml(spec_fname)

# obtain BagProject instance; reuse one left over from an earlier cell if present
local_dict = locals()
if 'bprj' in local_dict:
    print('using existing BagProject')
    bprj = local_dict['bprj']
else:
    print('creating BagProject')
    bprj = bag.BagProject()

demo_core.run_flow(bprj, top_specs, 'amp_chain', AmpChainSoln, sch_cls=demo_templates__amp_chain, run_lvs=True)
workspace_setup/tutorial_files/solutions/5_hierarchical_generators.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + active=""
# Problem (LeetCode 552, Student Attendance Record II):
# Given a positive integer n, count the attendance records of length n that
# are eligible for an award, modulo 10^9 + 7.
# A record is a string over {'A' (absent), 'L' (late), 'P' (present)} and is
# rewardable iff it contains at most one 'A' and no run of 3+ consecutive 'L's.
#
# Example:
#   Input:  n = 2
#   Output: 8
#   All 9 two-letter strings except "AA" are rewardable:
#   "PP", "AP", "PA", "LP", "PL", "AL", "LA", "LL"
#
# Note: n will not exceed 100,000.
# -

class Solution:
    def checkRecord(self, n: int) -> int:
        """Count rewardable attendance records of length n, mod 1e9+7.

        Dynamic programming over six states: state[a][l] = number of valid
        records built so far containing `a` absences (a in {0, 1}) and
        currently ending in `l` consecutive 'L's (l in {0, 1, 2}).
        O(n) time, O(1) space.
        """
        if n == 0:
            return 1
        MOD = 10 ** 9 + 7
        # start: the empty record — no absence, no trailing 'L'
        state = [[1, 0, 0], [0, 0, 0]]
        for _ in range(n):
            nxt = [[0, 0, 0], [0, 0, 0]]
            for a in range(2):
                total = sum(state[a]) % MOD
                # append 'P': keeps absence count, resets the trailing-L run
                nxt[a][0] = (nxt[a][0] + total) % MOD
                # append 'L': extends the trailing-L run by one (capped at 2)
                nxt[a][1] = (nxt[a][1] + state[a][0]) % MOD
                nxt[a][2] = (nxt[a][2] + state[a][1]) % MOD
                # append 'A': allowed only when there is no absence yet
                if a == 0:
                    nxt[1][0] = (nxt[1][0] + total) % MOD
            state = nxt
        return (sum(state[0]) + sum(state[1])) % MOD


solver = Solution()
solver.checkRecord(2)

# + active=""
# The six DP states correspond to the original formulation:
# 1. dp00 — no 'A', not ending in 'L':    dp00[i] = dp00[i-1] + dp01[i-1] + dp02[i-1]  (append 'P')
# 2. dp01 — no 'A', ending in one 'L':    dp01[i] = dp00[i-1]                           (append 'L')
# 3. dp02 — no 'A', ending in two 'L's:   dp02[i] = dp01[i-1]                           (append 'L')
# 4. dp10 — one 'A', not ending in 'L':   dp10[i] = sum of all six previous states
#            (append 'A' to a no-absence record, or 'P' to a one-absence record)
# 5. dp11 — one 'A', ending in one 'L':   dp11[i] = dp10[i-1]
# 6. dp12 — one 'A', ending in two 'L's:  dp12[i] = dp11[i-1]
Dynamic Programming/1010/552. Student Attendance Record II.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D5_DimensionalityReduction/student/W1D5_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="a1pq-AcBBJ5L" # # # Neuromatch Academy: Week 1, Day 5, Tutorial 3 # # Dimensionality Reduction and reconstruction # + [markdown] colab_type="text" id="_coTqnWnBo7V" # --- # # In this notebook we'll learn to apply PCA for dimensionality reduction, using a classic dataset that is often used to benchmark machine learning algorithms: the MNIST dataset of handwritten digits. We'll also learn how to use PCA for reconstruction and denoising. # # Steps: # 1. Perform PCA on MNIST dataset. # 2. Calculate the variance explained. # 3. Reconstruct data with different numbers of PCs. # 4. Examine denoising using PCA. # # To learn more about MNIST: # * https://en.wikipedia.org/wiki/MNIST_database # # --- # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="o3An8t_BXkpj" outputId="f7bdf062-14af-46b3-98db-10a95e1656c2" #@title Video: Logistic regression from IPython.display import YouTubeVideo video = YouTubeVideo(id="ew0-P7-6Nho", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="Lvd-FYmlXyiH" # # Setup # Run these cells to get the tutorial started. 
# + colab={} colab_type="code" id="ExhYAoZHv-8_"
#import libraries
import time                        # import time
import numpy as np                 # import numpy
import scipy as sp                 # import scipy
import math                        # import basic math
import random                      # import basic random number generator functions
import matplotlib.pyplot as plt    # import matplotlib
from IPython import display

# + cellView="form" colab={} colab_type="code" id="4GcQOmtlBb8V"
# @title Figure Settings
fig_w, fig_h = (10, 4)
plt.rcParams.update({'figure.figsize': (fig_w, fig_h)})
plt.style.use('ggplot')
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

# + cellView="form" colab={} colab_type="code" id="LFts1a8oflAC"
# @title Helper Functions

# New helper functions

def plot_variance_explained(variance_explained):
  """
  Plots eigenvalues.

  Args:
    variance_explained (numpy array of floats) : Vector of variance explained
                                                 for each PC

  Returns:
    Nothing.

  """
  plt.figure()
  # components are numbered from 1, hence the +1 offset on the x-axis
  plt.plot(np.arange(1,len(variance_explained)+1),variance_explained,'o-k')
  plt.xlabel('Number of components')
  plt.ylabel('Variance explained')


def plot_MNIST_reconstruction(X,X_reconstructed):
  """
  Plots 9 images in the MNIST dataset side-by-side with the reconstructed
  images.

  Args:
    X (numpy array of floats)               : Data matrix
                                              each column corresponds to a
                                              different random variable
    X_reconstructed (numpy array of floats) : Data matrix
                                              each column corresponds to a
                                              different random variable

  Returns:
    Nothing.

  """
  plt.figure()
  # left panel: original data, tiled as a 3x3 grid of 28x28 images
  ax = plt.subplot(1,2,1)
  k=0
  for k1 in range(3):
    for k2 in range(3):
      k = k+1
      plt.imshow(np.reshape(X[k,:],(28,28)),extent=[(k1+1)*28,k1*28,(k2+1)*28,k2*28],vmin=0,vmax=255)
  plt.xlim((3*28,0))
  plt.ylim((3*28,0))
  plt.tick_params(axis='both',which='both',bottom=False,top=False,labelbottom=False)
  ax.set_xticks([])
  ax.set_yticks([])
  plt.title('Data')
  plt.clim([0,250])
  # right panel: reconstruction; np.real() guards against complex values
  # coming back from np.linalg.eig on a (numerically) non-symmetric matrix
  ax = plt.subplot(1,2,2)
  k=0
  for k1 in range(3):
    for k2 in range(3):
      k = k+1
      plt.imshow(np.reshape(np.real(X_reconstructed[k,:]),(28,28)),extent=[(k1+1)*28,k1*28,(k2+1)*28,k2*28],vmin=0,vmax=255)
  plt.xlim((3*28,0))
  plt.ylim((3*28,0))
  plt.tick_params(axis='both',which='both',bottom=False,top=False,labelbottom=False)
  ax.set_xticks([])
  ax.set_yticks([])
  plt.clim([0,250])
  plt.title('Reconstructed')


def plot_MNIST_sample(X):
  """
  Plots 9 images in the MNIST dataset.

  Args:
    X (numpy array of floats) : Data matrix
                                each column corresponds to a
                                different random variable

  Returns:
    Nothing.

  """
  plt.figure()
  fig, ax = plt.subplots()
  k=0
  for k1 in range(3):
    for k2 in range(3):
      k = k+1
      plt.imshow(np.reshape(X[k,:],(28,28)),extent=[(k1+1)*28,k1*28,(k2+1)*28,k2*28],vmin=0,vmax=255)
  plt.xlim((3*28,0))
  plt.ylim((3*28,0))
  plt.tick_params(axis='both',which='both',bottom=False,top=False,labelbottom=False)
  plt.clim([0,250])
  ax.set_xticks([])
  ax.set_yticks([])


def plot_MNIST_weights(weights):
  """
  Visualize PCA basis vector weights for MNIST. Red = positive weights,
  blue = negative weights, white = zero weight.

  Args:
    weights (numpy array of floats) : PCA basis vector

  Returns:
    Nothing.

  """
  plt.figure()
  fig, ax = plt.subplots()
  # diverging colormap so sign of each pixel weight is visible
  cmap = plt.cm.get_cmap('seismic')
  plt.imshow(np.real(np.reshape(weights,(28,28))),cmap=cmap)
  plt.tick_params(axis='both',which='both',bottom=False,top=False,labelbottom=False)
  plt.clim(-.15,.15)
  plt.colorbar(ticks=[-.15,-.1,-.05,0,.05,.1,.15])
  ax.set_xticks([])
  ax.set_yticks([])


def add_noise(X,frac_noisy_pixels):
  """
  Randomly corrupts a fraction of the pixels by setting them to random values.

  Args:
    X (numpy array of floats)  : Data matrix
    frac_noisy_pixels (scalar) : Fraction of noisy pixels

  Returns:
    (numpy array of floats)    : Data matrix + noise

  """
  # NOTE(review): np.reshape can return a view, so the assignment below may
  # also modify the caller's X in place — confirm this is intended.
  X_noisy = np.reshape(X,(X.shape[0]*X.shape[1]))
  N_noise_ixs = int(X_noisy.shape[0] * frac_noisy_pixels)
  noise_ixs = np.random.choice(X_noisy.shape[0],size= N_noise_ixs,replace=False)
  X_noisy[noise_ixs] = np.random.uniform(0,255,noise_ixs.shape)
  X_noisy = np.reshape(X_noisy,(X.shape[0],X.shape[1]))

  return X_noisy


# Old helper functions from Tutorial 1-2

def change_of_basis(X,W):
  """
  Projects data onto a new basis.

  Args:
    X (numpy array of floats) : Data matrix
                                each column corresponding to a
                                different random variable
    W (numpy array of floats) : new orthonormal basis
                                columns correspond to basis vectors

  Returns:
    (numpy array of floats)   : Data matrix expressed in new basis

  """
  Y = np.matmul(X,W)

  return Y


def get_sample_cov_matrix(X):
  """
  Returns the sample covariance matrix of data X

  Args:
    X (numpy array of floats) : Data matrix
                                each column corresponds to a
                                different random variable

  Returns:
    (numpy array of floats)   : Covariance matrix

  """
  # mean-center, then 1/N (population) normalization — not the unbiased 1/(N-1)
  X = X - np.mean(X,0)
  cov_matrix = 1./X.shape[0]*np.matmul(X.T,X)

  return cov_matrix


def sort_evals_descending(evals,evectors):
  """
  Sorts eigenvalues and eigenvectors in decreasing order. Also aligns first two
  eigenvectors to be in first two quadrants (if 2D).

  Args:
    evals (numpy array of floats)    : Vector of eigenvalues
    evectors (numpy array of floats) : Corresponding matrix of eigenvectors
                                       each column corresponds to a different
                                       eigenvalue

  Returns:
    (numpy array of floats)          : Vector of eigenvalues after sorting
    (numpy array of floats)          : Matrix of eigenvectors after sorting

  """
  index = np.flip(np.argsort(evals))
  evals = evals[index]
  evectors = evectors[:,index]
  # sign convention only applies in the 2D case: flip a vector when its angle
  # to the reference diagonal exceeds 90 degrees
  if evals.shape[0] == 2:
    if np.arccos(np.matmul(evectors[:,0], 1./np.sqrt(2)*np.array([1,1]))) > np.pi/2.:
      evectors[:,0] = -evectors[:,0]
    if np.arccos(np.matmul(evectors[:,1], 1./np.sqrt(2)*np.array([-1,1]))) > np.pi/2.:
      evectors[:,1] = -evectors[:,1]

  return evals, evectors


def pca(X):
  """
  Performs PCA on multivariate data. Eigenvalues are sorted in decreasing order.

  Args:
    X (numpy array of floats) : Data matrix
                                each column corresponds to a
                                different random variable

  Returns:
    (numpy array of floats)   : Data projected onto the new basis
    (numpy array of floats)   : Vector of eigenvalues
    (numpy array of floats)   : Corresponding matrix of eigenvectors

  """
  X = X - np.mean(X,0)
  cov_matrix = get_sample_cov_matrix(X)
  # NOTE(review): np.linalg.eig can return complex values for a numerically
  # non-symmetric covariance matrix (the plotting helpers wrap np.real);
  # np.linalg.eigh would be the symmetric-matrix choice — confirm before changing.
  evals, evectors = np.linalg.eig(cov_matrix)
  evals, evectors = sort_evals_descending(evals,evectors)
  score = change_of_basis(X,evectors)

  return score, evectors, evals


def plot_eigenvalues(evals):
  """
  Plots eigenvalues.

  Args:
    (numpy array of floats) : Vector of eigenvalues

  Returns:
    Nothing.

  """
  plt.figure()
  plt.plot(np.arange(1,len(evals)+1),evals,'o-k')
  plt.xlabel('Component')
  plt.ylabel('Eigenvalue')
  plt.title('Scree plot')

# + [markdown] colab_type="text" id="7jFNyCJ5ChXQ"
# # Perform PCA on MNIST dataset.
# The MNIST dataset consists of a 70,000 images of individual handwritten digits. Each image is a 28x28 pixel grayscale image. For convenience, each 28x28 pixel image is often unravelled into a single 784 (=28*28) element vector, so that the whole dataset is represented as a 70,000 x 784 matrix.
Each row represents a different image, and each column represents a different pixel. # # Enter the following cell to load the MNIST dataset and plot the first nine images. # + colab={"base_uri": "https://localhost:8080/", "height": 265} colab_type="code" id="f4TNMebrBDSQ" outputId="b8b1fe95-1d4f-47cb-dfe7-887f53a6491f" from sklearn.datasets import fetch_openml mnist = fetch_openml(name = 'mnist_784') X = mnist.data plot_MNIST_sample(X) # + [markdown] colab_type="text" id="OxtBZtgXHIAT" # # The MNIST dataset has an extrinsic dimensionality of 784, much higher than the 2-dimensional examples used in the previous tutorials! To make sense of this data, we'll use dimensionality reduction. But first, we need to determine the intrinsic dimensionality $K$ of the data. One way to do this is to look for an "elbow" in the scree plot, to determine which eigenvalues are signficant. # # #### Exercise # In this exercise you will examine the scree plot in the MNIST dataset. # # **Suggestions** # * Perform PCA on the dataset and examine the scree plot. # * When do the eigenvalues appear (by eye) to reach zero? (Hint: use `plt.xlim` to zoom into a section of the plot). 
# # + colab={"base_uri": "https://localhost:8080/", "height": 461} colab_type="code" id="3kiAFD9KOG8F" outputId="4521e6de-2be8-4334-d953-1ffff0c4cc18" help(pca) help(plot_eigenvalues) # + colab={} colab_type="code" id="7zgeszJSHVr9" ################################################################### ## Insert your code here to: ## perform PCA ## plot the eigenvalues ################################################################### # score, evectors, evals = ...YOUR CODE HERE to perform PCA # plot_eigenvalues(evals) # YOUR CODE HERE to limit the x-axis for zooming # + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 306} colab_type="text" id="HAM5vUWJBpiJ" outputId="3e995c97-a287-40cc-ae5b-4d2aebbf3909" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial3_Solution_f0861370.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=446 height=303 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial3_Solution_f0861370_1.png> # # # + [markdown] colab_type="text" id="ccOz9ePZPGMz" # # Calculate the variance explained. # The scree plot suggests that most of the eigenvalues are near zero, with fewer than 100 having large values. Another common way to determine the intrinsic dimensionality is by considering the variance explained. This can be examined with a cumulative plot of the fraction of the total variance explained by the top $K$ components, i.e.: # \begin{equation} # \text{var explained} = \frac{\sum_{i=1}^K \lambda_i}{\sum_{i=1}^N \lambda_i} # \end{equation} # The intrinsic dimensionality is often quantified by the $K$ necessary to explain a large proportion of the total variance of the data (often a defined threshold, e.g., 90%). 
# # + [markdown] colab_type="text" id="1W30pzQPIwZ0" # #### Exercise # In this exercise you will plot the explained variance. # # **Suggestions** # * Fill in the function below to calculate the fraction variance explained as a function of the number of principal componenets. **Hint:** use `np.cumsum`. # * Plot the variance explained using `plot_variance_explained`. # * How many principal components are required to explain 90% of the variance? # * How does the intrinsic dimensionality of this dataset compare to its extrinsic dimensionality? # # + colab={"base_uri": "https://localhost:8080/", "height": 212} colab_type="code" id="XnQt-y4_WwVp" outputId="22e8654d-c554-48de-9be1-f4b2c5645677" help(plot_variance_explained) # + colab={} colab_type="code" id="FEVRB7fCVcOR" def get_variance_explained(evals): """ Calculates variance explained from the eigenvalues. Args: evals (numpy array of floats) : Vector of eigenvalues Returns: (numpy array of floats) : Vector of variance explained """ ################################################################### ## Insert your code here to: ## cumulatively sum the eigenvalues ## normalize by the sum of eigenvalues #uncomment once you've filled in the function raise NotImplementedError("Student excercise: calculate explaine variance!") ################################################################### return variance_explained ################################################################### ## Insert your code here to: ## calculate and plot the variance explained ################################################################### # variance_explained = ... 
# plot_variance_explained(variance_explained) # + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 288} colab_type="text" id="7HhFNTajH55u" outputId="78dde262-a82c-49e3-b1db-f5d0250082ef" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial3_Solution_7af6bcb7.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=407 height=281 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial3_Solution_7af6bcb7_1.png> # # # + [markdown] colab_type="text" id="8lVO2rHv0kDi" # # Reconstruct data with different numbers of PCs. # # + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="aIi0fq2d0Z60" outputId="0041a7c0-63d4-404f-9699-7f9142c2c6c5" #@title Video: Geometric view of data from IPython.display import YouTubeVideo video = YouTubeVideo(id="A_a7_hMhjfc", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video # + [markdown] colab_type="text" id="dhA1ioJJlrtw" # Now we have seen that the top 100 or so principal components of the data can explain most of the variance. We can use this fact to perform *dimensionality reduction*, i.e., by storing the data using only 100 components rather than the samples of all 784 pixels. Remarkably, we will be able to reconstruct much of the structure of the data using only the top 100 components. To see this, recall that to perform PCA we projected the data $\bf X$ onto the eigenvectors of the covariance matrix: # \begin{equation} # \bf S = X W # \end{equation} # Since $\bf W$ is an orthogonal matrix, ${\bf W}^{-1} = {\bf W}^T$. So by multiplying by ${\bf W}^T$ on each side we can rewrite this equation as # \begin{equation} # {\bf X = S W}^T. 
# \end{equation}
# This now gives us a way to reconstruct the data matrix from the scores and loadings. To reconstruct the data from a low-dimensional approximation, we just have to truncate these matrices. Let's call ${\bf S}_{1:K}$ and ${\bf W}_{1:K}$ as keeping only the first $K$ columns of this matrix. Then our reconstruction is:
# \begin{equation}
# {\bf \hat X = S}_{1:K} ({\bf W}_{1:K})^T.
# \end{equation}
#
# #### Exercise
# Fill in the function below to reconstruct the data using different numbers of principal components.
#
# **Suggestions**
# * Fill in the following function to reconstruct the data based on the weights and scores. Don't forget to add the mean!
# * Make sure your function works by reconstructing the data with all $K=784$ components. The two images should look identical.

# + colab={"base_uri": "https://localhost:8080/", "height": 265} colab_type="code" id="mozpTVpMniYw" outputId="f3762552-8780-45bf-ebe8-aefebaa0e63b"
help(plot_MNIST_reconstruction)


# + colab={} colab_type="code" id="YS1c_mSLIdMu"
def reconstruct_data(score, evectors, X_mean, K):
    """
    Reconstruct the data based on the top K components.

    Args:
      score (numpy array of floats)    : Score matrix
      evectors (numpy array of floats) : Matrix of eigenvectors
      X_mean (numpy array of floats)   : Vector corresponding to data mean
      K (scalar)                       : Number of components to include

    Returns:
      (numpy array of floats)          : Matrix of reconstructed data
    """
    ###################################################################
    ## Insert your code here to:
    ## Reconstruct the data from the score and eigenvectors
    ## Don't forget to add the mean!!
    # X_reconstructed = Your code here
    # uncomment once you've filled in the function
    raise NotImplementedError("Student exercise: finish reconstructing data function!")
    ###################################################################
    return X_reconstructed


K = 784

## Uncomment below to:
## Reconstruct the data based on all components
## Plot the data and reconstruction
# X_mean = ...
# X_reconstructed = ...
# plot_MNIST_reconstruction(X ,X_reconstructed)

# + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 202} colab_type="text" id="P5q8yvs6TJAA" outputId="a0510790-ca43-42f1-8279-7bf491423c38"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial3_Solution_0edb6db6.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=357 height=188 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial3_Solution_0edb6db6_0.png>
#
#

# + [markdown] colab_type="text" id="aHxJHV4BrRHi"
# #### Exercise:
# Now run the code below and experiment with the slider to reconstruct the data matrix using different numbers of principal components.
#
# **Questions:**
# * How many principal components are necessary to reconstruct the numbers (by eye)? How does this relate to the intrinsic dimensionality of the data?
# * Do you see any information in the data with only a single principal component?
# + colab={"base_uri": "https://localhost:8080/", "height": 232, "referenced_widgets": ["<KEY>", "deffd77ea6604fe7a167d24caf6a2cb2", "eef308c7a20d492a9bffbaec8fda7d0b", "3d17fb0e43a64453a590119b3a3c055d", "71f7ab5b1c6d444e98a8a0ed053ade4e", "3b95b78da6ba4a2dacadcaeb83d3c733", "d7a30886fd7744f89a9e096c5c897e0b"]} colab_type="code" id="_ZGFzhXqlvmM" outputId="5338b056-eddc-4fff-e228-6869a995203a" ###### MAKE SURE TO RUN THIS CELL VIA THE PLAY BUTTON TO ENABLE SLIDERS ######## import ipywidgets as widgets def refresh(K = 100): X_reconstructed = reconstruct_data(score,evectors,X_mean,K) plot_MNIST_reconstruction(X ,X_reconstructed) plt.title('Reconstructed, K={}'.format(K)) _ = widgets.interact(refresh, K = (1, 784, 10)) # + [markdown] colab_type="text" id="bnmqJqd3nue7" # #### Exercise: # Next, let's take a closer look at the first principal component by visualizing its corresponding weights. # # **Questions** # * Enter `plot_MNIST_weights` to visualize the weights of the first basis vector. # * What structure do you see? Which pixels have a strong positive weighting? Which have a strong negative weighting? What kinds of images would this basis vector differentiate? # * Try visualizing the second and third basis vectors. Do you see any structure? What about the 100th basis vector? 500th? 700th? 
# + colab={"base_uri": "https://localhost:8080/", "height": 230} colab_type="code" id="ZghlYuowoaAn" outputId="2098a1a3-726d-430f-a377-0ece9dc946e5" help(plot_MNIST_weights) # + colab={} colab_type="code" id="W7BkraA4IDR9" ################################################################### ## Insert your code here to: ## Plot the weights of the first principal component #plot_MNIST_weights(Your code here) ################################################################### # + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 276} colab_type="text" id="OPYX_kK9nvBn" outputId="e5992b8d-6236-4001-dda6-303b73fc774c" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial3_Solution_d07d8802.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=313 height=247 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial3_Solution_d07d8802_1.png> # # # + [markdown] colab_type="text" id="4o207yNk0c-I" # # (Optional Exploration): Examine denoising using PCA. # # Finally, we will test how PCA can be used to denoise data. We will add salt-and-pepper noise to the original data and see how that affects the eigenvalues. To do this, we'll use the function `add_noise`, starting with adding noise to 20% of the pixels. # The we'll Perform PCA and plot the variance explained. How many principal components are required to explain 90% of the variance? How does this compare to the original data? 
# # # + colab={"base_uri": "https://localhost:8080/", "height": 537} colab_type="code" id="wVWrn-mn5m4w" outputId="2c715f19-5df0-41e5-d2bd-e644281da383" ################################################################### ## Here we: ## Add noise to the data ## Plot noise-corrupted data ## Perform PCA on the noisy data ## Calculate and plot the variance explained ################################################################### X_noisy = add_noise(X,.2) score_noisy, evectors_noisy, evals_noisy = pca(X_noisy) variance_explained_noisy = get_variance_explained(evals_noisy) with plt.xkcd(): plot_MNIST_sample(X_noisy) plot_variance_explained(variance_explained_noisy) # + [markdown] colab_type="text" id="wWojBrz2xbjC" # # To denoise the data, we can simply project it onto the basis found with the original dataset (`evectors`, not `evectors_noisy`). Then, by taking the top K components of this projection, we have a guess for where the sample should lie in the K-dimensional latent space. We can then reconstruct the data as normal, using the top 50 components. You should play around with the amount of noise and K to build intuition. # # + colab={"base_uri": "https://localhost:8080/", "height": 202} colab_type="code" id="KB5QiPn-3Pag" outputId="d62bfec6-22bf-458b-8ae4-dbf5814cb650" ################################################################### ## Here we: ## Subtract the mean of the noise-corrupted data ## Project onto the original basis vectors evectors ## Reconstruct the data using the top 50 components ## Plot the result ################################################################### X_noisy_mean = np.mean(X_noisy,0) projX_noisy = np.matmul(X_noisy-X_noisy_mean,evectors) X_reconstructed = reconstruct_data(projX_noisy,evectors,X_noisy_mean,50) with plt.xkcd(): plot_MNIST_reconstruction(X_noisy,X_reconstructed)
tutorials/W1D5_DimensionalityReduction/student/W1D5_Tutorial3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
def split_words(raw_text):
    """Split *raw_text* into word tokens.

    Tokens are separated by single spaces.  A token containing a newline is
    assumed to hold period-joined words (e.g. "like.dogs"): the newline is
    replaced by a space and the token is split on '.'.

    Args:
        raw_text (str): raw file contents.

    Returns:
        list[str]: the extracted word tokens.
    """
    words = []
    for token in raw_text.split(" "):
        if '\n' in token:
            # Fix: extend with *all* period-separated parts.  The original
            # appended exactly parts [0] and [1], which raised IndexError for
            # tokens without a '.' and silently dropped parts after the second.
            words.extend(token.replace('\n', ' ').split('.'))
        else:
            words.append(token)
    return words


# Guarded so the module can be imported without textsprint.txt being present;
# notebook cells run with __name__ == "__main__", so behaviour there is unchanged.
if __name__ == "__main__":
    with open('textsprint.txt', 'r') as myfile:
        textfile = myfile.read()
    words = split_words(textfile)
    print(words)


# -

def test():
    """Demo: print each word with its centre character upper-cased."""
    words = "The quick brown fox jumped over the lazy dog".split(" ")
    for word in words:
        if not word:  # guard: word[length//2] would raise on an empty token
            continue
        length = len(word)
        leftHalf = word[0:length//2 ]
        centerChar = word[length//2].upper()
        rightHalf = word[length//2 + 1:]
        print( leftHalf + centerChar + rightHalf)


# +
# Renamed the local from `test` to `sample` so it no longer shadows test() above.
texto = []
sample = "like.dogs"
parts = sample.split('.')
texto.append(parts[0])
texto.append(parts[1])
print(texto)
# -
ReadRunner Experiments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Práctica 2 - Cinemática directa y dinámica de manipuladores

# En este documento se describe el proceso de obtención de la dinámica de un robot manipulador (pendulo doble), por medio de la ecuación de Euler-Lagrange, empecemos importando las librerias necesarias:

from sympy import var, sin, cos, pi, Matrix, Function, Rational, simplify
from sympy.physics.mechanics import mechanics_printing
mechanics_printing()

# Definimos de una vez todas las variables necesarias:

# NOTE: sympy's var() injects the symbols (l1, l2, m1, m2, J1, J2, g, t)
# directly into the notebook namespace.
var("l1:3")
var("m1:3")
var("J1:3")
var("g t")

# Y definimos las variables que dependen de otra variable, especificamente en este calculo, todo lo anterior es constante y solo $q_1$ es una variable dependiente del tiempo:

# Joint angles as functions of time, so .diff(t) yields joint velocities.
q1 = Function("q1")(t)
q2 = Function("q2")(t)

# Esta vez vamos a hacer algo ligeramente diferente, vamos a automatizar un poco el proceso de obtención de la posición generalizada del manipulador, para esto vamos a apoyarnos de nuestros conocimientos de cinemática directa, para lo cual, primero necesitamos definir una función ```DH``` la cual tome una lista de parametros, en un orden especifico, y nos devuelva la matriz de transformación homogénea asociada a este eslabón:

# ---
# ### Ejercicio
# Define una función ```DH``` que tome los parametros Denavit-Hartenberg de un eslabón de un manipulador y devuelva la matriz de transformación homogénea asociada a este eslabón.

# + deletable=false nbgrader={"checksum": "67794745e8daa9dd15f66d3b00dcee7f", "grade": false, "grade_id": "cell-e8a3571efd7ecf1f", "locked": false, "schema_version": 1, "solution": true}
def DH(params):
    from sympy import Matrix, sin, cos
    a, d, α, θ = params
    # ESCRIBE TU CODIGO AQUI
    raise NotImplementedError
    return A


# + deletable=false editable=false nbgrader={"checksum": "a3a9d7b4acf3e5bfdc1dd679a7202535", "grade": true, "grade_id": "cell-43b04f5c5b3c878d", "locked": true, "points": 1, "schema_version": 1, "solution": false}
from nose.tools import assert_equal
from sympy import var, Matrix, sin, cos, eye

var("l1")
q1 = Function("q1")(t)

A = Matrix([[cos(q1), -sin(q1), 0, l1*cos(q1)],
            [sin(q1), cos(q1), 0, l1*sin(q1)],
            [0, 0, 1, 0],
            [0, 0, 0, 1]])

assert_equal(DH([l1, 0, 0, q1]), A)
assert_equal(DH([0, 0, 0, 0]), eye(4))

print("Sin errores")
# -

# Una vez que tenemos la función ```DH``` para calcular las matrices de transformación homogéneas, ahora procedemos a calcular la posición de cada articulación por medio de estas matrices:

# Homogeneous transforms for each link (planar revolute joints).
A1 = DH([l1, 0, 0, q1])
A2 = DH([l2, 0, 0, q2])
A1

# Recordemos que la posición de cada articulación se obtendrá por medio del ultimo vector de esta matriz, por lo que podemos:

# Joint 1 position = translation column of A1.
p1 = A1[0:3, 3:4]
p1

# Para la posición de la segunda articulación necesitamos multiplicar las primeras dos matrices:

p2 = (A1*A2)[0:3, 3:4]
p2

# Aunque en este caso, podemos simplificar mas estas expresiones:

p2 = simplify(p2)
p2

# Teniendo estas posiciones, para obtener la velocidad, necesitamos obtener su derivada:

v1 = p1.diff("t")
v1

v2 = p2.diff("t")
v2

# Una vez que tenemos la velocidad, obtener el cuadrado de esta velocidad es facil, para un vector podemos decir que:
#
# $$
# \left| v \right|^2 = v^T \cdot v
# $$

# Squared speed of each link endpoint (scalar from the 1x1 product).
v1c = (v1.T*v1)[0]
v1c = v1c.simplify()
v1c

v2c = (v2.T*v2)[0]
v2c = v2c.simplify()
v2c

# Y calculando la altura y velocidad rotacional del eslabon:

# Heights (y-components) and angular velocities of the two links.
h1, h2 = p1[1], p2[1]
ω1, ω2 = q1.diff(t), q1.diff(t) + q2.diff(t)

# ---
# ### Ejercicio
# Define una función ```ener_cin```, la cual tome los parametros ```m```, ```v```, ```J``` y ```ω``` y devuelva la energía cinética del eslabon.

# + deletable=false nbgrader={"checksum": "e0632c6e5a0b5720c3c2f75955cac178", "grade": false, "grade_id": "cell-7324954e80d3605d", "locked": false, "schema_version": 1, "solution": true}
def ener_cin(params):
    from sympy import Rational
    m, v, J, ω = params
    # ESCRIBE TU CODIGO AQUI
    raise NotImplementedError
    return K


# + deletable=false editable=false nbgrader={"checksum": "c011ca7ea44244a9e4fdb203a12b3d88", "grade": true, "grade_id": "cell-7a89e061cd79e91c", "locked": true, "points": 1, "schema_version": 1, "solution": false}
from nose.tools import assert_equal
from sympy import var, Matrix, sin, cos, Rational

var("m1 J1 l1 ω1")
q1 = Function("q1")(t)

v1 = Matrix([[l1*cos(q1)], [l1*sin(q1)], [0]])

assert_equal(ener_cin([m1, v1, J1, ω1]), Rational(1,2)*m1*l1**2 + Rational(1,2)*J1*ω1**2)
assert_equal(ener_cin([0, Matrix([[0],[0],[0]]), 0, 0]), 0)

print("Sin errores")
# -

# Si ahora calculamos las energías cinéticas, tenemos:

h1, h2 = p1[1], p2[1]
ω1, ω2 = q1.diff(t), q1.diff(t) + q2.diff(t)

K1 = ener_cin([m1, v1, J1, ω1])
K1

K2 = ener_cin([m2, v2, J2, ω2])
K2

# ---
# ### Ejercicio
# Defina una función ```ener_pot```, la cual tome los parametros ```m``` y ```h``` y devuelva la energía potencial del eslabon.

# + deletable=false nbgrader={"checksum": "130be45771bb4d8525d9030307a939f6", "grade": false, "grade_id": "cell-2083af3adb8c1e7f", "locked": false, "schema_version": 1, "solution": true}
def ener_pot(params):
    m, h = params
    # ESCRIBE TU CODIGO AQUI
    raise NotImplementedError
    return U


# + deletable=false editable=false nbgrader={"checksum": "dec5c8aaf39a923775c6cbec4ee07dbf", "grade": true, "grade_id": "cell-826c9283d765c287", "locked": true, "points": 1, "schema_version": 1, "solution": false}
from nose.tools import assert_equal
from sympy import var

var("m1 m2 g h1 h2")

assert_equal(ener_pot([m1, h1]), m1*g*h1)
assert_equal(ener_pot([m2, h2]), m2*g*h2)

print("Sin errores")
# -

# Y calculando las energías potenciales:

h1, h2 = p1[1], p2[1]
ω1, ω2 = q1.diff(t), q1.diff(t) + q2.diff(t)

U1 = ener_pot([m1, h1])
U2 = ener_pot([m2, h2])

# Una vez que tenemos las energías cinéticas y potenciales de cada eslabón, podemos calcular la energía cinética total y la energía potencial total del manipulador:

K = K1 + K2
U = U1 + U2

# Con estas energias se puede calcular el Lagrangiano:

L = K - U
L

# Ya con el Lagrangiano, podemos calcular la ecuación de Euler-Lagrange para cada grado de libertad del manipulador:

# Euler-Lagrange: τ_i = d/dt(∂L/∂q̇_i) - ∂L/∂q_i, collected on the joint accelerations.
τ1 = (L.diff(q1.diff(t)).diff(t) - L.diff(q1)).expand().collect(q1.diff(t).diff(t)).collect(q2.diff(t).diff(t))
τ1

τ2 = (L.diff(q2.diff(t)).diff(t) - L.diff(q2)).expand().collect(q1.diff(t).diff(t)).collect(q2.diff(t).diff(t))
τ2

# Una vez que hemos concluido este proceso, podemos pasar al documento llamado ```numerico.ipynb```
Practicas/practica2/simbolico.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.6 64-bit # language: python # name: python3 # --- from igraph import Graph from igraph import plot import igraph import numpy as np #carregando de grafo no formato graphml grafo = igraph.load('Grafo.graphml') print(grafo) #visualizazção do gráfico plot(grafo, bbox=(0,0,650,650)) #visualizaçõ das comunidades comunidades=grafo.clusters() print(comunidades) #em qual comunidade cada registro foi adicionado comunidades.membership #visualização de grado cores=comunidades.membership #array de cores para definir cores diferentes para cada grupo cores = np.array(cores) cores= cores*20 cores= cores.tolist() plot(grafo, vertex_color= cores) # EXEMPLO 2 grafo2= Graph(edges= [(0,2), (0,1), (1,4),(1,5), (2,3), (6,7), (3,7), (4,7), (5,6)], directed= True) grafo2.vs['label'] = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] grafo2.es['weight'] = [2,1,2,1,2,1,3,1] plot(grafo2, bbox=(0,0,300,300)) #comunidade e em qual cada elemnto foi associado comunidades2 = grafo2.clusters() print(comunidades2) comunidades2.membership #função mais otimizada oara vidualização das comunidades c= grafo2.community_edge_betweenness() print(c) #obtenção do número de clasters c.optimal_count #visualização de nova comunidade comunidades3 = c.as_clustering() print(comunidades3) comunidades3.membership #geração do grafo das comunidades colocando cores entre os grupos identificados plot(grafo2, vertex_color= comunidades3.membership) cores= comunidades3.membership #array para definir coresdiferentes para cada grupo cores = np.array(cores) cores= cores*100 cores= cores.tolist() plot(grafo2, bbox=(0,0,300,300), vertex_color = cores) #visualização dos cliques cli = grafo.as_undirected().cliques(min= 4) print(cli) len(cli)
Grafos/Comunidades.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] editable=false # # Avoiding Data Races # # ## Understanding data races # # One of the primary sources of error in concurrent programming are data races. They occur, when two concurrent threads are accessing the same memory location while at least one of them is modifying (the other thread might be reading or modifying). In this scenario, the value at the memory location is completely undefined. Depending on the system scheduler, the second thread will be executed at an unknown point in time and thus see different data at the memory location with each execution. Depending on the type of program, the result might be anything from a crash to a security breach when data is read by a thread that was not meant to be read, such as a user password or other sensitive information. Such an error is called a „data race“ because two threads are racing to get access to a memory location first, with the content at the memory location depending on the result of the race. # # The following diagram illustrates the principle: One thread wants to increment a variable `x`, whereas the other thread wants to print the same variable. Depending on the timing of the program and thus the order of execution, the printed result might change each time the program is executed. # # <img src="images/C3-4-A2.png"></img> # # In this example, one safe way of passing data to a thread would be to carefully synchronize the two threads using either `join()` or the promise-future concept that can guarantee the availability of a result. Data races are always to be avoided. Even if nothing bad seems to happen, they are a bug and should always be treated as such. 
Another possible solution for the above example would be to make a copy of the original argument and pass the copy to the thread, thereby preventing the data race. # + [markdown] editable=false # <!-- # # # %%ulab_page_divider # --><hr/> # + [markdown] editable=false # # ## Passing data to a thread by value # # In the following example, an instance of the proprietary class `Vehicle` is created and passed to a thread by value, thus making a copy of it. # # <img src="images/C3-4-A3a.png"></img> # # Note that the class Vehicle has a default constructor and an initializing constructor. In the main function, when the instances `v0` and `v1` are created, each constructor is called respectively. Note that `v0` is passed by value to a Lambda, which serves as the thread function for `std::async`. Within the Lambda, the id of the Vehicle object is changed from the default (which is 0) to a new value 2. Note that the thread execution is paused for 500 milliseconds to guarantee that the change is performed well after the main thread has proceeded with its execution. # # In the `main` thread, immediately after starting up the worker thread, the id of `v0` is changed to 3. Then, after waiting for the completion of the thread, the vehicle id is printed to the console. In this program, the output will always be the following: # # <img src="images/image.png"></img> # # Passing data to a thread in this way is a clean and safe method as there is no danger of a data race - at least when atomic data types such as integers, doubles, chars or booleans are passed. # + [markdown] editable=false # <!-- # # # %%ulab_page_divider # --><hr/> # + [markdown] editable=false # When passing a complex data structure however, there are sometimes pointer variables hidden within, that point to a (potentially) shared data buffer - which might cause a data race even though the programmer believes that the copied data will effectively preempt this. 
The next example illustrates this case by adding a new member variable to the `Vehicle` class, which is a pointer to a string object, as well as the corresponding getter and setter functions. # # <img src="images/C3-4-A3b.png"></img> # # The output of the program looks like this: # # <img src="images/image_1.png"></img> # # The basic program structure is mostly identical to the previous example with the object `v0` being copied by value when passed to the thread function. This time however, even though a copy has been made, the original object `v0` is modified, when the thread function sets the new name. This happens because the member `_name` is a pointer to a string and after copying, even though the pointer variable has been duplicated, it still points to the same location as its value (i.e. the memory location) has not changed. Note that when the delay is removed in the thread function, the console output varies between "Vehicle 2" and "Vehicle 3", depending on the system scheduler. Such an error might go unnoticed for a long time. It could show itself well after a program has been shipped to the client - which is what makes this error type so treacherous. # # Classes from the standard template library usually implement a deep copy behavior by default (such as `std::vector`). When dealing with proprietary data types, this is not guaranteed. The only safe way to tell whether a data structure can be safely passed is by looking at its implementation: Does it contain only atomic data types or are there pointers somewhere? If this is the case, does the data structure implement the copy constructor (and the assignment operator) correctly? Also, if the data structure under scrutiny contains sub-objects, their respective implementation has to be analyzed as well to ensure that deep copies are made everywhere. 
# # Unfortunately, one of the primary concepts of object-oriented programming - information hiding - often prevents us from looking at the implementation details of a class - we can only see the interface, which does not tell us what we need to know to make sure that an object of the class may be safely passed by value. # + [markdown] editable=false # <!-- # # # %%ulab_page_divider # --><hr/> # + [markdown] editable=false # ## Overwriting the copy constructor # # The problem with passing a proprietary class is that the standard copy constructor makes a 1:1 copy of all data members, including pointers to objects. This behavior is also referred to as "shallow copy". In the above example we would have liked (and maybe expected) a "deep copy" of the object though, i.e. a copy of the data to which the pointer refers. A solution to this problem is to create a proprietary copy constructor in the class `Vehicle`. The following piece of code overwrites the default copy constructor and can be modified to make a customized copy of the data members. # # <img src="images/C3-4-A3c.png"></img> # # Expanding on the code example from above, please implement the code required for a deep copy so that the program always prints "Vehicle 3" to the console, regardless of the delay within the thread function. # + [markdown] editable=false # <!-- # # # %%ulab_page_divider # --><hr/> # + [markdown] editable=false # ## Passing data using move semantics # # Even though a customized copy constructor can help us to avoid data races, it is also time (and memory) consuming. In the following, we will use move semantics to implement a more effective way of safely passing data to a thread. # # A move constructor enables the resources owned by an rvalue object to be moved into an lvalue without physically copying it. 
Rvalue references support the implementation of move semantics, which enables the programmer to write code that transfers resources (such as dynamically allocated memory) from one object to another.
#
# To make use of move semantics, we need to provide a move constructor (and optionally a move assignment operator). Copy and assignment operations whose sources are rvalues automatically take advantage of move semantics. Unlike the default copy constructor however, the compiler does not provide a default move constructor.
#
# To define a move constructor for a C++ class, the following steps are required:
#
# 1. Define an empty constructor method that takes an rvalue reference to the class type as its parameter
#
# <img src="images/C3-4-A5a.png"></img>
#
# 2. In the move constructor, assign the class data members from the source object to the object that is being constructed
#
# <img src="images/C3-4-A5b.png"></img>
#
# 3. Assign the data members of the source object to default values.
#
# <img src="images/C3-4-A5c.png"></img>
#
# When launching the thread, the Vehicle object `v0` can be passed using `std::move()` - which calls the move constructor and invalidates the original object `v0` in the main thread.

# + [markdown] editable=false
# <!--
#
# %%ulab_page_divider
# --><hr/>

# + [markdown] editable=false
# ## Move semantics and uniqueness
#
# As with the above-mentioned copy constructor, passing by value is usually safe - provided that a deep copy is made of all the data structures within the object that is to be passed. With move semantics, we can additionally use the notion of uniqueness to prevent data races by default. In the following example, a `unique_ptr` instead of a raw pointer is used for the string member in the Vehicle class.
#
# <img src="images/C3-4-A6.png"></img>
#
# As can be seen, the `std::string` has now been changed to a unique pointer, which means that only a single reference to the memory location it points to is allowed.
Accordingly, the move constructor transfers the unique pointer to the worker by using `std::move` and thus invalidates the pointer in the `main` thread. When calling `v0.getName()`, an exception is thrown, making it clear to the programmer that accessing the data at this point is not permissible - which is the whole point of using a unique pointer here as a data race will now be effectively prevented. # # The point of this example has been to illustrate that move semantics on its own is not enough to avoid data races. The key to thread safety is to use move semantics in conjunction with uniqueness. It is the responsibility of the programmer to ensure that pointers to objects that are moved between threads are unique.
Concurrency/Passing Data Between Threads/workspace/Avoiding Data Races/.ipynb_checkpoints/Guide-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.10 64-bit (''trade2'': conda)'
#     name: python3
# ---

# # 内容
# - データ読み込み
# - split
# - x, y生成
# - 学習
# - 評価
# - submit

# +
import os
import pickle

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection

import utils as ut
# -

# ## 準備

DEBUG = True

ut.seed_everything(24)

tr_file = "../../input/ventilator-pressure-prediction/train.csv"
ts_file = "../../input/ventilator-pressure-prediction/test.csv"

tr_df = pd.read_csv(tr_file)
ts_df = pd.read_csv(ts_file)

# shuffled breath_id
# NOTE(review): presumably a permutation of breath_id used to randomize the
# GroupKFold group assignment below — confirm against utils.make_shuffled_id.
tr_df["shuffled_breath_id"] = ut.make_shuffled_id(tr_df["breath_id"])

# ## 学習

# +
def make_x(df):
    """Build the feature matrix for *df* via the project's ut.Feature transformer."""
    ft = ut.Feature()
    x = ft.transform(df)
    return x

def make_y(df):
    """Build the target values for *df* via the project's ut.Target transformer."""
    tg = ut.Target()
    y = tg.transform(df)
    return y

def get_model(n_feat, prefix=""):
    """Construct a ut.Model configured for *n_feat* input features.

    *prefix* is forwarded to the trainer (used for checkpoint file names).
    """
    model_prms = {
        "scaling_prms": dict(n_scaler=n_feat),
        "dnn_prms": dict(n_feat=n_feat, n_channel=256, dropout=0.0, n_rc_layer=4),
        "tr_prms": dict(
            criterion=ut.VentilatorLoss(),
            opt="adamw",
            opt_params=dict(lr=0.001, weight_decay=1e-6),
            sch_params=None,  #{'max_lr': lr, 'pct_start':0.1, 'div_factor':5, 'final_div_factor': 10000},  # initial_lr = max_lr/div_factor, min_lr = initial_lr/final_div_factor
            epochs=20,
            prefix=prefix,
            save_best=True,
            maximize_score=False,
        ),
        "nn_prms": dict(
            n_neighbors=3,
            n_jobs=4,
        ),
        "seq_len": 80,
        "use_seq_len": 40,
        "train_batch_size": 256,
        "pred_batch_size": 1024,
    }
    model = ut.Model(**model_prms)
    return model

def run_fold(tr_df, vl_df, prefix=""):
    """Train one fold and return (model, train df, validation df with 'pred')."""
    # x, y
    tr_x = make_x(tr_df)
    tr_y = make_y(tr_df)
    vl_x = make_x(vl_df)
    vl_y = make_y(vl_df)

    model = get_model(tr_x.shape[1], prefix)
    model.fit(tr_x, tr_y, vl_x, vl_y)

    #tr_pred = model.predict(tr_x)
    vl_pred = model.predict(vl_x)

    #tr_df = pd.concat([tr_df, tr_x, pd.DataFrame(tr_pred, index=tr_df.index, columns=["pred"])], axis=1)
    vl_df = pd.concat([vl_df, vl_x, pd.DataFrame(vl_pred, index=vl_df.index, columns=["pred"])], axis=1)
    return model, tr_df, vl_df

def run_cv(df, n_split, n_fold):
    """Run GroupKFold CV (grouped by shuffled_breath_id) over at most *n_fold* folds.

    Each fold's model is pickled to '<prefix>model.pickle'; the concatenated
    out-of-fold predictions are returned as one DataFrame.
    """
    kf = model_selection.GroupKFold(n_splits=n_split)
    oof_df = []
    for fold, (tr_idxs, vl_idxs) in enumerate(kf.split(X=np.arange(len(df)), groups=df["shuffled_breath_id"].values)):
        if n_fold <= fold:  # train only the first n_fold of the n_split folds
            break
        print("fold", fold)
        prefix = f"fold{fold}_"
        tr_df = df.iloc[tr_idxs].copy()
        vl_df = df.iloc[vl_idxs].copy()
        #vl_df = df.iloc[tr_idxs].copy()
        tr_df["fold"] = fold
        vl_df["fold"] = fold
        model, tr_df, vl_df = run_fold(tr_df, vl_df, prefix)
        oof_df.append(vl_df)
        with open(f'{prefix}model.pickle', mode='wb') as fp:
            model.set_dev('cpu')  # move to CPU so the pickle loads without a GPU
            pickle.dump(model, fp)
        #tr_df.to_csv(f"{prefix}tr_df.csv", index=False)
        #vl_df.to_csv(f"{prefix}vl_df.csv", index=False)
    oof_df = pd.concat(oof_df, axis=0)
    return oof_df
# -

n_split = 2
n_fold = 1
oof_df = run_cv(tr_df, n_split, n_fold)
oof_df.to_csv("oof.csv")

# ## 評価

ev = ut.Evaluation(oof_df)

score = ev.calc_scores()
print(score)

# Per-breath MAE, worst breaths first.
breath_mae = ev.calc_breath_mae()
breath_mae = breath_mae.sort_values("error", ascending=False)

breath_mae.plot.hist(bins=100)
plt.show()

# Plot the n_plot worst-predicted breaths for visual inspection.
n_plot = 20
for i in range(n_plot):
    ev.plot_breath(breath_mae.index[i])

# ## 推論

def run_predict(model, df):
    """Return model predictions for the rows of *df* (features built by make_x)."""
    x = make_x(df)
    pred = model.predict(x)
    return pred

# +
# Reload the per-fold pickled models and wrap them in an ensemble.
model = []
for fold in range(n_fold):
    with open(f"fold{fold}_model.pickle", 'rb') as p:
        mdl = pickle.load(p)
    model.append(mdl)
model = ut.EnsembleModel(model)
# -

pred = run_predict(model, ts_df)

sub_df = ts_df[["id"]].copy()
sub_df["pressure"] = pred
sub_df.to_csv(f"submission_{os.path.basename(os.getcwd())}.csv", index=False)
experiments/exp_v00_01_00/main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="RLoqeMxWYRGy" colab_type="code" colab={}
# Authenticate against Google Drive (Colab-only) so the dataset can be fetched.
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import os

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# + id="lmZLBqrmYxnE" colab_type="code" colab={}
# Download the train/test CSVs from Drive by file id.
train_downloaded = drive.CreateFile({'id': '1WFacqgpgPdDQc32iFpBbcl-vr2ng90YH'})
train_downloaded.GetContentFile('train.csv')
test_downloaded = drive.CreateFile({'id': '1LxVNmkm_KhN3MA_VdGP4_tOVcNURmIg2'})
test_downloaded.GetContentFile('test.csv')

# + id="lpNLv35TY6qG" colab_type="code" colab={}
import pandas as pd
import matplotlib
import numpy as np
import sklearn
import random
import time

import warnings
warnings.filterwarnings('ignore')

#Configure Visualization Defaults
# #%matplotlib inline = show plots in Jupyter Notebook browser
# %matplotlib inline

# + id="XRSsQ1RsZG7y" colab_type="code" colab={}
data_raw = pd.read_csv('train.csv')
data_val = pd.read_csv('test.csv')

# + id="oBAr93twaSO8" colab_type="code" colab={}
# Work on a deep copy so data_raw stays pristine; data_cleaner lets later
# cleaning steps loop over train and test together.
data1 = data_raw.copy(deep = True)
data_cleaner = [data1, data_val]

# + id="oHqOal5BjZ8N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1479} outputId="698009c7-d46e-4de3-ebeb-cb63df2aa902"
data1.info()

# + id="GNyKqlDIac4Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="9b7fd694-f678-44cb-bdd1-cd6a7f281e5c"
pd.set_option('display.max_columns', 81)
print(data1[['MSSubClass','SalePrice']].sample(10))

# + id="HAxhx7T1adiO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 697} outputId="e67a9724-1171-4610-8582-1daa75463356"
# Report highly-correlated pairs of integer features (excluding 'Id').
# Fix: the original nested loop recomputed every pairwise correlation twice
# and printed each pair in both (x, y) and (y, x) order; computing the
# correlation matrix once and iterating unordered pairs removes the duplicate
# output and the redundant O(n^2) .corr() calls.
num_cols = [c for c in data1.columns if data1[c].dtype == 'int64' and c != 'Id']
corr_matrix = data1[num_cols].corr()
for i, y in enumerate(num_cols):
    for x in num_cols[i + 1:]:
        if corr_matrix.loc[x, y] > 0.7:
            print(y, 'Correlation by:', x)
            print(corr_matrix.loc[x, y])
            print('-'*10, '\n')

# + id="JZcz5cxFkNaY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 2586} outputId="e036ae4d-27a6-45c1-cd99-f060b14dde0e"
# Data Cleaning --- 4 C's i.e. Correcting, Completing, Creating, and Converting
print('Train columns with null value:\n',data1.isnull().sum())
print('-'*10)
print('Test columns with null value:\n',data_val.isnull().sum())
print('-'*10)

data_raw.describe(include = 'all')

# + id="CvCL5ifiofYz" colab_type="code" colab={}
house_price_kaggle/house_price_kaggle.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Quick NumPy walkthrough: horizontal vs. vertical stacking
# (`hstack` / `vstack`) and the `np.c_` / `np.r_` indexing shortcuts.

import numpy as np

# Two 1-D vectors of ten consecutive integers each.
a = np.arange(0, 10)
print(a)

b = np.arange(10, 20)
print(b)

# On 1-D inputs, hstack concatenates end-to-end (shape (20,)) while
# vstack promotes each vector to a row and stacks them (shape (2, 10)).
np.hstack((a, b)).shape
np.vstack((a, b)).shape

# Reshape both vectors into 2x5 matrices and compare the same operations.
c, d = a.reshape(2, 5), b.reshape(2, 5)
c
d

# For 2-D inputs: hstack joins side by side (2x10), vstack on top (4x5).
np.hstack((c, d))
np.vstack((c, d))

# np.c_ pairs the vectors up column-wise (10x2); np.r_ concatenates along
# the first axis — like hstack for 1-D input and vstack for 2-D input.
np.c_[a, b]
np.r_[a, b]
np.r_[c, d]

np.shape(np.c_[a, b])
np.shape(np.r_[c, d])
Numpy/hstack, vstack.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/khairav/MachinLearning-Mini_Project/blob/main/submission_2_timeseries_LSTM_deni_diana.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Jgo3fJR6HWh1" # Analisis deret waktu terdiri dari metode untuk menganalisis data deret waktu untuk mengekstrak statistik yang berarti dan karakteristik data lainnya. Peramalan deret waktu adalah penggunaan model untuk memprediksi nilai masa depan berdasarkan nilai yang diamati sebelumnya. # # Time series banyak digunakan untuk data non stasioner, seperti ekonomi, cuaca, harga saham, dan penjualan retail pada postingan kali ini. Kami akan mendemonstrasikan pendekatan yang berbeda untuk memperkirakan deret waktu penjualan ritel. 
# # The data used: the "Sample - Superstore" sales data set.

# + [markdown] id="bED6quYNHZOW"
# # PREPARE DIRECTORY GOOGLE

# + id="x_WWP50DsvLe"
# Colab-only: mount Google Drive so the Excel data set can be read.
from google.colab import drive
drive.mount('/content/drive')

# + id="Yp8K0M6NtFG6"
import os
# #!mkdir -p ~/Dicoding
# %cd /content/drive/MyDrive/MachineLearningProject/Dicoding

from google.colab import files
uploaded = files.upload()

# + id="QS_uoUItvi-5"
# !ls

# + [markdown] id="iQPt3_PXu-6s"
# # import library

# + id="_4_WtlnvvCtT"
import numpy as np
import pandas as pd
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
import tensorflow as tf
import warnings
import itertools
import matplotlib.pyplot as plt  # NOTE(review): duplicate import, harmless
warnings.filterwarnings("ignore")
plt.style.use('fivethirtyeight')
import statsmodels.api as sm
import matplotlib
matplotlib.rcParams['axes.labelsize'] = 14
matplotlib.rcParams['xtick.labelsize'] = 12
matplotlib.rcParams['ytick.labelsize'] = 12
matplotlib.rcParams['text.color'] = 'k'

# + [markdown] id="YYHZEDc7vGPz"
# # import Dataset

# + id="tb6X6lLmxpL6"
#/content/drive/MyDrive/MachineLearningProject/Dicoding/Dicoding/airline delay analysis/20.csv
df = pd.read_excel('Sample - Superstore.xls')
df.head(10)

# + id="CsfsP3341n0j"
df.columns

# + id="H3heih5jhUjS"
df.describe()

# + id="XRHUeKwAwuCM"
df.shape

# + id="svNy8EnPFFI2"
# There are several categories in the Superstore sales data; we start with
# time-series analysis and forecasting for the Technology category.

# + id="8RWsFTlYw-Sn"
Technology = df.loc[df['Category'] == 'Technology']

# + id="LMcdqWnyxJD4"
# Four years of technology sales data (earliest and latest order dates).
Technology['Order Date'].min(), Technology['Order Date'].max()

# + [markdown] id="PIk5QswexVYM"
# # Check Missing

# + id="yvO7aL04xSQh"
# Keep only 'Order Date' and 'Sales'; every other column is dropped.
cols = ['Row ID', 'Order ID', 'Ship Date', 'Ship Mode', 'Customer ID', 'Customer Name', 'Segment', 'Country', 'City', 'State', 'Postal Code', 'Region', 'Product ID', 'Category', 'Sub-Category', 'Product Name', 'Quantity', 'Discount', 'Profit']
Technology.drop(cols, axis=1, inplace=True)
Technology = Technology.sort_values('Order Date')
Technology.isnull().sum()

# + id="Ur6STbWAxSNU"
# Aggregate to one total-sales row per order date.
Technology = Technology.groupby('Order Date')['Sales'].sum().reset_index()

# + id="OaMv_cCi4N7w"
Technology.head()

# + id="8Ovc-Oxa4kW4"
Technology.columns

# + [markdown] id="K6db6QjYFt2k"
# Indexing with time-series data.

# + id="EYaWQ6pZxSJp"
#Indexing with Time Series
Technology = Technology.set_index('Order Date')
Technology.index

# + [markdown] id="guSh3Su0GBmp"
# Use the average daily sales value for each month instead, with the start
# of each month as the timestamp.

# + id="B_FmuKMB84aG"
# Monthly ('MS' = month start) mean of daily sales; `y` is the series that
# is actually plotted and decomposed below.
y = Technology['Sales'].resample('MS').mean()

# + id="lHTfyUdT5oN-"
#furniture.rename(columns={'Order Date':'Order_Date'}, inplace=True)
# NOTE(review): re-indexing by the 'Sales' column looks like a bug —
# `Technology` was already indexed by 'Order Date', and the re-indexed
# frame is never used again below (only `y` is). Verify before relying on it.
Technology = Technology.set_index('Sales')
Technology.index

# + [markdown] id="oWxn3_3GGRW3"
# # VISUALIZATION TIME SERIES

# + id="T7nXZarQxSCU"
y.plot(figsize=(15, 6))
plt.title("Visualizing Technology Sales Time Series Data")
plt.show()

# + [markdown] id="dm4jevNIyU-X"
# Visualize the data using a method called time-series decomposition, which
# splits the series into three distinct components: trend, seasonality and
# noise.

# + id="jIq7_osWyWrj"
from pylab import rcParams
rcParams['figure.figsize'] = 18, 8

decomposition = sm.tsa.seasonal_decompose(y, model='additive')
fig = decomposition.plot()
plt.title(" visualize Technology trend, seasonality, and noise")
plt.show()

# + [markdown] id="4vrLlRTbG1cA"
# The plot above clearly shows that Technology sales are not stable
# (non-stationary).
# + colab={"base_uri": "https://localhost:8080/"} id="FjDAKDNH-bAn" outputId="ff034c67-ec77-4d01-c367-cc5e93cf7ef8" print(df['Order Date']) # + colab={"base_uri": "https://localhost:8080/"} id="JUJCD2boDZMZ" outputId="95d8cb01-2fa9-4034-ba92-d2fa4fbfc8ee" df.shape # + [markdown] id="6NqBWUi8HI5u" # # PROCESSING # + id="_9dy5mEP9JYE" # get data values date = df['Order Date'].values temp = df['Sales'].values # + id="O9l_rpwwyuXc" import tensorflow as tf def windowed_dataset(series, window_size, batch_size, shuffle_buffer): series = tf.expand_dims(series, axis=-1) ds = tf.data.Dataset.from_tensor_slices(series) ds = ds.window(window_size + 1, shift=1, drop_remainder=True) ds = ds.flat_map(lambda w: w.batch(window_size + 1)) ds = ds.shuffle(shuffle_buffer) ds = ds.map(lambda w: (w[:-1], w[-1:])) return ds.batch(batch_size).prefetch(1) # + colab={"base_uri": "https://localhost:8080/"} id="yQPeWRUK9rCN" outputId="30418953-6c73-4e0f-c113-d345e8ec492f" from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(temp, date, test_size = 0.2, random_state = 0 , shuffle=False) print(len(x_train), len(x_test)) # + id="ZECwezbh2DZN" # model from keras.layers import Dense, LSTM data_x_train = windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=5000) data_x_test = windowed_dataset(x_test, window_size=60, batch_size=100, shuffle_buffer=5000) model = tf.keras.models.Sequential([ tf.keras.layers.Conv1D(filters=32, kernel_size=5, strides=1, padding="causal", activation="relu", input_shape=[None, 1]), tf.keras.layers.LSTM(64, return_sequences=True), tf.keras.layers.LSTM(64, return_sequences=True), tf.keras.layers.Dense(30, activation="relu"), tf.keras.layers.Dense(10, activation="relu"), tf.keras.layers.Dense(1), tf.keras.layers.Lambda(lambda x: x * 400) ]) lr_schedule = tf.keras.callbacks.LearningRateScheduler( lambda epoch: 1e-8 * 10**(epoch / 20)) optimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9) 
model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) # + colab={"base_uri": "https://localhost:8080/"} id="-T-U_uj42DQi" outputId="7c4f3c9c-4833-4c47-b9d2-3cedb1f928cd" valuemax = df['Sales'].max() print('Max value : ' ) print(valuemax) # + colab={"base_uri": "https://localhost:8080/"} id="cn5-ry54_cth" outputId="b7da6603-58ee-4d99-87f4-62f467cf5c19" valuemin = df['Sales'].min() print('Min Value: ') print(valuemin) # + colab={"base_uri": "https://localhost:8080/"} id="4kcQXAA8_rHK" outputId="e4aeadcc-b367-43d0-ace0-ed21211881c5" i = (valuemax - valuemin) * (10 / 100) print(i) # + id="eIUYt-AYADYm" # callback class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('mae')< i): self.model.stop_training = True print("\nMAE of the model < 10% of data scale") callbacks = myCallback() # + colab={"base_uri": "https://localhost:8080/"} id="EOTVCiTQAFFn" outputId="2a7761bc-6810-4f4f-db25-c6515b371ee9" tf.keras.backend.set_floatx('float64') history = model.fit(data_x_train ,epochs=500, validation_data=data_x_test, callbacks=[callbacks])
submission_2_timeseries_LSTM_deni_diana.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Open Food Facts exploration. NOTE: this is a Python 2 notebook (statement
# form `print x`); `utilities` is a project-local helper module providing
# the country filtering/aggregation functions used below.

import numpy as np
import pandas as pd
import utilities as utils
import matplotlib.pyplot as plt
# %matplotlib inline

data = pd.read_csv("data/food.csv")
print data.shape
data.head()

# Subset to products sold in the United States.
usa_data = utils.getByCountry(data, 'United States')
print usa_data.shape
usa_data.describe()

# ### What is the most sugary food in the USA?

# Sort non-null sugar values descending, then show every product tied for
# the maximum sugar content.
sugar = usa_data.sugars_100g[usa_data.sugars_100g.notnull()]
most_sugar = sugar.sort_values(ascending=False)
usa_data[usa_data.sugars_100g >= most_sugar.iloc[0]]

# ### Compare countries by average of different nutriments

unique_countries = utils.getUniqueCountries(data)

# One bar chart per nutriment; each compares per-country averages
# (countries with sample size > 30, per the helper's title convention).
sugar_avg = utils.compareCountriesByNutrimentAverage(data, unique_countries, 'sugars_100g')
p = sugar_avg.plot(kind='bar', legend=False, title='Average Sugars_100g By Country (w/ Sample Size > 30)')
p.set_xlabel("Country")
p.set_ylabel("Grams")
p

energy_avg = utils.compareCountriesByNutrimentAverage(data, unique_countries, 'energy_100g')
p = energy_avg.plot(kind='bar', legend=False, title='Average Energy_100g By Country (w/ Sample Size > 30)')
p.set_xlabel("Country")
p.set_ylabel("Grams")
p

sodium_avg = utils.compareCountriesByNutrimentAverage(data, unique_countries, 'sodium_100g')
p = sodium_avg.plot(kind='bar', legend=False, title='Average Sodium_100g By Country (w/ Sample Size > 30)')
p.set_xlabel("Country")
p.set_ylabel("Grams")
p

fat_100g = utils.compareCountriesByNutrimentAverage(data, unique_countries, 'fat_100g')
p = fat_100g.plot(kind='bar', legend=False, title='Average Fat_100g By Country (w/ Sample Size > 30)')
p.set_xlabel("Country")
p.set_ylabel("Grams")
p

proteins_100g = utils.compareCountriesByNutrimentAverage(data, unique_countries, 'proteins_100g')
p = proteins_100g.plot(kind='bar', legend=False, title='Average Proteins_100g By Country (w/ Sample Size > 30)')
p.set_xlabel("Country")
p.set_ylabel("Grams")
p

carbohydrates_100g = utils.compareCountriesByNutrimentAverage(data, unique_countries, 'carbohydrates_100g')
p = carbohydrates_100g.plot(kind='bar', legend=False, title='Average Carbohydrates_100g By Country (w/ Sample Size > 30)')
p.set_xlabel("Country")
p.set_ylabel("Grams")
p
Data_Exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=false editable=false # # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS109B Data Science 2: Advanced Topics in Data Science # # ## Homework 7: Generative Models - Variational Autoencoders and GANs [100 pts] # # # **Harvard University**<br/> # **Spring 2020**<br/> # **Instructors**: <NAME>, <NAME> and <NAME><br/> # # **DISCLAIMER**: No public reproduction of this homework nor its solution is allowed without the explicit consent of their authors. # # <hr style="height:2pt"> # # --- # # # + deletable=false editable=false #RUN THIS CELL import requests from IPython.core.display import HTML, display styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text HTML(styles) # + [markdown] deletable=false editable=false # ### INSTRUCTIONS # # - To submit your assignment follow the instructions given in Canvas. # - For this homework **you do NOT need to restart the kernel and run the whole notebook again before you submit**. # - **You should SAVE your models, and do not retrain on notebook reruns**. # - In this homework we import general keras library objects. Feel free to experiment and use different layers, optimizers, random initializers... whater you like using from the core libraries we import. # - Do not submit a notebook that is excessively long because output was not suppressed or otherwise limited. # + [markdown] deletable=false editable=false # --- # + [markdown] deletable=false editable=false # <div class="theme"> Overview </div> # # We are going to compare autoencoders (AEs), variational autoencoders (VAEs) and generative adversarial networks (GANs). 
The goal is to understand the particularities of each model and to learn how to build them. # # In addition to standard VAEs, we will also study conditional VAEs. Conditional VAEs incorporate input attributes on the latent representation of an input, providing some structure in the latent space. We will analyze how conditioal VAEs are capable of generating new photos according that depend on specified attributes. # # We are going to train our networks using [CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html), which is a large-scale face attributes dataset with more than 200K celebrity images and 40 different attribute annotations. # # Run the following cell to load important libraries. # + deletable=false editable=false # Load useful libraries import numpy as np import pandas as pd import time from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn.manifold import TSNE # TensorFlow and tf.keras import tensorflow as tf from tensorflow.keras import backend as K from tensorflow.keras import layers from tensorflow.keras import models from tensorflow.keras import losses from tensorflow.keras import optimizers from tensorflow.keras import initializers from tensorflow.keras.metrics import * # Plotting libraries import matplotlib.pyplot as plt plt.gray() # + [markdown] deletable=false editable=false # **Check availability of GPU** # # Run this line to verify your instance is lists an available GPU. # + deletable=false editable=false tf.config.experimental.list_physical_devices('GPU') # + [markdown] deletable=false editable=false # --- # + [markdown] deletable=false editable=false # **Dowload CelebA Dataset** # # Make sure to have `tensorflow_datasets` installed on version 2.1.0 before running following cell. You can install by running the following code on a **terminal** (not from Jupyter): # # ```pip install tensorflow_datasets==2.1.0``` # # Download CelebA if you have not downloaded it already. 
You can use the following line to download manually from a **terminal**: # # ```wget https://cs109b2020.s3.amazonaws.com/2.0.0.zip``` # # Uncompress it under ```~/tensorflow_datasets/celeb_a/2.0.0/```. # # - import tensorflow_datasets as tfds # + deletable=false editable=false # Run this cell # Assumes CelebA has been manually downloaded and is available in `~/tensorflow_datasets/celeb_a/2.0.0/`. import tensorflow_datasets as tfds train_celeb, val_celeb = tfds.load('celeb_a', split=['train', 'validation'], shuffle_files=False, download=False) # + deletable=false editable=false # Global variables to define training/loading models. # Modify as required. These are only suggested parameters. train = True epochs = 5 # number of epochs to train models batch_size = 32 input_size = (64, 64, 3) # images will be cropped and resized to `input_size`. # + [markdown] deletable=false editable=false # --- # + [markdown] deletable=false editable=false # ## <div class='exercise'><b>Question 1: Preprocess and visualize the data [20 pts]</b></div> # # CelebA has 202,599 face images of various celebrities and training on the whole set requires large computational resources to fit your models. For this reason we recommend cropping the images and resizing them to reduce to computational costs. Feel free to adjust the images resolution depending on your computation capabilities. We recommend using `image_size = (64,64,3)`, but feel free to use a larger resolution, or smaller, up to `image_size = (32,32,3)`. # # We provide the function `tf_norm_crop_resize_image` to normalize image pixels between `[0,1]`, to crop the height and width of images to `150x150` pixels, and to [resize](https://www.tensorflow.org/api_docs/python/tf/image/resize) images to the indicated size in the function call. 
Follow the intructions below to format your data for the different models you will need to train: # # **1.1** Create training and validation Dataset pipelines `train_ds` and `val_ds` from `train_celeb` and `val_celeb`, respectively. The Dataset pipelines you create have to return a tuple `(image, image)` which you will use to train your models with an MSE loss criteria: the first element is the input fed to the model, the second element is used to compute the loss of the model. # # Make sure the Datasets follow this pipeline: 1) normalize, crop, resize and follows format (use [map](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map)), 2) [shuffle](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle), 3) [batch](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch), and 4) [prefetch](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch) (optional). # # **1.2** Create training and validation Dataset pipelines `train_cond_ds` and `val_cond_ds` from `train_celeb` and `val_celeb`, respectively. The Dataset pipelines you create have to return a tuple `((image, attributes), image)` to train your conditional VAE model. The first element of the tuple corresponds to the input of the model and consists of two tensors: the image and 2 selected attributes of your choice (for example, `Male` and `Smiling` attributes). You can choose your attributes from the ones [available](https://www.tensorflow.org/datasets/catalog/celeb_a). Make sure the attributes you use are easily identifiable in the images because you will need to alter them and expect visual changes (see Question 4.3). Convert the boolean attributes to `tf.float32` using [`tf.cast`](https://www.tensorflow.org/api_docs/python/tf/cast). 
# # Make sure the Datasets follow this pipeline: 1) normalize, crop, resize and satisfies input-output format (use [map](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map)), 2) [shuffle](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle), 3) [batch](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch), and 4) [prefetch](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch) (optional). # # **1.3** Pick 5 random images from the train dataset and plot them. List the attributes from these images and verify they are correct. # # # **Final Note:** # When training your models, if you find that the training set is too large and models take too long to train, you may select a portion of the train set and use that only. Adjust this value on your [Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) pipeline by using the method [`take`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#take). Make sure that your delivered images with AE, VAEs, CVAE and GAN look nice. For reference, out solution took 100 min to train in JupyterHub for all models, and 70 min in Colab, from beginning to end, on the full training data. # + deletable=false editable=false # DO NOT DELETE THIS CELL # Use this function to normalize, crop and resize your images. def tf_norm_crop_resize_image(image, resize_dim): """Normalizes image to [0.,1.], crops to dims (150, 150, 3) and resizes to `resize_dim`, returning an image tensor.""" image = tf.cast(image, tf.float32)/255. image = tf.image.resize_with_crop_or_pad(image, 150, 150) image = tf.image.resize(image, resize_dim) image.set_shape(resize_dim + (3,)) return image # + [markdown] deletable=false editable=false # ## Answers # + [markdown] deletable=false # **1.1** Create training and validation Dataset pipelines `train_ds` and `val_ds` from `train_celeb` and `val_celeb`, respectively. 
# The Dataset pipelines you create have to return a tuple `(image, image)` which you will use to train your models with an MSE loss criteria: the first element is the input fed to the model, the second element is used to compute the loss of the model.
#
# Make sure the Datasets follow this pipeline: 1) normalize, crop, resize and follows format (use [map](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map)), 2) [shuffle](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle), 3) [batch](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch), and 4) [prefetch](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch) (optional).

# + deletable=false
# 1.1
# your code here
def preprocess_img(element):
    """Map a CelebA record to an (input, target) pair for MSE training.

    Both elements are the same normalized, cropped, 64x64 image: the
    autoencoder is trained to reconstruct its own input.
    """
    image = element['image']
    image = tf_norm_crop_resize_image(image, (64,64))
    return (image, image)

def prepare_dataset(dataset, func):
    """Shared pipeline: map `func`, then shuffle, batch and prefetch."""
    ds = dataset.map(func)
    ds = ds.shuffle(10000)
    ds = ds.batch(batch_size) # use batch later when training, here it creates a lot of trouble for plotting
    ds = ds.prefetch(5)
    return ds

# -

train_ds = prepare_dataset(train_celeb, preprocess_img)
val_ds = prepare_dataset(val_celeb, preprocess_img)

# + [markdown] deletable=false
# **1.2** Create training and validation Dataset pipelines `train_cond_ds` and `val_cond_ds` from `train_celeb` and `val_celeb`, respectively. The Dataset pipelines you create have to return a tuple `((image, attributes), image)` to train your conditional VAE model. The first element of the tuple corresponds to the input of the model and consists of two tensors: the image and 2 selected attributes of your choice (for example, `Male` and `Smiling` attributes). You can choose your attributes from the ones [available](https://www.tensorflow.org/datasets/catalog/celeb_a). Make sure the attributes you use are easily identifiable in the images because you will need to alter them and expect visual changes (see Question 4.3). Convert the boolean attributes to `tf.float32` using [`tf.cast`](https://www.tensorflow.org/api_docs/python/tf/cast).
#
# Make sure the Datasets follow this pipeline: 1) normalize, crop, resize and satisfies input-output format (use [map](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map)), 2) [shuffle](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle), 3) [batch](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch), and 4) [prefetch](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch) (optional).

# + deletable=false
# 1.2
# your code here
def cond_img(element):
    """Map a CelebA record to ((image, (male, smiling)), image) for the CVAE.

    The two chosen boolean attributes are cast to float32 so they can be
    fed alongside the image as conditioning inputs.
    """
    image = element['image']
    image = tf_norm_crop_resize_image(image, (64,64))
    attributes = (tf.cast(element['attributes']['Male'], tf.float32),
                  tf.cast(element['attributes']['Smiling'], tf.float32))
    return ((image, attributes), image)

# -

train_cond_ds =prepare_dataset(train_celeb, cond_img)
val_cond_ds = prepare_dataset(val_celeb, cond_img)

# + [markdown] deletable=false
# **1.3** Pick 5 random images from the train dataset and plot them. List the attributes from these images and verify they are correct.
#
#
# **Final Note:**
# When training your models, if you find that the training set is too large and models take too long to train, you may select a portion of the train set and use that only. Adjust this value on your [Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) pipeline by using the method [`take`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#take). Make sure that your delivered images with AE, VAEs, CVAE and GAN look nice. For reference, out solution took 100 min to train in JupyterHub for all models, and 70 min in Colab, from beginning to end, on the full training data.
# + deletable=false
# 1.3
# your code here
# dic = {0: False, 1: True}
# Plot 5 individual examples from the conditional pipeline.
# `train_cond_ds` yields *batches* of ((image, attributes), target), so we
# must unbatch before taking 5 elements — iterating `take(5)` directly
# would hand `imshow` a whole (batch, 64, 64, 3) tensor and try to
# scalar-format a length-batch attribute tensor, both of which fail.
# The original tuple unpacking also bound `image` twice (input and target);
# the target copy is named `_target` here to avoid the shadowing.
fig, axes = plt.subplots(1, 5, figsize=(15, 3))
for i, ((image, attributes), _target) in enumerate(train_cond_ds.unbatch().take(5)):
    axes[i].imshow(image)
    # print(image.shape)
    # axes[i].set_title(f'{label:.2f}')
    # float() converts the 0-d attribute tensors so the f-string `:.2f`
    # format spec applies cleanly.
    axes[i].set_title(f'Male: {float(attributes[0]):.2f}, Smiling: {float(attributes[1]):.2f}')

# + [markdown] deletable=false editable=false
# ---

# + [markdown] deletable=false editable=false
# <div class='exercise'> <b> Question 2: Set-up an AutoEncoder [20 points] </b> </div>
#
# **Define custom convolutional layers**
#
# We provide below the skeleton of a custom keras layer that you need to complete to build the following models. You should read the Keras [guidelines](https://www.tensorflow.org/guide/keras/custom_layers_and_models) on how to build custom layers. You are required to fill the specific methods indicated below on each part.
#
# You will then construct an autoencoder using both custom layers, and visualize the AE image reconstruction and latent spaces.
#
# **2.1** Setup a custom layer consisting of convolutional layers and complete the `__init__` and `call` methods of the `ConvEncoder` class. We recommend to use 4 convolutional layers and dropout layers alternatively, filters of size 5x5, 'relu' activations, 'same' padding, `[9, 18, 32, 64]` channels respectively on each layer, and strides of 2x2. The intention is to halve the spatial dimensions on each convolutional layer while augmenting the number of channels on deeper layers.
#
# You will use this layer repeatedly when building your subsequent models.
#
# **2.2** Setup a custom layer consisting of convolutional layers and complete the `__init__` and `call` methods of the `ConvDecoder` class. We will refer to the input dimension of this layer as `latent_dim`. Make sure the output dimension of this layer is equal to the input dimension of your images, i.e., (64,64,3) if you followed our recommendation.
# # We recommend using `Conv2DTranspose` layers instead of `Conv2D` similar to `ConvEncoder`. Using `Conv2DTranspose` will have the desired effect in terms of inverting previous operations; for example, setting a stride of 2x2 will double the output size of the input. # # **2.3** Create a Keras model `AE`. Use the previously defined `ConvEncoder` and `ConvDecoder` layer classes you just completed to build your autoencoder. Between these layers, [flatten](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten) the input and incorporate two intermediate [Dense](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense), and [reshape](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Reshape) layers. More precisely, use the following architecture: # - Input image. # - ConvEncoder layer. # - Flatten layer. # - **Dense layer with linear activation** and `bottleneck_dim=128` units (recommended dimension). # - **Dense layer with ReLu activation**. # - Reshape layer to `latent_dim`. # - ConvDecoder layer. # # **2.4** Why do we suggest the first dense layer after the `ConvEncoder` layer use `linear` activation in the `AE` model? Is it necessary requirement or not? Explain your answer. # # **2.5** Train the `AE` model (use MSE loss criterion and an optimizer of your choice). We found 5 epochs sufficed to train our model (feel free to adjust this value). Print a summary of the model. # # **We recommend [saving](https://www.tensorflow.org/tutorials/keras/save_and_load) the trained model**. # # **2.6** Visualize 5 random original and reconstructed images fed to the autoencoder from the validation data. # # **2.7** Visualize the first 2 [principal components](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html) and [t-SNE](https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html) projection onto the plane of the latent representation of the validation images. 
# Use the representation after the first dense layer where `bottleneck_dim=128` to compute the PCs and t-SNE projections. Retrieve at least `1024` images and color each input by class type, for example, `Male` and `Smiling` (if these where your chosen attributes), for **each scatter plot visualization** and attributes. You need to present 4 scatter plots in total. Explain your results.

# + [markdown] deletable=false editable=false
# ## Answers

# + [markdown] deletable=false
# **2.1** Setup a custom layer consisting of convolutional layers and complete the `__init__` and `call` methods of the `ConvEncoder` class. We recommend to use 4 convolutional layers and dropout layers alternatively, filters of size 5x5, 'relu' activations, 'same' padding, `[9, 18, 32, 64]` channels respectively on each layer, and strides of 2x2. The intention is to halve the spatial dimensions on each convolutional layer while augmenting the number of channels on deeper layers.
#
# You will use this layer repeatedly when building your subsequent models.

# + deletable=false
# 2.1
class ConvEncoder(layers.Layer):
    """
    Convolutional Encoder Layer Class.
    Converts an input into a latent representation.

    Four stride-2 5x5 Conv2D layers, each halving the spatial dimensions,
    followed by a single Dropout. With a 64x64x3 input this yields a
    4x4x9 feature map.
    """

    def __init__(self, input_shape, dropout_rate=0.0, name='encoder', **kwargs):
        """
        Initializes the encoder layers and saves them as local attribute.
        Input:
        -input_shape: 3D-tuple with (rows, cols, channels) input image dimensions.
        -dropout_rate: rate of the trailing Dropout layer (0.0 default = no-op).
        Returns nothing.
        """
        super(ConvEncoder, self).__init__(name=name, input_shape=input_shape, **kwargs)
        ## your code here
        # NOTE(review): channel counts run 64 -> 32 -> 18 -> 9, the reverse
        # of the recommended [9, 18, 32, 64], and Dropout is applied once at
        # the end rather than alternating with each conv — confirm intended.
        self.model = models.Sequential()
        self.model.add(layers.Conv2D(64, (5,5), padding='same', activation='relu', strides=(2, 2), input_shape=input_shape))
        self.model.add(layers.Conv2D(32, (5,5), padding = 'same', activation = 'relu', strides=(2, 2)))
        self.model.add(layers.Conv2D(18, (5,5), padding='same', activation = 'relu', strides=(2, 2)))
        self.model.add(layers.Conv2D(9, (5,5), padding='same', activation='relu', strides=(2, 2)))
        self.model.add(layers.Dropout(dropout_rate))
        # end of your code here

    def call(self, inputs, training=None):
        """
        Runs the encoding inference for `inputs`.
        Inputs:
        -inputs: 4D-tensor with dimension (batch_size, self.input_dim).
        """
        ## your code here
        # NOTE(review): `training` is not forwarded to the sub-model, so the
        # Dropout layer will not toggle between train/inference behaviour.
        z = self.model(inputs)
        # end of your code here
        return z

# + [markdown] deletable=false
# **2.2** Setup a custom layer consisting of convolutional layers and complete the `__init__` and `call` methods of the `ConvDecoder` class. We will refer to the input dimension of this layer as `latent_dim`. Make sure the output dimension of this layer is equal to the input dimension of your images, i.e., (64,64,3) if you followed our recommendation.
#
# We recommend using `Conv2DTranspose` layers instead of `Conv2D` similar to `ConvEncoder`. Using `Conv2DTranspose` will have the desired effect in terms of inverting previous operations; for example, setting a stride of 2x2 will double the output size of the input.

# + deletable=false
# 2.2
class ConvDecoder(layers.Layer):
    """
    Convolutional Decoder Layer Class.
    Converts z, the encoded digit vector, back into a readable digit.

    Four stride-2 5x5 Conv2DTranspose layers, each doubling the spatial
    dimensions; a (4, 4, C) latent input is upsampled to (64, 64, 3).
    """

    def __init__(self, input_shape, dropout_rate=0.5, name='decoder', **kwargs):
        """
        Initializes the decoder architecture and saves it as a local attribute.
        Input:
        -input_shape: 3D-tuple with (rows, cols, channels) input representation.
        -dropout_rate: stored but currently unused (see note below).
        Returns nothing.
        """
        super(ConvDecoder, self).__init__(name=name, input_shape=input_shape, **kwargs)
        # NOTE(review): dropout_rate is saved but no Dropout layer is ever
        # added, so this attribute currently has no effect.
        self.dropout_rate = dropout_rate
        # your code here
        # NOTE(review): the final layer uses 'relu', so outputs are >= 0 but
        # not bounded to [0, 1] like the normalized inputs — a 'sigmoid'
        # output activation would match the input range; confirm intended.
        self.model = models.Sequential()
        self.model.add(layers.Conv2DTranspose(18, (5,5), padding='same', activation='relu', strides=(2,2), input_shape=input_shape))
        self.model.add(layers.Conv2DTranspose(32, (5,5), padding='same', activation='relu', strides=(2,2)))
        self.model.add(layers.Conv2DTranspose(64, (5,5), padding='same', activation='relu', strides=(2,2)))
        self.model.add(layers.Conv2DTranspose(3, (5,5), padding='same', activation='relu', strides=(2,2)))
        # end of your code here

    def call(self, inputs, training=None):
        """
        Runs the encoding inference for `inputs`.
        Inputs:
        -inputs: 4D-tensor with dimension (batch_size, self.input_dim).
        """
        ## your code here
        x = self.model(inputs)
        # end your code here
        return x

# + [markdown] deletable=false
# **2.3** Create a Keras model `AE`. Use the previously defined `ConvEncoder` and `ConvDecoder` layer classes you just completed to build your autoencoder. Between these layers, [flatten](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten) the input and incorporate two intermediate [Dense](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense), and [reshape](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Reshape) layers. More precisely, use the following architecture:
# - Input image.
# - ConvEncoder layer.
# - Flatten layer.
# - **Dense layer with linear activation** and `bottleneck_dim=128` units (recommended dimension).
# - **Dense layer with ReLu activation**.
# - Reshape layer to `latent_dim`.
# - ConvDecoder layer.
# -

latent_dim = (4,4,64)

# + deletable=false
# 2.3
# your code here
# Autoencoder: image -> conv encoder -> flatten -> 128-d linear bottleneck
# -> dense back up to prod(latent_dim) units -> reshape -> conv decoder.
input_image = tf.keras.layers.Input(shape=input_size, name='input')
x = ConvEncoder(input_size)(input_image)
x = layers.Flatten()(x)
x = layers.Dense(128, activation='linear')(x)
x = layers.Dense(1024, activation = 'relu')(x)  # 1024 == np.prod(latent_dim)
x = layers.Reshape(latent_dim)(x)
x = ConvDecoder(latent_dim)(x)

ae = tf.keras.Model(input_image, x)
ae.compile(optimizer='rmsprop', loss='mse')
# -

# **2.4** Why do we suggest the first dense layer after the `ConvEncoder` layer use `linear` activation in the `AE` model? Is it necessary requirement or not? Explain your answer.

# *your answer here*

# + [markdown] deletable=false
# **2.5** Train the `AE` model (use MSE loss criterion and an optimizer of your choice). We found 5 epochs sufficed to train our model (feel free to adjust this value). Print a summary of the model.

# + deletable=false
# 2.5
# your code here
ae.summary()
# -

# history = ae.fit(train_ds.batch(batch_size), epochs=epochs, validation_data=val_ds.batch(batch_size), shuffle=True)
history = ae.fit(train_ds, epochs=epochs, validation_data=val_ds)
ae.save('ae')

# +
def plot_training_history(history):
    """Plot training vs. validation loss curves from a Keras History object."""
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1,len(loss)+1)
    plt.figure()
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.xlabel('epoch')
    plt.legend()
    plt.show()

def load_keras_model(model_name):
    """Load a model architecture from ./models/<name>.json and its weights
    from ./models/<name>.h5."""
    # Load json and create model
    # FIX: use a context manager so the file handle is closed even if reading
    # raises (the original open/read/close triple leaked the handle on error).
    with open('./models/{}.json'.format(model_name), 'r') as json_file:
        loaded_model_json = json_file.read()
    model = tf.keras.models.model_from_json(loaded_model_json)
    # Load weights into new model
    model.load_weights("./models/{}.h5".format(model_name))
    return model
# -

#plot our loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()

# + [markdown] deletable=false
# **2.6** Visualize 5 random original and reconstructed images fed to the autoencoder from the validation data.

# + deletable=false
# 2.6
# your code here
# Side-by-side grid: column 0 holds 5 validation originals, column 1 the
# autoencoder reconstructions of the same images.
plt.figure(figsize = (8, 16))
grid = plt.GridSpec(5, 2, wspace=0.2, hspace=0.2)
imlist = []
data = val_ds.unbatch().take(5)
# assumes val_ds yields (image, target) pairs -- TODO confirm against the
# dataset built in Question 1
for img,_ in data:
    imlist.append(img)
pred_imgs = ae.predict(np.asarray(imlist))
for i,img in enumerate(imlist):
    # left column: original image
    plt.subplot(grid[i, 0])
    fig1 = plt.imshow(img)
    fig1.axes.get_xaxis().set_visible(False)
    fig1.axes.get_yaxis().set_visible(False)
    plt.title("Original picture")
    # right column: autoencoder reconstruction
    plt.subplot(grid[i, 1])
    fig2 = plt.imshow(pred_imgs[i])
    fig2.axes.get_xaxis().set_visible(False)
    fig2.axes.get_yaxis().set_visible(False)
    plt.title("Reconstructed picture")

# + [markdown] deletable=false
# **2.7** Visualize the first 2 [principal components](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html) and [t-SNE](https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html) projection onto the plane of the latent representation of the validation images. Use the representation after the first dense layer where `bottleneck_dim=128` to compute the PCs and t-SNE projections. Retrieve at least `1024` images and color each input by class type, for example, `Male` and `Smiling` (if these were your chosen attributes), for **each scatter plot visualization** and attributes. You need to present 4 scatter plots in total. Explain your results.
# + deletable=false
# 2.7 (PCA visualization)
# your code here
# Collect 1024 validation images and their two binary attribute labels.
# assumes val_cond_ds yields ((image, attrs), target) tuples -- TODO confirm
val_data = val_cond_ds.unbatch().take(1024)
imgs = []
attrs = []
for (img, attr),_ in val_data:
    imgs.append(img)
    attrs.append(attr)
imgs = np.asarray(imgs)
attrs = np.asarray(attrs)
# get the model

# +
def draw(model,mode='pca',cond=False):
    # Locate the bottleneck layer of `model` and scatter-plot a 2-D
    # projection (PCA or t-SNE) of its activations for the images gathered
    # above. The layer lookup distinguishes three cases: a model with a
    # nested "encoder" sub-model and no conditioning (uses its "dense1"),
    # an encoder containing the conditional "flat_cond" layer (the CVAE),
    # and a flat model (top-level "dense1").
    flag=0;
    for layer in model.layers:
        if layer.name=="encoder":
            flag=1;break;
    if flag:
        for layer in model.get_layer("encoder").layers:
            if layer.name=="flat_cond":
                flag=0;break;
        if flag:
            thelayer = model.get_layer("encoder").get_layer("dense1")
            intermediate_model = models.Model(inputs=model.layers[0].input, outputs=thelayer.output)
        else:
            # conditional case: the intermediate model needs both the image
            # and the attribute inputs
            thelayer = model.get_layer("encoder").get_layer("flat_cond")
            intermediate_model = models.Model(inputs=[model.layers[0].input,model.layers[1].input], outputs=thelayer.output)
    else:
        thelayer = model.get_layer("dense1")
        intermediate_model = models.Model(inputs=model.layers[0].input, outputs=thelayer.output)
    # # get the prediction
    # imgs=[]
    # attrs = []
    # val_data = val_cond_ds.unbatch().take(1024)
    # for (img,attr),_ in val_data.as_numpy_iterator():
    #     imgs.append(img)
    #     attrs.append(attr)
    # imgs= np.asarray(imgs)
    # attrs = np.asarray(attrs)
    # NOTE(review): the hard-coded 1024 couples this function to the cell
    # above; reshape(len(imgs), -1) would decouple them.
    if cond:
        results=intermediate_model.predict([imgs,attrs]).reshape(1024,-1)
    else:
        results=intermediate_model.predict(imgs).reshape(1024,-1)
    # compute pca
    if mode=='pca':
        pca = PCA(n_components=2)
        fitted_pca = pca.fit(results)
        # NOTE(review): fit_transform refits the already-fitted PCA;
        # fitted_pca.transform(results) would avoid the duplicated work.
        pca_latent_train = fitted_pca.fit_transform(results)
        latent_plot(pca_latent_train,attrs,fitted_pca=fitted_pca)
    elif mode=='tsne':
        tsne = TSNE(n_components=2).fit_transform(results)
        latent_plot(tsne,attrs,'tsne')

# plotting
def latent_plot(data,attrs,mode='pca',fitted_pca=None):
    # Two scatter plots of the 2-D embedding `data`, one per binary
    # attribute column in `attrs` (red = 0, blue = 1).
    df = pd.DataFrame({'pca1':data[:,0],'pca2':data[:,1],'attr1':attrs[:,0],'attr2':attrs[:,1]})
    plt.figure(figsize = (12, 5))
    grid = plt.GridSpec(1, 2, wspace=0.2, hspace=0.2)
    color = {0:'r',1:'b'}
    labels = [{0:'Female',1:'Male'},{0:'Not smiling',1:'Smiling'}]
    for k in range(2):
        plt.subplot(grid[0, k])
        for i in range(2):
            # NOTE(review): this rebinds the `data` parameter inside the
            # loop; it still works because the original array is no longer
            # needed afterwards, but a distinct name would be clearer.
            data = df[df['attr'+str(1+k)]==i]
            plt.scatter(data.pca1,data.pca2,c=color[i],label=labels[k][i])
        if mode =='pca':
            # axis labels carry the explained-variance ratio of each PC
            plt.xlabel("PC1 ({}%)".format(np.round(100*fitted_pca.explained_variance_ratio_[0],1)))
            plt.ylabel("PC2 ({}%)".format(np.round(100*fitted_pca.explained_variance_ratio_[1],1)))
        plt.legend()

# + [markdown] deletable=false
# *Your answer here*

# + [markdown] deletable=false editable=false
# Compute also the t-SNE decomposition of the latent images. Does it uncover any characteristic structure? Please, explain your answer

# + deletable=false
# 2.7 (t-SNE visualization)
# your code here

# + [markdown] deletable=false
# *Your answer here*

# + [markdown] deletable=false editable=false
# ---

# + [markdown] deletable=false editable=false
# <div class='exercise'> <b> Question 3: Set-up a Convolutional Variational Autoencoder [20 points].</b> </div>
#
# In this exercise you will code a standard Variational Autoencoder. You will first create a custom layer `Sampling` that takes the mean and log-variance of a Gaussian distribution as inputs, and returns a sample from that distribution. You will use this sample as a latent representation of your probabilistic encoder conditioned on the input image, and use it to reconstruct an image. You will build the complete VAE architecture and study its properties.
#
# You will need to minimize the negative ELBO function formed by a reconstruction loss and a regularization term over the mean and variance of the probabilistic encoder. You will train two VAE models, one with no regularization, and a second with regularization.
#
# Follow these instructions:
#
# **3.1** Complete the `call` function of Sampling keras layer. This function takes as input the mean and log-variance vectors of a multivariate Gaussian distribution and returns a sampled tensor from this distribution.
#
# **3.2** Create two Variational AutoEncoder models named `VAE1` and `VAE2`.
Use the `ConvEncoder` and `ConvDecoder` layer classes you completed in Question 2 and the `Sampling` layer from 3.1. Both VAEs should have the following architecture: # - Input image. # - ConvEncoder. # - Flatten layer. # - Dense layer with linear activation and 128 units to predict the mean of the encoder conditional distribution $q_x(z)=N(\mu,\sigma)$. # - Dense layer with linear activation and 128 units to predict the log-variance of the encoder conditional distribution $q_x(z)=N(\mu,\sigma)$. # - Sampling layer you completed in Question 3.1. # - Dense layer with ReLu activation. # - Reshape layer: reshapes the output of dense into `latent_dim`. # - ConvDecoder. # # Finally, `VAE1` should not use any regularization of the probabilistic encoder (from the prior). # # Instead, `VAE2` should incorporate a KL loss to regularize the probabilistic encoder to normal Gaussian of zero mean and unit variance acting as prior, as explained in class. # You may use the following expression: `kl_loss = - reg * 0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)`, where a reasonable value for `reg = 0.1` (feel free to adjust). # To include the intermediate loss in `VAE2`, you may use the function `add_loss` from keras models/layers as explained in the [documentation](https://www.tensorflow.org/guide/keras/train_and_evaluate). # # **We recommend saving your trained models.** # # **3.3** Why do we use linear activation values to encode the mean and log-variance of the probabilistic encoder? Explain your answer. # # **3.4** Visualize 1 original image and 5 reconstructed images from the validation data fed to `VAE1` and `VAE2`. Comment on the reconstructed images. Notice that you may need to tune penalty regularization term to observe differences between `VAE1` and `VAE2` (there should be differences!). 
#
# **3.5** Visualize the first 2 principal components (PCs) and the t-SNE decomposition of the validation data on both `VAE1` and `VAE2` obtained from the latent space, i.e., a sample drawn from the probabilistic encoder for a given input. Color the datapoints depending on the input's attributes of your choice (`Male` and `Smiling` if these were your choice). Draw four separate scatterplots in total. Explain what you observe.

# + [markdown] deletable=false editable=false
# ## Answers

# + [markdown] deletable=false
# **3.1** Complete the `call` function of Sampling keras layer. This function takes as input the mean and log-variance vectors of a multivariate Gaussian distribution and returns a sampled tensor from this distribution.
# -

latent_dim=(4,4,64)

# + deletable=false
class Sampling(layers.Layer):
    """Sampling layer in latent space.

    Uses (z_mean, z_log_var) to sample z via the reparameterization trick,
    so gradients flow through z_mean and z_log_var while the randomness
    stays in an auxiliary eps ~ N(0, I).
    """

    def call(self, inputs):
        """Returns a random sample from a Gaussian with mean and log-variance indicated in inputs.

        Inputs:
        -inputs: tuple (z_mean, z_log_var), each of shape (batch, latent width).
        Returns a sample z drawn from the Gaussian, same shape as z_mean.
        """
        z_mean, z_log_var = inputs
        # your code here
        # FIX: draw eps with the same shape as z_mean. The previous code
        # built the shape as (batch, latent_dim) where `latent_dim` is the
        # (4, 4, 64) tuple -- a tuple is not a valid dimension entry, and the
        # latent vectors here are 128-d anyway.
        epsilon = tf.keras.backend.random_normal(shape=tf.keras.backend.shape(z_mean))
        # FIX: z = mu + sigma * eps with sigma = exp(0.5 * log_var). The 0.5
        # factor was missing, so the layer effectively sampled with the
        # *squared* standard deviation (exp(log_var) = variance, not sigma).
        z = z_mean + tf.keras.backend.exp(0.5 * z_log_var) * epsilon
        return z

    def get_config(self):
        # No extra constructor arguments to serialize; kept so the layer
        # round-trips through model saving/loading.
        config = super().get_config().copy()
        config.update({
        })
        return config

# + [markdown] deletable=false
#
# **3.2** Create two Variational AutoEncoder models named `VAE1` and `VAE2`. Use the `ConvEncoder` and `ConvDecoder` layer classes you completed in Question 2 and the `Sampling` layer from 3.1. Both VAEs should have the following architecture:
# - Input image.
# - ConvEncoder.
# - Flatten layer.
# - Dense layer with linear activation and 128 units to predict the mean of the encoder conditional distribution $q_x(z)=N(\mu,\sigma)$.
# - Dense layer with linear activation and 128 units to predict the log-variance of the encoder conditional distribution $q_x(z)=N(\mu,\sigma)$.
# - Sampling layer you completed in Question 3.1.
# - Dense layer with ReLu activation.
# - Reshape layer: reshapes the output of dense into `latent_dim`.
# - ConvDecoder.
#
# Finally, `VAE1` should not use any regularization of the probabilistic encoder (from the prior).
#
# Instead, `VAE2` should incorporate a KL loss to regularize the probabilistic encoder to normal Gaussian of zero mean and unit variance acting as prior, as explained in class.
# You may use the following expression: `kl_loss = - reg * 0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)`, where a reasonable value for `reg = 0.1` (feel free to adjust).
# To include the intermediate loss in `VAE2`, you may use the function `add_loss` from keras models/layers as explained in the [documentation](https://www.tensorflow.org/guide/keras/train_and_evaluate).
#
# **We recommend saving your trained models.**

# + deletable=false
# 3.2
# your code here
# ---- VAE1: no prior regularization ----
# encoder
input_image = tf.keras.layers.Input(shape=input_size, name='input')
x = ConvEncoder(input_size, name='encoder')(input_image)
# need to know the shape of the network here for the decoder
shape_before_flattening = K.int_shape(x)
x = layers.Flatten()(x)
# sampling z
z_mu = layers.Dense(128, activation='linear')(x)
z_log_var = layers.Dense(128, activation = 'linear')(x)
z = Sampling()((z_mu, z_log_var))
# decoder takes the latent distribution sample as input: expand back to the
# encoder's pre-flatten size, reshape, and deconvolve to an image
x = Dense(np.prod(shape_before_flattening[1:]), activation='relu')(z)
x = Reshape(shape_before_flattening[1:])(x)
x = ConvDecoder(shape_before_flattening[1:])(x)

# FIX: the model output must be the reconstructed image `x`, not the latent
# sample `z` -- with output z the MSE loss compared a 128-d latent code
# against 64x64x3 target images.
VAE1 = tf.keras.Model(input_image, x)
VAE1.compile(optimizer='rmsprop', loss='mse')
VAE1.summary()

# +
# ---- VAE2: same architecture plus KL regularization towards N(0, I) ----
# (This cell previously rebuilt VAE1 a second time and wrapped the Sampling
# *class* in a Lambda layer; `Lambda(Sampling)` is invalid because Lambda
# expects a function -- Sampling is a Layer and must be instantiated and
# called. Question 3.4 below already uses `VAE2`, which was never defined.)
input_image = tf.keras.layers.Input(shape=input_size, name='input')
x = ConvEncoder(input_size)(input_image)
x = layers.Flatten()(x)
# sampling z
z_mu = layers.Dense(128, activation='linear')(x)
z_log_var = layers.Dense(128, activation = 'linear')(x)
z = Sampling()([z_mu, z_log_var])
x = layers.Dense(1024, activation='relu')(z)  # 1024 == np.prod(latent_dim)
x = layers.Reshape(latent_dim)(x)
x = ConvDecoder(latent_dim)(x)

VAE2 = tf.keras.Model(input_image, x)
# KL divergence between q(z|x) = N(z_mu, exp(z_log_var)) and the N(0, I)
# prior, scaled by `reg` so reconstruction still dominates training.
reg = 0.1
kl_loss = - reg * 0.5 * tf.reduce_mean(z_log_var - tf.square(z_mu) - tf.exp(z_log_var) + 1)
VAE2.add_loss(kl_loss)
VAE2.compile(optimizer='rmsprop', loss='mse')
VAE2.summary()

# + [markdown] deletable=false
# **3.3** Why do we use linear activation values to encode the mean and log-variance of the probabilistic encoder? Explain your answer.

# + [markdown] deletable=false
# *Your answer here*

# + [markdown] deletable=false
# **3.4** Visualize 1 original image and 5 reconstructed images from the validation data fed to `VAE1` and `VAE2`. Comment on the reconstructed images. Notice that you may need to tune penalty regularization term to observe differences between `VAE1` and `VAE2` (there should be differences!).
# + deletable=false # 3.4 # your code here # your code here plt.figure(figsize = (8, 16)) grid = plt.GridSpec(5, 2, wspace=0.2, hspace=0.2) imlist = [] data = val_ds.unbatch().take(5) for img,_ in data: imlist.append(img) pred_imgs = VAE1.predict(np.asarray(imlist)) for i,img in enumerate(imlist): plt.subplot(grid[i, 0]) fig1 = plt.imshow(img) fig1.axes.get_xaxis().set_visible(False) fig1.axes.get_yaxis().set_visible(False) plt.title("Original picture") plt.subplot(grid[i, 1]) fig2 = plt.imshow(pred_imgs[i]) fig2.axes.get_xaxis().set_visible(False) fig2.axes.get_yaxis().set_visible(False) plt.title("Reconstructed picture") # + # your code here plt.figure(figsize = (8, 16)) grid = plt.GridSpec(5, 2, wspace=0.2, hspace=0.2) imlist = [] data = val_ds.unbatch().take(5) for img,_ in data: imlist.append(img) pred_imgs = VAE2.predict(np.asarray(imlist)) for i,img in enumerate(imlist): plt.subplot(grid[i, 0]) fig1 = plt.imshow(img) fig1.axes.get_xaxis().set_visible(False) fig1.axes.get_yaxis().set_visible(False) plt.title("Original picture") plt.subplot(grid[i, 1]) fig2 = plt.imshow(pred_imgs[i]) fig2.axes.get_xaxis().set_visible(False) fig2.axes.get_yaxis().set_visible(False) plt.title("Reconstructed picture") # + [markdown] deletable=false # # *your explanation here* # # + [markdown] deletable=false # **3.5** Visualize the first 2 principal components (PCs) and the t-SNE decomposition of the validation data on both `VAE1` and `VAE2` obtained from the latent space, i.e., a sample drawn from the probabilistic encoder for a given input. Color the datapoints depending on the input's attributes of your choice (`Male` and `Smiling` if these were your choice). Draw four separate scatterplots in total. Explain what you observe. 
# + deletable=false # 3.5 # your code here # + [markdown] deletable=false # **Explanation of PCA visualization:** # # *your explanation here* # # - # 3.5 # your code here # + [markdown] deletable=false # **Explanation of t-SNE decomposition:** # # *your explanation here* # # + [markdown] deletable=false # <div class='exercise'> <b> Question 4: Set-up a Conditional VAE. [20 points] </b> </div> # # Conditional VAEs are similar to standard VAEs were you incorporate an input's label/information into the latent space. When the model is trained in this form, the model learns to distinguish between these features. For example, you can select features on latent space manually, and explore the space of representations in an explicit manner. We point you to [one](https://wiseodd.github.io/techblog/2016/12/17/conditional-vae/) and [two](https://ijdykeman.github.io/ml/2016/12/21/cvae.html) short tutorials on conditional VAEs. Additionally, you may be interested in reading the [original paper](http://papers.nips.cc/paper/5775-learning-structured-output-representation-using-deep-conditional-generative-models.pdf), or a [continuation paper](https://papers.nips.cc/paper/7880-learning-latent-subspaces-in-variational-autoencoders.pdf). # # In this exercise you are going to build a conditional VAE, and reconstruct images by altering their attributes. For example, pick a set of 'not smiling men' and transform them by changing the label conditions such as 'smile' or 'gender' in latent space. You can choose whatever attributes you want, as long as the reconstructed latent space shows reasonable success when changing the attribute labels. # # Follow this set of instructions: # # **4.1** Create a conditional VAE keras model named `CVAE`. The conditional VAE should have the following architecture: # - Input for image. # - Input for attributes. # - ConvEncoder layer. # - Flatten layer: flattens the output of the ConvEncoder. 
# - [Concatenate layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/concatenate): concatenates the latent representation of dimension `latent_dim[0]*latent_dim[1]*latent_dim[2]` with two attribute codes of your choice (tf.float32 representations). # - Dense layer with linear activation and `bottleneck_dim` units to predict the mean of the encoder conditional distribution $q_x(z)=N(\mu,\sigma)$. # - Dense layer with linear activation and `bottleneck_dim` units to predict the log-variance of the encoder conditional distribution $q_x(z)=N(\mu,\sigma)$. # - Sampling layer you completed in Question 3.1. # - [Concatenate layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/concatenate) that combines your sample with the two attribute codes of your choice (tf.float32 representations). # - Dense layer with ReLu activation. # - Reshape layer. # - ConvDecoder. # - Output image of same size as input image. # # **4.2** Train the model using the data generator you completed in Question 1.2 (use mean squared error loss and an optimizer of your choice). Print a summary of your model. # # **We recommend saving your trained models**. # # **4.3** Select 5 photos with common attributes and reconstruct these images after feeding them to the conditional variational autoencoder `CVAE` from the validation data. Change attributes in the other three possible combinations and visualize all compositions. Comment on your compositions. # # For example, if your choice of attributes where 'Male' and 'Smiling', you should reconstruct these images with all possible attribute combinations. # # **4.4** Visualize the first 2 principal components and the t-SNE decomposition of the validation data of `CVAE` obtained from the latent space, i.e., a sample drawn from the probabilistic encoder for at least 1024 input images. Color the datapoints depending on the input's attributes `male` and `smiling`: draw four separate scatterplots in total. Explain what you observe. 
# + [markdown] deletable=false editable=false # ## Answers # + [markdown] deletable=false # **4.1** Create a conditional VAE keras model named `CVAE`. The conditional VAE should have the following architecture: # - Input for image. # - Input for attributes. # - ConvEncoder layer. # - Flatten layer: flattens the output of the ConvEncoder. # - [Concatenate layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/concatenate): concatenates the latent representation of dimension `latent_dim[0]*latent_dim[1]*latent_dim[2]` with two attribute codes of your choice (tf.float32 representations). # - Dense layer with linear activation and `bottleneck_dim` units to predict the mean of the encoder conditional distribution $q_x(z)=N(\mu,\sigma)$. # - Dense layer with linear activation and `bottleneck_dim` units to predict the log-variance of the encoder conditional distribution $q_x(z)=N(\mu,\sigma)$. # - Sampling layer you completed in Question 3.1. # - [Concatenate layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/concatenate) that combines your sample with the two attribute codes of your choice (tf.float32 representations). # - Dense layer with ReLu activation. # - Reshape layer. # - ConvDecoder. # - Output image of same size as input image. 
# + deletable=false load=true
# Conditional VAE: the two binary attribute codes are concatenated both with
# the flattened encoder features (before the bottleneck heads) and with the
# latent sample (before decoding).
bottleneck_dim=128
# FIX: latent_dim was [32,32,64]; four stride-2 Conv2DTranspose layers on a
# 32x32 spatial latent produce 512x512x3 outputs, which cannot be compared
# against the 64x64x3 inputs in the reconstruction loss below. A 4x4 spatial
# latent decodes back to 64x64x3.
latent_dim = [4,4,64]
aeinput = layers.Input(shape=input_size,name='Input_layer')
cond = layers.Input(shape=(2,),name='Input_label1')
convencoder = ConvEncoder(input_shape = input_size,name='convencoder')(aeinput)
flattenner = layers.Flatten(name='flatten1')(convencoder)
flat_cond = layers.Concatenate(name='flat_cond')([flattenner,cond])
# FIX: the mean and log-variance heads must use *linear* activations (as the
# 4.1 architecture specifies); 'relu' clamps both to be non-negative, which
# is wrong in particular for a log-variance.
z_mean = layers.Dense(bottleneck_dim,activation='linear',name='z_mean')(flat_cond)
z_log_var = layers.Dense(bottleneck_dim,activation='linear',name='z_log_var')(flat_cond)
z = Sampling(name='sampling')([z_mean,z_log_var])
z_cond = layers.Concatenate()([z,cond])
dense2 = layers.Dense(np.prod(latent_dim),activation='relu',name='dense2')(z_cond)
reshaper = layers.Reshape((*latent_dim,),name='reshape')(dense2)
latent = layers.Input(shape=(*latent_dim,),name='Input')
convdecoder=ConvDecoder(input_shape = (*latent_dim,),name='convdecoder')(latent)
# compile the encoder and decoder
encoder = tf.keras.models.Model([aeinput,cond], [z_mean, z_log_var, reshaper], name='encoder')
decoder = models.Model(inputs = latent,outputs = convdecoder,name='Decoder')
outputs = decoder(encoder([aeinput,cond])[2])
# Define vae_loss as mean of (reconstruction loss + KL loss)
CVAE = models.Model(inputs=[aeinput,cond],outputs= outputs,name='CVAE')
reconstruction_loss = tf.keras.losses.mse(aeinput,outputs)
reconstruction_loss *= 64*64*3  # sum over pixels rather than per-pixel mean
reg=0.1  # KL weight; small so reconstruction quality dominates
kl_loss = - reg * 0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
# compile the model
vae_loss = tf.keras.backend.mean(reconstruction_loss+kl_loss)
CVAE.add_loss(vae_loss)
CVAE.compile(optimizer='adam',loss=None)
# NOTE(review): `load` is not defined in this cell -- it presumably comes
# from an earlier cell (the `load=true` here is jupytext cell metadata, not
# Python); verify it exists before running.
if os.path.exists("model/cvae_celeb_model.h5") and load is True:
    CVAE.load_weights("model/cvae_celeb_model.h5")
# FIX: summary() prints itself and returns None; wrapping it in print()
# appended a stray "None" line to the output.
CVAE.summary()

# + [markdown] deletable=false
# **4.2** Train the model using the generator you completed in Q1.3. Print a summary of your model.
# **We recommend saving your trained models**.
# + deletable=false
train_and_plot_history(CVAE,size=1024,train=train_cond_ds,val=val_cond_ds)
# FIX: the 4.1 cell restores this model with `CVAE.load_weights(...)` from an
# HDF5 file, but this line saved the *full model* in TF SavedModel format
# under an `.h5` name (`save(..., save_format="tf")`), so the reload path
# could never match. Save just the weights in HDF5 format instead.
CVAE.save_weights('model/cvae_celeb_model.h5')

# + [markdown] deletable=false
# **4.3** Select 5 photos with common attributes and reconstruct these images after feeding them to the conditional variational autoencoder `CVAE` from the validation data. Change attributes in the other three possible combinations and visualize all compositions. Comment on your compositions.

# + deletable=false
# 4.3
# Example case: select 5 images of men, not smiling; reconstruct with smiling and no smiling, and as smiling/no-smiling women.
# your code here

# + [markdown] deletable=false
# *Your answer here*

# + [markdown] deletable=false
# **4.4** Visualize the first 2 principal components and the t-SNE decomposition of the validation data of `CVAE` obtained from the latent space, i.e., a sample drawn from the probabilistic encoder for at least 1024 input images. Color the datapoints depending on the input's attributes `male` and `smiling`: draw four separate scatterplots in total. Explain what you observe.

# + deletable=false
# 4.4
# your code here

# + [markdown] deletable=false
# *Your answer here*

# +
# 4.4
# your code here

# + [markdown] deletable=false
# *Your answer here*

# + [markdown] deletable=false editable=false
# ---

# + [markdown] deletable=false editable=false
# <div class='exercise'> <b> Question 5: Generative Adversarial Networks. [20 points] </b> </div>
#
# For the final exercise we are going to create a standard GAN composed of a generator, and a discriminator. GANs are tricky to train, so we encourage you to follow the given instructions when training your models. However, feel completely free to explore and present other layer architectures if they present better results.
#
# **5.1** Create a convolutional keras generator model. We recommend the following architecture.
#
# - Input to the generator is a noise vector of dimension `bottleneck_dim`.
You can rename to `noise_dim` for more corresponding terminology if you prefer.
# - Dense layer with `latent_dim[0]*latent_dim[1]*latent_dim[2]` units, and LeakyRelu.
# - Reshape to `latent_dim`.
# - 3x Conv2DTranspose layers with 5x5 filters, LeakyRelu's, stride 2x2, 'same' padding.
#
# For stability, consider training without bias terms on your Dense and Conv2DTranspose layers. Print a summary of your model.
#
# **5.2** Create a convolutional discriminator model. Our recommended setup is to use 3x Conv2D layers with filters of size (4,4), 'same' padding, strides 2x2, and LeakyRelu activations. Compile the model with binary cross entropy loss and an optimizer of your choice. Print a summary of the model.
#
# **5.3** Create a DCGAN model that is a composition of the generator and the discriminator. The DCGAN model takes a Gaussian vector as input into the generator, and then the discriminator decides whether the output comes from the generator or from the true distribution. The DCGAN is composed of the trainable weights of the generator, and fixed discriminator weights. You can accomplish this behavior by fixing the discriminator training weights using `discriminator.trainable = False` before constructing the model. Once you have instantiated the DCGAN model, compile it with a binary cross entropy loss and optimizer of your choice.
#
# **5.4** Train your model (both DCGAN and discriminator) on the train images of the celeba dataset. We recommend you display images after every train epoch to visualize performance. You should observe 'sensible' images after around 5 epochs or less, especially if you train on the full dataset. Consider training on a reduced set of the dataset if it takes too long.
#
# To train your DCGAN model, you will not be able to use the model's [`fit`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit) function.
Instead, you should consider using the [`train_on_batch`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#train_on_batch) method, where you can manually feed an input and training labels, and alternate between the DCGAN and the discriminator. Datasets are `iterable`, so you can use them directly in a for loop to obtain mini-batches. You need to run these three steps inside the for loop:
# 1) Train on batch the discriminator on real images with labels equal to 1 (optionally, minus a small smoother). The smoother may help the generator train faster than the discriminator.
# 2) Train on batch the discriminator on generated images obtained from random Gaussian input and labels equal to 0.
# 3) Train on batch the DCGAN by feeding noise inputs and labels of 1's.
#
# **Show at least 8 generated images from your final trained DCGAN model for submission**. How do these images compare in quality to the faces generated via VAE? Explain.
#
# **5.5** Standard GANs are composed as a generator and discriminator, as you just coded them. Could we substitute the discriminator with something else, like a KL loss with the empirical distribution? Why or why not? Explain your answer.

# + [markdown] deletable=false editable=false
# ## Answers

# + [markdown] deletable=false
# **5.1** Create a convolutional keras generator model. We recommend the following architecture.
#
# - Input to the generator is a noise vector of dimension `bottleneck_dim`. You can rename to `noise_dim` for more corresponding terminology if you prefer.
# - Dense layer with `latent_dim[0]*latent_dim[1]*latent_dim[2]` units, and LeakyRelu.
# - Reshape to `latent_dim`.
# - 3x Conv2DTranspose layers with 5x5 filters, LeakyRelu's, stride 2x2, 'same' padding.
#
# For stability, consider training without bias terms on your Dense and Conv2DTranspose layers. Print a summary of your model.
# + deletable=false
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.optimizers import Adam, RMSprop

bottleneck_dim = 128  # FIX: was misspelled `bottolneck_dim` (referenced nowhere else)
noise_dim = bottleneck_dim
latent_dim = (8,8,10)
# FIX: `lr` is a deprecated alias that newer Keras versions reject;
# `learning_rate` is the supported keyword.
adam = Adam(learning_rate=0.0002, beta_1=0.5)
# -

# Generator: 128-d noise -> dense 8*8*10 -> reshape -> three stride-2
# transposed convs (8 -> 16 -> 32 -> 64 spatial) -> stride-1 conv down to
# 3 image channels.
g= models.Sequential()
g.add(layers.Input(shape = (noise_dim,),name = 'gen_input'))
g.add(layers.Dense(latent_dim[0]*latent_dim[1]*latent_dim[2], use_bias=False,input_dim=noise_dim, activation=LeakyReLU(alpha=0.2),name = 'g_dense'))
g.add(layers.Reshape(latent_dim,name='g_reshaper'))
g.add(layers.Conv2DTranspose(filters=9, kernel_size=(5,5), strides=(2,2), use_bias=False,padding='same',name='g_conv1', activation=LeakyReLU(alpha=0.2)))
g.add(layers.Conv2DTranspose(filters=18, kernel_size=(5, 5), strides=(2,2), use_bias=False,padding='same',name='g_conv2', activation=LeakyReLU(alpha=0.2)))
g.add(layers.Conv2DTranspose(filters=32, kernel_size=(5, 5), strides=(2,2), use_bias=False,padding='same',name='g_conv3', activation=LeakyReLU(alpha=0.2)))
# NOTE(review): a bounded output activation ('sigmoid'/'tanh') usually
# matches the pixel scaling of the training images better than LeakyReLU --
# confirm against the input pipeline before changing.
g.add(layers.Conv2D(filters=3, kernel_size=(3, 3), strides=(1,1), use_bias=False,padding='same',name='g_conv4', activation=LeakyReLU(alpha=0.2)))
# The generator is only ever trained through the combined DCGAN model; this
# compile mainly enables summary()/standalone evaluation.
g.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
g.summary()

# + [markdown] deletable=false
# **5.2** Create a convolutional discriminator model. Our recommended setup is to use 3x Conv2D layers with filters of size (4,4), 'same' padding, strides 2x2, and LeakyRelu activations. Compile the model with binary cross entropy loss and an optimizer of your choice. Print a summary of the model.
# + deletable=false d = models.Sequential() d.add(layers.Input(shape=input_size)) d.add(layers.Conv2D(filters=9, kernel_size=(4, 4), strides=(2,2), use_bias=False,padding='same',name='d_conv1', activation=LeakyReLU(alpha=0.2))) d.add(layers.Conv2D(filters=18, kernel_size=(4, 4), strides=(2,2), use_bias=False,padding='same',name='d_conv2', activation=LeakyReLU(alpha=0.2))) d.add(layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(2,2), use_bias=False,padding='same',name='d_conv3', activation=LeakyReLU(alpha=0.2))) d.add(layers.Flatten()) d.add(layers.Dense(1, use_bias=False, activation ='sigmoid')) d.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy']) d.summary() # + [markdown] deletable=false # **5.3** Create a DCGAN model that is a composition of the generator and the discriminator. The DCGAN model takes a Gaussian vector as input into the generator, and then the discriminator decides whether the output comes from the generator or from the true distristribution. The DCGAN is composed of the trainable weights of the generator, and fixed discriminator weights. You can accompolish this behavior by fixing the discriminator training weights using `discriminator.trainable = False` before constructing the model. Once you have instantiated the DCGAN model, compile it with a binary cross entropy loss and optimizer of your choice. # # + deletable=false d.trainable = False inputs = layers.Input(shape=(noise_dim, )) hidden = g(inputs) output = d(hidden) dcgan = models.Model(inputs, output) dcgan.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy']) dcgan.summary() # + [markdown] deletable=false # **5.4** Train your model (both DCGAN and discriminator) on the train images of the celeba dataset. We recommend you display images after every train epoch to visualize performance. You should observe 'sensible' images after around 5 epochs or less, specially if you train on the full dataset. 
Consider training on a reduced set of the dataset if it takes too long. # # To train your DCGAN model, you will not be able to use the model's [`fit`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit) function. Instead, you should consider using [`train_on_batch`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#train_on_batch) method, where you can manually feed an input and training labels, and alternate between the DCGAN and the discriminator. Datasets are `iterable`, so you can use them directly in a for loop to obtain mini-batches. You need to run these three steps inside the for loop: # 1) Train on batch the discriminator on real images with labels equal to 1 (optionally, minus a small smoother). The smoother may help the generator train faster than the discriminator. # 2) Train on batch the discriminator on generated images obtained from random Gaussian input and labels equal to 0. # 3) Train on batch the DCGAN by feeding noise inputs and labels of 1's. # # **Show at least 8 generated images from your final trained DCGAN model for submission**. How do these images compare in quality to the faces generated via VAE? Explain. 
# -

epochs = 20
batch_size = 32
maxt = 2000          # safety cap on the number of mini-batches used per epoch

# Preprocess the celeba images (normalize/crop/resize), then shuffle, batch
# and prefetch. `train_celeb`, `tf_norm_crop_resize_image` and `input_size`
# are defined in earlier cells.
train_ds_gan = train_celeb.map(lambda img: tf_norm_crop_resize_image(img["image"], input_size[:2]))
train_ds_gan = train_ds_gan.shuffle(20)
train_ds_gan = train_ds_gan.batch(batch_size)
train_ds_gan = train_ds_gan.prefetch(10)

losses = {"D": [], "G": []}

def train_d(x_real, x_fake):
    """One discriminator update: real images get smoothed labels (1 - 0.1),
    generated images get labels of 0. Returns the mean of the two losses.

    BUGFIX: the original version read a global `x_real` that was never
    defined anywhere in the notebook (NameError at runtime); the real batch
    is now passed in explicitly from the dataset loop below.
    """
    d.trainable = True
    # Size the label tensors from each batch so the last (possibly smaller)
    # dataset batch does not crash the update.
    d_loss_real = d.train_on_batch(x_real, tf.ones((len(x_real), 1)) - 0.1)
    d_loss_fake = d.train_on_batch(x_fake, tf.zeros((len(x_fake), 1)))
    return (d_loss_real[0] + d_loss_fake[0]) / 2

def train_g():
    """One generator update through the frozen-discriminator DCGAN:
    fresh noise in, labels of 1 (we want D to call the fakes real).
    (The original accepted a `noise` argument it immediately overwrote,
    so the parameter was dropped.)
    """
    noise = np.random.normal(0, 1, size=(batch_size, noise_dim))
    y2 = np.ones(batch_size)
    d.trainable = False
    g_loss = dcgan.train_on_batch(noise, y2)
    return g_loss

# + deletable=false
for e in range(epochs+1):
    start = time.time()
    # Alternate D and G updates over real mini-batches of the dataset
    # (capped at `maxt` batches per epoch).
    for nb, x_real in enumerate(train_ds_gan):
        if nb >= maxt:
            break
        noise = np.random.normal(0, 1, size=(batch_size, noise_dim))
        generated_images = g.predict_on_batch(noise)
        dl = train_d(x_real, generated_images)
        gl = train_g()
    # Record one loss sample per epoch (from the last batch), as before.
    losses["D"].append(dl)
    losses["G"].append(gl)
    print(f'\n Epoch: {e} \t Discr. Loss: {dl} \t Gan Loss: {gl} \t Time: ', time.time()-start)
    # Visualize a grid of fresh samples after every epoch.
    # (`n_ex`, `dim` and `figsize` come from an earlier cell -- presumably
    # n_ex == dim[0] * dim[1]; confirm there.)
    noise = np.random.normal(0, 1, size=(n_ex, noise_dim))
    generated_images = g.predict(noise)
    generated_images = generated_images.reshape(n_ex, 64, 64, 3)
    plt.figure(figsize=figsize)
    for i in range(generated_images.shape[0]):
        plt.subplot(dim[0], dim[1], i+1)
        plt.imshow(generated_images[i])
        plt.axis('off')
    plt.tight_layout()
    plt.show()

# +
# plot losses
d_loss = [v for v in losses["D"]]          # train_d returns a scalar
g_loss = [v[0] for v in losses["G"]]       # train_on_batch returns [loss, acc]
plt.figure(figsize=(10,8))
plt.plot(d_loss, label="Dloss")
plt.plot(g_loss, label="Gloss")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# -

# **5.5** Standard GANs are composed as a generator and discriminator, as you just coded them. Could we substitute the discriminator with something else, like a KL loss with the empirical distribution? Why or why not? Explain your answer.
#
#
# No. The generator will produce pictures similar to the empirical distribution (existing ones) if we do so.
The adversarial setup lets the generator learn to construct genuinely new faces: the discriminator provides a learned, adaptive measure of realism that generalizes beyond the training set, whereas directly matching the empirical distribution (e.g., with a KL loss) would reward memorizing the training images and is, moreover, intractable because the generator's density is not available in closed form.

# + [markdown] deletable=false editable=false
# ---
content/HW/hw7/109/cs109b_hw7_v.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="MhoQ0WE77laV" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" id="_ckMIh7O7s6D" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="jYysdyb-CaWM" # # tf.distribute.Strategy を使用したカスタムトレーニング # + [markdown] id="S5Uhzt6vVIB2" # <table class="tfo-notebook-buttons" align="left"> # <td> # <img src="https://www.tensorflow.org/images/tf_logo_32px.png"><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tutorials/distribute/custom_training.ipynb">TensorFlow.org で表示</a> # </td> # <td> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png"><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/distribute/custom_training.ipynb">Google Colab で実行</a> # </td> # <td> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png"><a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tutorials/distribute/custom_training.ipynb">GitHub でソースを表示</a> # </td> # <td> # <img src="https://www.tensorflow.org/images/download_logo_32px.png"><a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/distribute/custom_training.ipynb">ノートブックをダウンロード</a> # </td> # </table> # + [markdown] 
id="FbVhjPpzn6BM" # このチュートリアルでは、[`tf.distribute.Strategy`](https://www.tensorflow.org/guide/distributed_training)をカスタムトレーニングループで使用する方法を示します。Fashion MNIST データセットで単純な CNN モデルをトレーニングします。Fashion MNIST データセットには、サイズ 28×28 のトレーニング画像 60,000 枚とサイズ 28×28 のテスト画像 10,000 枚が含まれています。 # # モデルのトレーニングにカスタムトレーニングループを使用するのは、柔軟性があり、トレーニングを容易に制御できるからです。それに加えて、モデルとトレーニングループのデバッグも容易になります。 # + id="dzLKpmZICaWN" # Import TensorFlow import tensorflow as tf # Helper libraries import numpy as np import os print(tf.__version__) # + [markdown] id="MM6W__qraV55" # ## Fashion MNIST データセットをダウンロードする # + id="7MqDQO0KCaWS" fashion_mnist = tf.keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() # Adding a dimension to the array -&gt; new shape == (28, 28, 1) # We are doing this because the first layer in our model is a convolutional # layer and it requires a 4D input (batch_size, height, width, channels). # batch_size dimension will be added later on. train_images = train_images[..., None] test_images = test_images[..., None] # Getting the images in [0, 1] range. train_images = train_images / np.float32(255) test_images = test_images / np.float32(255) # + [markdown] id="4AXoHhrsbdF3" # ## 変数とグラフを分散させるストラテジーを作成する # + [markdown] id="5mVuLZhbem8d" # `tf.distribute.MirroredStrategy`ストラテジーはどのように機能するのでしょう? # # - 全ての変数とモデルグラフはレプリカ上に複製されます。 # - 入力はレプリカ全体に均等に分散されます。 # - 各レプリカは受け取った入力の損失と勾配を計算します。 # - 勾配は加算して全てのレプリカ間で同期されます。 # - 同期後、各レプリカ上の変数のコピーにも同じ更新が行われます。 # # 注意: 下記のコードは全て 1 つのスコープ内に入れることができます。説明しやすいように、ここでは幾つかのコードセルに分割しています。 # # + id="F2VeZUWUj5S4" # If the list of devices is not specified in the # `tf.distribute.MirroredStrategy` constructor, it will be auto-detected. 
strategy = tf.distribute.MirroredStrategy() # + id="ZngeM_2o0_JO" print ('Number of devices: {}'.format(strategy.num_replicas_in_sync)) # + [markdown] id="k53F5I_IiGyI" # ## 入力パイプラインをセットアップする # + [markdown] id="0Qb6nDgxiN_n" # グラフと変数をプラットフォームに依存しない SavedModel 形式にエクスポートします。モデルが保存された後、スコープの有無に関わらずそれを読み込むことができます。 # + id="jwJtsCQhHK-E" BUFFER_SIZE = len(train_images) BATCH_SIZE_PER_REPLICA = 64 GLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync EPOCHS = 10 # + [markdown] id="J7fj3GskHC8g" # データセットを作成して、それを配布します。 # + id="WYrMNNDhAvVl" train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(BUFFER_SIZE).batch(GLOBAL_BATCH_SIZE) test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(GLOBAL_BATCH_SIZE) train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset) test_dist_dataset = strategy.experimental_distribute_dataset(test_dataset) # + [markdown] id="bAXAo_wWbWSb" # ## モデルを作成する # # `tf.keras.Sequential`を使用してモデルを作成します。これは Model Subclassing API を使用しても作成できます。 # + id="9ODch-OFCaW4" def create_model(): model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(64, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(10) ]) return model # + id="9iagoTBfijUz" # Create a checkpoint directory to store the checkpoints. 
checkpoint_dir = './training_checkpoints' checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") # + [markdown] id="e-wlFFZbP33n" # ## 損失関数を定義する # # 通常、GPU/CPU を 1 つ搭載した単一のマシンでは、損失は入力バッチ内の例の数で除算されます。 # # *では、`tf.distribute.Strategy`を使用する場合、どのように損失を計算すればよいのでしょうか?* # # - 例えば、4 つの GPU と 64 のバッチサイズがあるとします。1 つの入力バッチは(4 つの GPU の)レプリカに分散されるので、各レプリカはサイズ 16 の入力を取得します。 # # - 各レプリカのモデルは、それぞれの入力でフォワードパスを実行し、損失を計算します。ここでは、損失をそれぞれの入力の例の数(BATCH_SIZE_PER_REPLICA = 16)で除算するのではなく、損失を GLOBAL_BATCH_SIZE (64) で除算する必要があります。 # # *なぜそうするのでしょう?* # # - 勾配を各レプリカで計算した後にそれらを**加算**してレプリカ間で同期するため、これを行う必要があります。 # # *TensorFlow でこれを行うには?* # # - このチュートリアルにもあるように、カスタムトレーニングループを書く場合は、例ごとの損失を加算し、その合計を GLOBAL_BATCH_SIZE: `scale_loss = tf.reduce_sum(loss) * (1. / GLOBAL_BATCH_SIZE)`で除算する必要があります。または、`tf.nn.compute_average_loss`を使用することも可能です。これは例ごとの損失、オプションのサンプルの重み、そしてGLOBAL_BATCH_SIZE を引数として取り、スケーリングされた損失を返します。 # # - モデルで正則化損失を使用している場合は、損失値をレプリカの数でスケーリングする必要があります。これを行うには、`tf.nn.scale_regularization_loss`関数を使用します。 # # - `tf.reduce_mean`の使用は推奨されません。これを使用すると、損失がレプリカごとの実際のバッチサイズで除算され、ステップごとに変化する場合があります。 # # - この縮小とスケーリングは、keras`model.compile`と`model.fit`で自動的に行われます。 # # - 以下の例のように`tf.keras.losses`クラスを使用する場合、損失削減は`NONE`または`SUM`のいずれかになるよう、明示的に指定する必要があります。`AUTO`および`SUM_OVER_BATCH_SIZE`の`tf.distribute.Strategy`との併用は許可されません。`AUTO`は、分散型のケースでユーザーがどの削減を正しいと確認するか明示的に考える必要があるため、許可されていません。`SUM_OVER_BATCH_SIZE`は、現時点ではレプリカのバッチサイズのみで除算され、レプリカの数に基づく除算はユーザーに任されており見落としがちなため、許可されていません。そのため、その代わりにユーザー自身が明示的に削減を行うようにお願いしています。 # # - もし`labels`が多次元である場合は、各サンプルの要素数全体で`per_example_loss`を平均化します。例えば、`predictions`の形状が`(batch_size, H, W, n_classes)`で、`labels`が`(batch_size, H, W)`の場合、`per_example_loss /= tf.cast(tf.reduce_prod(tf.shape(labels)[1:]), tf.float32)`のように`per_example_loss`を更新する必要があります。 # # + id="R144Wci782ix" with strategy.scope(): # Set reduction to `none` so we can do the reduction afterwards and divide by # global batch size. 
  loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
      from_logits=True,
      reduction=tf.keras.losses.Reduction.NONE)
  def compute_loss(labels, predictions):
    """Per-example cross-entropy, summed and divided by the GLOBAL batch
    size (not the per-replica size), so that gradients summed across
    replicas end up correctly scaled."""
    per_example_loss = loss_object(labels, predictions)
    return tf.nn.compute_average_loss(per_example_loss, global_batch_size=GLOBAL_BATCH_SIZE)

# + [markdown] id="w8y54-o9T2Ni"
# ## 損失と精度を追跡するメトリクスを定義する
#
# これらのメトリクスは、テストの損失、トレーニング、テストの精度を追跡します。`.result()`を使用して、いつでも累積統計を取得できます。

# + id="zt3AHb46Tr3w"
with strategy.scope():
  test_loss = tf.keras.metrics.Mean(name='test_loss')

  train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
      name='train_accuracy')
  test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
      name='test_accuracy')

# + [markdown] id="iuKuNXPORfqJ"
# ## トレーニングループ

# + id="OrMmakq5EqeQ"
# model, optimizer, and checkpoint must be created under `strategy.scope`.
with strategy.scope():
  model = create_model()

  optimizer = tf.keras.optimizers.Adam()

  checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)

# + id="3UX43wUu04EL"
def train_step(inputs):
  """Forward/backward pass for one per-replica batch; updates the training
  accuracy metric and returns the (globally scaled) loss."""
  images, labels = inputs

  with tf.GradientTape() as tape:
    predictions = model(images, training=True)
    loss = compute_loss(labels, predictions)

  gradients = tape.gradient(loss, model.trainable_variables)
  optimizer.apply_gradients(zip(gradients, model.trainable_variables))

  train_accuracy.update_state(labels, predictions)
  return loss

def test_step(inputs):
  """Evaluate one per-replica batch, accumulating test loss and accuracy
  metrics (no return value)."""
  images, labels = inputs

  predictions = model(images, training=False)
  t_loss = loss_object(labels, predictions)

  test_loss.update_state(t_loss)
  test_accuracy.update_state(labels, predictions)

# + id="gX975dMSNw0e"
# `run` replicates the provided computation and runs it
# with the distributed input.
@tf.function
def distributed_train_step(dataset_inputs):
  """Run train_step on every replica and SUM the per-replica losses
  (each already divided by the global batch size)."""
  per_replica_losses = strategy.run(train_step, args=(dataset_inputs,))
  return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,
                         axis=None)

@tf.function
def distributed_test_step(dataset_inputs):
  """Run test_step on every replica (metrics are updated inside it)."""
  return strategy.run(test_step, args=(dataset_inputs,))

for epoch in range(EPOCHS):
  # TRAIN LOOP
  total_loss = 0.0
  num_batches = 0
  for x in train_dist_dataset:
    total_loss += distributed_train_step(x)
    num_batches += 1
  train_loss = total_loss / num_batches

  # TEST LOOP
  for x in test_dist_dataset:
    distributed_test_step(x)

  # Save a checkpoint every other epoch.
  if epoch % 2 == 0:
    checkpoint.save(checkpoint_prefix)

  template = ("Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, "
              "Test Accuracy: {}")
  print (template.format(epoch+1, train_loss,
                         train_accuracy.result()*100, test_loss.result(),
                         test_accuracy.result()*100))

  # Reset metric state so every epoch reports fresh numbers.
  test_loss.reset_states()
  train_accuracy.reset_states()
  test_accuracy.reset_states()

# + [markdown] id="Z1YvXqOpwy08"
# 上記の例における注意点:
#
# - for文(`for x in ...`)を使用して、`train_dist_dataset`と`test_dist_dataset`に対してイテレーションしています。
# - スケーリングされた損失は `distributed_train_step`の戻り値です。この値は`tf.distribute.Strategy.reduce`呼び出しを使用してレプリカ間で集約され、次に`tf.distribute.Strategy.reduce`呼び出しの戻り値を加算してバッチ間で集約されます。
# - `tf.keras.Metrics`は、`tf.distribute.Strategy.run`によって実行される`train_step`と`test_step`内で更新される必要があります。*`tf.distribute.Strategy.run`はストラテジー内の各ローカルレプリカの結果を返し、この結果の消費方法は多様です。`tf.distribute.Strategy.reduce`を実行して、集約された値を取得することができます。また、`tf.distribute.Strategy.experimental_local_results`を実行して、ローカルレプリカごとに 1 つ、結果に含まれる値のリストを取得することもできます。
#

# + [markdown] id="-q5qp31IQD8t"
# ## 最新のチェックポイントを復元してテストする

# + [markdown] id="WNW2P00bkMGJ"
# `tf.distribute.Strategy`でチェックポイントされたモデルは、ストラテジーの有無に関わらず復元することができます。

# + id="pg3B-Cw_cn3a"
eval_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
      name='eval_accuracy')

# Fresh (strategy-free) model and optimizer to restore the checkpoint into.
new_model = create_model()
new_optimizer = tf.keras.optimizers.Adam()

test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(GLOBAL_BATCH_SIZE)

#
+ id="7qYii7KUYiSM"
@tf.function
def eval_step(images, labels):
  """Evaluate one batch with the restored model (no strategy involved)."""
  predictions = new_model(images, training=False)
  eval_accuracy(labels, predictions)

# + id="LeZ6eeWRoUNq"
checkpoint = tf.train.Checkpoint(optimizer=new_optimizer, model=new_model)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))

for images, labels in test_dataset:
  eval_step(images, labels)

print ('Accuracy after restoring the saved model without strategy: {}'.format(
    eval_accuracy.result()*100))

# + [markdown] id="EbcI87EEzhzg"
# ## データセットのイテレーションの代替方法
#
# ### イテレータを使用する
#
# データセット全体ではなく、任意のステップ数のイテレーションを行いたい場合は、`iter`呼び出しを使用してイテレータを作成し、そのイテレータ上で`next`を明示的に呼び出すことができます。tf.function の内側と外側の両方でデータセットのイテレーションを選択することができます。ここでは、イテレータを使用し tf.function 外側のデータセットのイテレーションを実行する小さなスニペットを示します。
#

# + id="7c73wGC00CzN"
for _ in range(EPOCHS):
  total_loss = 0.0
  num_batches = 0
  train_iter = iter(train_dist_dataset)

  for _ in range(10):
    total_loss += distributed_train_step(next(train_iter))
    num_batches += 1
  average_train_loss = total_loss / num_batches

  template = ("Epoch {}, Loss: {}, Accuracy: {}")
  # NOTE(review): `epoch` here is the loop variable left over from the
  # earlier training cell (this loop's own variable is `_`), so the same
  # epoch number is printed every iteration -- harmless but confusing.
  print (template.format(epoch+1, average_train_loss, train_accuracy.result()*100))
  train_accuracy.reset_states()

# + [markdown] id="GxVp48Oy0m6y"
# ### tf.function 内でイテレーションする
#
# tf.function の内側で for 文(`for x in ...`)を使用して、あるいは上記で行ったようにイテレータを作成して、入力`train_dist_dataset`全体をイテレーションすることもできます。次の例では、トレーニングの 1 つのエポックを tf.function でラップし、関数内で`train_dist_dataset`をイテレーションする方法を示します。

# + id="-REzmcXv00qm"
@tf.function
def distributed_train_epoch(dataset):
  """One full training epoch inside a single tf.function: run train_step on
  each replica per batch, sum losses across replicas, and return the mean
  loss per batch."""
  total_loss = 0.0
  num_batches = 0
  for x in dataset:
    per_replica_losses = strategy.run(train_step, args=(x,))
    total_loss += strategy.reduce(
      tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
    num_batches += 1
  return total_loss / tf.cast(num_batches, dtype=tf.float32)

for epoch in range(EPOCHS):
  train_loss = distributed_train_epoch(train_dist_dataset)

  template = ("Epoch {}, Loss: {}, Accuracy: {}")
  print (template.format(epoch+1, train_loss, train_accuracy.result()*100))
train_accuracy.reset_states() # + [markdown] id="MuZGXiyC7ABR" # ### レプリカ間でトレーニング損失を追跡する # # 注意: 一般的なルールとして、サンプルごとの値の追跡には`tf.keras.Metrics`を使用し、レプリカ内で集約された値を避ける必要があります。 # # `tf.metrics.Mean`を使用すると損失スケーリングが計算されるため、異なるレプリカ間のトレーニング損失の追跡には*推奨できません*。 # # 例えば、次のような特徴を持つトレーニングジョブを実行するとします。 # # - レプリカ 2 つ # - 各レプリカで 2 つのサンプルを処理 # - 結果の損失値 : 各レプリカで [2, 3] および [4, 5] # - グローバルバッチサイズ = 4 # # 損失スケーリングで損失値を加算して各レプリカのサンプルごとの損失の値を計算し、さらにグローバルバッチサイズで除算します。この場合は、`(2 + 3) / 4 = 1.25`および`(4 + 5) / 4 = 2.25`となります。 # # `tf.metrics.Mean`を使用して 2 つのレプリカ間の損失を追跡する場合、結果は異なります。この例では、`total`は 3.50、`count`は 2 となり、メトリック上で`result()`が呼び出された場合の結果は`total`/`count` = 1.75 となります。`tf.keras.Metrics`で計算された損失は、同期するレプリカの数に等しい追加の係数によってスケーリングされます。 # + [markdown] id="xisYJaV9KZTN" # ### ガイドと例 # # カスタムトレーニングループを用いた分散ストラテジーの使用例をここに幾つか示します。 # # 1. [分散型トレーニングガイド](../../guide/distributed_training) # 2. `MirroredStrategy`を使用した [DenseNet](https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/densenet/distributed_train.py) の例。 # 3. `MirroredStrategy`と`TPUStrategy`を使用してトレーニングされた [BERT](https://github.com/tensorflow/models/blob/master/official/nlp/bert/run_classifier.py) の例。この例は、分散トレーニングなどの間にチェックポイントから読み込む方法と、定期的にチェックポイントを生成する方法を理解するのに特に有用です。 # 4. `MirroredStrategy`を使用してトレーニングされ、`keras_use_ctl`フラグを使用した有効化が可能な、[NCF](https://github.com/tensorflow/models/blob/master/official/recommendation/ncf_keras_main.py)の例。 # 5. `MirroredStrategy`を使用してトレーニングされた、[NMT](https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/nmt_with_attention/distributed_train.py) の例。 # # [分散ストラテジーガイド](../../guide/distributed_training.ipynb#examples_and_tutorials)には他の例も記載されています。 # + [markdown] id="6hEJNsokjOKs" # ## 次のステップ # # - 新しい`tf.distribute.Strategy` API を独自のモデルで試してみましょう。 # - 他のストラテジーや独自の TensorFlow モデルのパフォーマンス最適化に使用できる[ツール](../../guide/profiler.md)についての詳細は、ガイドの[パフォーマンスのセクション](../../guide/function.ipynb)をご覧ください。
site/ja/tutorials/distribute/custom_training.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Flattening a nested linked list
#
# Suppose you have a linked list where the value of each node is a sorted linked list (i.e., it is a _nested_ list). Your task is to _flatten_ this nested list—that is, to combine all nested lists into a single (sorted) linked list.
#
# First, we'll need some code for generating nodes and a linked list:

# +
# Helper code

# A class behaves like a data-type, just like an int, float or any other built-in ones.

# User defined class
class Node:
    """A single linked-list node. `value` is an int for a simple LinkedList
    and a LinkedList for a NestedLinkedList."""
    def __init__(self, value):
        self.value = value
        self.next = None

    def __repr__(self):
        return str(self.value)

# User defined class
class LinkedList:
    """Singly linked list; `head` is a Node, or None for an empty list."""
    def __init__(self, head):
        self.head = head

    def append(self, value):
        """Append `value` (wrapped in a new Node) at the tail."""
        # If LinkedList is empty
        if self.head is None:
            self.head = Node(value)
            return
        # Walk to the current tail and attach the new node there.
        node = self.head
        while node.next is not None:
            node = node.next
        node.next = Node(value)

    def to_list(self):
        """Convert the LinkedList into a Python list of integers."""
        out = []
        node = self.head
        while node:
            out.append(int(str(node.value)))
            node = node.next
        return out
# -

# ### Exercise - Write the two function definitions below
# Now, in the cell below, see if you can solve the problem by implementing the `flatten` method.
#
# **Hint**: If you first create a `merge` method that merges two linked lists into a sorted linked list, then there is an elegant recursive solution.

# +
def merge(list1, list2):
    '''
    Merge the two sorted LinkedLists `list1` and `list2` into a single,
    sorted LinkedList and return it. Either argument may be None.

    BUGFIX: the original appended the Node objects themselves
    (`merged.append(list1_elt)`), producing nodes whose `value` was a Node.
    That happened to survive `to_list()` (via Node.__repr__) but made the
    `<=` comparison raise TypeError as soon as a merged list was merged
    again -- i.e. flattening three or more nested lists failed. We now
    append the underlying values instead.
    '''
    if list1 is None:
        return list2
    if list2 is None:
        return list1
    merged = LinkedList(None)
    list1_elt = list1.head
    list2_elt = list2.head
    while list1_elt is not None or list2_elt is not None:
        if list1_elt is None:
            merged.append(list2_elt.value)
            list2_elt = list2_elt.next
        elif list2_elt is None:
            merged.append(list1_elt.value)
            list1_elt = list1_elt.next
        elif list1_elt.value <= list2_elt.value:
            merged.append(list1_elt.value)
            list1_elt = list1_elt.next
        else:
            merged.append(list2_elt.value)
            list2_elt = list2_elt.next
    return merged


''' In a NESTED LinkedList object, each node will be a simple LinkedList in itself'''
class NestedLinkedList(LinkedList):
    def flatten(self):
        """Flatten the nested list into one sorted simple LinkedList."""
        return self._flatten(self.head)

    def _flatten(self, node):
        """Recursively merge `node`'s list with the flattened tail."""
        # Termination condition: last nested list, merge it with nothing.
        if node.next is None:
            return merge(node.value, None)
        return merge(node.value, self._flatten(node.next))
# -

# ### Test - Let's test your function
# Here's some code that will generate a nested linked list that we can use to test the solution:

# +
# First Test scenario
''' Create a simple LinkedList'''
linked_list = LinkedList(Node(1))  # <-- Notice that we are passing a Node made up of an integer
linked_list.append(3)  # <-- Notice that we are passing a numerical value as an argument in the append() function here
linked_list.append(5)

''' Create another simple LinkedList'''
second_linked_list = LinkedList(Node(2))
second_linked_list.append(4)

''' Create a NESTED LinkedList, where each node will be a simple LinkedList in itself'''
nested_linked_list = NestedLinkedList(Node(linked_list))  # <-- Notice that we are passing a Node made up of a simple LinkedList object
nested_linked_list.append(second_linked_list)  # <-- Notice that we are passing a LinkedList object in the append() function here
# -

# #### Structure of the nested linked list to be tested
# `nested_linked_list` should now have 2 nodes. The head node is a linked list containing `1, 3, 5`. The second node is a linked list containing `2, 4`.
#
# Calling `flatten` should return a linked list containing `1, 2, 3, 4, 5`.

# +
solution = nested_linked_list.flatten()  # <-- returns A LinkedList object

expected_list = [1,2,3,4,5]  # <-- Python list

# Convert the "solution" into a Python list and compare with another Python list
assert solution.to_list() == expected_list, f"list contents: {solution.to_list()}"
# -

# ### Solution
# First, let's implement a `merge` function that takes in two linked lists and returns one sorted linked list. Note, this implementation expects both linked lists to be sorted.
def merge(list1, list2):
    """Merge two sorted simple LinkedLists (int values) into one sorted
    LinkedList; either argument may be None.

    BUGFIX: append the node *values* instead of the Node objects, so that a
    merged list can itself be merged again (required when flattening three
    or more nested lists; the original raised TypeError on `<=` in that
    case).
    """
    if list1 is None:
        return list2
    if list2 is None:
        return list1
    merged = LinkedList(None)
    list1_elt = list1.head
    list2_elt = list2.head
    while list1_elt is not None or list2_elt is not None:
        if list1_elt is None:
            merged.append(list2_elt.value)
            list2_elt = list2_elt.next
        elif list2_elt is None:
            merged.append(list1_elt.value)
            list1_elt = list1_elt.next
        elif list1_elt.value <= list2_elt.value:
            merged.append(list1_elt.value)
            list1_elt = list1_elt.next
        else:
            merged.append(list2_elt.value)
            list2_elt = list2_elt.next
    return merged

# Let's make sure merge works how we expect:

# +
''' Test merge() function'''
linked_list = LinkedList(Node(1))
linked_list.append(3)
linked_list.append(5)

second_linked_list = LinkedList(Node(2))
second_linked_list.append(4)

merged = merge(linked_list, second_linked_list)
node = merged.head
while node is not None:
    #This will print 1 2 3 4 5
    print(node.value)
    node = node.next

# Lets make sure it works with a None list
merged = merge(None, linked_list)
node = merged.head
while node is not None:
    #This will print 1 3 5
    print(node.value)
    node = node.next
# -

# Now let's implement `flatten` recursively using merge.
''' In a NESTED LinkedList object, each node will be a simple LinkedList in itself'''
class NestedLinkedList(LinkedList):
    def flatten(self):
        """Flatten the nested list into a single sorted simple LinkedList."""
        return self._flatten(self.head)    # <-- self.head is a node for NestedLinkedList

    ''' A recursive function '''
    def _flatten(self, node):
        """Merge `node`'s list with the recursively flattened tail."""
        # A termination condition
        if node.next is None:
            return merge(node.value, None)    # <-- First argument is a simple LinkedList
        # _flatten() is calling itself until a termination condition is achieved
        return merge(node.value, self._flatten(node.next))    # <-- Both arguments are a simple LinkedList each

# +
''' Test flatten() function'''
nested_linked_list = NestedLinkedList(Node(linked_list))
nested_linked_list.append(second_linked_list)

flattened = nested_linked_list.flatten()

node = flattened.head
while node is not None:
    #This will print 1 2 3 4 5
    print(node.value)
    node = node.next
# -

# ### Computational Complexity
# Lets start with the computational complexity of `merge`. Merge takes in two lists. Let's say the lengths of the lists are $N_{1}$ and $N_{2}$. Because we assume the inputs are sorted, `merge` is very efficient. It looks at the first element of each list and adds the smaller one to the returned list. Every time through the loop we are appending one element to the list, so it will take $N_{1} + N_{2}$ iterations until we have the whole list.
#
# The complexity of `flatten` is a little more complicated to calculate. Suppose our `NestedLinkedList` has $N$ linked lists and each list's length is represented by $M_{1}, M_{2}, ..., M_{N}$.
#
# We can represent this recursion as:
#
# $merge(M_{1}, merge(M_{2}, merge(..., merge(M_{N-1}, merge(M_{N}, None)))))$
#
# Let's start from the inside. The inner most merge returns the $nth$ linked list. The next merge does $M_{N-1} + M_{N}$ comparisons. The next merge does $M_{N-2} + M_{N-1} + M_{N}$ comparisons.
#
# Eventually we will do $N$ comparisons on all of the $M_{N}$ elements. We will do $N-1$ comparisons on $M_{N-1}$ elements.
#
# This can be generalized as:
#
# $$
# \sum_{n=1}^{N} n \cdot M_{n}
# $$
concepts/Data Structures/01 LinkedLists/06 Flattening a nested linked list.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tf18p36
#     language: python
#     name: tf18p36
# ---

# #### Preprocessing folder:
# The folder must contain two VOC-format directories -- one for images and
# one for annotations -- named JPEGImages and Annotations respectively.

ROOT_FILE = '/home/lidingke/DATA/unit_by_yolo'

# #### Delete annotations that fail to parse (and their images).

# +
import os
import xml.etree.cElementTree as ET

ERROR_XML_ROOT = ROOT_FILE
for i in os.listdir(ERROR_XML_ROOT + '/JPEGImages'):
    path = ERROR_XML_ROOT + '/Annotations/' + i.split('.')[0] + '.xml'
    jpg = ERROR_XML_ROOT + '/JPEGImages/' + i.split('.')[0] + '.jpg'
    try:
        tree = ET.parse(path)
        root = tree.getroot()
        for obj_item in root.findall('object'):
            obj_name = obj_item.find('name').text
            # Drop pairs whose annotation contains a 'dontcare' object.
            if obj_name == 'dontcare':
                print('dontcare')
                print(path)
                os.system('rm ' + path)
                print(jpg)
                os.system('rm ' + jpg)
    except Exception as e:
        # Unparseable annotation: remove both the xml and its image.
        print(e)
        print(path)
        print(jpg)
        os.system('rm ' + path)
        os.system('rm ' + jpg)
print('Done')
# -

# #### Delete images that cannot be read.

# +
import os
import cv2
import numpy as np

JPG_FILE = ROOT_FILE
for i in os.listdir(JPG_FILE + '/JPEGImages/'):
    jpg = JPG_FILE + '/JPEGImages/' + i.split('.')[0] + '.jpg'
    im = cv2.imread(jpg)  # cv2.imread returns None (not an ndarray) on failure
    if not isinstance(im, np.ndarray):
        xml = JPG_FILE + '/Annotations/' + i.split('.')[0] + '.xml'
        print(xml)
        print(jpg)
        os.system('rm ' + xml)
        # BUGFIX: was os.system('rm' + jpg) -- the missing space produced a
        # nonexistent command like "rm/path/img.jpg", so the broken image
        # was never actually deleted.
        os.system('rm ' + jpg)
print('Done')
# -

# #### Delete annotations that have no matching image in JPEGImages.

# +
def double_delete(root_file, names):
    """Remove both the .xml annotation and the .jpg image for every file
    basename in `names` under `root_file`."""
    for n in names:
        ann_file = root_file + '/Annotations/'
        jpg_flie = root_file + '/JPEGImages/'
        xml = ann_file + n + '.xml'
        jpg = jpg_flie + n + '.jpg'
        cmd = 'rm ' + xml
        print(cmd)
        os.system(cmd)
        cmd = 'rm ' + jpg
        print(cmd)
        os.system(cmd)

root_file = ROOT_FILE
ann_file = root_file + '/Annotations/'
jpg_flie = root_file + '/JPEGImages/'
anns = [a.split('.')[0] for a in os.listdir(ann_file) if a.split('.')[-1] == 'xml']
jpgs = [a.split('.')[0] for a in os.listdir(jpg_flie) if a.split('.')[-1] == 'jpg']
# Basenames that have an annotation but no image.
diffs = set(anns) - set(jpgs)
double_delete(root_file, diffs)
# -

# #### After cleaning, the two folders should contain the same number of files.

# !ls -l /home/lidingke/DATA/unit_by_yolo/Annotations/ | wc
# !ls -l /home/lidingke/DATA/unit_by_yolo/JPEGImages/ | wc

# #### Delete the stale roidb cache.

# !rm /home/lidingke/MyProject/ctpn_py3/data/cache/voc_2007_trainval_gt_roidb.pkl

# +
# Check that the VOCdevkit2007 symlink is correct; the training code reads
# this symlink via a hard-coded path.
# -

# !ls -l /home/lidingke/MyProject/ctpn_py3/data/VOCdevkit2007

# #### Next step:
#
# Generate a new TEXTVOC under prepare_datas.

# !rm /home/lidingke/MyProject/ctpn_py3/data/VOCdevkit2007
# !ln -s /home/lidingke/DATA/HCCR_0619/TEXTVOC /home/lidingke/MyProject/ctpn_py3/data/VOCdevkit2007
ldklib/jupyter_demo/clean_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Regression Week 1: Simple Linear Regression
# In this notebook we will use data on house sales in King County to predict house prices using simple (one input) linear regression. You will:
# * Use graphlab SArray and SFrame functions to compute important summary statistics
# * Write a function to compute the Simple Linear Regression weights using the closed form solution
# * Write a function to make predictions of the output given the input feature
# * Turn the regression around to predict the input given the output
# * Compare two different models for predicting house prices
#
# In this notebook you will be provided with some already complete code as well as some code that you should complete yourself in order to answer quiz questions. The code we provide to complete is optional and is there to assist you with solving the problems but feel free to ignore the helper code and write your own.
#
# Fire up graphlab create

import graphlab

# # Load house sales data
#
# Dataset is from house sales in King County, the region where the city of Seattle, WA is located.

sales = graphlab.SFrame('kc_house_data.gl/')

# # Split data into training and testing
# We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).

train_data,test_data = sales.random_split(.8,seed=0)

# # Useful SFrame summary functions
# In order to make use of the closed form solution as well as take advantage of graphlab's built in functions we will review some important ones. In particular:
# * Computing the sum of an SArray
# * Computing the arithmetic average (mean) of an SArray
# * multiplying SArrays by constants
# * multiplying SArrays by other SArrays

# +
# Let's compute the mean of the House Prices in King County in 2 different ways.
prices = sales['price'] # extract the price column of the sales SFrame -- this is now an SArray

# recall that the arithmetic average (the mean) is the sum of the prices divided by the total number of houses:
sum_prices = prices.sum()
num_houses = prices.size() # when prices is an SArray .size() returns its length
avg_price_1 = sum_prices/num_houses
avg_price_2 = prices.mean() # if you just want the average, the .mean() function
print "average price via method 1: " + str(avg_price_1)
print "average price via method 2: " + str(avg_price_2)
# -

# As we see we get the same answer both ways

# if we want to multiply every price by 0.5 it's a simple as:
half_prices = 0.5*prices

# Let's compute the sum of squares of price. We can multiply two SArrays of the same length elementwise also with *
prices_squared = prices*prices
sum_prices_squared = prices_squared.sum() # price_squared is an SArray of the squares and we want to add them up.
print "the sum of price squared is: " + str(sum_prices_squared)

# Aside: The python notation x.xxe+yy means x.xx \* 10^(yy). e.g 100 = 10^2 = 1*10^2 = 1e2
# # Complete the following function (or write your own) to compute the simple linear regression slope and intercept: def simple_linear_regression(input_feature, output): N = input_feature.size() # compute the mean of input_feature and output avgIn = input_feature.mean() avgOut = output.mean() # compute the product of the output and the input_feature and its mean product = input_feature * output avgPro = product.mean() # compute the squared value of the input_feature and its mean squaredIn = input_feature * input_feature avgSquIn = squaredIn.mean() # use the formula for the slope slope = (product.sum() - avgIn * avgOut * N) / (squaredIn.sum() - avgIn * avgIn * N) # use the formula for the intercept intercept = avgOut - slope * avgIn return (intercept, slope) # We can test that our function works by passing it something where we know the answer. In particular we can generate a feature and then put the output exactly on a line: output = 1 + 1\*input_feature then we know both our slope and intercept should be 1 test_feature = graphlab.SArray(range(5)) test_output = graphlab.SArray(1 + 1*test_feature) (test_intercept, test_slope) = simple_linear_regression(test_feature, test_output) print "Intercept: " + str(test_intercept) print "Slope: " + str(test_slope) # Now that we know it works let's build a regression model for predicting price based on sqft_living. Rembember that we train on train_data! # + sqft_intercept, sqft_slope = simple_linear_regression(train_data['sqft_living'], train_data['price']) print "Intercept: " + str(sqft_intercept) print "Slope: " + str(sqft_slope) # - # # Predicting Values # Now that we have the model parameters: intercept & slope we can make predictions. Using SArrays it's easy to multiply an SArray by a constant and add a constant value. 
Complete the following function to return the predicted output given the input_feature, slope and intercept: def get_regression_predictions(input_feature, intercept, slope): # calculate the predicted values: predicted_values = intercept + slope * input_feature return predicted_values # Now that we can calculate a prediction given the slop and intercept let's make a prediction. Use (or alter) the following to find out the estimated price for a house with 2650 squarefeet according to the squarefeet model we estiamted above. # # **Quiz Question: Using your Slope and Intercept from (4), What is the predicted price for a house with 2650 sqft?** my_house_sqft = 2650 estimated_price = get_regression_predictions(my_house_sqft, sqft_intercept, sqft_slope) print "The estimated price for a house with %d squarefeet is $%.2f" % (my_house_sqft, estimated_price) # # Residual Sum of Squares # Now that we have a model and can make predictions let's evaluate our model using Residual Sum of Squares (RSS). Recall that RSS is the sum of the squares of the residuals and the residuals is just a fancy word for the difference between the predicted output and the true output. # # Complete the following (or write your own) function to compute the RSS of a simple linear regression model given the input_feature, output, intercept and slope: def get_residual_sum_of_squares(input_feature, output, intercept, slope): # First get the predictions predictions = intercept + slope * input_feature # then compute the residuals (since we are squaring it doesn't matter which order you subtract) residuals = predictions - output # square the residuals and add them up RSS = (residuals * residuals).sum() return(RSS) # Let's test our get_residual_sum_of_squares function by applying it to the test model where the data lie exactly on a line. Since they lie exactly on a line the residual sum of squares should be zero! 
print get_residual_sum_of_squares(test_feature, test_output, test_intercept, test_slope) # should be 0.0 # Now use your function to calculate the RSS on training data from the squarefeet model calculated above. # # **Quiz Question: According to this function and the slope and intercept from the squarefeet model What is the RSS for the simple linear regression using squarefeet to predict prices on TRAINING data?** rss_prices_on_sqft = get_residual_sum_of_squares(train_data['sqft_living'], train_data['price'], sqft_intercept, sqft_slope) print 'The RSS of predicting Prices based on Square Feet is : ' + str(rss_prices_on_sqft) # # Predict the squarefeet given price # What if we want to predict the squarefoot given the price? Since we have an equation y = a + b\*x we can solve the function for x. So that if we have the intercept (a) and the slope (b) and the price (y) we can solve for the estimated squarefeet (x). # # Comlplete the following function to compute the inverse regression estimate, i.e. predict the input_feature given the output! def inverse_regression_predictions(output, intercept, slope): # solve output = intercept + slope*input_feature for input_feature. Use this equation to compute the inverse predictions: estimated_feature = (output - intercept) / slope return estimated_feature # Now that we have a function to compute the squarefeet given the price from our simple regression model let's see how big we might expect a house that coses $800,000 to be. 
# # **Quiz Question: According to this function and the regression slope and intercept from (3) what is the estimated square-feet for a house costing $800,000?** my_house_price = 800000 estimated_squarefeet = inverse_regression_predictions(my_house_price, sqft_intercept, sqft_slope) print "The estimated squarefeet for a house worth $%.2f is %d" % (my_house_price, estimated_squarefeet) # # New Model: estimate prices from bedrooms # We have made one model for predicting house prices using squarefeet, but there are many other features in the sales SFrame. # Use your simple linear regression function to estimate the regression parameters from predicting Prices based on number of bedrooms. Use the training data! # + # Estimate the slope and intercept for predicting 'price' based on 'bedrooms' beds_intercept, beds_slope = simple_linear_regression(train_data['bedrooms'], train_data['price']) print "Intercept: " + str(bed_intercept) print "Slope: " + str(bed_slope) # - # # Test your Linear Regression Algorithm # Now we have two models for predicting the price of a house. How do we know which one is better? Calculate the RSS on the TEST data (remember this data wasn't involved in learning the model). Compute the RSS from predicting prices using bedrooms and from predicting prices using squarefeet. # # **Quiz Question: Which model (square feet or bedrooms) has lowest RSS on TEST data? Think about why this might be the case.** # Compute RSS when using bedrooms on TEST data: rss_prices_on_beds = get_residual_sum_of_squares(test_data['bedrooms'], test_data['price'], beds_intercept, beds_slope) print 'The RSS of predicting Prices based on Bedrooms is : ' + str(rss_prices_on_beds) # Compute RSS when using squarfeet on TEST data: rss_prices_on_sqft = get_residual_sum_of_squares(test_data['sqft_living'], test_data['price'], sqft_intercept, sqft_slope) print 'The RSS of predicting Prices based on Square Feet is : ' + str(rss_prices_on_sqft)
1_Regression/Week1_Simple Linear Regression/week-1-simple-regression-assignment-blank.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from sewar.full_ref import ssim, psnr
import cv2
import os
import matplotlib.pyplot as plt

video = "eval video/face/wannabe_face_keypoints.mp4"
img_name = "wannabe"
output_dir = "img/face/wannabe"

# Dump every 10th frame of the keypoint video into output_dir.
vidcap = cv2.VideoCapture(video)
frame_idx = 0
while vidcap.isOpened():
    grabbed, frame = vidcap.read()
    if not grabbed:
        break
    if frame_idx % 10 == 0:
        cv2.imwrite(os.path.join(output_dir, 'wannabe %d.jpeg') % (frame_idx), frame)
    frame_idx += 1
cv2.destroyAllWindows()
vidcap.release()

# +
face_wannabe_ssim = []
face_fiesta_ssim = []
face_fifth_season_ssim = []

face_wannabe_psnr = []
face_fiesta_psnr = []
face_fifth_season_psnr = []
# -

img_dir = "img/face/fiesta"
a = sorted(os.listdir(img_dir))

# Score temporal consistency: SSIM/PSNR between each consecutive pair of frames.
face_fiesta_psnr = []
face_fiesta_ssim = []
for idx, (curr_name, next_name) in enumerate(zip(a, a[1:])):
    if idx % 50 == 0:
        # NOTE(review): "tring" typo kept so console output matches the original.
        print("now tring %d" % (idx))
    curr_img = cv2.imread(img_dir + '/' + curr_name)
    next_img = cv2.imread(img_dir + '/' + next_name)
    face_fiesta_ssim.append(ssim(curr_img, next_img))
    face_fiesta_psnr.append(psnr(curr_img, next_img))

plt.plot(list(range(len(face_fiesta_ssim))), face_fiesta_ssim)
plt.show()

plt.plot(list(range(len(face_fiesta_psnr))), face_fiesta_psnr)
plt.show()

with open('face_fiesta_psnr.txt', 'w') as out_file:
    out_file.write(str(face_fiesta_psnr))

with open('face_fiesta_ssim.txt', 'w') as out_file:
    out_file.write(str(face_fiesta_ssim))
evaluation/eval.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#<NAME>'19
#GenerativeLSTM
# -

import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers import Dot, Input, Dense, Reshape, LSTM, Conv2D, Flatten, MaxPooling1D, Dropout, MaxPooling2D
from tensorflow.keras.layers import Embedding, Multiply, Subtract
from tensorflow.keras.models import Sequential, Model
from tensorflow.python.keras.layers import Lambda
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint
from keras.utils import np_utils
import pandas as pd
import numpy as np
import re
import nltk
import matplotlib.pyplot as plt

pd.options.display.max_colwidth = 200
# %matplotlib inline

# load ascii text and convert to lowercase
filename = "wonderland.txt"
# FIX: the original used open(filename).read() and never closed the file handle;
# a context manager releases it deterministically.
with open(filename) as corpus_file:
    raw_text = corpus_file.read()
raw_text = raw_text.lower()

# + active=""
# # Now that the book is loaded, we must prepare the data for modeling by the neural network. We cannot model the characters directly, instead we must convert the characters to integers.
#
# We can do this easily by first creating a set of all of the distinct characters in the book, then creating a map of each character to a unique integer.
# -

# create mapping of unique chars to integers
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars)) #Lookup Table
print(chars)

n_chars = len(raw_text)
n_vocab = len(chars)
print("Total Characters: ", n_chars)
print("Total Vocab: ", n_vocab)

# we will split the book text up into subsequences with a fixed length of 100 characters, an arbitrary length. We could just as easily split the data up by sentences and pad the shorter sequences and truncate the longer ones.
# Demo: the first 100-character input window and the character it should predict.
range(0, n_chars - 100, 1) #Input Sequence

raw_text[0:0 + 100]

print([char_to_int[char] for char in raw_text[0:0 + 100]])

#Char Prediction
raw_text[0 + 100]

# prepare the dataset of input to output pairs encoded as integers
seq_length = 100 #<------- [Hyperparameter]
dataX = []
dataY = []
# Slide a seq_length window over the text; each window is an input and the
# character immediately after it is the target.
for start in range(n_chars - seq_length):
    window = raw_text[start:start + seq_length]
    dataX.append([char_to_int[ch] for ch in window])
    dataY.append(char_to_int[raw_text[start + seq_length]])
n_patterns = len(dataX)
print ("Total Patterns: ", n_patterns)

# First we must transform the list of input sequences into the form [samples, time steps, features] expected by an LSTM network.
# Next we need to rescale the integers to the range 0-to-1 to make the patterns easier to learn by the LSTM network that uses the sigmoid activation function by default.
# Finally, we need to convert the output patterns (single characters converted to integers) into a one hot encoding.
# Each y value is converted into a sparse vector with a length of 47, full of zeros except with a 1 in the column for the letter (integer) that the pattern represents.

# reshape X to be [samples, time steps, features]
X = np.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)

X.shape

y

# We can now define our LSTM model. Here we define a single hidden LSTM layer with 256 memory units. The network uses dropout with a probability of 20. The output layer is a Dense layer using the softmax activation function to output a probability prediction for each of the 47 characters between 0 and 1.
#
# The problem is really a single character classification problem with 47 classes and as such is defined as optimizing the log loss (cross entropy), here using the ADAM optimization algorithm for speed.
# define the LSTM model model = Sequential() model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]))) model.add(Dropout(0.2)) model.add(Dense(y.shape[1], activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') # There is no test dataset. We are modeling the entire training dataset to learn the probability of each character in a sequence. # # -----> We are not interested in the most accurate (classification accuracy) model of the training dataset. This would be a model that predicts each character in the training dataset perfectly. Instead we are interested in a generalization of the dataset that minimizes the chosen loss function. We are seeking a balance between generalization and overfitting but short of memorization # # ------> The network is slow to train (about 300 seconds per epoch on an Nvidia K520 GPU). Because of the slowness and because of our optimization requirements, we will use model checkpointing to record all of the network weights to file each time an improvement in loss is observed at the end of the epoch. We will use the best set of weights (lowest loss) to instantiate our generative model in the next section. # define the checkpoint filepath="weights-improvement-{epoch:02d}-{loss:.4f}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min') callbacks_list = [checkpoint] # + ###Generative Network from Tensorflow https://www.tensorflow.org/beta/tutorials/text/text_generation # - from __future__ import absolute_import, division, print_function, unicode_literals # + # #!pip install -q tensorflow-gpu==2.0.0-beta1 import tensorflow as tf import numpy as np import os import time # - path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt') # Read, then decode for py2 compat. 
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
# length of text is the number of characters in it
print ('Length of text: {} characters'.format(len(text)))

# Take a look at the first 250 characters in text
print(text[:250])

# The unique characters in the file
vocab = sorted(set(text))
print ('{} unique characters'.format(len(vocab)))

# +
# Creating a mapping from unique characters to indices
# Vectorize the text
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)

text_as_int = np.array([char2idx[c] for c in text])
# -

# Show a sample of the character-to-index table.
print('{')
for char,_ in zip(char2idx, range(20)):
    print(' {:4s}: {:3d},'.format(repr(char), char2idx[char]))
print(' ...\n}')

# Show how the first 13 characters from the text are mapped to integers
print ('{} ---- characters mapped to int ---- > {}'.format(repr(text[:13]), text_as_int[:13]))

# +
# The maximum length sentence we want for a single input in characters
seq_length = 100
examples_per_epoch = len(text)//seq_length

# Create training examples / targets
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)

for i in char_dataset.take(5):
    print(idx2char[i.numpy()])

# +
# Batch the character stream into chunks of seq_length+1 (input + shifted target).
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)

for item in sequences.take(5):
    print(repr(''.join(idx2char[item.numpy()])))

# +
def split_input_target(chunk):
    # Split a (seq_length+1)-char chunk into (input, target) shifted by one step.
    input_text = chunk[:-1]
    target_text = chunk[1:]
    return input_text, target_text

dataset = sequences.map(split_input_target)
# -

dataset

#Print the first examples input and target values:
for input_example, target_example in dataset.take(2):
    print ('Input data: ', repr(''.join(idx2char[input_example.numpy()])))
    print ('Target data:', repr(''.join(idx2char[target_example.numpy()])))

for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])):
    print("Step {:4d}".format(i))
    print(" input: {} ({:s})".format(input_idx, repr(idx2char[input_idx])))
    print(" expected output: {} ({:s})".format(target_idx, repr(idx2char[target_idx])))

# +
# Batch size
BATCH_SIZE = 64

# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000

dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)

dataset

# +
# Length of the vocabulary in chars
vocab_size = len(vocab)

# The embedding dimension
embedding_dim = 256

# Number of RNN units
rnn_units = 512
# -

def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    # Embedding -> stateful LSTM -> per-character logits (no softmax; the loss
    # is computed from logits).
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),
        tf.keras.layers.LSTM(rnn_units, return_sequences=True, stateful=True, recurrent_initializer='glorot_uniform'),
        tf.keras.layers.Dense(vocab_size)
    ])
    return model

model = build_model(
    vocab_size = len(vocab),
    embedding_dim=embedding_dim,
    rnn_units=rnn_units,
    batch_size=BATCH_SIZE)

# Try the model
# Now run the model to see that it behaves as expected.
# Sanity-check the untrained model on one batch.
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)
    print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")

model.summary()

# Sample one character id per timestep from the (untrained) output distribution.
sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()

sampled_indices

print("Input: \n", repr("".join(idx2char[input_example_batch[0]])))
print()
print("Next Char Predictions: \n", repr("".join(idx2char[sampled_indices ])))

# #Train the model

# +
def loss(labels, logits):
    # Sparse cross entropy on raw logits (the model's Dense layer has no softmax).
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)

example_batch_loss = loss(target_example_batch, example_batch_predictions)
print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)")
print("scalar_loss: ", example_batch_loss.numpy().mean())
# -

model.compile(optimizer='adam', loss=loss)

# +
# Directory where the checkpoints will be saved
checkpoint_dir = './training_checkpoints'
# Name of the checkpoint files
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")

checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_prefix,
    save_weights_only=True)
# -

EPOCHS=30

history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])

# Generate text

tf.train.latest_checkpoint(checkpoint_dir)

# +
# Rebuild the model with batch_size=1 for single-sequence generation and load
# the latest trained weights into it.
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)

model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))

model.build(tf.TensorShape([1, None]))
# -

model.summary()

# The prediction loop

def generate_text(model, start_string):
    # Evaluation step (generating text using the learned model):
    # autoregressively sample num_generate characters starting from start_string.

    # Number of characters to generate
    num_generate = 1000

    # Converting our start string to numbers (vectorizing)
    input_eval = [char2idx[s] for s in start_string]
    input_eval = tf.expand_dims(input_eval, 0)

    # Empty string to store our results
    text_generated = []

    # Low temperatures results in more predictable text.
    # Higher temperatures results in more surprising text.
    # Experiment to find the best setting.
    temperature = 1.0

    # Here batch size == 1
    model.reset_states()
    for i in range(num_generate):
        predictions = model(input_eval)
        # remove the batch dimension
        predictions = tf.squeeze(predictions, 0)

        # using a categorical distribution to predict the word returned by the model
        predictions = predictions / temperature
        predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()

        # We pass the predicted word as the next input to the model
        # along with the previous hidden state
        input_eval = tf.expand_dims([predicted_id], 0)

        text_generated.append(idx2char[predicted_id])

    return (start_string + ''.join(text_generated))

print(generate_text(model, start_string=u"ROMEO: "))
main/nbs/mdling/lstm/[Tutorial]generativeLSTM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import datetime

# Lipid definition: OUPE head/link/tail beads in Martini building-block notation.
lipidname = "OUPE"
tail = "DDDDD CDCC"
link = "G G"
head = "E P"
description = "; A general model Phosphosatidylethanolamine (PE) lipid \n; C22:6(4c,7c,10c,13c,16c,19c) docosahexaenoic acid , and C16:1(9c) palmitoleic acid \n"
modeledOn="; This topology follows the standard Martini 2.0 lipid definitions and building block rules.\n; Reference(s): \n; <NAME>, <NAME>, <NAME>. Coarse grained model for semi-quantitative lipid simulations. JPC-B, 108:750-760, \n; 2004. doi:10.1021/jp036508g \n; <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. The MARTINI force field: coarse grained model for \n; biomolecular simulations. JPC-B, 111:7812-7824, 2007. doi:10.1021/jp071097f \n; <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Computational lipidomics with insane: a versatile \n; tool for generating custom membranes for molecular simulations. JCTC, 150410125128004, 2015. doi:10.1021/acs.jctc.5b00209\n; Created: "
now = datetime.datetime.now()

# Paths to the test membrane, insane script, mdp parameters and Martini force field.
membrane="testmembrane"
insane="../insane+SF.py"
mdparams="../test.mdp"
martinipath="../martini.ff/"
ITPCatalogue="./epithelial.cat"
ITPMasterFile="martini_v2_epithelial.itp"

modeledOn+= now.strftime("%Y.%m.%d")+"\n"

# Cleaning up intermediate files from previous runs
# !rm -f *#*
# !rm -f *step*
# !rm -f {membrane}*

# +
import fileinput
import os.path

print("Create itp")
# !python {martinipath}/lipid-martini-itp-v06.py -o {lipidname}.itp -alname {lipidname} -name {lipidname} -alhead '{head}' -allink '{link}' -altail '{tail}'

#update description and parameters
with fileinput.FileInput(lipidname+".itp", inplace=True) as file:
    for line in file:
        if line == "; This is a ...\n":
            print(description, end='')
        elif line == "; Was modeled on ...\n":
            print(modeledOn, end='')
        else:
            print(line, end='')

# +
#Add this ITP file to the catalogue file
if not os.path.exists(ITPCatalogue):
    ITPCatalogueData = []
else:
    with open(ITPCatalogue, 'r') as file :
        ITPCatalogueData = file.read().splitlines()
    # drop any stale entry for this lipid before re-appending it
    ITPCatalogueData = [x for x in ITPCatalogueData if not x==lipidname+".itp"]
ITPCatalogueData.append(lipidname+".itp")
with open(ITPCatalogue, 'w') as file :
    file.writelines("%s\n" % item for item in ITPCatalogueData)

#build ITPFile by concatenating every catalogued ITP into the master file
with open(martinipath+ITPMasterFile, 'w') as masterfile:
    for ITPfilename in ITPCatalogueData:
        with open(ITPfilename, 'r') as ITPfile :
            for line in ITPfile:
                masterfile.write(line)
print("Done")
# -

# build a simple membrane to visualize this species
# !python2 {insane} -o {membrane}.gro -p {membrane}.top -d 0 -x 3 -y 3 -z 3 -sol PW -center -charge 0 -orient -u {lipidname}:1 -l {lipidname}:1 -itpPath {martinipath}

# +
import os #Operating system specific commands
import re #Regular expression library

print("Test")
print("Grompp")
# grompp = !gmx grompp -f {mdparams} -c {membrane}.gro -p {membrane}.top -o {membrane}.tpr
success=True
for line in grompp:
    if re.search("ERROR", line):
        success=False
    if re.search("Fatal error", line):
        success=False
    #if not success: print(line)
if success:
    print("Run")
    # !export GMX_MAXCONSTRWARN=-1
    # !export GMX_SUPPRESS_DUMP=1
    # run = !gmx mdrun -v -deffnm {membrane}
    summary=""
    logfile = membrane+".log"
    if not os.path.exists(logfile):
        print("no log file")
        print("== === ====")
        for line in run:
            print(line)
    else:
        try:
            # FIX: use a context manager so the log handle is closed (the
            # original `file = open(logfile, "r")` was never closed and
            # shadowed the name `file` used in earlier cells).
            with open(logfile, "r") as logf:
                fe = False
                for line in logf:
                    if fe:
                        # the line after "Fatal error" carries the message
                        success=False
                        summary=line
                    elif re.search("^Steepest Descents.*converge", line):
                        success=True
                        summary=line
                        break
                    elif re.search("Fatal error", line):
                        fe = True
        except IOError as exc:
            # FIX: was `sucess=False` (typo), so a read failure never
            # flipped the success flag.
            success=False
            summary=exc
    if success:
        print("Success")
    else:
        print(summary)
# -
buildITP/OUPE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:root] *
#     language: python
#     name: conda-root-py
# ---

# +
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import torchmetrics
from scipy.io import loadmat
from sklearn.model_selection import train_test_split

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# -

# Input: each .mat file holds one feature array per herb class (BZ, DG, GG).
source_dataset_mat=loadmat('Source_BZ_DG_GG.mat')
source_dataset_raw=np.vstack((source_dataset_mat['BZ'],source_dataset_mat['DG'],source_dataset_mat['GG']))
target_dataset_mat=loadmat('Target_BZ_DG_GG.mat')
target_dataset_raw=np.vstack((target_dataset_mat['BZ'],target_dataset_mat['DG'],target_dataset_mat['GG']))

# Labels: 160 samples per class in the source domain, 50 per class in the target domain.
y_source=np.hstack((0*np.ones(160),1*np.ones(160),2*np.ones(160))).astype(np.int64)
y_target=np.hstack((0*np.ones(50),1*np.ones(50),2*np.ones(50))).astype(np.int64)

print(source_dataset_raw.shape)
print(target_dataset_raw.shape)
print(y_source.shape)
print(y_target.shape)

# ### DRCA

class DRCA():
    '''
    The DRCA Class.

    Finds a linear projection W that preserves within-domain scatter while
    collapsing the source/target domain shift (solved as a generalized
    eigenproblem on pinv(between-domain) @ within-domain).
    '''
    def __init__(self, n_components = 2, alpha = None, mode = 'raw'):
        '''
        Initialize the DRCA model.
        :param n_components: intended dimensionality of the projection hyperplane,
                             smaller than the initial dimensionality
        :param alpha: weighting factor for target-domain within-class scatter;
                      defaults to Ns/Nt when None
        :param mode: 'raw'    -- treat source (S) and target (T) each as one group
                     'number' -- per-class drift terms weighted by the average
                                 number of cases of that class in S and T
                     'mean'   -- per-class drift terms with equal weights
        '''
        self.mode = mode
        self.Sw_s = None
        self.Sw_t = None
        self.mu_s = None
        self.mu_t = None
        self.alpha = alpha
        self.D_tilde = n_components

    def fit(self, Xs, Xt, Ys=None, Yt = None):
        '''
        Fit the DRCA model with the data and labels given by users.
        :param Xs: source-domain feature matrix of shape (Ns, D), np.array
        :param Xt: target-domain feature matrix of shape (Nt, D), np.array
        :param Ys: source-domain labels of shape (Ns,), np.array, int
                   (required unless mode == 'raw')
        :param Yt: target-domain labels of shape (Nt,), np.array, int
                   (required unless mode == 'raw')
        '''
        ### --- Summarize statistics --- ###
        if self.mode != 'raw':
            Ys = Ys.reshape(-1,) # make sure the labels are 1-D
            Yt = Yt.reshape(-1,)
        Ns = Xs.shape[0]
        Nt = Xt.shape[0]
        D = Xs.shape[1]
        ### --- Within-domain scatter --- ###
        self.mu_s = np.mean(Xs,axis=0,keepdims=True) #1*D
        self.mu_t = np.mean(Xt,axis=0,keepdims=True)
        self.Sw_s = (Xs - self.mu_s).T @ (Xs - self.mu_s) #D*D
        self.Sw_t = (Xt - self.mu_t).T @ (Xt - self.mu_t) #D*D
        if self.alpha is None:  # FIX: was `== None`
            self.alpha = Ns/Nt
        self.nominator = self.Sw_s + self.Sw_t * self.alpha
        ### --- Eliminate sensor drifts --- ###
        if self.mode == 'raw': #S and T as two entities
            self.denominator = (self.mu_s - self.mu_t).T @ (self.mu_s-self.mu_t) #D*D
        else:
            # Per-class drift terms. FIXES vs. the original: iterate the actual
            # class values (the original indexed the class *count* as Kt[i],
            # a TypeError), start the accumulator from zeros (np.empty left
            # garbage), use the local class means (self.mu_s_matrix was never
            # defined), and accumulate into self.denominator (self.denomiator
            # was a typo, so nothing was ever accumulated).
            target_classes = np.unique(Yt) #Assume that the target domain classes are fewer
            self.denominator = np.zeros((D,D))
            for c in target_classes:
                mu_s_c = np.mean(Xs[Ys==c,:],axis=0,keepdims=True) #1*D
                mu_t_c = np.mean(Xt[Yt==c,:],axis=0,keepdims=True) #1*D
                Sb_c = (mu_s_c - mu_t_c).T @ (mu_s_c - mu_t_c) #D*D
                if self.mode == 'number':
                    # weight by the average number of cases of this class in S and T
                    N_c = 0.5 * (np.sum(Ys==c) + np.sum(Yt==c))
                    self.denominator += N_c * Sb_c
                else: # 'mean': equal weights for every class
                    self.denominator += Sb_c
        eigenValues, eigenVectors = np.linalg.eig(np.linalg.pinv(self.denominator) @ self.nominator) #D*D
        idx = np.abs(eigenValues).argsort()[::-1]
        self.eigenValues = eigenValues[idx]
        self.eigenVectors = eigenVectors[:,idx]
        self.W = self.eigenVectors[:,0:self.D_tilde] #shape=(D,D_tilde)

    def transform(self, X):
        '''
        Project data onto the fitted DRCA subspace.
        :param X: data in np.array of shape (N, D)
        :return: projected data in np.array of shape (N, D_tilde)
        '''
        return np.matmul(X,self.W) #(N,D)@(D,D_tilde) -> (N,D_tilde)

    def fit_transform(self, Xs, Xt, Ys=None, Yt = None):
        '''
        Fit on (Xs, Xt) and return the real-valued projections of both domains.
        :param Xs: source-domain feature matrix of shape (Ns, D), np.array
        :param Xt: target-domain feature matrix of shape (Nt, D), np.array
        :param Ys: source-domain labels of shape (Ns,), np.array, int
        :param Yt: target-domain labels of shape (Nt,), np.array, int
        :return: (Xs_projected, Xt_projected), each N * D_tilde
        '''
        self.fit(Xs, Xt, Ys, Yt)
        return np.real(self.transform(Xs)),np.real(self.transform(Xt)) #N * D_tilde

# ### Split Dataset

class HerbalData(Dataset):
    # Thin torch Dataset wrapper over pre-built feature/label tensors.
    def __init__(self, x_train, y_train):
        assert len(x_train) == len(y_train)
        self.x = x_train
        self.y = y_train
    def __len__(self):
        return self.x.shape[0]
    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

# Normalize data
# NOTE(review): assumes the feature vector is 8 blocks of 16 columns (e.g. one
# block per sensor), normalized independently — confirm the sensor layout.
from sklearn.preprocessing import normalize
for i in range(0, 128, 16):
    source_dataset_raw[:,i:i+16] = normalize(source_dataset_raw[:,i:i+16])
    target_dataset_raw[:,i:i+16] = normalize(target_dataset_raw[:,i:i+16])

# +
# Without DRCA: 70/15/15 train/valid/test split of the source domain.
x_train, x_valid, y_train, y_valid = train_test_split(source_dataset_raw, y_source, train_size=0.7, random_state=1)
x_valid, x_test, y_valid, y_test = train_test_split(x_valid, y_valid, train_size=0.5, random_state=1)

train_set = HerbalData(torch.from_numpy(x_train).to(device), torch.from_numpy(y_train).to(device))
valid_set = HerbalData(torch.from_numpy(x_valid).to(device), torch.from_numpy(y_valid).to(device))
test_set = HerbalData(torch.from_numpy(x_test).to(device), torch.from_numpy(y_test).to(device))
target_set = HerbalData(torch.from_numpy(target_dataset_raw).to(device), torch.from_numpy(y_target).to(device))

train_dataloader = DataLoader(train_set, batch_size=128, shuffle=True)
valid_dataloader = DataLoader(valid_set, batch_size=128, shuffle=True)
test_dataloader = DataLoader(test_set, batch_size=len(test_set))
target_dataloader = DataLoader(target_set, batch_size=len(target_set))

print("Train Set:", x_train.shape)
print("Valid Set:", x_valid.shape)
print("Test Set:", x_test.shape)
print("Target Set:", target_dataset_raw.shape)

# +
# With DRCA: project both domains before splitting.
drca=DRCA(n_components=50, alpha=10)#n_components and alpha value are hyperparameters
source_dataset_drca, target_dataset_drca = drca.fit_transform(source_dataset_raw,target_dataset_raw)

x_train_drca, x_valid_drca, y_train_drca, y_valid_drca = train_test_split(source_dataset_drca, y_source, train_size=0.7, random_state=1)
x_valid_drca, x_test_drca, y_valid_drca, y_test_drca = train_test_split(x_valid_drca, y_valid_drca, train_size=0.5, random_state=1)

train_set_drca = HerbalData(torch.from_numpy(x_train_drca).to(device), torch.from_numpy(y_train_drca).to(device))
valid_set_drca = HerbalData(torch.from_numpy(x_valid_drca).to(device), torch.from_numpy(y_valid_drca).to(device))
test_set_drca = HerbalData(torch.from_numpy(x_test_drca).to(device), torch.from_numpy(y_test_drca).to(device))
target_set_drca = HerbalData(torch.from_numpy(target_dataset_drca).to(device), torch.from_numpy(y_target).to(device))

train_dataloader_drca = DataLoader(train_set_drca, batch_size=128, shuffle=True)
valid_dataloader_drca = DataLoader(valid_set_drca, batch_size=128, shuffle=True)
test_dataloader_drca = DataLoader(test_set_drca, batch_size=len(test_set_drca))
target_dataloader_drca = DataLoader(target_set_drca, batch_size=len(target_set_drca))

print("Train Set:", x_train_drca.shape)
print("Valid Set:", x_valid_drca.shape)
print("Test Set:", x_test_drca.shape)
print("Target Set:", target_dataset_drca.shape)
# -

# ### NN

class NN(nn.Module):
    # Simple MLP classifier: Linear/ReLU/Dropout stacks between the sizes
    # listed in `layers`, with a final plain Linear output layer (logits).
    def __init__(self, layers, dropout):
        super().__init__()
        fcs = []
        for i in range(len(layers) - 2):
            fcs.append(nn.Linear(layers[i], layers[i+1]))
            fcs.append(nn.ReLU())
            fcs.append(nn.Dropout(dropout))
        fcs.append(nn.Linear(layers[-2], layers[-1]))
        self.fc = nn.Sequential(*fcs)
    def forward(self, data):
        # data = [batch size, input_dim]
        return self.fc(data)

# +
INPUT_DIM = x_train.shape[1]
INPUT_DIM_DRCA = x_train_drca.shape[1]
OUTPUT_DIM = 3
DROPOUT = 0.3
LAYERS = [INPUT_DIM, 64, 32, 16, OUTPUT_DIM]
LAYERSD_DRCA = [INPUT_DIM_DRCA, 64, 32, 16, OUTPUT_DIM]

model = NN(LAYERS, DROPOUT)
model.double()
model = model.to(device)
optimizer = optim.Adam(model.parameters(),lr=5e-4)

model_drca = NN(LAYERSD_DRCA, DROPOUT)
model_drca.double()
model_drca = model_drca.to(device)
optimizer_drca = optim.Adam(model_drca.parameters(),lr=1e-4)

criterion = nn.CrossEntropyLoss()
criterion = criterion.to(device)
# -

def train(model, train_dataloader, optimizer, criterion):
    '''Run one training epoch; return batch-averaged (loss, acc, precision, recall, f1).'''
    epoch_loss = 0
    epoch_acc = 0
    epoch_prec = 0
    epoch_recall = 0
    epoch_f1 = 0
    batches = len(train_dataloader)
    model.train()
    for _, batch in enumerate(train_dataloader):
        x, y = batch
        optimizer.zero_grad()
        predictions = model(x)
        loss = criterion(predictions, y)
        # argmax over class probabilities for the metrics
        predictions = torch.argmax(torch.softmax(predictions, 1), dim=1)
        acc = torchmetrics.functional.accuracy(predictions, y)
        prec, recall = torchmetrics.functional.precision_recall(predictions, y, num_classes=3, average='macro')
        f1 = torchmetrics.functional.f1(predictions, y, num_classes=3, average='macro')
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
        epoch_prec += prec.item()
        epoch_recall += recall.item()
        epoch_f1 += f1.item()
    return epoch_loss / batches, epoch_acc / batches, epoch_prec / batches, epoch_recall / batches, epoch_f1 / batches

def evaluate(model, dataloader, criterion):
    '''Evaluate without gradients; return batch-averaged (loss, acc, precision, recall, f1).'''
    epoch_loss = 0
    epoch_acc = 0
    epoch_prec = 0
    epoch_recall = 0
    epoch_f1 = 0
    batches = len(dataloader)
    model.eval()
    with torch.no_grad():
        for _, batch in enumerate(dataloader):
            x, y = batch
            predictions = model(x)
            loss = criterion(predictions, y)
            predictions = torch.argmax(torch.softmax(predictions, 1), dim=1)
            acc = torchmetrics.functional.accuracy(predictions, y)
            prec, recall = torchmetrics.functional.precision_recall(predictions, y, num_classes=3, average='macro')
            f1 = torchmetrics.functional.f1(predictions, y, num_classes=3, average='macro')
            epoch_loss += loss.item()
            epoch_acc += acc.item()
            epoch_prec += prec.item()
            epoch_recall += recall.item()
            epoch_f1 += f1.item()
    return epoch_loss / batches, epoch_acc / batches, epoch_prec / batches, epoch_recall / batches, epoch_f1 / batches

# ## Training Model Without DRCA

# +
import time

def epoch_time(start_time, end_time):
    '''Convert an elapsed wall-clock interval to (whole minutes, remaining seconds).'''
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs
# -

# + tags=[]
N_EPOCHS = 300
train_loss = np.zeros(N_EPOCHS)
train_acc = np.zeros(N_EPOCHS)
train_prec = np.zeros(N_EPOCHS)
train_recall = np.zeros(N_EPOCHS)
train_f1 = np.zeros(N_EPOCHS)
valid_loss = np.zeros(N_EPOCHS)
valid_acc = np.zeros(N_EPOCHS)
valid_prec = np.zeros(N_EPOCHS) valid_recall = np.zeros(N_EPOCHS) valid_f1 = np.zeros(N_EPOCHS) best_valid_loss = float('inf') for i in range(N_EPOCHS): start_time = time.time() train_loss[i], train_acc[i], train_prec[i], train_recall[i], train_f1[i] = train(model, train_dataloader, optimizer, criterion) valid_loss[i], valid_acc[i], valid_prec[i], valid_recall[i], valid_f1[i] = evaluate(model, valid_dataloader, criterion) end_time = time.time() epoch_mins, epoch_secs = epoch_time(start_time, end_time) if valid_loss[i] < best_valid_loss: best_valid_loss = valid_loss[i] torch.save(model.state_dict(), 'nn-agg.pt') print(f'Epoch: {i+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s') print(f'\tTrain Loss: {train_loss[i]:.3f} | Train Acc: {train_acc[i]*100:.2f}%') print(f'\t Val. Loss: {valid_loss[i]:.3f} | Val. Acc: {valid_acc[i]*100:.2f}%') # - # ## Analysis # + from sklearn.metrics import confusion_matrix from matplotlib import pyplot as plt def predict(model, x): x = torch.from_numpy(x).to(device) with torch.no_grad(): return torch.argmax(torch.softmax(model(x), 1), dim=1).cpu().detach().numpy() def confusion_matrix_plot(y_pred, y_true): cm = confusion_matrix(y_pred, y_true, normalize='true') normalize = True cmap = 'RdPu' classes = [0, 1, 2] title = 'cofusion matrix' fig, ax = plt.subplots() im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax = ax) ax.set(xticks = np.arange(cm.shape[1]), yticks = np.arange(cm.shape[0]), xticklabels = classes, yticklabels = classes, ylabel = 'True label', xlabel = 'Predicted label', title = title) plt.setp(ax.get_xticklabels(), rotation=45, ha = 'right', rotation_mode = 'anchor') fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2 for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha = 'center', va = 'center', color = 'white' if cm[i,j] > thresh else 'black') fig.tight_layout() def metric_epoch(train_loss, valid_loss, train_f1, valid_f1): x = range(0, 
len(train_loss)) plt.figure(figsize=(14,3)) grid = plt.GridSpec(3, 2, wspace=0.5, hspace=0.5) plt.subplot(grid[:,0]) plt.plot(x, train_f1, color="r", marker='o',markersize='1.5',markeredgecolor='r',markeredgewidth = 1.5, label = 'Train F1 score') plt.plot(x, valid_f1, color="b", marker='o',markersize='1.5',markeredgecolor='b',markeredgewidth = 1.5, label = 'Valid F1 score') plt.legend() plt.title('F1 score vs epoches') plt.xlabel('epoches') plt.ylabel('F1 score') plt.subplot(grid[:,1]) plt.plot(x, train_loss, color="red", marker='o',markersize='1.5',markeredgecolor='r',markeredgewidth = 1.5, label = 'Train Loss') plt.plot(x, valid_loss, color="blue", marker='o',markersize='1.5',markeredgecolor='b',markeredgewidth = 1.5, label = 'Valid Loss') plt.legend() plt.title('Loss vs epoches') plt.xlabel('epoches') plt.ylabel('Loss') plt.show() # + import matplotlib.pyplot as plt from numpy import interp from sklearn.preprocessing import label_binarize from sklearn.metrics import auc from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score def plot_roc_curve(model, X, Y_true, titile=""): fpr = dict() tpr = dict() roc_auc = dict() n_classes = 3 y_test = label_binarize(Y_true, classes=[0, 1, 2]) y_score = label_binarize(predict(model, X), classes=[0, 1, 2]) for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) micro_auc = roc_auc_score(y_test, y_score, average='micro') all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) macro_auc = roc_auc_score(y_test, y_score, average='macro') print(roc_auc) print('micro auc:', micro_auc) 
print('macro auc:', macro_auc) # Plot all ROC curves plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})'.format(roc_auc["micro"]), color='deeppink', linestyle=':', linewidth=4) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})'.format(roc_auc["macro"]), color='navy', linestyle=':', linewidth=4) colors = ['aqua', 'darkorange', 'cornflowerblue'] for i, color in zip(range(n_classes), colors): plt.plot(fpr[i], tpr[i], color=color, lw=2, label='ROC curve of class {0} (area = {1:0.2f})'.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--', lw=2) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver Operating Characteristic') plt.legend(loc="lower right") # - metric_epoch(train_loss, valid_loss, train_f1, valid_f1) # ### Test Set # + model.load_state_dict(torch.load('nn-agg.pt')) test_loss, test_acc, test_prec, test_recall, test_f1 = evaluate(model, test_dataloader, criterion) print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% | Test Prec: {test_prec*100:.2f}% | Test Recall: {test_recall*100:.2f}% | Test F1: {test_f1*100:.2f}%') # - confusion_matrix_plot(predict(model, x_test), y_test) plot_roc_curve(model, x_test, y_test) # ### Target Set # + model.load_state_dict(torch.load('nn-agg.pt')) test_loss, test_acc, test_prec, test_recall, test_f1 = evaluate(model, target_dataloader, criterion) print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% | Test Prec: {test_prec*100:.2f}% | Test Recall: {test_recall*100:.2f}% | Test F1: {test_f1*100:.2f}%') # - confusion_matrix_plot(predict(model, target_dataset_raw), y_target) plot_roc_curve(model, target_dataset_raw, y_target) # ## Training Model With DRCA # + tags=[] N_EPOCHS = 300 best_valid_loss = float('inf') train_loss_drca = np.zeros(N_EPOCHS) train_acc_drca = np.zeros(N_EPOCHS) train_prec_drca = np.zeros(N_EPOCHS) train_recall_drca = 
np.zeros(N_EPOCHS) train_f1_drca = np.zeros(N_EPOCHS) valid_loss_drca = np.zeros(N_EPOCHS) valid_acc_drca = np.zeros(N_EPOCHS) valid_prec_drca = np.zeros(N_EPOCHS) valid_recall_drca = np.zeros(N_EPOCHS) valid_f1_drca = np.zeros(N_EPOCHS) for i in range(N_EPOCHS): start_time = time.time() train_loss_drca[i], train_acc_drca[i], train_prec_drca[i], train_recall_drca[i], train_f1_drca[i] = train(model_drca, train_dataloader_drca, optimizer_drca, criterion) valid_loss_drca[i], valid_acc_drca[i], valid_prec_drca[i], valid_recall_drca[i], valid_f1_drca[i] = evaluate(model_drca, valid_dataloader_drca, criterion) end_time = time.time() epoch_mins, epoch_secs = epoch_time(start_time, end_time) if valid_loss_drca[i] < best_valid_loss: best_valid_loss = valid_loss_drca[i] torch.save(model_drca.state_dict(), 'nn-agg-drca.pt') print(f'Epoch: {i+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s') print(f'\tTrain Loss: {train_loss_drca[i]:.3f} | Train Acc: {train_acc_drca[i]*100:.2f}%') print(f'\t Val. Loss: {valid_loss_drca[i]:.3f} | Val. 
Acc: {valid_acc_drca[i]*100:.2f}%') # - metric_epoch(train_loss_drca, valid_loss_drca, train_f1_drca, valid_f1_drca) # ### Test Set # + model_drca.load_state_dict(torch.load('nn-agg-drca.pt')) test_loss, test_acc, test_prec, test_recall, test_f1 = evaluate(model_drca, test_dataloader_drca, criterion) print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% | Test Prec: {test_prec*100:.2f}% | Test Recall: {test_recall*100:.2f}% | Test F1: {test_f1*100:.2f}%') # - confusion_matrix_plot(predict(model_drca, x_test_drca), y_test_drca) plot_roc_curve(model_drca, x_test_drca, y_test_drca) # ### Target Set # + model_drca.load_state_dict(torch.load('nn-agg-drca.pt')) test_loss, test_acc, test_prec, test_recall, test_f1 = evaluate(model_drca, target_dataloader_drca, criterion) print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% | Test Prec: {test_prec*100:.2f}% | Test Recall: {test_recall*100:.2f}% | Test F1: {test_f1*100:.2f}%') # - confusion_matrix_plot(predict(model_drca, target_dataset_drca), y_target) plot_roc_curve(model_drca, target_dataset_drca, y_target)
nn-agg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # DSCI 525 - Web and Cloud Computing # ***Milestone 4:*** In this milestone, you will deploy the machine learning model you trained in milestone 3. # # You might want to go over [this sample project](https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone4/sampleproject.ipynb) and get it done before starting this milestone. # # Milestone 4 checklist : # # - [X] Use an EC2 instance. # - [X] Develop your API here in this notebook. # - [X] Copy it to ```app.py``` file in EC2 instance. # - [X] Run your API for other consumers and test among your colleagues. # - [X] Summarize your journey. # # In this milestone, you will do certain things that you learned. For example... # - Login to the instance # - Work with Linux and use some basic commands # - Configure security groups so that it accepts your webserver requests from your laptop # - Configure AWS CLI # # In some places, I explicitly mentioned these to remind you. ## Import all the packages that you need from flask import Flask, request, jsonify import joblib import numpy as np # ## 1. Develop your API # # rubric={mechanics:45} # You probably got how to set up primary URL endpoints from the [sampleproject.ipynb](https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone4/sampleproject.ipynb) and have them process and return some data. Here we are going to create a new endpoint that accepts a POST request of the features required to run the machine learning model that you trained and saved in last milestone (i.e., a user will post the predictions of the 25 climate model rainfall predictions, i.e., features, needed to predict with your machine learning model). 
Your code should then process this data, use your model to make a prediction, and return that prediction to the user. To get you started with all this, I've given you a template that you should fill out to set up this functionality: # # ***NOTE:*** You won't be able to test the flask module (or the API you make here) unless you go through steps in ```2. Deploy your API```. However, you can make sure that you develop all your functions and inputs properly here. # # ```python # from flask import Flask, request, jsonify # import joblib # import pandas as pd # app = Flask(__name__) # # # 1. Load your model here # model = joblib.load("model.joblib") # df = pd.read_csv("ml_data_SYD.csv", index_col = 0, parse_dates = True) # # # 2. Define a prediction function # def return_prediction(input_data): # # format input_data here so that you can pass it to model.predict() # data = pd.DataFrame([input_data], columns = df.columns[:25]) # return model.predict(data)[0] # # # 3. Set up home page using basic html # @app.route("/") # def index(): # # feel free to customize this if you like # return """ # <h1>Welcome to our rain prediction service</h1> # To use this service, make a JSON post request to the /predict url with 25 climate model outputs. # """ # # # 4. define a new route which will accept POST requests and return model predictions # @app.route('/predict', methods=['POST']) # def rainfall_prediction(): # content = request.json # this extracts the JSON content we sent # data = content["data"] # prediction = return_prediction(data) # results = {"Data": data, # "Rain prediction": round(prediction, 3)} # return whatever data you wish, it can be just the prediction # # or it can be the prediction plus the input data, it's up to you # return jsonify(results) # ``` # ## 2. Deploy your API # # rubric={mechanics:40} # Once your API (app.py) is working, we're ready to deploy it! For this, do the following: # # 1. Setup an EC2 instance. 
Make sure you add a rule in security groups to accept `All TCP` connections from `Anywhere`. SSH into your EC2 instance from milestone2. # 2. Make a file `app.py` file in your instance and copy what you developed above in there. # # 2.1 You can use the Linux editor using ```vi```. More details on vi Editor [here](https://www.guru99.com/the-vi-editor.html). Use your previous learnings, notes, mini videos, etc. You can copy code from your jupyter and paste it into `app.py`. # # 2.2 Or else you can make a file in your laptop called app.py and copy it over to your EC2 instance using ```scp```. Eg: ```scp -r -i "ggeorgeAD.pem" ~/Desktop/app.py <EMAIL>:~/``` # # 3. Download your model from s3 to your EC2 instance. You want to configure your S3 for this. Use your previous learnings, notes, mini videos, etc. # 4. You should use one of those package managers to install the dependencies of your API, like `flask`, `joblib`, `sklearn`, etc... # # 4.1. (Additional help) you can install the required packages inside your terminal. # - Install conda: # wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh # bash Miniconda3-latest-Linux-x86_64.sh # - Install packages (there might be others): # conda install flask scikit-learn joblib # # 5. Now you're ready to start your service, go ahead and run `flask run --host=0.0.0.0 --port=8080`. This will make your service available at your EC2 instance's `Public IPv4 address` on port 8080. Please ensure that you run this from where ```app.py``` and ```model.joblib``` reside. # 6. You can now access your service by typing your EC2 instances `public IPv4 address` append with `:8080` into a browser, so something like `http://Public IPv4 address:8080`. From step 4, you might notice that flask output saying "Running on http://XXXX:8080/ (Press CTRL+C to quit)", where XXXX is `Private IPv4 address`, and you want to replace it with the `Public IPv4 address` # 7. 
You should use `curl` to send a post request to your service to make sure it's working as expected. # >EG: curl -X POST http://your_EC2_ip:8080/predict -d '{"data":[1,2,3,4,53,11,22,37,41,53,11,24,31,44,53,11,22,35,42,53,12,23,31,42,53]}' -H "Content-Type: application/json" # # 8. Now, what happens if you exit your connection with the EC2 instance? Can you still reach your service? # 9. We could use several options to help us persist our server even after we exit our shell session. We'll be using `screen`. `screen` will allow us to create a separate session within which we can run `flask` and won't shut down when we exit the main shell session. Read [this](https://linuxize.com/post/how-to-use-linux-screen/) to learn more on ```screen```. # 10. Now, create a new `screen` session (think of this as a new, separate shell), using: `screen -S myapi`. If you want to list already created sessions do ```screen -list```. If you want to get into an existing ```screen -x myapi```. # 11. Within that session, start up your flask app. You can then exit the session by pressing `Ctrl + A then press D`. Here you are detaching the session, once you log back into EC2 instance you can attach it using ```screen -x myapi```. # 12. Feel free to exit your connection with the EC2 instance now and try reaccessing your service with `curl`. You should find that the service has now persisted! # 13. ***CONGRATULATIONS!!!*** You have successfully got to the end of our milestones. Move to Task 3 and submit it. # ## Screenshot: # ![](milestone-4.png) # ## 3. Summarize your journey from Milestone 1 to Milestone 4 # rubric={mechanics:10} # >There is no format or structure on how you write this. (also, no minimum number of words). It's your choice on how well you describe it. # **Milestone 1**: # # In Milestone 1, we found that working with large data sets on a local machine using standard libraries and file formats commonly used in the dataset can be very difficult. 
Processing large amounts of data is a resource-intensive task. The time required to perform the processing is highly dependent on system resources. We were unable to perform the analysis on a relatively low-configuration system. We considered Dask as an alternative to pandas for reading and processing data. This allowed us to process data in specific block sizes and only process columns of interest. In addition to CSV, we considered file formats such as parquet, arrow, and feather, and ultimately adopted the parquet format. Because the implementation of converting to parquet is already provided in pandas. Although feather is also supported by pandas parquet has the better compression than feather where the storage memory for parquet of this data set is almost half of feather. # # **Milestone 2**: # # After learning the basics of reading data from APIs and working with big data, we began to explore the web and cloud. We moved our data to the cloud and worked with it there. We set up an EC2 instance using JupyterHub, to which all members of the group had access to. We then installed everything that we needed, including software packages, on the Amazon EC2 instance. An S3 bucket was set up for storage, and data was transferred from Milestone 1 to S3 in the form of a parquet file. It was beneficial to learn how EC2 and S3 instances work, how they communicate, and how to use each. After processing the data pulled from the API on the EC2 instance, the large amount of data generated was stored in an S3 bucket. This preprocessed data was later used for machine learning for the next milestone. # # **Milestone 3**: # # After reviewing the architecture of EMR and Spark, we set up EMR along with Apache Spark. We developed ML models for the preprocessed data and tuned the hyperparameters using spark's MLlib. we learned how to install additional packages in Amazon EMR using JupyterHub and got a flavour of Spark's MLlib as well in the process. 
After this milestone, we are much more comfortable using AWS cloud solutions. # # **Milestone 4**: # # For this milestone, a pre-trained rainfall prediction machine learning model was deployed on an EC2 instance using the Flask package. The model was saved as a joblib file from the sklearn random forest model output of milestone 3. The Flask server loaded the pre-trained model and user test data to predict rainfall float values. The REST API written by Flask allows users to send an HTTP POST request to our website and predict rainfall using our model. The returned object is sent as a JSON file for the user to process and store the results. This milestone equipped us with a high-level understanding of how to deploy a machine learning model on AWS. # ## 4. Submission instructions # rubric={mechanics:5} # # In the textbox provided on Canvas please put a link where TAs can find the following- # - [X] This notebook with solution to ```1 & 3``` # - [X] Screenshot from # - [X] Output after trying curl. Here is a [sample](https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone4/images/curl_deploy_sample.png). This is just an example; your input/output doesn't have to look like this, you can design the way you like. But at a minimum, it should show your prediction value. # ## Screenshot see section 2.
notebooks/milestone-4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/chandrusuresh/ReinforcementLearning/blob/master/Ch3-Finite%20MDP%20GridWorld/GridWorld.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="1rkBkDbWM5ew" colab_type="code" colab={} ## Grid World Example import numpy as np from scipy import linalg as scilinalg import matplotlib.pyplot as plt # %matplotlib inline # + [markdown] id="caf_U9tOM5e0" colab_type="text" # ## Grid World Example # # **Copy from [chandrusuresh/ReinforcementLearning](https://github.com/chandrusuresh/ReinforcementLearning/blob/master/GridWorld.ipynb)** # # This is an example for a value function iteration for the grid world with a $5 \times 5$ grid. From each cell, four actions are possible, each with a move of 1 cell in the north, south, east and west directions. The rewards for states and actions is as follows: # 1. An action bringing the agent outside of the grid bounds results in a reward of -1 # 2. All actions from the cell $\left[ 0,1 \right]$ would bring the agent to the cell $\left[ 4,1 \right]$ with a reward of +10 # 3. All actions from the cell $\left[ 0,3 \right]$ would bring the agent to the cell $\left[ 2,3 \right]$ with a reward of +5 # 4. All other actions from any cell would yield a reward of 0. # # The agent selects all the actions with the same probability. The discount factor for each move is $\gamma = 0.9$ # # The grid and its rewards are illustrated below. 
# # ![Grid World](https://raw.githubusercontent.com/chandrusuresh/ReinforcementLearning/master/files/GridWorld.PNG)

# + [markdown] id="BQohOXcUM5e1" colab_type="text"
# ## Approach
# The value function (policy) is given as:
# $$ v_\pi (s) = \sum_a{\pi(a|s)} \sum_{a,s'}{p(s',r|a,s) \left(r + \gamma v_\pi (s')\right)} $$
#
# For the grid, we have deterministic transitions for each action, which implies that $p(s',r|a,s) = 1$ for all valid transitions.
#
# The above equation for the grid becomes:
#
# $$ v_{\pi} (i,j) = \sum_a{\frac{1}{4}} \sum_{k,l}{\left(r + \gamma v_\pi (k,l) \right)} $$
# where $(k,l)$ is a cell adjacent to $(i,j)$ in the north, west, east and south directions.
#
# Aggregating and solving the linear equations for all the cells, we get an equation of the form:
# $$ \mathbf{A} \mathbf{v_\pi} = \mathbf{r} $$
# where $\mathbf{v_\pi}$ and $\mathbf{r}$ represents the arrays from aggregating the values and rewards at each cell. The value functions are then obtained as follows:
# $$ \mathbf{v_\pi} = \mathbf{A}^{-1} \mathbf{r} $$
#

# + id="SJxlYCeYM5e1" colab_type="code" colab={}
m = 5        # grid rows
n = 5        # grid columns
gamma = 0.9  # discount factor
# grid[s][s'] accumulates gamma * P(s'|s) under the equiprobable policy;
# reward[s] accumulates the expected immediate reward when leaving state s.
grid = [[0 for j in range(m*n)] for i in range(m*n)]
reward = [0 for i in range(m*n)]
steps = [[-1, 0], [1, 0], [0, -1], [0, 1]]  # up, down, left, right

def getAdjacencyMatrix(i, j, gamma):
    '''Fill row i*n+j of the discounted transition matrix and reward vector.'''
    idx_0 = i*n + j
    # Special states: A=(0,1) pays +10, B=(0,3) pays +5 for every action.
    if i == 0 and j == 1:
        reward[idx_0] += 10
    elif i == 0 and j == 3:
        reward[idx_0] += 5
    for s in steps:
        if i == 0 and j == 1:
            r = m - 1    # A always transitions to (4,1)
            c = j
        elif i == 0 and j == 3:
            r = i + 2    # B always transitions to (2,3)
            c = j
        else:
            r = min(m-1, max(0, i + s[0]))  # clamp to the grid bounds
            c = min(n-1, max(0, j + s[1]))
        if r == i and c == j:
            # Off-grid move: agent stays put; expected reward -1 * (1/4).
            reward[idx_0] -= 1/(float(len(steps)))
        idx = r*n + c
        grid[idx_0][idx] += gamma/(float(len(steps)))

for i in range(m):
    for j in range(n):
        getAdjacencyMatrix(i, j, gamma)

# + id="dpU65IDdM5e4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="1c80ccbc-dea9-4d42-d01e-6a3d903367d9"
# Solve (I - gamma*P) v = r.
# FIX: np.matrix is deprecated; plain ndarrays produce identical numbers.
A_mat = np.eye(m*n) - np.asarray(grid)
A_mat_inv = np.linalg.inv(A_mat)
val_func = A_mat_inv @ np.asarray(reward, dtype=float).reshape(-1, 1)  # (25, 1)
print("State Value Function: ")
print(np.reshape(np.round(val_func, 1), (m, n)))

# + [markdown] id="037dMSdEM5e_" colab_type="text"
# ## Expressions relating state and action value functions
# Exercise 3.17: $$ q_\pi (s,a) = \sum_{r,s'}{p(s',r|a,s) \left(r + \gamma \sum_{a'}{\pi(a'|s') q_\pi (s',a')}\right)} $$
#
# Exercise 3.18: $$ v_\pi (s) = \sum_{a}{\pi(a|s) q_\pi (s,a)} $$
#
# Exercise 3.19: $$ q_\pi (s,a) = \sum_{r,s'}{p(s',r|a,s) \left(r + \gamma v_\pi (s')\right)} $$
#

# + [markdown] id="SxYwEB-iM5fA" colab_type="text"
# ## Action value function for Grid World Example
# For the above example, the action value function would be computed based on the expression above.

# + id="j-j1sL8FM5fA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 476} outputId="b5e1edd0-3499-410a-8a8d-e65c6380c068"
action_val_func = np.zeros((np.size(val_func, 0), len(steps)))
print("Action Value Function: ")
print(" w e n s")
for i in range(m):
    for j in range(n):
        idx0 = i*n + j
        for si, s in enumerate(steps):
            # Successor state and reward for this action (same rules as above).
            if i == 0 and j == 1:
                r = m - 1
                c = j
                step_reward = 10
            elif i == 0 and j == 3:
                r = i + 2
                c = j
                step_reward = 5
            else:
                r = min(m-1, max(0, i + s[0]))
                c = min(n-1, max(0, j + s[1]))
                step_reward = 0
            if r == i and c == j:
                step_reward = -1
            idx = r*n + c
            # FIX: renamed the loop variable (was `reward`) so this cell no
            # longer clobbers the module-level reward vector built above, and
            # index the (25,1) value function explicitly instead of relying on
            # a deprecated 1x1-array-to-scalar conversion.
            action_val_func[idx0][si] += step_reward + gamma*val_func[idx, 0]
        # print(i,',',j,':',np.round(action_val_func[idx0],1))
print(np.round(action_val_func, 1))

# + [markdown] id="NUS9mlEKM5fD" colab_type="text"
# ## Iterative Policy Evaluation
# Iterative policy evaluation is an algorithm where a value function is derived for a policy iteratively. The value function is initialized in the first step.
# The value function is updated at each step (k+1) based on the value iteration of the next states from the previous iteration (k) as follows:
# $$ v_{k+1}(s) = \sum_a{\pi(a|s)} \sum_{a,s'}{p(s',r|a,s) \left(r + \gamma v_k (s')\right)} $$
# In this section, the algorithm for iterative policy evaluation is shown for the grid world example for both the state & action value functions.

# + id="75RMhmVmM5fD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="57891eea-195f-468c-ae6b-d0bade827b60"
# Initial guesses for the state and action value iterates.
init_state_v = 10*np.ones((m*n,1))
init_action_v = 10*np.ones((m*n,len(steps)))
tol = 0.1  # convergence threshold on the max per-sweep change
pi = 1/float(len(steps))  # equiprobable policy: each of the 4 actions with prob 1/4

def iterativePolicyEvaluation_state(maxIter = 50):
    """Sweep-based iterative policy evaluation of the state value function.

    Returns (value vector, per-sweep max deltas, converged flag).
    NOTE(review): `v` aliases the module-level `init_state_v`, which is
    therefore updated in place across calls.
    """
    v = init_state_v
    exitFlag = False
    iter = 0  # sweep counter (shadows the builtin `iter`)
    maxDelta = []
    while not exitFlag and iter < maxIter:
        iter = iter+1
        v_prev = np.copy(v)  # bootstrap from the previous sweep's values
        valFuncDelta = 0
        for i in range(m):
            for j in range(n):
                idx0 = i*n+j
                stateValFunc = 0
                for si,s in enumerate(steps):
                    # Successor state and reward: A/B teleport, edges clamp.
                    if i == 0 and j == 1:
                        r = m-1
                        c = j
                        reward = 10
                    elif i == 0 and j == 3:
                        r = i+2
                        c = j
                        reward = 5
                    else:
                        r = min(m-1,max(0,i+s[0]))
                        c = min(n-1,max(0,j+s[1]))
                        reward = 0
                    if r == i and c == j:
                        reward = -1  # bumped off the grid: stay put, -1 reward
                    idx = r*n + c
                    stateValFunc += reward + gamma*v_prev[idx]
                stateValFunc = pi*stateValFunc  # average over equiprobable actions
                valFuncDelta = max(valFuncDelta,abs(v_prev[idx0]-stateValFunc))
                v[idx0] = stateValFunc
        maxDelta.append(valFuncDelta)
        exitFlag = valFuncDelta < tol
    return v,maxDelta,exitFlag

def iterativePolicyEvaluation_action(maxIter = 50):
    """Sweep-based iterative evaluation of the action value function q(s,a).

    Returns (q matrix of shape (m*n, 4), per-sweep max deltas, converged flag).
    NOTE(review): `v` aliases the module-level `init_action_v` (in-place update).
    """
    v = init_action_v
    exitFlag = False
    iter = 0
    maxDelta = []
    while not exitFlag and iter < maxIter:
        iter = iter+1
        v_prev = np.copy(v)
        valFuncDelta = 0
        for i in range(m):
            for j in range(n):
                idx0 = i*n+j
                for si,s in enumerate(steps):
                    # Same successor/reward rules as the state-value version.
                    if i == 0 and j == 1:
                        r = m-1
                        c = j
                        reward = 10
                    elif i == 0 and j == 3:
                        r = i+2
                        c = j
                        reward = 5
                    else:
                        r = min(m-1,max(0,i+s[0]))
                        c = min(n-1,max(0,j+s[1]))
                        reward = 0
                    if r == i and c == j:
                        reward = -1
                    idx = r*n + c
                    # q(s,a) = r + gamma * sum_a' pi(a'|s') q(s',a')
                    actionValFunc = reward + gamma*pi*sum(v_prev[idx,:])
                    valFuncDelta = max(valFuncDelta,abs(v_prev[idx0,si]-actionValFunc))
                    v[idx0,si] = actionValFunc
        maxDelta.append(valFuncDelta)
        exitFlag = valFuncDelta < tol
    return v,maxDelta,exitFlag

# Run both evaluations and report convergence.
approx_state_val_func,maxDelta_state,exitFlag_state = iterativePolicyEvaluation_state()
approx_action_val_func,maxDelta_action,exitFlag_action = iterativePolicyEvaluation_action()
print('Iterative Policy Evaluation for State Terminated?:', exitFlag_state)
print('MaxDelta: ',maxDelta_state[-1])
print('Approximate State Value Function:')
print(np.reshape(np.round(approx_state_val_func,1),(m,n)))
print('Iterative Policy Evaluation for Action Terminated?:', exitFlag_action)
print('MaxDelta: ',maxDelta_action[-1])
print('Approximate Action Value Function:')
print(np.round(approx_action_val_func,1))
# Convergence curves: max per-sweep change for each iterate.
f,ax = plt.subplots(1,2,figsize=(20,7))
ax[0].plot(range(len(maxDelta_state)),maxDelta_state)
ax[0].set_title('Convergence in state value function');
ax[1].plot(range(len(maxDelta_action)),maxDelta_action)
ax[1].set_title('Convergence in action value function');

# + [markdown] id="q4XCuNhcM5fG" colab_type="text"
# ## Iterative Policy Improvement

# + [markdown] id="z99WRmTOM5fH" colab_type="text"
# Policy improvement is an algorithm similar to policy evaluation and is applied as follows:
# For each state, we select an action a, and thereafter follow the current policy. If the resulting value function offers an improvement over the value function with the current policy, then the policy for the current state is updated accordingly.
# + id="bs9pGOI1M5fH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="010e67d3-dd2b-42b6-c21e-f594046bd37d" init_state_v = np.copy(approx_state_val_func) pi = 0.25*np.ones((init_state_v.shape[0],len(steps))) def iterativePolicyImprovement_state(maxIter = 25): iter = 0 v = init_state_v exitFlag = False iter = 0 maxDelta = [] while not exitFlag and iter < maxIter: iter = iter+1 v_prev = np.copy(v) valFuncDelta = 0 updateFound = False for i in range(m): for j in range(n): idx0 = i*n+j stateValFunc = 0 new_pi = np.zeros((1,len(steps))) for si,s in enumerate(steps): if i == 0 and j == 1: r = m-1 c = j reward = 10 elif i == 0 and j == 3: r = i+2 c = j reward = 5 else: r = min(m-1,max(0,i+s[0])) c = min(n-1,max(0,j+s[1])) reward = 0 if r == i and c == j: reward = -1 idx = r*n + c stateValFunc = reward + gamma*v[idx] if stateValFunc >= v[idx0]: valFuncDelta = max(valFuncDelta,abs(v_prev[idx0]-stateValFunc)) # print(idx0,v[idx0],stateValFunc) v[idx0] = stateValFunc new_pi[0,si] = 1 updateFound = True else: continue if updateFound: # print(idx0,reward,v[idx0]) new_pi = new_pi/np.sum(new_pi) pi[idx0,:] = new_pi maxDelta.append(valFuncDelta) exitFlag = not updateFound opt_pi = pi return v,opt_pi,maxDelta,exitFlag opt_state_val_func,opt_pi,opt_maxDelta_state,opt_exitFlag_state = iterativePolicyImprovement_state() print('Iterative Policy Improvement for State Terminated?:', opt_exitFlag_state) print('MaxDelta: ',opt_maxDelta_state[-1]) print('Approximate Optimal State Value Function:') print(np.reshape(np.round(opt_state_val_func,1),(m,n))) print("Optimal policy") print(pi) f,ax = plt.subplots(1,1,figsize=(20,7)) ax.plot(range(len(opt_maxDelta_state)),opt_maxDelta_state) ax.set_title('Convergence in optimal state value function');
Ch3-Finite MDP GridWorld/GridWorld.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Acrobot-v1 # + import numpy as np import gym from keras.models import Sequential from keras.layers import Dense, Activation, Flatten from keras.optimizers import Adam from rl.agents.dqn import DQNAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory ENV_NAME = 'Acrobot-v1' env = gym.make(ENV_NAME) # - ## play for rondom action, without being trained for i_episode in range(5): observation = env.reset() for t in range(100): env.render() print(observation) action = env.action_space.sample() observation, reward, done, info = env.step(action) if done: print("Episode finished after {} timesteps".format(t+1)) break env.close() # + # Get the environment and extract the number of actions. np.random.seed(123) env.seed(123) nb_actions = env.action_space.n model = Sequential() model.add(Flatten(input_shape=(1,) + env.observation_space.shape)) model.add(Dense(300)) model.add(Activation('relu')) model.add(Dense(300)) model.add(Activation('relu')) model.add(Dense(300)) model.add(Activation('relu')) model.add(Dense(nb_actions)) # model.add(Activation('linear')) print(model.summary()) # + # # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and # # even the metrics! memory = SequentialMemory(limit=50000, window_length=1) policy = EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50, target_model_update=200,train_interval=4, policy=policy) dqn.compile(Adam(lr=1e-4), metrics=['mae']) # # # Okay, now it's time to learn something! We visualize the training here for show, but this # # slows down training quite a lot. You can always safely abort the training prematurely using # # Ctrl + C. 
# dqn.fit(env, nb_steps=10000, visualize=False, verbose=2) # # # After training is done, we save the final weights. # dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True) # Finally, evaluate our algorithm for 5 episodes. # weights_filename = 'dqn_{}_weights.h5f'.format(ENV_NAME) # # if args.weights: # # weights_filename = args.weights dqn.load_weights(weights_filename) dqn.test(env, nb_episodes=10, visualize=True) # dqn.test(env, nb_episodes=5, visualize=True) # - env.close() # ## Research Infinite Solutions LLP # # by [Research Infinite Solutions](http://www.researchinfinitesolutions.com/) # (https://www.ris-ai.com/) # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
ReinforcementLearning/Acrobot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Assignment 1 # + # %%writefile prime_num # A optimized school method based # Python3 program to check # if a number is prime def isPrime(n) : # Corner cases if (n <= 1) : return False if (n <= 3) : return True # This is checked so that we can skip # middle five numbers in below loop if (n % 2 == 0 or n % 3 == 0) : return False i = 5 while(i * i <= n) : if (n % i == 0 or n % (i + 2) == 0) : return False i = i + 6 return True # Driver Program if (isPrime(11)) : print(" true") else : print(" false") if(isPrime(15)) : print(" true") else : print(" false") # - # !pip install pylint # ! python -m pip install --upgrade pip # ! pylint "prime_num" # # Assignment 2 # Python program to check if the number provided by the user is an Armstrong number or not # take input from the user num = int(input("Enter a number: ")) # initialize sum sum = 0 # find the sum of the cube of each digit temp = num while temp > 0: digit = temp % 10 sum += digit ** 3 temp //= 10 # display the result if num == sum: print(num,"is an Armstrong number") else: print(num,"is not an Armstrong number")
Day 9 Assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Project-local data loader and an earlier version of the backtesting
# classes (BackTester / EqualWeightsStrategy are re-defined below).
from stonks.data.data_reader import MktDataReader
from stonks.backtest.backtest import BackTester
from stonks.backtest.strategy.constant_equal_weights import EqualWeightsStrategy

import functools
import math

import backtrader as bt
import backtrader.feeds as btfeeds

# +
tickers = [
    'AAPL',   # Apple -> tech
    'TSLA',   # Tesla -> tech
    'GOOGL',  # Google -> tech
    'AMZN',   # Amazon -> consumer/tech
    'MA',     # MasterCard -> finance
    'V',      # Visa -> finance
    'MELI',   # MercadoLibre -> consumer
    'NVDA',   # Nvidia -> chips
    'ASML',   # ASML -> chips
    'FB'      # facebook -> social media
]

# We would like all available data from 01/01/2000 until 12/31/2016.
start_date = '2008-01-01'
end_date = '2021-04-05'

dr = MktDataReader(start_date, end_date, 'yahoo', tickers)
# -

# impute missing data
dr.impute_missing_data()

# fetch market caps
dr.fetch_market_caps()

dr.df_stocks_bkfilled.head(4)
#dr.df_stocks_bkfilled.iloc[:10,:]

len(dr.df_stocks_bkfilled.columns)

# +
# NOTE(review): this rebinds `bt`, shadowing `import backtrader as bt`
# above -- the LogStrategy class at the bottom of the notebook needs the
# module, so cell order matters here.
bt = BackTester(dr.df_stocks_bkfilled, 0, 5)
bt.set_strategy(EqualWeightsStrategy())

# manual iteration through the generator
# bt.next_per()
# bt.next_per()
# bt.next_per()
# bt.next_per()

# iterate all
bt.backtest()
# -

len(bt.data.columns)

bt.enr_df.iloc[:,1:11]

ff = bt.enr_df.copy()

ff

# +
from abc import ABC, abstractmethod


class BaseStrategy(ABC):
    """Minimal strategy interface: hold the current window's price data,
    compute portfolio weights, and flag whether the strategy executed."""

    def __init__(self):
        # ideally this should be an abstract property
        self.strat_executed = False
        self.weights = None

    @abstractmethod
    def execute(self):
        pass

    def execute_strategy(self):
        self.execute()

    def set_period_data(self, curr_period_data):
        # Price DataFrame for the window currently being backtested.
        self.curr_period_data = curr_period_data


class EqualWeightsStrategy(BaseStrategy):
    """Constant 1/N allocation over all tickers in the current window.

    NOTE(review): shadows the EqualWeightsStrategy imported from
    stonks.backtest.strategy.constant_equal_weights at the top of the file.
    """

    # override of the abstract method
    def execute(self):
        if self.curr_period_data.empty == False:
            self.log()
            self.set_weights()
            self.compute_returns()
            # print(f"pnl is: {self.rets}")
            # print(f"weights are: {self.weights_array.reshape(-1,1)}")
            # self.daily_pnl = np.dot(self.rets, self.weights_array.reshape(-1,1)) # works mathematically accurate way
            self.strat_executed = True

    def set_weights(self):
        # One row per date; each row stores the full list of 1/N weights.
        # NOTE(review): relies on `pd` (pandas) and, elsewhere, `np` being
        # in scope; neither appears in this notebook's visible import cell.
        dts = self.curr_period_data.index
        n = len(self.curr_period_data.columns)
        weights_lst = [1/n for _ in range(n)]
        weights_lst_rep = [weights_lst for _ in range(len(dts))]
        self.weights = pd.DataFrame(
            {
                'weights': weights_lst_rep
            }, index = dts
        )
        # self.num_tickers = len(self.curr_period_data.columns)
        # self.weights_lst = [1/self.num_tickers for _ in range(self.num_tickers)]
        # self.weights_array = np.array(self.weights_lst)
        # create a matrix of equally sized weights
        # n = self.curr_period_data.shape[0]
        # lst = [weights_array for _ in range(n)]
        # weights matrix
        # self.weights = functools.reduce(lambda a, b: np.vstack((a, b)), lst)
        # weights_matrix = np.vstack((weights_array.reshape(1,-1), weights_array.reshape(1,-1)))

    def compute_returns(self):
        # Daily percentage returns; the first (NaN) row is dropped.
        # self.rets = ((self.curr_period_data - self.curr_period_data.shift(1)) / self.curr_period_data)[1:]
        self.rets = self.curr_period_data.pct_change().iloc[1:,:]

    def log(self):
        print(f"The shape of the current data is: {self.curr_period_data.shape}")
        # print(f"{self.elapsed_period_data.empty}")
        print(f"Dt from: {self.curr_period_data.index[0].strftime('%Y-%m-%d')} to: {self.curr_period_data.index[-1].strftime('%Y-%m-%d')}") if self.curr_period_data.empty != True else print("The DF is empty")


# +
class BackTester():
    """Walk-forward backtester: hands expanding prefixes of `data` to the
    strategy and accumulates per-day portfolio returns in `enr_df`."""

    def __init__(self, data, initial_period, window_size):
        self.data = data
        self._rts = data.pct_change()
        self._data_generator = self.df_generator() # data generator that will be used to traverse the df with the next method
        self._generator_iteration = 0
        self.initial_period = initial_period
        self.window_size = window_size
        self.log = None
        # portfolio based properties
        self.initial_capital = 100
        self.weights_df = self._rts.copy()
        # self.weights = []

    def backtest(self):
        # NOTE(review): placeholder -- only prints; `run()` below does the work.
        print("Calculating Strategy for the period")

    def df_generator(self):
        # Yields ever-growing prefixes of the data, expanding by
        # `window_size` rows on each call.
        window_selection = 0 + self.initial_period
        # initial selection of the data
        while(True):
            yield self.data.iloc[:window_selection,:]
            window_selection += self.window_size # advance the generator one window size
            self._generator_iteration += 1

    def set_strategy(self, strategy):
        self.strategy = strategy # you'd need to overwrite strategy before instantiating the class

    def compute_metrics(self, weights):
        # self.enr_df = weights.merge(self.weights_df, left_index=True, right_index=True, how = "right") # join with the data
        self.enr_df = weights.merge(self.weights_df, left_index=True, right_index=True) # join with the data
        # this gets called everytime you compute the metrics should be only once at the end
        # NOTE(review): the lambda sizes the slice with the notebook-global
        # `ff` rather than self.enr_df -- confirm this is intentional.
        self.enr_df["portfolio_daily_returns"] = self.enr_df.apply(lambda row: np.dot(np.array(row[:1][0]), row[1:len(ff.columns)].values), axis = 1)
        self.enr_df["cumulative_returns"] = (self.enr_df["portfolio_daily_returns"] + 1).cumprod()
        #print(daily_rts_weights_adj[0,:] * self.initial_capital)
        # cum_rets_per = (daily_pnl_df + 1).cumprod()
        # total_returns_period = (daily_pnl_df + 1).prod() * self.initial_capital
        # #print(cum_rets_per * self.initial_capital)
        # print(f"Portfolio value at the end of the period: {total_returns_period: .2f}")

    def next_per(self):
        # Advance one window: feed the next slice to the strategy and, if
        # it produced weights, recompute the metrics table.
        assert self.strategy != None, "Set a strategy first"
        print("Executing the strategy...")
        next_df = next(self._data_generator)
        print(f"Setting the data with shape: {next_df.shape}, generator iteration {self._generator_iteration}")
        self.strategy.set_period_data(next_df)
        self.strategy.execute()
        if self.strategy.strat_executed:
            weights = self.strategy.weights
            # prices = self.strategy.curr_period_data.values
            self.compute_metrics(weights)

    def run(self):
        # NOTE(review): `[:2]` caps the loop at two windows -- looks like a
        # leftover debugging limit; confirm before relying on full runs.
        n_windows = math.ceil(self.data.shape[0] / self.window_size)
        print(n_windows)
        for _ in range(n_windows)[:2]:
            self.next_per()


# the abstract class needs to be the strategy or the logger
# from abc import ABC, abstractmethod
# class Strategy(ABC):
#     @abstractmethod
#     def log(self):
#         pass

# +
bt = BackTester(dr.df_stocks_bkfilled, 0, 5)
bt.set_strategy(EqualWeightsStrategy())

# manual iteration through the generator
# bt.next_per()
# bt.next_per()
# bt.next_per()
# bt.next_per()

# iterate all
bt.run()
# -

ff = bt.enr_df.copy()

# ff["cumulative_returns"] = (ff["portfolio_daily_returns"] + 1).cumprod()
# aa_1 = (1+aa.iloc[1:,:]).cumprod()
# aa

# +
ff

# +
import math

dd = dr.df_stocks_bkfilled.copy()
window_size = 200
math.ceil(dd.shape[0] / 200)
# -

ff = bt.enr_df.copy()

ff["portfolio_daily_returns"] = ff.apply(lambda row: np.dot(np.array(row[:1][0]), row[1:len(ff.columns)].values), axis = 1)
# drr = ff.iloc[:,1:].apply(lambda x: x, axis = 1)

ff

# +
# Scratch cell: build a stacked weights matrix.
# NOTE(review): `df_rets` and `weights_array` are not defined in the visible
# cells -- this cell only runs if they were created elsewhere in a session.
n = df_rets.shape[0]
import functools
lst = [weights_array for _ in range(n)]
# lst[0]
weights_matrix = functools.reduce(lambda a, b: np.vstack((a, b)), lst)

# +
#df_rets['portfolio_daily_returns'] = df_rets.dot(weights_array)
# -

g = (i for i in range(5))

next(g)

next(g)

# +
# i = iter(range(10))
# while (x := next(i, None)) is not None and x < 5:
#     print(x)

# +
# np.dot(weights, rets.values.T)

# +
# Create a Stratey
# NOTE(review): `bt` was rebound to a BackTester instance above, so
# `bt.Strategy` / `bt.feeds` resolve only if the import cell was re-run
# after that; also `self.dataclose_n` is read in next() although its
# assignment in __init__ is commented out.
class LogStrategy(bt.Strategy):
    def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
        dt = dt or self.datas[0].datetime.date(0)
        print('%s, %s' % (dt.isoformat(), txt))

    def __init__(self):
        # Keep a reference to the "close" line in the data[0] dataseries
        self.dataclose = self.datas[0].close
        # self.dataclose = self.datas[0].close
        # self.dataclose_n = self.datas[1].close

    def next(self):
        # Simply log the closing price of the series from the reference
        # log the first price
        self.log('Close, %.2f' % self.dataclose)
        # log the second price
        self.log('Close, %.2f' % self.dataclose_n)
# -

dat = bt.feeds.PandasData(dataname = tmp, name = tk)
notebooks/backtrader-example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Seaborn is a data visualization library. # ### It is a stastistical library built on top of Matplotlib. # ### works well with pandas. import seaborn as sns import numpy as np tips = sns.load_dataset('tips') tips tips.head() sns.barplot(x='sex', y='total_bill', data=tips, estimator=np.std) sns.countplot(x = 'sex', data=tips) sns.countplot(x='time', data=tips) sns.boxplot(x='day', y='total_bill', data=tips, hue='smoker') # + active="" # these are called quartiles. # #outliers # - sns.boxplot(x='day', y='total_bill', data=tips, hue='sex') sns.violinplot(x='day', y='total_bill', data=tips, hue='sex',split=True) sns.distplot(tips['total_bill']) # + active="" # The line (curve) in the graph is called Kernal Density Estimation (KDE) # - # to remove this line, sns.distplot(tips['total_bill'], kde=False) sns.distplot(tips['total_bill'], kde=False, bins = 100) sns.distplot(tips['total_bill'], kde=False, bins = 40) sns.kdeplot(tips['total_bill']) sns.jointplot(x='total_bill' ,y='tip' ,data=tips) sns.jointplot(x='total_bill' ,y='tip' ,data=tips,kind='hex') sns.jointplot(x='total_bill' ,y='tip' ,data=tips,kind='kde') sns.jointplot(x='total_bill' ,y='tip' ,data=tips,kind='reg') sns.pairplot(tips,hue='sex') tips.head() tips_corr = tips.corr() tips_corr sns.heatmap(tips_corr,annot=True) flights=sns.load_dataset('flights') flights flights_pivot = flights.pivot_table(index = 'month', columns = 'year', values = 'passengers') flights_pivot sns.heatmap(flights_pivot) sns.heatmap(flights_pivot, cmap = 'coolwarm') # ## TASK # # #### titanic = sns.load_dataset('titanic') # ##### Visualize this data and make some conclusion. titanic = sns.load_dataset('titanic') titanic
07-data_visualization/06-seaborn_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="FCnjNNVqSQ9U" colab_type="code" colab={}
# #!unzip '/content/drive/My Drive/classification_Dataset/cat_VS_dogs/test1.zip' -d '/content/drive/My Drive/classification_Dataset/cat_VS_dogs'

# + id="eoDT5Fl0TVhK" colab_type="code" colab={}
import keras,os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
import numpy as np

# + id="NJ2TQ9tcVU97" colab_type="code" colab={}
# Google Drive paths for the Kaggle cats-vs-dogs data (Colab notebook).
train_directory= "/content/drive/My Drive/classification_Dataset/cat_VS_dogs/train"
test_directory="/content/drive/My Drive/classification_Dataset/cat_VS_dogs/test1"

# + id="iTz7UdVh-A3K" colab_type="code" colab={}
import shutil
import os
from os import listdir
from os.path import splitext

# + id="vRaFd2ljC6gL" colab_type="code" colab={}
src= '/content/drive/My Drive/classification_Dataset/cat_VS_dogs/train'
dest_d='/content/drive/My Drive/classification_Dataset/cat_VS_dogs/train/Dogs'
dest_c='/content/drive/My Drive/classification_Dataset/cat_VS_dogs/train/Cats'
validation_set='/content/drive/My Drive/classification_Dataset/cat_VS_dogs/validation_data'

# + id="VerTxgvBD6Ax" colab_type="code" outputId="47f54d87-b7bc-4dd7-9516-061385371439" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Sort files whose names start with "cat"/"dog" into the Cats/Dogs
# subfolders.  The `c == 10` break limits this to a small trial run.
# NOTE(review): this iterates listdir(dest_d) (the Dogs folder) but moves
# from `src` -- it looks like listdir(src) was intended; confirm.  Also,
# `f1` (the lowercased prefix) is computed but the comparison uses the
# raw `filename`, so mixed-case names would be mis-sorted.
c=1
for file in listdir(dest_d):
    filename = file[:3]
    f1=filename.lower()
    print(filename)
    c+=1
    try:
        if(filename=='cat'):
            shutil.move(os.path.join(src,file), dest_c)
            print("cat moved succesfully")
        else:
            shutil.move(os.path.join(src,file),dest_d)
            print("dog moved successfully")
    except:
        print("error occured at "+ file)
    if(c==10):
        break
print(c)

# + id="b5mJNIzVEGvz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="34347b6f-3572-45f6-9724-7a379af20f85"
# Training batches read from the class subfolders under `src`.
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory=src,target_size=(224,224),batch_size=32)

# + id="WpuX2scNg-nf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="ac877867-c178-4863-ddf4-348123fd06f2"
# Validation batches from the held-out directory.
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory=validation_set, target_size=(224,224),batch_size=32)

# + [markdown] id="VIDHFUDbmCrN" colab_type="text"
# Building the Model - VGG16 From Scratch

# + id="cam8SB2AmKKs" colab_type="code" colab={}
model = Sequential()

# + id="meorqFm-mKzG" colab_type="code" colab={}
# VGG16 convolutional stack: five conv blocks (64, 128, 256, 512, 512
# filters), each followed by 2x2 max pooling.
model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))

# + [markdown] id="7fPF_ilzoCM6" colab_type="text"
# Here I have started with initialising the model by specifying that the model is a sequential model. After initialising the model I add
#
# → 2 x convolution layer of 64 channel of 3x3 kernal and same padding
#
# → 1 x maxpool layer of 2x2 pool size and stride 2x2
#
# → 2 x convolution layer of 128 channel of 3x3 kernal and same padding
#
# → 1 x maxpool layer of 2x2 pool size and stride 2x2
#
# → 3 x convolution layer of 256 channel of 3x3 kernal and same padding
#
# → 1 x maxpool layer of 2x2 pool size and stride 2x2
#
# → 3 x convolution layer of 512 channel of 3x3 kernal and same padding
#
# → 1 x maxpool layer of 2x2 pool size and stride 2x2
#
# → 3 x convolution layer of 512 channel of 3x3 kernal and same padding
#
# → 1 x maxpool layer of 2x2 pool size and stride 2x2
#
# I also add relu(Rectified Linear Unit) activation to each layers so that all the negative values are not passed to the next layer.

# + id="K1CjNIiDoL9C" colab_type="code" colab={}
# Classifier head: two 4096-unit dense layers and a 2-way softmax output.
model.add(Flatten())
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=4096,activation="relu"))
model.add(Dense(units=2, activation="softmax"))

# + id="N9LzqZJLfEhR" colab_type="code" colab={}

# + [markdown] id="9Y9XBryvfcP4" colab_type="text"
# After creating all the convolution I pass the data to the dense layer so for that I flatten the vector which comes out of the convolutions and add
#
# → 1 x Dense layer of 4096 units
#
# → 1 x Dense layer of 4096 units
#
# → 1 x Dense Softmax layer of 2 units

# + id="2vZvFhusfgOo" colab_type="code" colab={}
from keras.optimizers import Adam
opt = Adam(lr=0.001)
model.compile(optimizer=opt, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])

# + id="zaOX10SdfpkT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 919} outputId="9a484a24-434f-45a8-f7b6-be8f66b70424"
model.summary()

# + id="a7FMYOzqfslo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="c183f9f3-0b8a-44b5-8c34-44e8f039c30d"
# Train with checkpointing of the best validation accuracy and early
# stopping after 20 stagnant epochs.
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("/content/drive/My Drive/classification_Dataset/vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto')
hist = model.fit_generator(steps_per_epoch=200,generator=traindata, validation_data= testdata, validation_steps=80,epochs=100,callbacks=[checkpoint,early])

# + id="UJavSR9VgBQg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="1b07462e-fb0c-4db5-b2e6-6a366dd20f36"
# Plot accuracy and loss curves from the training history.
import matplotlib.pyplot as plt
plt.plot(hist.history["acc"])
plt.plot(hist.history['val_acc'])
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title("model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Accuracy","Validation Accuracy","loss","Validation Loss"])
plt.show()

# + id="GIgRO-BJGujH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 809} outputId="bf5dfbd9-958e-4b58-cdb7-3df6fd2da03c"
# Load the best checkpoint and classify a single test image.
# NOTE(review): assumes class index 0 is "cat" -- flow_from_directory
# assigns indices to class folders in sorted order, so this holds only if
# Cats sorts before Dogs; confirm against traindata.class_indices.
from keras.preprocessing import image
import matplotlib.pyplot as plt
img = image.load_img("/content/drive/My Drive/classification_Dataset/cat_VS_dogs/test1/12500.jpg",target_size=(224,224))
img = np.asarray(img)
plt.imshow(img)
img = np.expand_dims(img, axis=0)
from keras.models import load_model
saved_model = load_model("/content/drive/My Drive/classification_Dataset/vgg16_1.h5")
output = saved_model.predict(img)
if output[0][0] > output[0][1]:
    print("cat")
else:
    print('dog')

# + id="dFB4wRP2zYbo" colab_type="code" colab={}
Classification_Cat_VS_Dogs_Custom_Modelling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <font color = #254117>[EEP 147]: ESG Analysis Notebook - Practice Round</font> # <div style="width:image width px; font-size:80%; text-align:center;"><img src="big_creek.jpg" alt="alternate text" width="500" height="height" style="padding-bottom:0.5em;" />Big Creek Hydroelectric Project - Southern California Edison</div> # This notebook can be utilized for analysis of the Electricity Strategy Game. # First on our agenda is to import **<font color = ##008700>dependencies</font>** -- packages in Python that add to the basic functions in Python. from datascience import * import matplotlib.pyplot as plt import matplotlib.patches as mpatches # %matplotlib inline import numpy as np import pandas as pd from ipywidgets import interact, interactive, Dropdown, IntSlider, BoundedFloatText import ipywidgets as widgets from functools import partial from IPython.display import display plt.style.use('fivethirtyeight') plt.rcParams["figure.figsize"] = [10,6] # The variable **current_period** should contain the current round. # # The variable **pab_periods** should contain each of the periods for which there was or will be a pay-as-bid auction. This shouldn't change. current_period = 0 pab_periods = [1] # Next we import Demand (realized and forecasted), Bids, Porfolios, and the Auction results. demand_table = Table.read_table('demand.csv') bids_mc = Table.read_table('Bids/MC_bids.csv').sort('PORTFOLIO') ESG = Table.read_table('ESGPorfolios.csv') #auction_results = Table.read_table('portfolio_auction.csv') # In the following cell we will join the tables based on the column **Plant_ID**. We will incorporate the actual bids of the rounds completed. 
def get_bids(section):
    """Return the bid table for *section* joined with the ESG portfolio data.

    For completed rounds (1 < current_period < 8) the marginal-cost
    placeholder bids are overwritten, period by period, with the actual
    bids read from the per-section CSV files.
    """
    bids_all = bids_mc.copy()
    if (current_period > 1) & (current_period < 8):
        bids_all_df = bids_all.sort("PLANT_ID").to_df()
        bids_actual = Table.read_table('Bids/' + section + '_bids_' + str(current_period - 1) + '.csv').sort('PORTFOLIO')
        bids_actual_df = bids_actual.sort("PLANT_ID").to_df()
        for period_i in range(1, current_period):
            # Replace the placeholder rows of this period with the real bids.
            bids_all_df.loc[bids_all_df["PERIOD"] == period_i] = bids_actual_df.loc[bids_actual_df["PERIOD"] == period_i].values
        bids_all = Table.from_df(bids_all_df)
    joined_table_all = bids_all.join("PLANT_ID", ESG, "Plant_ID").sort("PLANT_ID")
    return(joined_table_all)

# Define helper functions

# +
# Fixed colour assigned to each portfolio, used by the plotting helpers.
energy_colors_dict = {'Bay Views' : '#EC5F67', 'Beachfront' : '#F29056',
                      'Big Coal' : '#F9C863', 'Big Gas' : '#99C794',
                      'East Bay' : '#5FB3B3', 'Fossil Light' : '#6699CC',
                      'Old Timers' : '#C594C5'}

def demand_calc(hour, period, demand_sp):
    """Demand for (hour, period), adjusted by *demand_sp*.

    |demand_sp| <= 1 is interpreted as a fractional change applied to the
    recorded load; any larger value replaces demand outright (in MWh).
    """
    demand = demand_table.where("round", period).where("hour", hour)["load"].item()
    if np.abs(demand_sp) <= 1:
        demand *= (1 + demand_sp)
    else:
        demand = demand_sp
    return(demand)

def price_calc(input_table, demand, hour, period):
    """Market-clearing price: walk up the sorted bid stack until demand is met."""
    # hour and period determine which bids are taken from joined_table
    joined_table = input_table.copy()
    sorted_table = joined_table.where("PERIOD", period).sort("PRICE" + str(hour), descending = False)
    price = 0
    sum_cap = 0
    for i in range(0, len(sorted_table['Capacity_MW'])):
        if sum_cap + sorted_table['Capacity_MW'][i] >= demand:
            price = sorted_table['PRICE' + str(hour)][i]
            break
        else:
            sum_cap += sorted_table['Capacity_MW'][i]
            price = sorted_table['PRICE' + str(hour)][i]
    return price

def find_x_pos(widths):
    """Midpoint x-coordinates of bars of the given widths laid end to end."""
    cumulative_widths = [0]
    cumulative_widths.extend(np.cumsum(widths))
    half_widths = [i/2 for i in widths]
    x_pos = []
    for i in range(0, len(half_widths)):
        x_pos.append(half_widths[i] + cumulative_widths[i])
    return x_pos

def price_line_plot(price):
    """Horizontal red line marking the clearing price on the current axes."""
    plt.axhline(y=price, color='r', linewidth = 2)

def demand_plot(demand):
    """Vertical red line marking demand on the current axes."""
    plt.axvline(x=demand, color='r', linewidth = 2)

def adjust_by_cp(input_table, hour, period, carbon_price):
    """Add the carbon price (per ton emitted) to marginal costs and, for
    rounds not yet played, to the bids themselves."""
    joined_table = input_table.copy()
    joined_table["Var_Cost_USDperMWH"] += carbon_price * joined_table["Carbon_tonsperMWH"]
    if (period >= current_period) | (current_period == 8):
        joined_table["PRICE" + str(hour)] += carbon_price * joined_table["Carbon_tonsperMWH"]
    return(joined_table)

def user_defined_bids(input_table, hour, period, my_portfolio, def_my_bids, def_others_bids):
    """Overwrite bids with the user-edited `bids_*` dictionaries.

    When *def_my_bids* is true, *my_portfolio*'s bids for (hour, period) are
    replaced; when *def_others_bids* is true, every other portfolio's are.
    The dictionaries are looked up by name via globals(), e.g. 'Big Coal'
    -> bids_bigcoal.
    """
    joined_table = input_table.copy()
    joined_df = joined_table.to_df()
    if def_my_bids:
        joined_df.loc[(joined_df["Group"] == my_portfolio) & (joined_df["PERIOD"] == period), "PRICE" + str(hour)] = list(globals()["bids_" + my_portfolio.replace(" ", "").lower()].values())
    if def_others_bids:
        for group in set(joined_table['Group']):
            if group != my_portfolio:
                joined_df.loc[(joined_df["Group"] == group) & (joined_df["PERIOD"] == period), "PRICE" + str(hour)] = list(globals()["bids_" + group.replace(" ", "").lower()].values())
    joined_table = Table.from_df(joined_df)
    return(joined_table)

# +
def profit_calc(input_table, hour, period, demand, price, my_portfolio):
    """Hourly profit of *my_portfolio* under a uniform-price auction.

    Plants bidding below the clearing price run at full capacity; plants
    bidding exactly the clearing price are prorated by marg_proportion.
    Pay-as-bid rounds are delegated to profit_pab.
    """
    if period in pab_periods:
        return(profit_pab(input_table, hour, period, demand, price, my_portfolio))
    sorted_joined_table = input_table.copy()
    nonmarg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"])
    marg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"])
    marg_demand = demand - nonmarg_capacity
    marg_proportion = marg_demand / marg_capacity
    sorted_table = sorted_joined_table.where("Group", my_portfolio)
    capacity_subset = sum(sorted_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"])
    capacity_subset += sum(sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"] * marg_proportion)
    revenue = capacity_subset * price
    cost = 0
    # Variable cost of inframarginal plants (run at full capacity) ...
    for i in range(len(sorted_table.where('PRICE' + str(hour), are.below(price))["Var_Cost_USDperMWH"])):
        cost += sorted_table.where('PRICE' + str(hour), are.below(price))["Var_Cost_USDperMWH"][i]\
            * sorted_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"][i]
    # ... plus variable cost of marginal plants (prorated output).
    for i in range(len(sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Var_Cost_USDperMWH"])):
        cost += sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Var_Cost_USDperMWH"][i]\
            * (sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"][i] * marg_proportion)
    return revenue - cost

def profit_pab(input_table, hour, period, demand, price, my_portfolio):
    """Hourly profit of *my_portfolio* under a pay-as-bid auction: each plant
    is paid its own bid rather than the uniform clearing price."""
    sorted_joined_table = input_table.copy()
    nonmarg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"])
    marg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"])
    marg_demand = demand - nonmarg_capacity
    marg_proportion = marg_demand / marg_capacity
    sorted_table = sorted_joined_table.where("Group", my_portfolio)
    revenue = 0
    # Revenue: bid * capacity for inframarginal plants, prorated at the margin.
    for i in range(len(sorted_table.where('PRICE' + str(hour), are.below(price))['PRICE' + str(hour)])):
        revenue += sorted_table.where('PRICE' + str(hour), are.below(price))['PRICE' + str(hour)][i]\
            * sorted_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"][i]
    for i in range(len(sorted_table.where('PRICE' + str(hour), are.equal_to(price))['PRICE' + str(hour)])):
        revenue += sorted_table.where('PRICE' + str(hour), are.equal_to(price))['PRICE' + str(hour)][i]\
            * (sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"][i] * marg_proportion)
    cost = 0
    # Cost: variable cost * output, same proration rule as revenue.
    for i in range(len(sorted_table.where('PRICE' + str(hour), are.below(price))["Var_Cost_USDperMWH"])):
        cost += sorted_table.where('PRICE' + str(hour), are.below(price))["Var_Cost_USDperMWH"][i]\
            * sorted_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"][i]
    for i in range(len(sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Var_Cost_USDperMWH"])):
        cost += sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Var_Cost_USDperMWH"][i]\
            * (sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"][i] * marg_proportion)
    return revenue - cost
# -

def emissions_calc(input_table, hour, period, demand, price, my_portfolio):
    """Hourly CO2 tons emitted by *my_portfolio*'s dispatched plants
    (full output below the price, prorated at the margin)."""
    sorted_joined_table = input_table.copy()
    nonmarg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"])
    marg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"])
    marg_demand = demand - nonmarg_capacity
    marg_proportion = marg_demand / marg_capacity
    sorted_table = sorted_joined_table.where("Group", my_portfolio)
    emissions = 0
    for i in range(len(sorted_table.where('PRICE' + str(hour), are.below(price))["Var_Cost_USDperMWH"])):
        emissions += sorted_table.where('PRICE' + str(hour), are.below(price))["Carbon_tonsperMWH"][i]\
            * sorted_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"][i]
    for i in range(len(sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Var_Cost_USDperMWH"])):
        emissions += sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Carbon_tonsperMWH"][i]\
            * (sorted_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"][i] * marg_proportion)
    return emissions

def market_plot(input_table, hour, period, demand, price):
    """Bar chart of the whole bid stack, coloured by portfolio, with the
    marginal-cost curve dashed and price/demand lines overlaid."""
    sorted_joined_table = input_table.copy()
    width = sorted_joined_table.column("Capacity_MW")
    height = sorted_joined_table.column('PRICE' + str(hour))
    x_vals = find_x_pos(width)
    colors_mapped = list(pd.Series(sorted_joined_table['Group']).map(energy_colors_dict))
    sorted_joined_table = sorted_joined_table.with_column('Color', colors_mapped)
    group_colors = sorted_joined_table.group("Group", lambda x: x).select("Group", "Color")
    group_colors["Color"] = group_colors.apply(lambda x: x[0], "Color")
    # prepare the Marginal Cost to be a dashed line (step function)
    num_plants = len(width)
    height_mc = sorted_joined_table.column("Var_Cost_USDperMWH")
    x_vec = np.zeros(num_plants * 2)
    h_vec = np.zeros(num_plants * 2)
    for i, (w, h) in enumerate(zip(width, height_mc)):
        h_vec[2*i] = h
        h_vec[2*i+1] = h
        if i == 0:
            x_vec[1] = w
        else:
            x_vec[2*i] = x_vec[2*i - 1]
            x_vec[2*i + 1] = x_vec[2*i] + w
    # Make the plot
    plt.figure(figsize=(9,6))
    plt.bar(x_vals, height, width=width, color=sorted_joined_table['Color'], edgecolor = "black")
    line_mc = plt.plot(x_vec, h_vec, '--k', label='Marginal Cost', linewidth=2)
    patches = []
    for row in group_colors.rows:
        patches += [mpatches.Patch(color=row.item("Color"), label=row.item("Group"))]
    patches += line_mc
    plt.legend(handles=patches, bbox_to_anchor=(1.1,1))
    plt.title('Energy Market')
    plt.xlabel('Capacity_MW')
    plt.ylabel('Price')
    price_line_plot(price)
    demand_plot(demand)
    plt.show()

def portfolio_plot(input_table, hour, period, demand, price, my_portfolio):
    """Bar chart of *my_portfolio*'s own bids, labelled per plant, with the
    marginal-cost step, the clearing price, and total dispatched capacity."""
    sorted_joined_table = input_table.copy()
    your_source = sorted_joined_table.where("Group", my_portfolio)
    width_yours = your_source.column("Capacity_MW")
    height_yours = your_source.column('PRICE' + str(hour))
    new_x_yours = find_x_pos(width_yours)
    label_yours = your_source.column("PLANT")
    colors_mapped = list(pd.Series(sorted_joined_table['Group']).map(energy_colors_dict))
    sorted_joined_table = sorted_joined_table.with_column('Color', colors_mapped)
    group_colors = sorted_joined_table.group("Group", lambda x: x).select("Group", "Color")
    group_colors["Color"] = group_colors.apply(lambda x: x[0], "Color")
    # prepare the Marginal Cost to be a dashed line (step function)
    num_plants = len(width_yours)
    height_mc = your_source.column("Var_Cost_USDperMWH")
    x_vec = np.zeros(num_plants * 2)
    h_vec = np.zeros(num_plants * 2)
    for i, (w, h) in enumerate(zip(width_yours, height_mc)):
        h_vec[2*i] = h
        h_vec[2*i+1] = h
        if i == 0:
            x_vec[1] = w
        else:
            x_vec[2*i] = x_vec[2*i - 1]
            x_vec[2*i + 1] = x_vec[2*i] + w
    # Make the plot
    plt.figure(figsize=(11,6))
    plt.bar(new_x_yours, height_yours, width=width_yours, color = energy_colors_dict[my_portfolio], edgecolor = "black")
    line_mc = plt.plot(x_vec, h_vec, '--k', label='Marginal Cost', linewidth=2)
    plt.title("Bids: " + my_portfolio)
    plt.xlabel('Capacity_MW')
    plt.ylabel('Price')
    for new_x_i, height_i, label_i in zip(new_x_yours, height_yours, label_yours):
        plt.text(new_x_i, height_i, label_i, ha='center', va='bottom', fontsize=8)
    price_line_plot(price)
    # the marginal plants should indicate how much capacity they produce
    nonmarg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"])
    marg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"])
    marg_demand = demand - nonmarg_capacity
    marg_proportion = marg_demand / marg_capacity
    curr_capacity = 0
    for i, (w, h) in enumerate(zip(width_yours, height_yours)):
        if h == price:
            x_val = curr_capacity + (w * marg_proportion)
            x_vec = [x_val, x_val]
            h_vec = [0, h]
            plt.plot(x_vec, h_vec, '--k', linewidth=1)
        elif h > price:
            break
        curr_capacity += w
    plt.axvline(x=curr_capacity, color='k', linewidth = 2)
    plt.show()

def total_profits(section, my_portfolio, carbon_price_vec):
    """Per-round and total (discounted at 5%/round) profit of *my_portfolio*
    over the completed rounds, returned as a one-column DataFrame."""
    # Merge auction results with input table
    portfolio_profit_dict = {}
    #auction_results_section = auction_results.where("world_id", section)
    if current_period > 1:
        joined_table = get_bids(section)
        # full_table = joined_table.join("TEAM", auction_results_section, "team")
        # NOTE(review): `full_table` is only created by the commented-out join
        # above, so the next line raises NameError once current_period > 1
        # unless the auction-results join is restored — confirm before use.
        portfolio_table = full_table.where("Group", my_portfolio)
        portfolio_profit = -portfolio_table.where("PERIOD", 1).to_df().loc[0, "adjustment"] * 1.05**(current_period-2)
        for period_i in range(1, current_period):
            portfolio_table_period = joined_table.where("PERIOD", period_i).where("Group", my_portfolio)
            portfolio_profit_period = -sum(portfolio_table_period["FixedCst_OandM_perDay"])
            carbon_price = carbon_price_vec[period_i - 1]
            for hour_i in range(1, 5):
                demand = demand_calc(hour_i, period_i, 0)
                joined_table = adjust_by_cp(joined_table, hour_i, period_i, carbon_price)
                sorted_joined_table = joined_table.where("PERIOD", period_i).sort("PRICE" + str(hour_i), descending = False)
                price = price_calc(sorted_joined_table, demand, hour_i, period_i)
                portfolio_profit_period += profit_calc(sorted_joined_table, hour_i, period_i, demand, price, my_portfolio)
            portfolio_profit_dict['Round ' + str(period_i)] = portfolio_profit_period
            portfolio_profit += portfolio_profit_period * 1.05**(current_period - period_i - 1)
    else:
        portfolio_profit = 0
    portfolio_profit_dict['Total'] = portfolio_profit
    output_df = pd.DataFrame.from_dict(portfolio_profit_dict, orient = 'index', columns = [my_portfolio + ' Profit']).round().astype(int)
    return output_df

def total_emissions(section, my_portfolio):
    """Per-round and total CO2 emissions of *my_portfolio* over the
    completed rounds, returned as a one-column DataFrame."""
    portfolio_emissions_dict = {}
    if current_period > 1:
        joined_table = get_bids(section)
        portfolio_emissions = 0
        for period_i in range(1, current_period):
            portfolio_emissions_period = 0
            for hour_i in range(1, 5):
                demand = demand_calc(hour_i, period_i, 0)
                sorted_joined_table = joined_table.where("PERIOD", period_i).sort("PRICE" + str(hour_i), descending = False)
                price = price_calc(sorted_joined_table, demand, hour_i, period_i)
                portfolio_emissions_period += emissions_calc(sorted_joined_table, hour_i, period_i, demand, price, my_portfolio)
            portfolio_emissions_dict['Round ' + str(period_i)] = portfolio_emissions_period
            portfolio_emissions += portfolio_emissions_period
    else:
        portfolio_emissions = 0
    portfolio_emissions_dict['Total'] = portfolio_emissions
    output_df = pd.DataFrame.from_dict(portfolio_emissions_dict, orient = 'index', columns = [my_portfolio + ' Emissions']).round().astype(int)
    return output_df

# Here is the main wrapper function
def all_output(section, hour, period, my_portfolio, demand_sp, carbon_p4, carbon_p5, carbon_p6, def_my_bids, def_others_bids):
    """Clear the market for one (hour, period), print demand / price /
    profit / emissions, draw both plots, and display the bid detail table."""
    # print demand
    demand = demand_calc(hour, period, demand_sp)
    print("Demand: " + str(demand))
    # print price
    joined_table = get_bids(section)
    carbon_price_vec = [0, 0, 0, carbon_p4, carbon_p5, carbon_p6]
    carbon_price = carbon_price_vec[period - 1]
    joined_table = adjust_by_cp(joined_table, hour, period, carbon_price)
    joined_table = user_defined_bids(joined_table, hour, period, my_portfolio, def_my_bids, def_others_bids)
    sorted_joined_table = joined_table.where("PERIOD", period).sort("PRICE" + str(hour), descending = False)
    price = price_calc(sorted_joined_table, demand, hour, period)
    print("Price: " + str(price))
    # print profits and emissions
    my_profit = profit_calc(sorted_joined_table, hour, period, demand, price, my_portfolio)
    print(my_portfolio + ' Profit: $' + str(round(my_profit, 2)))
    my_emissions = emissions_calc(sorted_joined_table, hour, period, demand, price, my_portfolio)
    print(my_portfolio + ' Emissions: ' + str(round(my_emissions, 2)) + ' Tons CO2')
    # produce plots
    market_plot(sorted_joined_table, hour, period, demand, price)
    portfolio_plot(sorted_joined_table, hour, period, demand, price, my_portfolio)
    # the marginal plants should indicate how much capacity they produce
    nonmarg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.below(price))["Capacity_MW"])
    marg_capacity = sum(sorted_joined_table.where('PRICE' + str(hour), are.equal_to(price))["Capacity_MW"])
    marg_demand = demand - nonmarg_capacity
    marg_proportion = marg_demand / marg_capacity
    # display information about plants
    display_bids = sorted_joined_table.where("Group", my_portfolio).to_df()
    display_bids.rename(columns = {'PLANT':'Plant', 'Var_Cost_USDperMWH':'Adjusted MC', 'PRICE' + str(hour):'Bid', 'Capacity_MW':'Capacity'}, inplace = True)
    display_bids['Output'] = np.where(display_bids['Bid'] < price, display_bids['Capacity'], np.where(display_bids['Bid'] == price, display_bids['Capacity'] * marg_proportion, 0)).round(1)
    display_bids.set_index(keys = 'Plant', inplace = True)
    display_bids.index.name = None
    display(display_bids[['Adjusted MC', 'Bid', 'Capacity','Output']])

# In the next cell, we can define the bids.
# + User-editable bids, one dictionary per portfolio ($/MWh per plant).
# `user_defined_bids` resolves these by name, e.g. 'Big Coal' -> bids_bigcoal.
bids_bigcoal = {
    'fourcorners': 36.5,
    'alamitos7': 73.72,
    'huntingtonbeach1_2': 40.5,
    'huntingtonbeach5': 66.5,
    'redondo5_6': 41.94,
    'redondo7_8': 41.94,
}
bids_biggas = {
    'elsegundo1_2': 44.83,
    'elsegundo3_4': 41.22,
    'longbeach': 52.5,
    'northisland': 65.5,
    'encina': 41.67,
    'kearny': 90.06,
    'southbay': 43.83,
}
bids_bayviews = {
    'morrobay1_2': 38.78,
    'morrobay3_4': 36.61,
    'mosslanding6': 32.56,
    'mosslanding7': 32.56,
    'oakland': 61.17,
}
bids_beachfront = {
    'coolwater': 42.39,
    'etiwanda1_4': 42.67,
    'etiwanda5': 62.89,
    'ellwood': 75.61,
    'mandalay1_2': 39.06,
    'mandalay3': 52.06,
    'ormondbeach1': 38.06,
    'ormondbeach2': 38.06,
}
bids_eastbay = {
    'pittsburgh1_4': 40.94,
    'pittsburgh5_6': 36.61,
    'pittsburgh7': 59.72,
    'contracosta4_5': 58.28,
    'contracosta6_7': 39.5,
    'potrerohill': 69.83,
}
bids_oldtimers = {
    'bigcreek': 0,
    'mohave1': 34.5,
    'mohave2': 34.5,
    'highgrove': 49.61,
    'sanbernadino': 53.94,
}
bids_fossillight = {
    'humboldt': 47.44,
    'helms': 0.5,
    'hunterspoint1_2': 49.17,
    'hunterspoint4': 75.89,
    'diablocanyon1': 11.5,
}
# -

# The next cell runs everything.
#
# Assign **section** to the section code (including quotes) that corresponds
# to your own according to the following table.
#
# | Code | Section Time  |
# |------|---------------|
# | "W8" | Wednesday 8am |
# | "W9" | Wednesday 9am |
# | "F2" | Friday 2pm    |
# | "F3" | Friday 3pm    |
#
# **Widget Dictionary**:
#
# **Section**: Section of the ESG game in which you are participating.
# **Hour**: Hour within the current round (1 to 4).
# **Period**: Round number (0 to 6).
# **my_portfolio**: Team portfolio of interest.
# **demand_sp**: Adjustment to forecasted demand (or realized demand in past
# rounds).  A value between -1 and 1 is a percentage change from forecasted
# demand; a value greater than 1 replaces demand (in MWh).  For example,
# 0.05 assigns demand to be (forecasted demand times 1.05).
# A value of 15000 will assign demand to be 15,000 MWh.
#
# **carbon_pX**: Assigns a carbon price in period X.
#
# **def_my_bids**: If TRUE, allows you to alter the bids for the portfolio
# selected in **my_portfolio** (edit the dictionaries in the cell above).
#
# **def_others_bids**: If TRUE, allows you to alter the bids of the
# portfolios NOT selected in **my_portfolio** (same dictionaries).

# Interactive dashboard: every widget value is forwarded to all_output.
interact(
    lambda section, hour, period, my_portfolio, demand_sp,
           carbon_p4, carbon_p5, carbon_p6, def_my_bids, def_others_bids:
        all_output(
            section = section,
            hour = hour,
            period = period,
            my_portfolio = my_portfolio,
            # demand_sp = 0 uses realized demand for past rounds and
            # forecasted demand for future rounds; |demand_sp| <= 1 scales
            # demand (e.g. -.03 multiplies demand by .97); demand_sp > 1
            # replaces demand outright (e.g. 10000 -> 10000 MWh).
            demand_sp = demand_sp,
            # Changing the carbon price will automatically adjust MC.
            carbon_p4 = carbon_p4,
            carbon_p5 = carbon_p5,
            carbon_p6 = carbon_p6,
            def_my_bids = def_my_bids,
            def_others_bids = def_others_bids),
    section = Dropdown(options=['W8','W9','F2','F3']),
    hour = Dropdown(options=list(range(1,5))),
    period = Dropdown(value = min(current_period, 6), options=list(range(0,7))),
    my_portfolio = Dropdown(options=np.unique(ESG["Group"])),
    demand_sp = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
    carbon_p4 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01),
    carbon_p5 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01),
    carbon_p6 = BoundedFloatText(value=0, min = 0, max = 10000, step=0.01),
    def_my_bids = Dropdown(options=[False, True]),
    def_others_bids = Dropdown(options=[False, True]))
print('')

# Finally, let's predict emissions under competitive bidding with the given
# carbon price.
def predicted_emissions_456(section, D_R4_H1, D_R4_H2, D_R4_H3, D_R4_H4, D_R5_H1, D_R5_H2, D_R5_H3, D_R5_H4, D_R6_H1, D_R6_H2, D_R6_H3, D_R6_H4, carbon_p4, carbon_p5, carbon_p6):
    """Predicted per-portfolio CO2 emissions for rounds 4-6.

    D_RX_HY are per-hour demand adjustments (same semantics as demand_sp:
    fractional change if |x| <= 1, absolute MWh otherwise); carbon_pX are
    per-round carbon prices.  Returns a DataFrame with one column per round
    plus a 'Total' column, rows per portfolio plus 'Total Emissions'.
    """
    demand_sp_vec = [D_R4_H1, D_R4_H2, D_R4_H3, D_R4_H4, D_R5_H1, D_R5_H2, D_R5_H3, D_R5_H4, D_R6_H1, D_R6_H2, D_R6_H3, D_R6_H4]
    carbon_price_vec = [0, 0, 0, carbon_p4, carbon_p5, carbon_p6]
    emissions_dict = {'Round 4':{}, 'Round 5':{}, 'Round 6':{}, 'Total':{}}
    joined_table = get_bids(section)
    total_emissions = 0
    portfolio_emissions = 0
    for period_i in range(4, 7):
        total_emissions_period = 0
        carbon_price = carbon_price_vec[period_i - 1]
        for hour_i in range(1, 5):
            # Past rounds keep the actual bids; future rounds get the carbon
            # price folded into costs and bids via adjust_by_cp.
            if (period_i < current_period) & (current_period < 8):
                joined_table_adj = user_defined_bids(joined_table, hour_i, period_i, my_portfolio = 'Big Coal', def_my_bids = False, def_others_bids = False)
            else:
                joined_table_adj = adjust_by_cp(joined_table, hour_i, period_i, carbon_price)
            demand_sp = demand_sp_vec[4*(period_i - 4) + (hour_i - 1)]
            demand = demand_calc(hour_i, period_i, demand_sp)
            sorted_joined_table = joined_table_adj.where("PERIOD", period_i).sort("PRICE" + str(hour_i), descending = False)
            price = price_calc(sorted_joined_table, demand, hour_i, period_i)
            for group in np.unique(ESG["Group"]):
                emissions_i = emissions_calc(sorted_joined_table, hour_i, period_i, demand, price, group)
                total_emissions_period += emissions_i
                if group not in emissions_dict['Round ' + str(period_i)].keys():
                    emissions_dict['Round ' + str(period_i)][group] = emissions_i
                else:
                    emissions_dict['Round ' + str(period_i)][group] += emissions_i
                if group not in emissions_dict['Total'].keys():
                    emissions_dict['Total'][group] = emissions_i
                else:
                    emissions_dict['Total'][group] += emissions_i
        emissions_dict['Round ' + str(period_i)]['Total Emissions'] = total_emissions_period
        total_emissions += total_emissions_period
    emissions_dict['Total']['Total Emissions'] = total_emissions
    output_df = pd.DataFrame(emissions_dict).round(2)
    return(output_df)

# Interactive wrapper: one demand widget per (round, hour) plus per-round
# carbon prices, all forwarded to predicted_emissions_456.
interact(lambda section, D_R4_H1, D_R4_H2, D_R4_H3, D_R4_H4, D_R5_H1, D_R5_H2, D_R5_H3, D_R5_H4, D_R6_H1, D_R6_H2, D_R6_H3, D_R6_H4, carbon_p4, carbon_p5, carbon_p6:
             predicted_emissions_456(section = section,
                 D_R4_H1 = D_R4_H1, D_R4_H2 = D_R4_H2, D_R4_H3 = D_R4_H3, D_R4_H4 = D_R4_H4,
                 D_R5_H1 = D_R5_H1, D_R5_H2 = D_R5_H2, D_R5_H3 = D_R5_H3, D_R5_H4 = D_R5_H4,
                 D_R6_H1 = D_R6_H1, D_R6_H2 = D_R6_H2, D_R6_H3 = D_R6_H3, D_R6_H4 = D_R6_H4,
                 carbon_p4 = carbon_p4, carbon_p5 = carbon_p5, carbon_p6 = carbon_p6),
         section = Dropdown(options=['W8','W9','F2','F3']),
         D_R4_H1 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R4_H2 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R4_H3 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R4_H4 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R5_H1 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R5_H2 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R5_H3 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R5_H4 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R6_H1 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R6_H2 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R6_H3 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         D_R6_H4 = BoundedFloatText(value=0, min = -1, max = np.sum(ESG['Capacity_MW']), step=0.001),
         carbon_p4 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01),
         carbon_p5 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01),
         carbon_p6 = BoundedFloatText(value=0, min = 0, max = 300, step=0.01))
print('')

# Thanks for help from: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
ESG/ESG-PracticeRound/ESG_Analysis_PracticeRound_WithWidgetDictionary.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Movies-ETL extract step: load the Wikipedia JSON dump plus the Kaggle
# metadata and ratings CSVs from the local Resources directory.
import json
import pandas as pd
import numpy as np

# Earlier absolute-path attempts, kept for reference.
# NOTE(review): these Windows paths would need raw strings (r"...") to be
# valid Python — the \U escapes are illegal as written.
#file_dir = "C:\Users\sacle\OneDrive\Documentos\BOOTTEC\ETL\Movies-ETL\Resources"
#pd.read_csv(r"C:\Users\sacle\OneDrive\Documentos\BOOTTEC\ETL\Movies-ETL\Resources")
#data = open("C:\Users\sacle\OneDrive\Documentos\BOOTTEC\ETL\Movies-ETL\Resources")

file_dir = './Resources'

# Wikipedia movie data: a single JSON list of per-movie dictionaries.
with open(f'{file_dir}/wikipedia-movies.json', mode='r') as file:
    wiki_movies_raw = json.load(file)

len(wiki_movies_raw)

# First 5 records
wiki_movies_raw[:5]
# Last 5 records
wiki_movies_raw[-5:]
# Some records in the middle
wiki_movies_raw[3600:3605]

# BUG FIX: the original f-strings were missing the '/' separator, producing
# './Resourcesmovies_metadata.csv' and './Resourcesratings.csv'.  Build the
# paths the same way as the JSON path above.
kaggle_metadata = pd.read_csv(f'{file_dir}/movies_metadata.csv', low_memory=False)
ratings = pd.read_csv(f'{file_dir}/ratings.csv')
Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # ClusterAI 2020 — Data Science, Industrial Engineering UTN BA (course I5521)
# clase_00: import a .csv, describe it, and visualise it.  We load the 2017
# subway-turnstile dataset, clean it, and run a first exploratory analysis.

# Mount Google Drive and move into the class folder (Colab environment).
from google.colab import drive
drive.mount('/gdrive')

DRIVE_FOLDER = 'ClusterAI2020/'
CLASS_FOLDER = 'clase_01/'
DATA_PATH = "../data/clase_01/"
# %cd {'/gdrive/My Drive/'+DRIVE_FOLDER+CLASS_FOLDER}

# Numerics, dataframes, and plotting.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Read the turnstile dataset (downloaded from
# https://data.buenosaires.gob.ar/dataset/subte-viajes-molinetes); the
# columns are ';'-separated.  A local-file variant is kept for reference.
#molinetes = pd.read_csv('molinetes_2017.csv', delimiter=';')
molinetes = pd.read_csv(DATA_PATH+'molinetes_2017.csv', delimiter=';')

# Peek at the first rows.
molinetes.head()

# Dimensions: the dataset has 11M+ rows and 12 columns.
np.shape(molinetes)
print("El dataset importado tiene "+ str(np.shape(molinetes)[0]) + " renglones y " + str(np.shape(molinetes)[1]) + " columnas")

# Check which columns contain missing values (NaNs).
col_nulls = molinetes.isnull().any()
col_nulls

# Column names and row index of the dataframe.
columnas_molinetes = molinetes.columns
columnas_molinetes
instancias_molinetes = molinetes.index
instancias_molinetes

# Selecting a single column — attribute access and bracket access are
# equivalent here.
molinetes.LINEA.head(5)
molinetes['LINEA'].head()

# Boolean mask: True on every row whose LINEA equals 'LINEA_B'.
(molinetes['LINEA'] == 'LINEA_B')

# Filter with .loc using that mask: keep only line-B turnstiles.
molinetes_b = molinetes.loc[molinetes['LINEA'] == 'LINEA_B']
molinetes_b
molinetes_b.index

# The filtered frame is smaller than the original, as expected.
np.shape(molinetes_b)
molinetes_b.head(5)

# .iloc filters rows and columns by position simultaneously:
# first 10 rows, first 4 columns.
molinetes_b.iloc[0:10,0:4]

# Tip: keep the Pandas Cheat Sheet handy —
# https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf

# First visualisation: bar chart of trip counts per subway line.
sns.set_context("talk")
plt.figure(figsize=(10,6))
sns.countplot(x="LINEA", data=molinetes)
plt.title("Cantidad de viajes por linea durante 2018")
plt.show()

# Next: a countplot per station for line B, using the filtered molinetes_b
# frame (line-B data only).
plt.figure(figsize=(10,6)) ax = sns.countplot(x="ESTACION", data=molinetes_b) ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right") plt.title("Cantidad de viajes molinete por estacion linea B") plt.show() # + [markdown] id="w9oNQuJKxyes" colab_type="text" # si quisieramos imprimir en pantalla los valores unicos de la columna "ESTACION" podemos usar el comando "np.unique" de numpy # + id="drDO_834xyes" colab_type="code" colab={} outputId="8599db2a-e8d3-4f6e-cef5-70b8b7416832" # imprimimos todos los valores unicos dentro de la columna "ESTACION" para el dataframe "molinetes_b" np.unique(molinetes_b.ESTACION) # + [markdown] id="xmT5e_PKxyew" colab_type="text" # # Estadisticas Descriptivas # + [markdown] id="iDPbK2Woxyew" colab_type="text" # Con el comando de numpy .mean() puedo obtener la media del elemento seleccionado, en este caso la columna "TOTAL" del dataframe "molinetes_b". # + id="36uxslQcxyex" colab_type="code" colab={} outputId="7b81ee09-1a51-4c00-dc0a-07d37f03d2da" media_tot_b = np.mean(molinetes_b['TOTAL']) media_tot_b # + id="CpfmssQIxye2" colab_type="code" colab={} outputId="efa73a90-8086-47f5-d41a-b30ee4d2a73d" # Luego procedemos a imprimir por pantalla la media calculada mas un texto print("La media total de todo el año 2017 para la linea B es " + str(media_tot_b)) # + [markdown] id="gySPBDq5xye5" colab_type="text" # por medio del comando .std() de numpy podemos calcular el desvío estandard de la variable en cuestion, en este caso la columna "TOTAL" del dataframe molinetes_b # + id="fb5PAvNDxye5" colab_type="code" colab={} outputId="91298856-cd7b-4ef6-b28c-a377afc58bee" desvio_std_tot_b = np.std(molinetes_b['TOTAL']) print("El desvio estándard de la variable total para todo el año 2017 es " + str(desvio_std_tot_b)) # + [markdown] id="RSiR-KUYxye_" colab_type="text" # por medio del comando .describe() de pandas podemos obtener estadísticas descriptivas del elemento en cuestion, La cantidad de instancias, la media, el desvio estandard, el 
valor minimo, el 1er cuartil, el 2do cuartil, el 3er cuartil y el valor máximo. # + id="U1_yhM0fxyfA" colab_type="code" colab={} outputId="df8dbd6c-3b77-46cf-f389-50f018537368" molinetes_b['TOTAL'].describe() # + [markdown] id="ekYE9FZpxyfD" colab_type="text" # # Análisis del mes de Abril para la linea B # + [markdown] id="nVEx0Gb7xyfE" colab_type="text" # Primero filtramos utilizando .loc el dataframe "molinetes_b" por los renglones que en la columna "PERIODO" sean iguales a "201704". Generamos un nuevo dataframe filtrando y reteniendo las instancias/renglones con indice referido al mes de Abril # + id="QZckHKNFxyfF" colab_type="code" colab={} # el filtrado se realiza mediante la accion de pandas ".loc". molinetes_b_abril = molinetes_b.loc[molinetes_b['PERIODO'] == 201704] # + id="L6fcTRSQxyfI" colab_type="code" colab={} outputId="4bcc1efd-f203-4cca-b325-37a9f701c862" # imprimimos por pantalla los primeros 3 renglones del mes de abril molinetes_b_abril.head(3) # + id="O_XjPMIoxyfN" colab_type="code" colab={} outputId="ad9ee679-244e-4bb8-eedd-64cae852bc78" # Obtenemos estadísticas descriptivas de la columna "TOTAL" del mes de abril. molinetes_b_abril['TOTAL'].describe() # + [markdown] id="VE8OJ8gdxyfS" colab_type="text" # # Visualización Distplot # Utilizaremos esta visualización para generar un histograma + una curva de distribución de los datos a partir de sus frecuencias poblacionales # + id="NbpDu0d-xyfT" colab_type="code" colab={} outputId="4e250e1f-34cb-4bc7-a500-536a9f03a96a" # displot para la columna "TOTAL" nos generará un histograma + la distribución generada de los datos a partir de # sus frecuencias poblacionales. plt.figure(figsize=(10,6)) sns.distplot(molinetes_b_abril['TOTAL']) plt.title('Distribución de cantidad de viajes totales de molinetes. Linea B. 
Mes Abril.') plt.xlabel('Cantidad de Viajes en molinete por cada 15 mins') plt.show() # + id="BAXCGCNuxyfX" colab_type="code" colab={} outputId="3cba81cb-3329-408f-d532-e0262f866efa" # displot para la columna "TOTAL" nos generará un histograma + la distribución generada de los datos a partir de # sus frecuencias poblacionales. plt.figure(figsize=(10,6)) sns.distplot(molinetes_b_abril.loc[molinetes_b_abril.ESTACION == 'LEANDRO N. ALEM']['TOTAL'], label = 'Alem') sns.distplot(molinetes_b_abril.loc[molinetes_b_abril.ESTACION == 'FEDERICO LACROZE']['TOTAL'], label = 'Lacroze') plt.title('Distribución de cantidad de viajes de molinetes. ALEM vs LACROZE.') plt.xlabel('Cantidad de Viajes en molinete por cada 15 mins') plt.legend(loc='upper right') plt.show() # + id="W6FOAcc6xyfa" colab_type="code" colab={} outputId="25068e69-8e5c-49d3-87a2-ba4f41b2dd4a" # displot para la columna "TOTAL" nos generará un histograma + la distribución generada de los datos a partir de # sus frecuencias poblacionales. plt.figure(figsize=(10,6)) sns.distplot(molinetes_b_abril.loc[molinetes_b_abril.ESTACION == 'LEANDRO N. ALEM']['TOTAL'], label = 'Alem') #sns.distplot(molinetes_b_abril.loc[molinetes_b_abril.ESTACION == 'FEDERICO LACROZE']['TOTAL'], label = 'Lacroze') sns.distplot(molinetes_b_abril.loc[molinetes_b_abril.ESTACION == 'MEDRANO']['TOTAL'], label = 'Medrano') plt.title('Distribución de cantidad de viajes molinetes. 
ALEM vs LACROZE.') plt.xlabel('Cantidad de Viajes en molinete por cada 15 mins') plt.legend(loc='upper right') plt.show() # + id="buzupma9xyfe" colab_type="code" colab={} outputId="1e215029-aae8-4233-89b9-135865ba8403" # visualizamos en pantalla las columnas del dataframe de la linea B molinetes_b_abril.columns # + [markdown] id="NU6Jn1D_xyfh" colab_type="text" # # Tabla Pivote para Linea B, con Estaciones y Periodos # Las tablas pivote son elementos poderosos para resumir la información de nuestro dataframe, seleccionando que datos queremos que marquen nuestros renglones, otros para las columnas y otros para los valores. En otras palabras, podemos obtener un nuevo dataframe cuyas columnas y filas las podemos definir nosotros en funcion de los valores originales. # + [markdown] id="tNHBO451xyfi" colab_type="text" # En este caso decidimos realizar una tabla pivote donde las columnas son los valores de la columna "ESTACIONES", el indice equivale a los valores de la columna "PERIODO" y los valores equivalen a la columna "TOTAL". Finalmente, con el comando "aggfunc" indicamos que el valor de cada celda sea la suma correspondiente de TOTAL para cada columna y renglon. # + id="02ywc9yXxyfj" colab_type="code" colab={} linea_b_2017 = pd.pivot_table(molinetes_b,values = 'TOTAL',columns = 'ESTACION', index = molinetes_b.PERIODO.values, aggfunc = np.sum) # + id="fiW8CUvLxyfq" colab_type="code" colab={} outputId="6cc8d6c1-77cb-45aa-dd21-d9af2836fb26" linea_b_2017.head(14) # + [markdown] id="CmeUJ--Ixyft" colab_type="text" # Obtenemos un nuevo dataframe de 12 renglones (meses) y 17 columnas (estaciones). 
# + id="GY0Fb9z_xyfu" colab_type="code" colab={} outputId="f840daf0-966c-4309-fdef-2b4c04402954" linea_b_2017.shape # + [markdown] id="GQwGjTwkxyfz" colab_type="text" # **Heatmap con Seaborn**: con el comando sns.heatmap() podremos visualizar los valores del dataframe obtenido en el ultimo paso y asi en una sola figura entender cuales son los pares "mes-estacion" de mayor cantidad de viajes. # + id="eB8wLziBxyf0" colab_type="code" colab={} outputId="21cccddb-f42e-43df-a8fa-9420f4ae1e21" plt.figure(figsize=(12,8)) sns.heatmap(linea_b_2017) plt.title("Cantidad de viajes por mes y estacion Linea B 2017") plt.show() # + [markdown] id="kKDVK0zRxyf6" colab_type="text" # ### ¿que conclusiones parciales podemos obtener en base a las visualizaciones realizadas? # + [markdown] id="IZ3Tj_J2xyf7" colab_type="text" # # Visualizando series de tiempo con comando "plot" de matplotlib (plt) # Utilizando el dataframe obtenido de la tabla pivote en el paso anterior, procedemos a visualizar la cantidad de pasajeros que pasaron por cada estación a lo largo de todo el año 2017. 
# + id="8tltEHwlxyf9" colab_type="code" colab={} outputId="16f9c344-3d06-4694-a498-cab17c284903" plt.figure(figsize=(12,8)) plt.plot(linea_b_2017.index, linea_b_2017['<NAME>'], label='<NAME>'); plt.plot(linea_b_2017.index, linea_b_2017['<NAME>'], label='<NAME>'); plt.plot(linea_b_2017.index, linea_b_2017['CALLAO.B'], label='CALLAO'); plt.plot(linea_b_2017.index, linea_b_2017['DORREGO'], label='DORREGO'); plt.plot(linea_b_2017.index, linea_b_2017['ECHEVERRIA'], label='ECHEVERRIA'); plt.plot(linea_b_2017.index, linea_b_2017['FEDERICO LACROZE'], label='LACROZE'); plt.plot(linea_b_2017.index, linea_b_2017['FLORIDA'], label='FLORIDA'); plt.legend(loc='upper right') plt.title('Cantidad de tickes de molinete por mes') plt.ylabel('Cantidad de tickets') plt.xlabel('Mes') plt.show() # + [markdown] id="DS5R1PtBxygB" colab_type="text" # # Análisis de la estación Lacroze de la linea B # Creamos un dataframe llamado "molinetes_lacroze" que contiene unicamente las instancias del dataframe "molinetes_b" cuyo valor del atributo/feature "ESTACION" sea igual a "FEDERICO LACROZE" # + id="eiRMV6A5xygC" colab_type="code" colab={} molinetes_lacroze = molinetes_b.loc[molinetes_b['ESTACION']== 'FEDERICO LACROZE'] # + id="r0FjKmcaxygE" colab_type="code" colab={} outputId="699583f7-14f4-4f26-d776-6663f3d66571" molinetes_lacroze.head(3) # + [markdown] id="cALDNlMZxygI" colab_type="text" # Podriamos seleccionar varias columnas en simultáneo con doble corchete en los extremos y detallando las columnas en cuestión. Así podriamos ver que tipo de pago es el mas utilizado en los molinetes de la estación Lacroze de la linea B. # + id="8SNCs7RZxygJ" colab_type="code" colab={} outputId="416469a6-b76a-442c-9cd2-df295855847a" molinetes_lacroze[['PAX_PAGOS', 'PAX_PASES_PAGOS', 'PAX_FRANQ']].sum() # + [markdown] id="71SWKzc0xygN" colab_type="text" # ### Ejercicio 00: # Partiendo del dataframe "molinetes" crear un dataframe que contenga todos los viajes de la linea D. 
# + id="MN-jzF11xygO" colab_type="code" colab={} ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### # + [markdown] id="Id87h8aKxygS" colab_type="text" # ### Ejercicio 01: # Determinar cual es la estacion de mayor cantidad de viajes en la linea D durante todo el año. # + id="-P-rn7igxygT" colab_type="code" colab={} ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### # + [markdown] id="Ei5UeFFrxygV" colab_type="text" # ### Ejercicio 02: # Para la estación de mayor cantidad de viajes determinar cual es el mes de mayor cantidad de viajes. # + id="Wn7_oMSnxygV" colab_type="code" colab={} ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### # + [markdown] id="mr4KLvEKxygd" colab_type="text" # ### Ejercicio 03: # Para la estación de mayor cantidad de viajes, en el mes de mayor cantidad de viajes, determinar cual es el molinete de mayor cantidad de viajes. # + id="EWDxl-gVxygd" colab_type="code" colab={} ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. 
Agregar mas celdas si es necesario ### ### resolver el ejercicio en estas celdas. Agregar mas celdas si es necesario ### # + id="1dLGcLQkxygp" colab_type="code" colab={} # + id="A2AmC6Vcxygs" colab_type="code" colab={}
clases/clase_00/clusterai_2020_clase00.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Hongchenglong/colab/blob/main/CertifiableBayesianInference/Rayleigh_Experiments/analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="6TGbK9oIw9gl" outputId="d6b049e4-b2ad-49f8-82c8-7f3eb51ef4ae"
# Mount Google Drive so the saved posterior models are reachable from Colab.
from google.colab import drive
drive.mount('/content/drive')

# + id="G_zNvxljPoFO" colab={"base_uri": "https://localhost:8080/"} outputId="cdbaace8-6401-4487-f6c4-780d1dd57839"
# Make the parent directory and the project checkout importable.
import sys, os
from pathlib import Path
path = Path(os.getcwd())
sys.path.append(str(path.parent))
sys.path.append('/content/drive/MyDrive/CertifiableBayesianInference')
# Directory holding the saved posterior models for these experiments.
experiment = '/content/drive/MyDrive/ColabNotebooks/CertifiableBayesianInference/Rayleigh_Experiments/'

# + colab={"base_uri": "https://localhost:8080/"} id="VE8rC4mCx5aA" outputId="889d23c1-a4b4-451d-a6e8-d32773a8fc45"
# !apt-get install tree
# !tree /content/drive/MyDrive/ColabNotebooks/CertifiableBayesianInference/Rayleigh_Experiments/

# + colab={"base_uri": "https://localhost:8080/"} id="NmkAoIy0tMTb" outputId="da61a507-0887-4654-ba81-c4e24026929e"
# import sys, os
# from pathlib import Path
# path = Path(os.getcwd())
# sys.path.append(str(path.parent))
import BayesKeras
from BayesKeras import PosteriorModel
from BayesKeras import analyzers

import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *

# Force CPU execution by hiding all CUDA devices.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

import numpy as np
from tqdm import trange

# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument("--opt")
# parser.add_argument("--rob")
# args = parser.parse_args()
# opt = str(args.opt)
# rob = int(args.rob)
opt = 'SWAG'
rob = 5

inference = opt

# Load the MNIST dataset, already split into train and test sets.
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
# Divide the pixel intensities by 255.0 to bring them into the 0-1 range.
X_train = X_train/255.
X_test = X_test/255.
# Cast to float32 and reshape: unknown number of rows, 28*28 columns per image.
X_train = X_train.astype("float32").reshape(-1, 28*28)
X_test = X_test.astype("float32").reshape(-1, 28*28)

# Load the saved posterior-distribution model.
model = PosteriorModel(experiment + "%s_FCN_Posterior_%s"%(inference, rob))
# Sparse categorical cross-entropy loss function.
loss = tf.keras.losses.SparseCategoricalCrossentropy()

# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="Es9d5VVEPxXX" outputId="a446bf25-a479-4e24-c6c8-aa69d73d0f3e"
num_images = 500

# Accuracy metric.
accuracy = tf.keras.metrics.Accuracy()
# Posterior predictive mean: this samples the posterior 'n' times and returns
# the mean softmax value for each input.
preds = model.predict(X_test[0:500]) #np.argmax(model.predict(np.asarray(adv)), axis=1)
# argmax: index of the maximum value along the axis.
accuracy.update_state(np.argmax(preds, axis=1), y_test[0:500])
fgsm = accuracy.result()
print("%s Accuracy: "%(inference), accuracy.result())

accuracy = tf.keras.metrics.Accuracy()
# Analyzer: fast gradient sign method (FGSM) adversarial examples.
adv = analyzers.FGSM(model, X_test[0:500], eps=0.1, loss_fn=loss, num_models=10)
preds = model.predict(adv) #np.argmax(model.predict(np.asarray(adv)), axis=1)
accuracy.update_state(np.argmax(preds, axis=1), y_test[0:500])
fgsm = accuracy.result()
print("FGSM Robustness: ", accuracy.result())

accuracy = tf.keras.metrics.Accuracy()
# Chernoff-bound verification.
preds = analyzers.chernoff_bound_verification(model, X_test[0:100], 0.1, y_test[0:100], confidence=0.80)
#print(preds.shape)
#print(np.argmax(preds, axis=1).shape)
accuracy.update_state(np.argmax(preds, axis=1), y_test[0:100])
print("Chernoff Lower Bound (IBP): ", accuracy.result())

"""
p = 0
for i in trange(100, desc="Computing FGSM Robustness"):
    this_p = analyzers.massart_bound_check(model, np.asarray([X_test[i]]), 0.075, y_test[i])
    print(this_p)
    p += this_p
print("Massart Lower Bound (IBP): ", p/100.0)
"""

# + id="-4UibT2rQCjz" colab={"base_uri": "https://localhost:8080/"} outputId="e8bd0aca-08f5-4567-d81a-f6a9915b6961"
# !ls /content/
CertifiableBayesianInference/Rayleigh_Experiments/analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <h1>datetime library</h1>
# <li>Time is linear
# <li>progresses as a straight-line trajectory from the big bang
# <li>to now and into the future

# <h3>Reasoning about time is important in data analysis</h3>
# <li>Analyzing financial timeseries data
# <li>Looking at commuter transit passenger flows by time of day
# <li>Understanding web traffic by time of day
# <li>Examining seasonality in department store purchases

# <h3>The datetime library</h3>
# <li>understands the relationship between different points of time
# <li>understands how to do operations on time

# <h3>Example:</h3>
# <li>Which is greater? "10/24/2018" or "11/24/2016"

d1 = "9/24/2018"
d2 = "11/24/2016"
max(d1, d2)  # lexicographic string comparison, NOT a chronological one

# <li>How much time has passed?

# Subtracting strings is undefined; catch the TypeError so the whole script
# can run top-to-bottom while still demonstrating the failure.
try:
    d1 - d2
except TypeError as err:
    print("Cannot subtract strings:", err)

# <h4>Obviously that's not going to work. </h4>
# <h4>We can't do date operations on strings</h4>
# <h4>Let's see what happens with datetime</h4>

import datetime

d1 = datetime.date(2016, 11, 24)
d2 = datetime.date(2018, 9, 24)
max(d1, d2)  # date objects compare chronologically
print(d2 - d1)  # subtracting two dates yields a timedelta

# <li>datetime objects understand time

# <h3>The datetime library contains several useful types</h3>
# <li>date: stores the date (month,day,year)
# <li>time: stores the time (hours,minutes,seconds)
# <li>datetime: stores the date as well as the time (month,day,year,hours,minutes,seconds)
# <li>timedelta: duration between two datetime or date objects

# <h3>datetime.date</h3>

century_start = datetime.date(2000, 1, 1)
today = datetime.date.today()
print(century_start, today)
print("We are", today - century_start, "days into this century")

# <h3>For a cleaner output</h3>

print("We are", (today - century_start).days, "days into this century")

# <h3>datetime.datetime</h3>

century_start = datetime.datetime(2000, 1, 1, 0, 0, 0)
time_now = datetime.datetime.now()
print(century_start, time_now)
print("we are", time_now - century_start,
      "days, hour, minutes and seconds into this century")

# <h4>datetime objects can check validity</h4>
# <li>A ValueError exception is raised if the object is invalid</li>

# 2015 was not a leap year, so February 29 is invalid; catch the ValueError so
# the demonstration does not abort the script.
try:
    some_date = datetime.date(2015, 2, 29)
except ValueError as err:
    print("Invalid date:", err)
some_date = datetime.date(2016, 2, 29)  # 2016 was a leap year: this one is valid
# some_time = datetime.datetime(2015,2,28,23,60,0)  # minute=60 would also raise ValueError

# <h3>datetime.timedelta</h3>
# <h4>Used to store the duration between two points in time</h4>

century_start = datetime.datetime(2000, 1, 1, 0, 0, 0)
time_now = datetime.datetime.now()
time_since_century_start = time_now - century_start
print("days since century start", time_since_century_start.days)
print("seconds since century start", time_since_century_start.total_seconds())
print("minutes since century start", time_since_century_start.total_seconds() / 60)
print("hours since century start", time_since_century_start.total_seconds() / 60 / 60)

# <h3>datetime.time</h3>

date_and_time_now = datetime.datetime.now()
time_now = date_and_time_now.time()
print(time_now)

# <h4>You can do arithmetic operations on datetime objects</h4>
# <li>You can use timedelta objects to calculate new dates or times from a given date

# +
today = datetime.date.today()
five_days_later = today + datetime.timedelta(days=5)
print(five_days_later)
# -

now = datetime.datetime.today()
five_minutes_and_five_seconds_later = now + datetime.timedelta(minutes=5, seconds=5)
print(five_minutes_and_five_seconds_later)

now = datetime.datetime.today()
five_minutes_and_five_seconds_earlier = now + datetime.timedelta(minutes=-5, seconds=-5)
print(five_minutes_and_five_seconds_earlier)

# <li>But you can't use timedelta on time objects. If you do, you'll get a TypeError exception

time_now = datetime.datetime.now().time()  # Returns the time component (drops the day)
print(time_now)
thirty_seconds = datetime.timedelta(seconds=30)
# time + timedelta is not supported; catch the TypeError so the script keeps going.
try:
    time_later = time_now + thirty_seconds  # Bug or feature?
except TypeError as err:
    print("Cannot add a timedelta to a time:", err)

# +
# But this is Python
# And we can always get around something by writing a new function!
# Let's write a small function to get around this problem


def add_to_time(time_object, time_delta):
    """Return ``time_object`` shifted by ``time_delta``, wrapping around midnight.

    Works around ``datetime.time`` not supporting timedelta arithmetic by
    attaching the time to a throwaway date, shifting, and then dropping the
    date component again.  Microseconds are preserved.
    """
    temp_datetime_object = datetime.datetime(
        500, 1, 1,
        time_object.hour, time_object.minute,
        time_object.second, time_object.microsecond,
    )
    return (temp_datetime_object + time_delta).time()
# -

# And test it
time_now = datetime.datetime.now().time()
thirty_seconds = datetime.timedelta(seconds=30)
print(time_now, add_to_time(time_now, thirty_seconds))

# <h2>datetime and strings</h2>

# + active=""
# More often than not, the program will need to get the date or time from a string:
#     From a website (bus/train timings)
#     From a file (date or datetime associated with a stock price)
#     From the user (from the input statement)
#
# Python needs to parse the string so that it correctly creates a date or time object
#
# -

# <h4>datetime.strptime</h4>
# <li>datetime.strptime(): grabs time from a string and creates a date or datetime or time object
# <li>The programmer needs to tell the function what format the string is using
# <li> See http://pubs.opengroup.org/onlinepubs/009695399/functions/strptime.html for how to specify the format

date = '01-Apr-03'
date_object = datetime.datetime.strptime(date, '%d-%b-%y')
print(date_object)

# Unfortunately, there is no similar thing for time delta
# So we have to be creative!
bus_travel_time = '2:15:30'
hours, minutes, seconds = bus_travel_time.split(':')
x = datetime.timedelta(hours=int(hours), minutes=int(minutes), seconds=int(seconds))
print(x)


# Or write a function that will do this for a particular format
def get_timedelta(time_string):
    """Parse an ``H:M:S`` string into a ``datetime.timedelta``."""
    hours, minutes, seconds = time_string.split(':')
    return datetime.timedelta(hours=int(hours), minutes=int(minutes), seconds=int(seconds))


# <h4>datetime.strftime</h4>
# <li>The strftime function flips the strptime function. It converts a datetime object to a string
# <li>with the specified format

now = datetime.datetime.now()
string_now = datetime.datetime.strftime(now, '%m/%d/%y %H:%M:%S')
print(now, string_now)
print(str(now))  # Or you can use the default conversion
week3-pandas/datetime_objects.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 데이터 받기 및 전처리 # + from sqlalchemy import create_engine import pymysql db_connection_str = 'mysql+pymysql://root:Lwglwk5120!@172.16.17.32/Linear_Regression' db_connection = create_engine(db_connection_str) df_sql = pd.read_sql('SELECT * FROM api_football', con=db_connection) # - df_original = df_sql df_personal_info = df_original[['player_name','position','age','nationality','height','weight','team_name','league','season','rating']] df_original = df_original.groupby('player_name').sum() df_original db_connection_str = 'mysql+pymysql://root:Lwglwk5120!@172.16.17.32/Linear_Regression' db_connection = create_engine(db_connection_str) df0 = pd.read_sql('SELECT * FROM market_instagram', con=db_connection) df0['value'] = df0['value'].str.replace(pat=r'[€@m\r]', repl = r' ', regex=True).astype(np.float) df0 # + # 10~15만 데이터 추가 후 merge결과 : 474명 선수 데이터 출력(몸값, 인스타, 퍼포먼스 데이터) df_original = pd.merge(df_original, df0, on='player_name', how='inner') df_original.drop(['height', 'weight','rating', 'captain','age'], axis=1, inplace=True) df_original # - # 선수 별 경기 수 columns 생성 df_original['games_played'] = round(df_original['games_minutes_played'] / 90,4) # 선수당 경기수 50 미만 제거 df_copy = df_original.copy() df_copy = df_copy[df_copy['games_played'] >= 50] df_copy # + # 최종 dataset 생성(df) = 3시즌 간 경기수 50 초과 대상 game_played =pd.DataFrame(df_copy['games_played']) per_game = round(df_copy[['shots_total', 'shots_on', 'goals_total', 'goals_conceded', 'goals_assists', 'passes_total', 'passes_key', 'passes_accuracy', 'tackles_total', 'tackles_blocks', 'tackles_interceptions', 'duels_total', 'duels_won', 'dribbles_attempts', 'dribbles_success', 'fouls_drawn', 'fouls_committed', 'cards_yellow', 'cards_yellowred', 'cards_red', 'penalty_won', 'penalty_commited', 'penalty_success', 
'penalty_missed', 'penalty_saved', 'games_appearences', 'games_lineups', 'substitutes_in', 'substitutes_out', 'substitutes_bench']].div(df_copy['games_played'], axis=0),4) df_copy = pd.concat([df_copy[['player_name','value','follower']], per_game], axis=1) df_copy = pd.concat([df_copy, game_played], axis=1) df_copy = df_copy.set_index('player_name') df_copy # - df_personal_info def checkPS(pos): if pos == 'Goalkeeper': return 1 elif pos == 'Midfielder': return 2 elif pos == 'Defender': return 3 elif pos == 'Attacker': return 4 else: return 0 df_pi = df_personal_info.copy() df_pi.position = df_pi.position.map(lambda x: checkPS(x)) df_pi df_pi = df_pi.groupby('player_name').mean() df_pi df_pi = pd.merge(df_pi, df0, on='player_name', how='inner') df_pi df_pi = df_pi.drop(['value', 'follower'], axis=1) df_pi df_pos = pd.DataFrame() for a in df_copy.reset_index().player_name: if a in df_pi.player_name.tolist(): df_pos = df_pos.append(df_pi[df_pi.player_name==a]) df_pos df_pos = pd.merge(df_pos, df_copy, on='player_name', how='inner') df_pos.position.round() df_pos # + pd.options.display.max_columns = None df_pos.describe() # - # # Clustering # ## position 별 feature들의 군집이 다른지 확인 # ## position 별로 나누는 것이 의미가 있는지 확인 X = df_pos.drop(['player_name','position'], axis=1) y = df_pos['position'] # + # %matplotlib inline from sklearn.cluster import MiniBatchKMeans from sklearn.cluster import KMeans model1 = KMeans(n_clusters=4).fit(X) model2 = MiniBatchKMeans(n_clusters=4, batch_size=1000, compute_labels=True).fit(X) plt.figure(figsize=(12, 6)) idx = np.random.randint(371, size=371) plt.subplot(121) plt.scatter(X[idx, 0], X[idx, 1], c=model1.labels_[idx]) plt.title("K-평균 군집화") plt.subplot(122) plt.scatter(X[idx, 0], X[idx, 1], c=model2.labels_[idx]) plt.title("미니배치 K-평균 군집화") plt.tight_layout() plt.show() # - # # 데이터 corr 확인 df_pos = df_pos.drop('player_name', axis=1) df_pos df_pos.corr()[df_pos.corr() >= 0.7].to_csv('corrCheck.csv', encoding='utf-8') corrChecked = 
pd.read_csv('corrChecked.csv', encoding='utf-8', index_col='Unnamed: 0') corrChecked # # 7 high correlating features identified # 1. height - weight # 2. shots_total - shots_on - goals_total # 3. goals_conceded - penalty_saved # 4. goals_assist - passes_key # 5. duels_total - duels_won # 6. dribbles_attempts - dribbles_success # 7. games_apperances - substitutes_in - substitutes_out - substitutes_bench # 위 7개 feature들에 대해서는 PCA 작업 from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA # 1. Height과 Weight PCA df_pos_hw = df_pos[['height', 'weight']] df_pos_hw = StandardScaler().fit_transform(df_pos_hw) df_pos_pca_hw = pd.DataFrame(data = PCA(n_components=1).fit_transform(df_pos_hw), columns=['hw']) df_pos_pca_hw # 2. shots_on, shots_total and goals_total PCA df_pos_sotgt = df_pos[['shots_on', 'shots_total', 'goals_total']] df_pos_sotgt = StandardScaler().fit_transform(df_pos_sotgt) df_pos_pca_sg = pd.DataFrame(data = PCA(n_components=1).fit_transform(df_pos_sotgt), columns=['shotsOnTotal_goalsTotal']) df_pos_pca_sg # 3. goals_conceded, penalty_saved PCA df_pos_gcps = df_pos[['goals_conceded', 'penalty_saved']] df_pos_gcps = StandardScaler().fit_transform(df_pos_gcps) df_pos_pca_gcps = pd.DataFrame(data = PCA(n_components=1).fit_transform(df_pos_gcps), columns=['goasConceded_penaltySaved']) df_pos_pca_gcps # 4. goals_assist, passes_key PCA df_pos_gapk = df_pos[['goals_assists','passes_key']] df_pos_gapk = StandardScaler().fit_transform(df_pos_gapk) df_pos_pca_gapk = pd.DataFrame(data = PCA(n_components=1).fit_transform(df_pos_gapk), columns=['goalsAssist_passesKey']) df_pos_pca_gapk # 5. duels_total, duels_won PCA df_pos_duels = df_pos[['duels_total', 'duels_won']] df_pos_duels = StandardScaler().fit_transform(df_pos_duels) df_pos_pca_duels = pd.DataFrame(data = PCA(n_components=1).fit_transform(df_pos_duels), columns=['duelsWonTotal']) df_pos_pca_duels # 6. 
# 6. dribbles_attempts, dribbles_success PCA
df_pos_dribbles = df_pos[['dribbles_attempts', 'dribbles_success']]
df_pos_dribbles = StandardScaler().fit_transform(df_pos_dribbles)
df_pos_pca_dribbles = pd.DataFrame(
    data=PCA(n_components=1).fit_transform(df_pos_dribbles),
    columns=['dribblesAtmptsSuc'])
df_pos_pca_dribbles

df_pos.columns

# 7. games_appearences, substitutes_in, substitutes_out, substitutes_bench PCA
df_pos_gasub = df_pos[['games_appearences', 'substitutes_in',
                       'substitutes_out', 'substitutes_bench']]
df_pos_gasub = StandardScaler().fit_transform(df_pos_gasub)
df_pos_pca_gasub = pd.DataFrame(
    data=PCA(n_components=1).fit_transform(df_pos_gasub),
    columns=['gamesApperance_sub'])
df_pos_pca_gasub

# Table of the PCA-reduced features (one 1-component projection per group).
df_pca = pd.concat([df_pos_pca_hw, df_pos_pca_sg, df_pos_pca_gcps,
                    df_pos_pca_gapk, df_pos_pca_duels, df_pos_pca_dribbles,
                    df_pos_pca_gasub], axis=1)
df_pca
df_pca.corr()[df_pca.corr() > 0.7]

# OLS on the PCA-reduced features plus every remaining (non-PCA) feature.
pca_cols = ['height', 'weight', 'shots_total', 'shots_on', 'goals_total',
            'goals_conceded', 'penalty_saved', 'goals_assist', 'passes_key',
            'duels_total', 'duels_won', 'dribbles_attempts',
            'dribbles_success', 'games_appearences', 'substitutes_in',
            'substitutes_out', 'substitutes_bench']
npca_cols = df_pos.columns.tolist()
npca_features = [item for item in npca_cols if item not in pca_cols]
df_OLS = pd.concat([df_pos[npca_features], df_pca], axis=1)
df_OLS

import statsmodels.api as sm


def fit_ols(df, target='value', scaled=True):
    """Fit OLS of *target* on all other columns of *df* and print the summary.

    The same fit-and-report boilerplate was previously copy-pasted for every
    backward-elimination round below; it is factored out here.  When *scaled*
    is True every regressor is wrapped in patsy's ``scale()`` transform.

    Returns the fitted statsmodels results object.
    """
    regressors = list(df.drop([target], axis=1).columns)
    if scaled:
        regressors = ["scale({})".format(name) for name in regressors]
    model = sm.OLS.from_formula(target + " ~ +" + "+".join(regressors),
                                data=df)
    result = model.fit()
    print(result.summary())
    return result


# Baseline fit on the raw (unscaled) regressors.
result = fit_ols(df_OLS, scaled=False)

# Same model with every regressor standardised.
result = fit_ols(df_OLS)

# Round 1 of backward elimination: drop p-value > 0.9 (penalty_missed).
df_OLS_1 = df_OLS.drop(['penalty_missed'], axis=1)
len(df_OLS.columns), len(df_OLS_1.columns)
result = fit_ols(df_OLS_1)

# Round 2: drop p-value > 0.7 (position, cards_yellowred).
df_OLS_2 = df_OLS_1.drop(['position', 'cards_yellowred'], axis=1)
len(df_OLS_1.columns), len(df_OLS_2.columns)
result = fit_ols(df_OLS_2)

# Round 3: drop p-value > 0.6 (tackles_interceptions, duelsWonTotal).
df_OLS_3 = df_OLS_2.drop(['tackles_interceptions', 'duelsWonTotal'], axis=1)
len(df_OLS_2.columns), len(df_OLS_3.columns)
result = fit_ols(df_OLS_3)

# Round 4: drop p-value > 0.4 (rating, goasConceded_penaltySaved).
df_OLS_4 = df_OLS_3.drop(['rating', 'goasConceded_penaltySaved'], axis=1)
len(df_OLS_3.columns), len(df_OLS_4.columns)
result = fit_ols(df_OLS_4)

# Round 5: drop p-value > 0.2 (cards_red, penalty_commited).
df_OLS_5 = df_OLS_4.drop(['cards_red', 'penalty_commited'], axis=1)
len(df_OLS_4.columns), len(df_OLS_5.columns)
result = fit_ols(df_OLS_5)

# Round 6: drop p-value > 0.1
# (tackles_total, cards_yellow, penalty_success, games_lineups).
df_OLS_6 = df_OLS_5.drop(['tackles_total', 'cards_yellow', 'penalty_success',
                          'games_lineups'], axis=1)
len(df_OLS_5.columns), len(df_OLS_6.columns)
result = fit_ols(df_OLS_6)

# Round 7: drop p-value > 0.1 (hw).
df_OLS_7 = df_OLS_6.drop(['hw'], axis=1)
len(df_OLS_6.columns), len(df_OLS_7.columns)
result = fit_ols(df_OLS_7)

# Round 8: drop the single largest remaining p-value (fouls_committed).
df_OLS_8 = df_OLS_7.drop(['fouls_committed'], axis=1)
len(df_OLS_7.columns), len(df_OLS_8.columns)
result = fit_ols(df_OLS_8)

# Keep the final design matrix / target at module level: the validation
# cells below reuse ``dfX``/``dfy`` from the last elimination round,
# exactly as the previous copy-pasted version left them.
dfX = df_OLS_8.drop(['value'], axis=1)
dfy = df_OLS_8['value']

# # 1차 검증 (validation round 1) follows below.
from sklearn.model_selection import KFold scores = np.zeros(10) cv = KFold(10, shuffle=True, random_state=0) for i, (idx_train, idx_test) in enumerate(cv.split(df_OLS_8)): df_train = df_OLS_8.iloc[idx_train] df_test = df_OLS_8.iloc[idx_test] model = sm.OLS.from_formula("value ~ scale(age) + scale(follower) + scale(goals_assists) + scale(passes_total) +\ scale(passes_accuracy) + scale(tackles_blocks) + scale(fouls_drawn) + scale(penalty_won) +\ scale(games_played) + scale(shotsOnTotal_goalsTotal) + scale(goalsAssist_passesKey) +\ scale(dribblesAtmptsSuc) + scale(gamesApperance_sub)", data=df_train) result = model.fit() pred = result.predict(df_test) rss = ((df_test.value - pred) ** 2).sum() tss = ((df_test.value - df_test.value.mean())** 2).sum() rsquared = 1 - rss / tss scores[i] = rsquared print("학습 R2 = {:.8f}, 검증 R2 = {:.8f}".format(result.rsquared, rsquared)) # - # # 2차 검증 # + from sklearn.base import BaseEstimator, RegressorMixin import statsmodels.formula.api as smf import statsmodels.api as sm class StatsmodelsOLS(BaseEstimator, RegressorMixin): def __init__(self, formula): self.formula = formula self.model = None self.data = None self.result = None def fit(self, dfX, dfy): self.data = pd.concat([dfX, dfy], axis=1) self.model = smf.ols(self.formula, data=self.data) self.result = self.model.fit() def predict(self, new_data): return self.result.predict(new_data) # + from sklearn.model_selection import cross_val_score model = StatsmodelsOLS("value ~ scale(age) + scale(follower) + scale(goals_assists) + scale(passes_total) +\ scale(passes_accuracy) + scale(tackles_blocks) + scale(fouls_drawn) + scale(penalty_won) +\ scale(games_played) + scale(shotsOnTotal_goalsTotal) + scale(goalsAssist_passesKey) +\ scale(dribblesAtmptsSuc) + scale(gamesApperance_sub)") cv = KFold(10, shuffle=True, random_state=0) cross_val_score(model, dfX, dfy, scoring="r2", cv=cv) # -
Analysis/4.1_DataAnalysis_AllData_20200620.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .sh # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Bash # language: bash # name: bash # --- # # Sorting and Mark duplicates using UMI # # 1. (010-020) Novosort + Picard MarkDuplicates ls -al cat Projects/NGSK_IRUD_dummy_dummy/sge-Novosort-NGSK_IRUD_dummy_dummy.sh
NagasakiU/030_SortDedup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dask for beginners Cheat Sheets sample code # # (c) 2020 NVIDIA, Blazing SQL # # Distributed under Apache License 2.0 # ### Imports from dask_cuda import LocalCUDACluster # # Cluster setup # #### dask_cuda.LocalCUDACluster cluster = LocalCUDACluster() cluster cluster = LocalCUDACluster( n_workers=1 , threads_per_worker=1 , CUDA_VISIBLE_DEVICES="0" , rmm_managed_memory=True , rmm_pool_size="20GB" ) cluster # #### dask-scheduler # + language="bash" # dask-scheduler # + language="bash" # dask-scheduler --host 127.0.0.1 --port 8786 --dashboard-address 8787 --idle-timeout 600 # - # #### dask_worker # + language="bash" # dask-cuda-worker <scheduler-ip>:8786 # + language="bash" # dask-cuda-worker --worker-port=3000:3001 --death-timeout 600 <scheduler-ip>:8786 # -
cheatsheets/dask4beginners/dask4beginners_ClusterSetup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from matplotlib import pylab # - bioresponse = pd.read_csv('bioresponse.csv', header=0) bioresponse.head() from sklearn.ensemble import RandomForestClassifier import xgboost as xgb from sklearn.model_selection import cross_val_score bio_target = bioresponse.Activity.values del bioresponse['Activity'] n_trees = [i for i in range(10, 55, 5)] n_trees[:5] # + # %%time rf_scoring = [] for tree in n_trees: rf = RandomForestClassifier(max_depth=30, n_estimators=tree, random_state=1) score = cross_val_score(rf, bioresponse, bio_target, \ scoring='accuracy', cv=3) rf_scoring.append(score) rf_scoring = np.asmatrix(rf_scoring) # - rf_scoring # %matplotlib inline pylab.plot(n_trees, rf_scoring.mean(axis = 1), marker='.', label='RandomForest') pylab.grid(True) pylab.xlabel('n_trees') pylab.ylabel('score') pylab.title('Accuracy score') pylab.legend(loc='lower right') from tqdm import tqdm_notebook # + # %%time xgb_scoring = [] for tree in tqdm_notebook(n_trees): rf = xgb.XGBClassifier(n_estimators=tree, learning_rate=0.1) score = cross_val_score(rf, bioresponse, bio_target, \ scoring='accuracy', cv=3) xgb_scoring.append(score) xgb_scoring = np.asmatrix(xgb_scoring) # - pylab.plot(n_trees, rf_scoring.mean(axis = 1), marker='.', label='RandomForest') pylab.plot(n_trees, xgb_scoring.mean(axis = 1), marker='.', label='XGBoost') pylab.grid(True) pylab.xlabel('n_trees') pylab.ylabel('score') pylab.title('Accuracy score') pylab.legend(loc='lower right') from catboost import CatBoostClassifier titanic_data = pd.read_csv('../day-1/titanic.csv') titanic_data.head() del titanic_data['PassengerId'] del titanic_data['Name'] del titanic_data['Ticket'] del titanic_data['Cabin'] titanic_data.head() titanic_data.dropna(inplace=True) 
titanic_target = titanic_data.Survived.values del titanic_data['Survived'] from sklearn.model_selection import train_test_split Xtrain, Xtest, Ytrain, Ytest = train_test_split(titanic_data, \ titanic_target, test_size=0.2) cb_model = CatBoostClassifier( learning_rate=0.1, depth=8, iterations=70, custom_loss=['Accuracy', 'Precision', 'Recall', 'F1'] ) np.where(Xtrain.dtypes != np.float)[0] cb_model.fit( Xtrain, Ytrain, cat_features=np.where(Xtrain.dtypes != np.float)[0], eval_set=(Xtest, Ytest), verbose=False, plot=True ) Xtrain.head() cb_model.save_model('asdasdfsadff') cb_model.feature_importances_ cb_model.predict_proba(Xtest)[:5] from sklearn.ensemble import AdaBoostClassifier titanic_data['Sex'] = [1 if s == 'male' else 0 for s in titanic_data['Sex']] titanic_data.head() del titanic_data['Embarked'] # + from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score # - Xtrain, Xtest, Ytrain, Ytest = train_test_split(titanic_data, \ titanic_target, test_size=0.2) base_est = DecisionTreeClassifier(max_depth=3) ada_model = AdaBoostClassifier(base_estimator=base_est) ada_model.fit(Xtrain, Ytrain) print(accuracy_score(Ytest, ada_model.predict(Xtest)))
research-3/boosting_on_lesson.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Week 6
#
# Phew. Is it week 6 already?
#
# Last week we had an intro to machine learning and regression and this week we continue with some more ML but focusing on classification instead. There are lots of courses on machine learning at DTU. And across many research areas, people use ML for all kinds of things. So there's a good chance you're already familiar with what's going to happen today.
#
# In the following, we continue introducing fundamentals of ML, decision trees and start with some prediction tasks on crime data. You might ask, why are we doing this? Well, a couple of reasons:
#
# 1. It ties nicely with how we started this course: do you remember all we learnt about predictive policing in Week 1? So, today it is our turn to make predictions and see how well we can do with the data we have been exploring.
#
# 2. Visualization **AND** machine learning is a powerful combination. A combination that is pretty rare.
#    - Usually it's the case that people are either good at machine learning or data viz, but not both.
#    - So what we will be able to do in this class is an unusual combo: We can use ML to understand data and then visualize the outputs of the machine-learning.
#
# The plan for today is as follows:
#
# 1. In part 1, we go more in depth on fundamentals of machine learning;
# 2. In part 2, we get an introduction to Decision Trees;
# 3. In part 3, we put everything together to predict criminal recidivism.

# ## Part 1: Fundamentals of machine learning

# We continue with a couple of lectures from <NAME> about model selection and feature extraction. These connect nicely with what you should have already read in DSFS Chapter 11. If you did not read the chapter yet, it is time for you to do it.
# # Find it on DTU Learn under 'Course content' $\rightarrow$ 'Content' $\rightarrow$ 'Lecture 6 reading' # # **Model selection** # [![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/MHhlAtw3Ces/0.jpg)](https://www.youtube.com/watch?v=MHhlAtw3Ces) # # **Feature extraction and selection** # [![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/RZmitKn220Q/0.jpg)](https://www.youtube.com/watch?v=RZmitKn220Q) # > *Exercise 1*: A few questions about machine learning to see whether you've read the text and watched the videos. # > # > * What do we mean by a 'feature' in a machine learning model? # > * What is the main problem with overfitting? # > * Explain the connection between the bias-variance trade-off and overfitting/underfitting. # > * The `Luke is for leukemia` on page 145 in the reading is a great example of why accuracy is not a good measure in very unbalanced problems. Try to come up with a similar example based on a different type of data (either one you are interested in or one related to the SF crime dataset). # ## Part 2: Decision Tree Intro # Now we turn to decision trees. This is a fantastically useful supervised machine-learning method, that we use all the time in research. To get started on the decision trees, we asked you to read DSFS, chapter 17 (if you didn't read it you can find it in DTU Learn). # # And our little session on decision trees wouldn't be complete without hearing from Ole about these things. # # [![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/LAA_CnkAEx8/0.jpg)](https://www.youtube.com/watch?v=LAA_CnkAEx8) # > *Exercise 2:* Just a few questions to make sure you've read the text (DSFS chapter 17) and/or watched the video. # > # > * There are two main kinds of decision trees depending on the type of output (numeric vs. categorical). What are they? # > * Explain in your own words: Why is entropy useful when deciding where to split the data? # > * Why are trees prone to overfitting? 
# > * Explain (in your own words) how random forests help prevent overfitting. # In the following I added some additional material for you to explore decision trees through some fantastic *visual* introductions. # # *Decision Trees 1*: The visual introduction to decision trees on this webpage is AMAZING. Take a look to get an intuitive feel for how trees work. Do not miss this one, it's a treat! http://www.r2d3.us/visual-intro-to-machine-learning-part-1/ # # *Decision Trees 2*: the second part of the visual introduction is about the topic of model selection, and bias/variance tradeoffs that we looked into earlier during this lesson. But once again, here those topics are visualized in a fantastic and inspiring way, that will make it stick in your brain better. So check it out http://www.r2d3.us/visual-intro-to-machine-learning-part-2/ # # # # *Decision tree tutorials*: And of course the best way to learn how to get this stuff rolling in practice, is to work through a tutorial or two. We recommend the ones below: # * https://jakevdp.github.io/PythonDataScienceHandbook/05.08-random-forests.html # * https://towardsdatascience.com/random-forest-in-python-24d0893d51c0 (this one also has good considerations regarding the one-hot encodings) # # (But there are many other good ones out there.) # ## Part 3: Predicting criminal recidivism # It is now time to put everything together and use the models we have read about for prediction. Today, we are still going to focus on crimes, but with a different dataset. # # The dataset is related to an algorithm used by judges and parole officers for scoring criminal defendant’s likelihood of reoffending (recidivism). It consists of information about defendants and variables used to measure recidivism. # # I'll provide you with more information about this data and its source next week. But, for now I don't want to give you more spoilers (you'll know why next week 😇), so let's get started. 
In the next exercises, we will try to **loosely** recreate the algorithm to predict whether a person is going to re-commit a crime in the future.

# > *Exercise 3.1:* Getting the data ready. Before getting to predictions, we need to get the data, select the features, and define the target. Follow these steps for success:
# >
# > * Download the dataset from [GitHub](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/recidivism_dataset_sub.csv) and load it in a `pandas` dataframe.
# > * Select the variables of interest. Here is a description of each one and its meaning:
# >     1. `age`: age (in years) of the person;
# >     2. `sex`: either "Female" or "Male";
# >     3. `race`: a variable encoding the race of the person;
# >     4. `juv_fel_count`: the number of previous juvenile felonies;
# >     5. `juv_misd_count`: the number of previous juvenile misdemeanors;
# >     6. `juv_other_count`: the number of prior juvenile convictions that are not considered either felonies or misdemeanors;
# >     7. `priors_count`: the number of prior crimes committed;
# >     8. `is_recid`: if the defendant has recommitted a crime;
# >     9. `days_b_screening_arrest`: Days between the arrest and screening.
# >     10. `c_charge_degree`: Degree of the crime. It is either M (Misdemeanor), F (Felony), or O (not causing jail)
# >
# > * Finally, we need a target:
# >     * `two_year_recid` is what we want to predict. Its current values are $\in\left[0,1\right]$, where $0$ means the defendant did not recommit a crime within two years, and $1$ means the defendant recommitted a crime within two years.

# Alright, we now have the data, but we still need a bit of **preprocessing** before we can get to the actual prediction.
#
# At the beginning, I wanted you to embed everything into a unique pipeline. I later found that it sometimes has issues (throws errors, takes a long time when cross-validating, etc.). Thus, I have excluded this step from today's class.
However, if you want to know more about pipelines, here, a nice optional tutorial for you: # # * https://towardsdatascience.com/step-by-step-tutorial-of-sci-kit-learn-pipeline-62402d5629b6 # > *Exercise 3.2:* Data preprocessing and label encoding. # > # > * To preprocess the data follow these steps: # > * filter out records where the `is_recid` feature is not known (i.e. where it is equal to -1); # > * only keep records that cause jail time; # > * only keep records that have between $-30$ and $30$ days between the arrest and screening. # > * Finally, drop `is_recid`, `c_charge_degree`, `days_b_screening_arrest` for the upcoming analysis. # > * Before we move on, let's explore the data with a few visualizations. Use the variable `two_year_recid` and create a plot with the following subplots: # > * A bar plot with the number of recommitted and non-recommitted crimes, e.g., number of 0s and 1s in `two_year_recid`. Now a couple of questions: What is the fraction of recommitted crimes over the total number of records? Is it balanced? # > * A bar plot with the fraction of recommitted crimes over total number of records per `sex`, e.g., the number of Females that recommitted a crime over the number of all female records. What do you observe? # > * A bar plot with the fraction of recommitted crimes over total number of records per `race` (compute as above). What do you observe? # > * A bar plot with the fraction of recommitted crimes over total number of records per `age` group (group ages as <20, 20-30, 30-40, etc. and compute as above). What do you observe? # > * Some features we are working with are categorical, so we need to deal with them by using encoders. There are many different types, but we will focus on the `OneHotEncoder` and the `LabelEncoder`: # > * Describe what these encoder do and choose one. Which one did you choose? Why? # > * What variables need to be transformed? 
# # <mark> **Note** The data source that I was using has changed, so the data currently doesn't include `is_recid=-1`and `c_charge_degree='O'`. Please, write the code as if you were filtering those variables anyway, it is a way for you to practice with `pandas`.</mark> # We are almost there! It is now time to make predictions. # > *Exercise 3.3:* Build a Decision Tree or a Random Forest. Now we are going to build a Decision Tree (or a Random Forest) classifier that takes as input the features defined above and predicts if a person is going to recommit the crime within two years. # > * Split the data in Train/Test sets. You can do this with `train_test_split` in `sklearn`, I used a 70/30 split, but you are free to try different ones. # > * **Note:** create a balanced dataset, that is, **grab an equal number of examples** from each target value. # > * Fit a model to your Train set. A good option is the `DecisionTreeClassifier` (or even better a [Random Forest](https://jakevdp.github.io/PythonDataScienceHandbook/05.08-random-forests.html), here is [another tutorial for Random Forests](https://towardsdatascience.com/random-forest-in-python-24d0893d51c0)). # > * Evaluate the performance of model on the test set (look at Accuracy, Precision, and Recall). What are your thoughts on these metrics? Is accuracy a good measure? # > * **hint:** Since you have created a balanced dataset, the baseline performance (random guess) is 50%. # > * Are your results tied to the specific training data/hyperparameter set you used? Try to perform a `RandomizedSearchCV` and recompute the performance metric above with the hyperparameters found. [Here](https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74) a nice tutorial for you! And here one on [cross-validation](https://towardsdatascience.com/cross-validation-in-machine-learning-72924a69872f) for those of you who crave for more. # > * Visualize the tree. 
There are different options to do so. The easiest one is to use `plot_tree`, but there are other [options](https://mljar.com/blog/visualize-decision-tree/). If you chose Random Forest, you can visualize a tree as well by extracting a single tree with `model.estimators_[n]` (n is the index of the estimator you want to select). # > * Visualize the Feature Importance. What do you observe? # > * **(Optional)** If you find yourself with extra time, come back to this exercise and tweak the encoder, model, and variables you use to see if you can improve the performance of the tree. **Note**: It's not 100% given that adding variables will improve your predictive performance. # Before you go, please, have a look at the following two activities: # --- # # 1) # # <mark> Take a minute (it is really one minute) to fill this [form](https://forms.gle/9RwhFc96na4E2Fmg7). It is really important for me to continue improving and give you better feedbacks. </mark> # # --- # 2) # # <mark> Some of you consider this course too easy. So, it's time to spice things up: once you have the best model you could find, go to DTU Learn and submit your code together with your final accuracy/precision/recall scores under DTU-Learn $\rightarrow$ Assignments. I'll make a Leaderboard and we'll see who's gonna win 🥇!!</mark> # # **Constraints:** Use a 70/30 train/test split, and `random_seed=42`. # # **Note 1:** Even if it is in the form of an assignment on DTU Learn it is **not** going to be evaluated. So, take it really as an opportunity to play around with your model and see how well you can do. # # **Note 2:** You have time until **Thursday at 23.59** to submit your model/performance score.
lectures/Week6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt

# Play-by-play data for the 2019 season, one row per event.
df = pd.read_pickle('2019.pkl.gz')

# +
# Walk the play-by-play rows and cut each game into "stints": spans during
# which both five-man lineups stay on the floor.  For every stint, record
# (home lineup, away lineup, length, home points, away points), where length
# is measured in possessions (possessions_og) or in seconds (minutes_og).
#
# NOTE(review): the positional itertuples indices are assumed from the names
# they are bound to below (row[1] game id, row[37]/row[38] home/away score,
# row[52] seconds elapsed, row[49] shot made, row[56] defensive rebound,
# row[59] turnover, row[62..80] player name columns) — confirm against the
# DataFrame schema.
possessions_og = []
minutes_og = []
p_counter = 1      # possessions elapsed in the current stint
s_counter = 0      # seconds mark at the start of the current stint
hs_counter = 0     # home score at the start of the current stint
as_counter = 0     # away score at the start of the current stint
gameid = 'NONE'
for row in df.itertuples():
    if row[1] != gameid:
        # New game: reset the stint counters and snapshot the lineups.
        p_counter = 1
        s_counter = 0
        hs_counter = 0
        as_counter = 0
        h_p_prev = sorted([row[i] for i in range(62, 71, 2)])
        a_p_prev = sorted([row[i] for i in range(72, 82, 2)])
        gameid = row[1]
        if(row[0] != 0):
            # Not the very first row of the frame: flush the stint that was
            # still open when the previous game ended (h_players etc. still
            # hold values from the previous iteration).
            # NOTE(review): the counters were just reset above, so the score
            # deltas here span the whole previous game rather than the last
            # stint; possessions entries with p_counter == 1 are filtered out
            # below, but minutes entries are not — verify intent.
            possessions_og.append((h_players, a_players, p_counter,
                                   h_score - hs_counter,
                                   a_score - as_counter))
            minutes_og.append((h_players, a_players,
                               seconds_elapsed - s_counter,
                               h_score - hs_counter,
                               a_score - as_counter))
    h_players = sorted([row[i] for i in range(62, 71, 2)])
    a_players = sorted([row[i] for i in range(72, 82, 2)])
    h_score = row[37]
    a_score = row[38]
    seconds_elapsed = row[52]
    if h_players != h_p_prev or a_players != a_p_prev:
        # Substitution: close the current stint and start a new one from the
        # current score/clock.
        possessions_og.append((h_p_prev, a_p_prev, p_counter,
                               h_score - hs_counter, a_score - as_counter))
        minutes_og.append((h_p_prev, a_p_prev, seconds_elapsed - s_counter,
                           h_score - hs_counter, a_score - as_counter))
        p_counter = 1
        s_counter = seconds_elapsed
        as_counter = a_score
        hs_counter = h_score
        #break
    h_p_prev = h_players
    a_p_prev = a_players
    shot_made = row[49]
    is_d_rebound = row[56]
    is_turnover = row[59]
    if (shot_made == 1) or is_d_rebound or is_turnover:
        # A made shot, defensive rebound or turnover each end a possession.
        #print(row[0]+2,h_score,a_score,row[39])
        p_counter += 1
# -

number_of_min_examples = 0  # NOTE(review): never read afterwards

# Keep stints of at least 20 seconds (minutes) / more than one possession.
minutes = [_ for _ in minutes_og if _[-3] >= 20]
possessions = [_ for _ in possessions_og if _[-3] > 1]
len(minutes), len(possessions)

# Fit the model on the time-weighted stints.
TARGET = minutes

# Collect every player appearing in either lineup of any stint.
players = set()
for mins in TARGET:
    players = players.union(set(mins[0])).union(set(mins[1]))

players_list = sorted(list(players))
players_idx = {k: i for i, k in enumerate(players_list)}
n_players = len(players_list)
players_idx['<NAME>']

# +
# Build the sparse design matrix for a three-part regression:
#   rows [0, ROWS)         home scoring rate (home offense vs away defense)
#   rows [ROWS, 2*ROWS)    away scoring rate (away offense vs home defense)
#   rows [2*ROWS, 3*ROWS)  point margin per minute
# Columns: [0, n_players) offense ratings, [n_players, 2*n_players) defense
# ratings, plus two trailing home/away bias columns.
# NOTE(review): this column interpretation is inferred from how clf.coef_ is
# sliced below — confirm.  Also note that in the margin rows BOTH teams'
# players get +1 in both halves; a margin model usually gives the two teams
# opposite signs — verify intent.
X_dict = {}
ROWS = len(TARGET)
y_target = np.zeros(3 * ROWS)
a_avec = np.zeros(3 * ROWS)
for i, mins in enumerate(TARGET):
    amount = mins[-3] / 60  # seconds to minutes
    h_value = mins[-2]      # home points scored in the stint
    a_value = mins[-1]      # away points scored in the stint
    pos_fac = 0.5
    for hp in mins[0]:
        X_dict[(i, players_idx[hp])] = pos_fac
        X_dict[(ROWS + i, n_players + players_idx[hp])] = pos_fac
        X_dict[(2 * ROWS + i, n_players + players_idx[hp])] = 1
        X_dict[(2 * ROWS + i, players_idx[hp])] = 1
    for ap in mins[1]:
        X_dict[(i, n_players + players_idx[ap])] = pos_fac
        X_dict[(ROWS + i, players_idx[ap])] = pos_fac
        X_dict[(2 * ROWS + i, players_idx[ap])] = 1
        X_dict[(2 * ROWS + i, n_players + players_idx[ap])] = 1
    # Targets are points per minute (scoring rows) and margin per minute.
    y_target[i] = pos_fac * h_value / amount
    y_target[ROWS + i] = pos_fac * a_value / amount
    y_target[2 * ROWS + i] = (h_value - a_value) / amount
    # Tiny constant columns encoding home/away advantage.
    HFA_BIAS = 1e-3
    X_dict[(i, n_players * 2)] = HFA_BIAS  #amount
    X_dict[(ROWS + i, n_players * 2 + 1)] = -HFA_BIAS  #amount
    X_dict[(2 * ROWS + i, n_players * 2 + 1)] = HFA_BIAS  #amount
    #print(h_value,a_value,amount,hp)
    a_avec[i] = 0
    a_avec[i + ROWS] = 0
# -

import scipy.sparse

# Materialise the accumulated entries and convert to CSR for the solver.
X = scipy.sparse.dok_matrix((ROWS * 3, n_players * 2 + 2))
X._update(X_dict)  # NOTE(review): private scipy API used as a fast bulk load
X = scipy.sparse.csr_matrix(X)

# Distribution of per-column totals (roughly, how much each player appears).
_ = plt.hist(np.array(X.sum(0))[0], 100)

from sklearn import linear_model

# +
# Ridge regression over all three row groups; coefficients are scaled to a
# per-36-minute rating.
clf = linear_model.Ridge(alpha=1000, fit_intercept=False)
clf.fit(X, y_target)
scale = 36
# Offense ratings are the first n_players coefficients; defense ratings are
# the next n_players (sign flipped so that bigger is better); the last two
# are the home/away bias terms.
opm = clf.coef_[:clf.coef_.shape[0] // 2 - 1] * scale
dpm = -clf.coef_[clf.coef_.shape[0] // 2 - 1:-2] * scale
t_score = opm + dpm
_ = plt.hist(opm, 50, alpha=0.5, label='off')
_ = plt.hist(dpm, 50, alpha=0.5, label='def')
_ = plt.hist(t_score, 50, alpha=0.5, label='tot')
plt.legend()
scale * clf.coef_[-2], scale * clf.coef_[-1]
# -

# Print every player, best total rating first.
v = np.argsort(t_score)[::-1]
for i in v:
    #try:
    name = players_list[i]
    #if name in ['<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>']:
    print('{:30s} {:.2f} {:.2f}\t{:.2f}'.format(name, opm[i], dpm[i], t_score[i]))
build_apm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Evaluate 7
#
# Training convergence figures.

import pickle
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sys_simulator.general import load_with_pickle, sns_confidence_interval_plot
from copy import deepcopy
import os

# +
EXP_NAME = 'evaluate7'
# ddpg
ALGO_NAME = 'ddpg'
filepath = "/home/lucas/dev/sys-simulator-2/data/ddpg/evaluate1_nonoise/20210609-163037/log.pickle"
# ddpg fair
# ALGO_NAME = 'ddpg-fair'
filepath_fair = "/home/lucas/dev/sys-simulator-2/data/ddpg/evaluate1_nonoise/20210613-190001/log.pickle"
# dql
# ALGO_NAME = 'dql'
# filepath = "/home/lucas/dev/sys-simulator-2/data/dql/evaluate2/20210524-232333/log.pickle"
# a2c
# ALGO_NAME = 'a2c'
# filepath = "/home/lucas/dev/sys-simulator-2/data/a2c/evaluate2/20210524-224905/log.pickle"
# output path
OUTPUT_PATH = f'/home/lucas/dev/sys-simulator-2/figs/{EXP_NAME}/{ALGO_NAME}'
data = load_with_pickle(filepath)
data_fair = load_with_pickle(filepath_fair)
# -

data.keys()

mue_sinrs = np.array(data['mue_sinrs'])
mue_sinrs.shape

d2d_sinrs = np.array(data['d2d_sinrs'])
d2d_sinrs_fair = np.array(data_fair['d2d_sinrs'])
d2d_sinrs.shape

mue_tx_powers = np.array(data['mue_tx_powers'])
mue_tx_powers.shape

d2d_tx_powers = np.array(data['d2d_tx_powers'])
d2d_tx_powers_fair = np.array(data_fair['d2d_tx_powers'])
d2d_tx_powers.shape

ctbs = np.array(data['channels_to_bs'])
ctbs.shape

d2d_tx_powers[0]

data['trajectories'].keys()

data['trajectories']['DUE.TX:1'][-10:]

mue_availability = np.array(data['mue_availability'])
mue_availability.shape

# ## Fonts config

x_font = {
    'family': 'serif',
    'color': 'black',
    'weight': 'normal',
    'size': 16,
}
y_font = {
    'family': 'serif',
    'color': 'black',
    'weight': 'normal',
    'size': 16,
}
ticks_font = {
    'fontfamily': 'serif',
    'fontsize': 13
}
legends_font = {
    'size': 13,
    'family': 'serif'
}


def finish_figure(ylabel, fig_name, yticks=None, legend=False):
    """Apply shared styling to the current figure, save it, and show it.

    Replaces the identical label/tick/print/savefig/convert/show boilerplate
    that was previously copy-pasted into every plotting cell below.

    ylabel   -- y-axis label text (x-axis is always 'Steps').
    fig_name -- file basename (no extension) under OUTPUT_PATH; the figure is
                saved as SVG and converted to EPS via ImageMagick.
    yticks   -- explicit y-tick positions, or None for default ticks.
    legend   -- whether to draw a legend using the shared legend font.
    """
    plt.xlabel('Steps', fontdict=x_font)
    plt.ylabel(ylabel, fontdict=y_font)
    plt.xticks(**ticks_font)
    if yticks is None:
        plt.yticks(**ticks_font)
    else:
        plt.yticks(yticks, **ticks_font)
    if legend:
        plt.legend(prop=legends_font)
    svg_path = f'{OUTPUT_PATH}/{fig_name}.svg'
    eps_path = f'{OUTPUT_PATH}/{fig_name}.eps'
    print(svg_path)
    # save fig
    plt.savefig(svg_path)
    os.system(f'magick convert {svg_path} {eps_path}')
    plt.show()


# ## MUE SINR

x = list(range(mue_sinrs.shape[0]))

plt.figure(figsize=(10, 7))
sns.lineplot(x=x, y=mue_sinrs.reshape(-1))
finish_figure('MUE SINR [dB]', 'mue-sinr')

# # MUE Tx Power

plt.figure(figsize=(10, 7))
sns.lineplot(x=x, y=mue_tx_powers.reshape(-1))
finish_figure('MUE Tx Power [dBW]', 'mue-tx-power')

# ## D2D SINR

plt.figure(figsize=(10, 7))
sns.lineplot(x=x, y=d2d_sinrs[:, 0].reshape(-1), label='Device 0')
sns.lineplot(x=x, y=d2d_sinrs[:, 1].reshape(-1), label='Device 1')
# BUGFIX: the fair-policy curves previously reused the labels 'Device 0' /
# 'Device 1', leaving four indistinguishable legend entries; label them as in
# the 'd2d-tx-power-fair' figure below.
sns.lineplot(x=x, y=d2d_sinrs_fair[:, 0].reshape(-1), label='Device 0 - Fair')
sns.lineplot(x=x, y=d2d_sinrs_fair[:, 1].reshape(-1), label='Device 1 - Fair')
finish_figure('D2D SINR [dB]', 'd2d-sinr', legend=True)

# ## D2D Tx Power

plt.figure(figsize=(10, 7))
sns.lineplot(x=x, y=d2d_tx_powers[:, 0].reshape(-1), label='Device 0')
sns.lineplot(x=x, y=d2d_tx_powers[:, 1].reshape(-1), label='Device 1')
finish_figure('D2D Tx Power [dBW]', 'd2d-tx-power',
              yticks=[-120, -100, -80, -70, -60, -50], legend=True)

# ## D2D Tx Power Fair

plt.figure(figsize=(10, 7))
sns.lineplot(x=x, y=d2d_tx_powers[:, 0].reshape(-1), label='Device 0')
sns.lineplot(x=x, y=d2d_tx_powers[:, 1].reshape(-1), label='Device 1')
sns.lineplot(x=x, y=d2d_tx_powers_fair[:, 0].reshape(-1), label='Device 0 - Fair')
sns.lineplot(x=x, y=d2d_tx_powers_fair[:, 1].reshape(-1), label='Device 1 - Fair')
finish_figure('D2D Tx Power [dBW]', 'd2d-tx-power-fair',
              yticks=[-120, -100, -80, -70, -60, -50], legend=True)

# ## Channels to BS

plt.figure(figsize=(10, 7))
sns.lineplot(x=x, y=ctbs[:, 0].reshape(-1), label='Device 0')
sns.lineplot(x=x, y=ctbs[:, 1].reshape(-1), label='Device 1')
finish_figure('Channel Loss [dB]', 'channels-to-bs', legend=True)

# ## MUE availability

plt.figure(figsize=(10, 7))
sns.lineplot(x=x, y=mue_availability.reshape(-1))
finish_figure('MUE availability', 'mue-availability', yticks=[0.0, 1.0])

# ## Avg mue availability

mue_availability.mean()
notebooks/figs-notebooks/evaluate7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.4 64-bit (''.venv'': venv)' # name: python37464bitvenvvenv49d3639ad28a4587b20fb040872df1af # --- # Imports # ----------- from arus import dataset from arus import developer # + # process raw dataset # --------------------------------- developer.set_default_logging() dataset.process_dataset('spades_lab', approach='muss') # -
examples/dataset/process_raw_dataset.ipynb