code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: python3.6 # language: python # name: python3.6 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt # ## 1. Sampling Interval # ### T_acc # !ls # Sampling Interval for Acceleration T_acc = pd.read_csv('1.csv', header = None, index_col=None) T_acc # ### T_mg # Sampling Interval for Mag/Gyro T_mg = pd.read_csv('data/Asyn2/Tmag.csv', header = None, index_col = None).values[0][0:1408] # ## 2. Tima Stamp for Plots # TimeStamp for Acceleration t_acc = pd.read_csv('data/Asyn2/tdacc.csv', header = None, index_col = None).values[0][0:1408] # TimeStamp for Mag/Gyro t_mg = pd.read_csv('data/Asyn2/tdmag.csv', header = None, index_col = None).values[0][0:1408] # TimeStamp for Integrate t = pd.read_csv('data/Asyn2/td3.csv', header = None, index_col = None).values[0][0:2813] len(T) for i in range(2,2799): T[i] = t[i] - t[i-1] # Time Interval for Integrate T = pd.read_csv('data/Asyn2/T3.csv', header = None, index_col = None).values[0][0:2813] T # ## 4. 
Ground Truth # Ground truth for acc along x axis gt_accx = pd.read_csv('data/Asyn2/gtaccx.csv', header = None, index_col = None).values[0][0:2813] # Ground truth for acc along y axis gt_accy = pd.read_csv('data/Asyn2/gtaccy.csv', header = None, index_col = None).values[0][0:2813] # Ground truth for velocity along x axis gt_velx = pd.read_csv('data/Asyn2/gtxv.csv', header = None, index_col = None).values[0][0:2813] # Ground truth for velocity along y axis gt_vely = pd.read_csv('data/Asyn2/gtyv.csv', header = None, index_col = None).values[0][0:2813] # Ground truth for pos along x axis gt_x = pd.read_csv('data/Asyn2/gtx.csv', header = None, index_col = None).values[0][0:2813] - 1.95 # Ground truth for pos along y axis gt_y = pd.read_csv('data/Asyn2/gty.csv', header = None, index_col = None).values[0][0:2813] - 1.81 # Ground truth for orientation gt_w = pd.read_csv('data/Asyn2/gtw.csv', header = None, index_col = None).values[0][0:2813] len(gt_accx) # ## 3. Estimation Results # Estimated acc along x axis accx = pd.read_csv('data/Asyn2/accx.csv', header = None, index_col = None).values[0][0:2813] # Estimated acc along y axis accy = pd.read_csv('data/Asyn2/accy.csv', header = None, index_col = None).values[0][0:2813] # Estimated orientation along z axis w = pd.read_csv('data/Asyn2/w.csv', header = None, index_col = None).values[0][0:2813] len(accx) # ## 4. 
Comparision of Estimated Acceleration vs Ground Truth # + plt.subplot(121) plt.plot(t, accx) plt.title('estiamted accx') plt.xlabel('time [s]') plt.ylabel('x acc [m/s^2]') plt.subplot(122) plt.plot(t, gt_accx) plt.title('ground truth accx') plt.xlabel('time [s]') # + plt.subplot(121) plt.plot(t, accy) plt.title('estiamted accy') plt.xlabel('time [s]') plt.ylabel('y acc [m/s^2]') plt.subplot(122) plt.plot(t, gt_accy) plt.title('ground truth accy') plt.xlabel('time [s]') # - # # Double Integration # $$v[i] = v[i-1] + \hat{acc}[i-1]\times T[i]$$ # $$p[i] = v[i] \times T[i] + \frac{1}{2}*\hat{acc}[i] \times T[i]^2$$ # ## 1. x pos # ### 1.1 Double Integration of Estimated Acceleration # + xx = np.zeros(2813) vx = np.zeros(2813) vx[0] = -0.3*3.14 vx[1] = -0.3*3.14 #accx[1]*T_acc[1] xx[1] = 0.5*accx[1]*T[1]**2 for i in range(2,2813): vx[i] = accx[i-1]*T[i] + vx[i-1] xx[i] = vx[i]*T[i] + 0.5*accx[i]*T[i]**2 + xx[i-1] # - # ### 1.2 Double Integartion of Ground Truth Acceleration # + gxx = np.zeros(2813) gvx = np.zeros(2813) gvx[0] = -0.3*3.14 gvx[1] = -0.3*3.14#gt_accx[1]*T_acc[1] gxx[1] = 0.5*gt_accx[1]*T[1]**2 for i in range(2,2813): gvx[i] = gt_accx[i-1]*T[i] + gvx[i-1] gxx[i] = gvx[i]*T[i] + 0.5*gt_accx[i]*T[i]**2 + gxx[i-1] # - # ### 1.3 Comparision of x Position plt.figure() plt.plot(t,vx) plt.plot(t,gvx) plt.plot(t,gt_velx) plt.ylim([-1.5, 1.0]) plt.legend(['estimated acc integrated velocity','ground truth acc integrated velocity','ground truth velocity']) plt.title('integrated velocity x axis') plt.ylabel('x velocity [m/s]') plt.xlabel('time [s]') plt.figure() plt.plot(t,xx) plt.plot(t,gxx) plt.plot(t,gt_x) plt.ylim([-.4, .5]) plt.legend(['double integration position','ground truth acc double integration','ground truth acceleration']) plt.title('double integrated pos x axis') plt.ylabel('x position [m]') plt.xlabel('time [s]') # ## 2. 
y pos # ### 2.1 Double Integration of Estimated Acceleration yy = np.zeros(2813) vy = np.zeros(2813) for i in range(2,2813): vy[i] = accy[i-1]*T[i] + vy[i-1] yy[i] = vy[i]*T[i] + 0.5*accy[i]*T[i]**2 + yy[i-1] # + gyy = np.zeros(2813) gvy = np.zeros(2813) gvy[1] = gt_accy[1]*T_acc[1] for i in range(2,2813): gvy[i] = gt_accy[i-1]*T[i] + gvy[i-1] gyy[i] = gvy[i]*T[i] + 0.5*gt_accy[i]*T[i]**2 + gyy[i-1] # - plt.plot() plt.plot(t,vy) plt.plot(t,gvy) plt.plot(t,gt_vely) plt.ylim([-1.0, 1.5]) plt.legend(['estimated velocity','ground truth acc integration','ground truth velocity']) plt.title('integrated velocity y axis') plt.ylabel('y velocity [m/s]') plt.xlabel('time [s]') plt.plot() plt.plot(t,yy) plt.plot(t,gyy) plt.plot(t,gt_y) plt.ylim([-.6, .1]) plt.title('double integrated pos y axis') plt.legend(['double integration position','ground truth acc double integration','ground truth']) plt.ylabel('y position [m]') plt.xlabel('time [s]') # ## Visualization # + import numpy as np import matplotlib.animation as animation fig = plt.figure() def main(): numframes = 1404 numpoints = 3 scat = plt.scatter(xx, yy, s=4) scat2 = plt.scatter(gxx, gyy, s=4) scat3 = plt.scatter(gt_x, gt_y, s=4) ani = animation.FuncAnimation(fig, update_plot, frames=xrange(numframes),fargs=(scat)) ani = animation.FuncAnimation(fig, update_plot, frames=xrange(numframes),fargs=(scat2)) ani = animation.FuncAnimation(fig, update_plot, frames=xrange(numframes),fargs=(scat3)) plt.legend(['double integrated estimated acc','double integrated ground truth acc','ground truth']) def update_plot(i, data, scat): scat.set_array(data[i]) return scat, main() plt.scatter(xx[1],yy[1],c='purple') plt.scatter(xx[-1],yy[-1],c='purple') plt.scatter(gxx[-1],gyy[-1],c='orange') plt.scatter(gt_x[-1],gt_y[-1],c='green') plt.xlabel('x [m]') plt.ylabel('y [m]') # - # # Indicators - How Good is the Estimation in Comparision # ## Position Error # ### 1. 
Same Point Distance_Error = np.sqrt((xx[-1] - xx[0])**2+(yy[-1] - yy[0])**2) Distance_Error xplot = [xx[1],xx[-1]] yplot = [yy[1],yy[-1]] plt.scatter(xplot[0],yplot[0],c='blue') plt.scatter(xplot[1],yplot[1],c='red') plt.legend(['Start point','End point']) plt.plot(xplot,yplot) plt.text(-0, 0, 'Position Error: 0.125 m', bbox=dict(facecolor='red', alpha=0.5)) plt.xlabel('x [m]') plt.ylabel('y [m]') plt.title('Position Error Based on Same Point') # ### 2. Reference Line gxx def maxDistance(xx, yy, rxx, ryy): err = 0 err_dis = 0 distance = 0 for i in range(len(xx)): err += (xx[i]-rxx[i])**2+(yy[i]-ryy[i])**2 err_dis += err if i > 0: distance += np.sqrt((rxx[i]-rxx[i-1])**2 + (ryy[i]-ryy[i-1])**2); err = np.sqrt(err/len(xx)) print('err', err) print('ref', distance) return err # ## Result from Defined Function error = maxDistance(xx, yy, gxx, gyy) # ## RMS Error from Built-in Function # + from sklearn.metrics import mean_squared_error from math import sqrt import math rms1 = sqrt(mean_squared_error(xx, gt_x)) rms2 = sqrt(mean_squared_error(yy, gt_y)) rms = sqrt(rms1**2 + rms2**2) # - rms # ## The RMS Error Start-End, Defined and Built-in Function all 0.05 m # ## Weighted RMS Error # $$RMS_{Final} = a \times RMS_{Reference} + (1-a) \times RMS_{SamePoint}$$ rms = 0.7*rms + 0.3*Distance_Error rms # ## Percentage percentage = rms/(2*math.pi*0.3) percentage "{0:.2%}".format(percentage) # ## Final Accuracy of Poisition # ## N = 3: # $$0.47\%$$ # ## N = 5: # $$0.60\%$$
PrePhase/Measurement.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # L2 Regularization # # The standard way to avoid overfitting is called **L2 regularization**. It consists of appropriately modifying your cost function, from: # $$J = -\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} \tag{1}$$ # To: # $$J_{regularized} = \small \underbrace{-\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} }_\text{cross-entropy cost} + \underbrace{\frac{1}{m} \frac{\lambda}{2} \sum\limits_l\sum\limits_k\sum\limits_j W_{k,j}^{[l]2} }_\text{L2 regularization cost} \tag{2}$$ # # $\sum\limits_k\sum\limits_j W_{k,j}^{[l]2}$ , is `np.sum(np.square(Wl))`, then # $$J_{regularized} = \small \underbrace{-\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} }_\text{cross-entropy cost} + \underbrace{\frac{1}{m} \frac{\lambda}{2} \sum\limits_lnp.sum(np.square(Wl)) }_\text{L2 regularization cost} \tag{2}$$ # # ## Forward and Backward propagation # Forward Propagation: SAME with normal network, only cost function is different # - Initialize W1, b1, W2, b2, W3, b3 # - Get X # - Compute $A1 = (W1 * X + b1)$ # - Compute $Z1 = ReLU(A1)$ # - Compute $A2 = (W2 * A1 + b2)$ # - Compute $Z2 = ReLU(A2)$ # - Compute $A3 = (W3 * A2 + b3)$ # - Compute $Z3 = sigmoid(A2)$ # - Calculate cost function $J = - \large\left(\small Y\log\left(A2\right) + (1-Y\log\left(1- A2\right) \large \right) \small + lambd * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3))) / ( 2 * m )$ # # Backward Propagation: # - Calculate $dZ3 = A3 - Y$ # - Calculate $dW3 = \large \frac{1}{m} \small 
(dZ2 * A2^T) + \large \frac{lambda}{m} * \small W3$ # - Calculate $db2 = \large \frac{1}{m} \small sum(dZ2)$ # - Calculate $dA2 = W3^T * dZ3Y$ # - Calculate $dZ2 = (W3^T, dZ3) * g^{[2]'}(Z2)$ # - Calculate $dW2 = \large \frac{1}{m} \small (dZ2 * A1^T) + \large \frac{lambda}{m} * \small W2$ # - Calculate $db2 = \large \frac{1}{m} \small sum(dZ2)$ # - Calculate $dZ1 = (W2^T, dZ2) * g^{[1]'}(Z1)$ # - Calculate $dW1 = \large \frac{1}{m} \small (dZ1 * X^T) + \large \frac{lambda}{m} * \small W1$ # - Calculate $db1 = \large \frac{1}{m} \small sum(d12)$ # # # Dropout # # Finally, **dropout** is a widely used regularization technique that is specific to deep learning. # **It randomly shuts down some neurons in each iteration.** Watch these two videos to see what this means! # # <!-- # To understand drop-out, consider this conversation with a friend: # - Friend: "Why do you need all these neurons to train your network and classify images?". # - You: "Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more featurse my model learns!" # - Friend: "I see, but are you sure that your neurons are learning different features and not all the same features?" # - You: "Good point... Neurons in the same layer actually don't talk to each other. It should be definitly possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution." # !--> # # # <center> # <video width="620" height="440" src="images/dropout1_kiank.mp4" type="video/mp4" controls> # </video> # </center> # <br> # <caption><center> <u> Figure 2 </u>: Drop-out on the second hidden layer. <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep\_prob$ or keep it with probability $keep\_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. 
</center></caption> # # <center> # <video width="620" height="440" src="images/dropout2_kiank.mp4" type="video/mp4" controls> # </video> # </center> # # <caption><center> <u> Figure 3 </u>: Drop-out on the first and third hidden layers. <br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </center></caption> # # # When you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time. # # Examples # # Regularization # + # import packages import numpy as np import matplotlib.pyplot as plt from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters import sklearn import sklearn.datasets import scipy.io from testCases import * # %matplotlib inline plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # - # **Problem Statement**: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head. # # <img src="images/field_kiank.png" style="width:600px;height:350px;"> # <caption><center> <u> **Figure 1** </u>: **Football field**<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption> # # # They give you the following 2D dataset from France's past 10 games. 
train_X, train_Y, test_X, test_Y = load_2D_dataset() # ## 1 - Non-regularized model # Following neural network can be used: # - in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use "`lambd`" instead of "`lambda`" because "`lambda`" is a reserved keyword in Python. # - in *dropout mode* -- by setting the `keep_prob` to a value less than one # # You will first try the model without any regularization. Then, you will implement: # - *L2 regularization* -- functions: "`compute_cost_with_regularization()`" and "`backward_propagation_with_regularization()`" # - *Dropout* -- functions: "`forward_propagation_with_dropout()`" and "`backward_propagation_with_dropout()`" # def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1): grads = {} costs = [] # to keep track of the cost m = X.shape[1] # number of examples layers_dims = [X.shape[0], 20, 3, 1] # Initialize parameters dictionary. parameters = initialize_parameters(layers_dims) # Loop (gradient descent) for i in range(0, num_iterations): # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID. if keep_prob == 1: a3, cache = forward_propagation(X, parameters) elif keep_prob < 1: a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob) # Cost function if lambd == 0: cost = compute_cost(a3, Y) else: cost = compute_cost_with_regularization(a3, Y, parameters, lambd) # Backward propagation. assert(lambd==0 or keep_prob==1) # it is possible to use both L2 regularization and dropout, # but this assignment will only explore one at a time if lambd == 0 and keep_prob == 1: grads = backward_propagation(X, Y, cache) elif lambd != 0: grads = backward_propagation_with_regularization(X, Y, cache, lambd) elif keep_prob < 1: grads = backward_propagation_with_dropout(X, Y, cache, keep_prob) # Update parameters. 
parameters = update_parameters(parameters, grads, learning_rate) # Print the loss every 10000 iterations if print_cost and i % 10000 == 0: print("Cost after iteration {}: {}".format(i, cost)) if print_cost and i % 1000 == 0: costs.append(cost) # plot the cost plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (x1,000)') plt.title("Learning rate =" + str(learning_rate)) plt.show() return parameters # Let's train the model without any regularization, and observe the accuracy on the train/test sets. parameters = model(train_X, train_Y) print ("On the training set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) # The train accuracy is 94.8% while the test accuracy is 91.5%. This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model. plt.title("Model without regularization") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting. 
# ## 2 - L2 Regularization # + # GRADED FUNCTION: compute_cost_with_regularization def compute_cost_with_regularization(A3, Y, parameters, lambd): m = Y.shape[1] W1 = parameters["W1"] W2 = parameters["W2"] W3 = parameters["W3"] cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost L2_regularization_cost = lambd * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3))) / ( 2 * m ) cost = cross_entropy_cost + L2_regularization_cost return cost # + A3, Y_assess, parameters = compute_cost_with_regularization_test_case() print("cost = " + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1))) # - # **Expected Output**: # # <table> # <tr> # <td> # **cost** # </td> # <td> # 1.78648594516 # </td> # # </tr> # # </table> # + # GRADED FUNCTION: backward_propagation_with_regularization def backward_propagation_with_regularization(X, Y, cache, lambd): m = X.shape[1] (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache dZ3 = A3 - Y dW3 = 1./m * np.dot(dZ3, A2.T) + lambd * W3 / m db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True) dA2 = np.dot(W3.T, dZ3) dZ2 = np.multiply(dA2, np.int64(A2 > 0)) dW2 = 1./m * np.dot(dZ2, A1.T) + lambd * W2 / m db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True) dA1 = np.dot(W2.T, dZ2) dZ1 = np.multiply(dA1, np.int64(A1 > 0)) dW1 = 1./m * np.dot(dZ1, X.T) + lambd * W1 / m db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True) gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1} return gradients # + X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case() grads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7) print ("dW1 = "+ str(grads["dW1"])) print ("dW2 = "+ str(grads["dW2"])) print ("dW3 = "+ str(grads["dW3"])) # - parameters = model(train_X, train_Y, lambd = 0.7) print ("On the train set:") predictions_train = predict(train_X, 
train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) # Congrats, the test set accuracy increased to 93%. You have saved the French football team! # # You are not overfitting the training data anymore. Let's plot the decision boundary. plt.title("Model with L2-regularization") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # ## 3 - Dropout # ### 3.1 - Forward propagation with dropout # # **Exercise**: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer. # # **Instructions**: # You would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps: # 1. In lecture, we dicussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using `np.random.rand()` to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ of the same dimension as $A^{[1]}$. # 2. Set each entry of $D^{[1]}$ to be 0 with probability (`1-keep_prob`) or 1 with probability (`keep_prob`), by thresholding values in $D^{[1]}$ appropriately. Hint: to set all the entries of a matrix X to 0 (if entry is less than 0.5) or 1 (if entry is more than 0.5) you would do: `X = (X < 0.5)`. Note that 0 and 1 are respectively equivalent to False and True. # 3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values. # 4. Divide $A^{[1]}$ by `keep_prob`. By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. 
(This technique is also called inverted dropout.) # + # GRADED FUNCTION: forward_propagation_with_dropout def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5): np.random.seed(1) # retrieve parameters W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] W3 = parameters["W3"] b3 = parameters["b3"] # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID Z1 = np.dot(W1, X) + b1 A1 = relu(Z1) D1 = np.random.rand(A1.shape[0], A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...) D1 = D1 < keep_prob # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold) A1 = np.multiply(A1, D1) # Step 3: shut down some neurons of A1 A1 = A1 / keep_prob # Step 4: scale the value of neurons that haven't been shut down Z2 = np.dot(W2, A1) + b2 A2 = relu(Z2) D2 = np.random.rand(A2.shape[0], A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...) D2 = D2 < keep_prob # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold) A2 = np.multiply(A2, D2) # Step 3: shut down some neurons of A2 A2 = A2 / keep_prob # Step 4: scale the value of neurons that haven't been shut down Z3 = np.dot(W3, A2) + b3 A3 = sigmoid(Z3) cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) return A3, cache # + X_assess, parameters = forward_propagation_with_dropout_test_case() A3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7) print ("A3 = " + str(A3)) # - # **Expected Output**: # # <table> # <tr> # <td> # **A3** # </td> # <td> # [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]] # </td> # # </tr> # # </table> # ### 3.2 - Backward propagation with dropout # # **Exercise**: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache. 
# # **Instruction**: # Backpropagation with dropout is actually quite easy. You will have to carry out 2 Steps: # 1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to `A1`. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to `dA1`. # 2. During forward propagation, you had divided `A1` by `keep_prob`. In backpropagation, you'll therefore have to divide `dA1` by `keep_prob` again (the calculus interpretation is that if $A^{[1]}$ is scaled by `keep_prob`, then its derivative $dA^{[1]}$ is also scaled by the same `keep_prob`). # # + # GRADED FUNCTION: backward_propagation_with_dropout def backward_propagation_with_dropout(X, Y, cache, keep_prob): m = X.shape[1] (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache dZ3 = A3 - Y dW3 = 1./m * np.dot(dZ3, A2.T) db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True) dA2 = np.dot(W3.T, dZ3) dA2 = dA2 * D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation dA2 = dA2 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down dZ2 = np.multiply(dA2, np.int64(A2 > 0)) dW2 = 1./m * np.dot(dZ2, A1.T) db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True) dA1 = np.dot(W2.T, dZ2) dA1 = dA1 * D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation dA1 = dA1 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down dZ1 = np.multiply(dA1, np.int64(A1 > 0)) dW1 = 1./m * np.dot(dZ1, X.T) db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True) gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1} return gradients # + X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case() gradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8) print ("dA1 = " + str(gradients["dA1"])) print ("dA2 = " + 
str(gradients["dA2"])) # + parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3) print ("On the train set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) # - plt.title("Model with dropout") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # **Here are the results of our three models**: # # <table> # <tr> # <td> # **model** # </td> # <td> # **train accuracy** # </td> # <td> # **test accuracy** # </td> # # </tr> # <td> # 3-layer NN without regularization # </td> # <td> # 95% # </td> # <td> # 91.5% # </td> # <tr> # <td> # 3-layer NN with L2-regularization # </td> # <td> # 94% # </td> # <td> # 93% # </td> # </tr> # <tr> # <td> # 3-layer NN with dropout # </td> # <td> # 93% # </td> # <td> # 95% # </td> # </tr> # </table>
b_L2_Regularization_and_Dropout.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dev Setups: Connecting Python and SQL # # The purpose of this Jupyter notebook is to demonstrate the usefulness of connecting python to a relational database by using a python toolkit called SQLAlchemy. This tutorial follows the previous document, *** Testing Python and Data Science basic stack *** # # **This notebook is for Mac OS and Windows specific instructions. See `DS_sql_dev_setup_linux.ipynb` for Linux.** # ### First off, what is a relational database? # # Basically, it is a way to store information such that information can be retrieved from it. # # MySQL and PostgreSQL are examples of relational databases. For the purposes of an Insight project, you can use either one. # # Why would you use a relational database instead of a csv or two? # # **A few reasons:** # # - They scale easily # # - They are easy to query # # - It’s possible to do transactions in those cases where you need to write to a database, not just read from it # # - Everyone in industry uses them, so you should get familiar with them, too. # # ***What does a relational database look like? *** # # We can take a look. First we need to set up a few things. The first thing we want to do is to get a PostgreSQL server up and running. # # # # ## Postgres Installation # # **Mac OS installation:** # Go to http://postgresapp.com/ and follow the three steps listed in the Quick Installation Guide. # # **Windows OS installation:** # Go to https://www.postgresql.org/download/windows/ to download the installer. # **If you're on a mac, you might need to add psql to PATH in order to interact with Postgres in the Terminal more easily**. 
See this website for info on bash profiles and PATH: https://hathaway.cc/2008/06/how-to-edit-your-path-environment-variables-on-mac/<br> # # **Edit your .bash_profile in your home directory. Since you already installed Anaconda, it should look something like:**<br> # ```export PATH="/Users/YOUR_USER_NAME/anaconda/bin:$PATH"``` # # **Right below the line added by anaconda you can add this line:**<br> # # ```export PATH="/Applications/Postgres.app/Contents/Versions/latest/bin:$PATH"``` # # **Save and reload the bash profile in Terminal**<br> # ``` source .bash_profile``` # # # ## Start your postgresql server # # **There are multiple ways to launch a postgres server. For now, let's stick with the following. ** # # **The only user right now for PSQL is 'postgres', you can make your database and enter it with that username in the terminal.** We're using a dataset on births, so we'll call it birth_db. <br> # ``` createdb birth_db -U postgres```<br> # ``` psql birth_db``` # # **If you want to make a new user for this database you can make one now. # Note: username in the below line must match your Mac/Linux username:**<br> # ``` CREATE USER username SUPERUSER PASSWORD '<PASSWORD>'```<br> # # **Exit out of PSQL (\q) and test logging in through this user:**<br> # ``` psql birth_db -h localhost -U username```<br> # ``` \c ``` (once in PSQL to check how you're logged in)<br> # # ## Set up SQLalchemy # In jupyter you can run code in the command line with the "!" special character as you'll see in the next cell. We do this here for ease but it's generally considered poor practice. Run the following commands to install the necessary packages for python to talk to a sql database. 
# !pip install sqlalchemy_utils
# !conda install psycopg2 -y

from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
import psycopg2
import pandas as pd

# ## Define and populate a database
# Run the cells below to define your new database and populate it with data from the included CSV file.

# Define a database name
# Set your postgres username
dbname = 'birth_db'
username = 'ccl'  # change this to your username

## 'engine' is a connection to a database
## Here, we're using postgres, but sqlalchemy can connect to other things too.
## NOTE: SQLAlchemy 1.4+ accepts only the 'postgresql://' dialect name; the
## legacy 'postgres://' spelling was removed ('postgresql://' also works on
## older SQLAlchemy versions, so this is backward-compatible).
engine = create_engine('postgresql://%s@localhost/%s' % (username, dbname))
print(engine.url)

## create a database (if it doesn't exist)
if not database_exists(engine.url):
    create_database(engine.url)
print(database_exists(engine.url))

# read a database from the included CSV and load it into a pandas dataframe
# you may need to add a path in the command below if you aren't working in the same directory you saved the CSV
birth_data = pd.read_csv('births2012_downsampled.csv', index_col=0)

## insert data into database from Python (proof of concept - this won't be useful for big data, of course)
birth_data.to_sql('birth_data_table', engine, if_exists='replace')

# The above line (to_sql) is doing a lot of heavy lifting.  It's reading a dataframe, it's creating a table, and adding the data to the table.  So **SQLAlchemy is quite useful!**

# ## Working with PostgreSQL without Python
#
# **Open up the PostgreSQL app, click on the database you just created,** <br>
#
# or alternatively type <br>
#
# ``` psql -h localhost ``` <br>
# ``` \c birth_db ```
#
#
# into the command line

# **You should see something like the following**
#
# `You are now connected to database "birth_db" as user "April".`
#
#
# **Then try the following query:**
#
# ``` SELECT * FROM birth_data_table; ```
#
# Note that the semi-colon indicates an end-of-statement.

# ### You can see the table we created!  But it's kinda ugly and hard to read.

# Press q in your terminal at any time to get back to the command line.
#
# Try a few other sample queries. Before you type in each one, ask yourself what you think the output will look like:
#
# `SELECT * FROM birth_data_table WHERE infant_sex='M';`
#
# `SELECT COUNT(infant_sex) FROM birth_data_table WHERE infant_sex='M';`
#
# `SELECT COUNT(gestation_weeks), infant_sex FROM birth_data_table WHERE infant_sex = 'M' GROUP BY gestation_weeks, infant_sex;`
#
# `SELECT gestation_weeks, COUNT(gestation_weeks) FROM birth_data_table WHERE infant_sex = 'M' GROUP BY gestation_weeks;`
#
# All the above queries run, but they are difficult to visually inspect in the Postgres terminal.

# ## Working with PostgreSQL in Python

# +
# Connect to the database to make queries using psycopg2
# (the redundant `con = None` placeholder assignment was removed; psycopg2
# either returns a live connection or raises).
con = psycopg2.connect(database=dbname, user=username)

# query:
sql_query = """
SELECT * FROM birth_data_table WHERE delivery_method='Cesarean';
"""
birth_data_from_sql = pd.read_sql_query(sql_query, con)

birth_data_from_sql.head()
# -

# Once the data has been pulled into python, we can leverage pandas methods to work with the data.

# %matplotlib inline
birth_data_from_sql.hist(column='birth_weight');

# ### Is reading from a SQL database faster than from a Pandas dataframe?  Probably not for the amount of data you can fit on your machine.

# +
def get_data(sql_query, con):
    """Run *sql_query* over DB connection *con* and return the result as a DataFrame."""
    data = pd.read_sql_query(sql_query, con)
    return data

# %timeit get_data(sql_query, con)
birth_data_from_sql = get_data(sql_query, con)
birth_data_from_sql.head()
# -

# +
def get_pandas_data(df, col, value):
    """Return the subset of *df* whose column *col* equals *value*."""
    sub_df = df.loc[(df[col] == value)]
    return sub_df

# %timeit get_pandas_data(birth_data, 'delivery_method', 'Cesarean')
birth_data_out = get_pandas_data(birth_data, 'delivery_method', 'Cesarean')
birth_data_out.head()
# -

# This should have given you a quick taste of how to use SQLAlchemy, as well as how to run a few SQL queries both at the command line and in python. You can see that `pandas` is actually quite a bit faster than PostgreSQL here. This is because we're working with quite a small database (2716 rows × 37 columns), and there is overhead in the time it takes to communicate between Python and PostgreSQL. But as your database gets bigger (and certainly when it's too large to store in memory), working with relational databases becomes a necessity.

# #### Congrats! You now have Python and SQL ready to go!
DS_sql_setup_part_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.4.2 # language: julia # name: julia-1.4 # --- # # Pleiades: Workprecision # <ul id="top"> # <li><a href="#Loading-packages"> # Loading Packages</a></li> # # <li><a href="#Computer-properties"> # Computer properties</a></li> # # <li><a href="#Initial-value-problem:-Pleiades"> # Initial value problem: Pleiades</a></li> # # <li><a href="#Test-solution"> # Test solution</a></li> # # <li><a href="#Work-Precision-diagrams-(Float64)"> # Work-Precision diagrams (Float64)</a></li> # # <li><a href="#Work-Precision-diagrams-(Float128)"> # Work-Precision diagrams (Float128)</a></li> # # <li><a href="#Save/Load-Data"> # Save/Load Data</a></li> # # </ul> # - PLEI- a celestial mechanics problem: seven stars in the plane with coordinates $x_i,y_i$ and masses $m_i=i$ ($i=1,\dots,7$) # # \begin{align*} # &x_i^{''}=\sum_{j \ne i} m_j(x_j-x_i)/r_{ij} \\ # &y_i^{''}=\sum_{j \ne i} m_j(y_j-y_i)/r_{ij} # \end{align*} # # where # # \begin{align*} # r_{ij}=((x_i-x_j)^2+ (y_i-y_j)^2)^{3/2}, \quad i,j=1,\dots,7 # \end{align*} # # - The initial values are # # \begin{align*} # & x_1(0)=3, \quad x_2(0)=3, \quad x_3(0)=-1, \quad x_4(0)=-3, \\ # & x_5(0)=2, \quad x_6(0)=-2, \quad x_7(0)=2 \\ # & y_1(0)=3, \quad y_2(0)=-3, \quad y_3(0)=2, \quad y_4(0)=0, \\ # & y_5(0)=0, \quad y_6(0)=-4, \quad y_7(0)=4, \\ # & x'_i(0)=y'_i(0)=0 \ \text{ for all } i \text{ with the exception of} \\ # & x'_6(0)=1.75, \quad x'_7(0)=-1.5, \quad y'_4(0)=-1.25, \quad y'_5(0)=1 # \end{align*} # # - Integration interval: $0 \le t \le 3$ # # # - Reference: Solving Ordinary Differential Equations I (Nonstiff Problems), <NAME>:Norsett, G.Wanner, (245 page), Springer # ## Loading packages using OrdinaryDiffEq,DiffEqDevTools,BenchmarkTools using IRKGaussLegendre using Plots,LinearAlgebra using Dates using JLD2, FileIO using RecursiveArrayTools # <a 
href="#top">Back to the top</a> # # ## Computer properties #export JULIA_NUM_THREADS=2 Threads.nthreads() # + #;cat /proc/cpuinfo # on Linux machines # - using Hwloc Hwloc.num_physical_cores() # <a href="#top">Back to the top</a> # # ## Initial value problem: Pleiades # + setprecision(BigFloat, 108); include("../ODEProblems/Pleiades.jl") include("../ODEProblems/InitialPleiades.jl") t0=0.0 t1=3.0 u064 =InitialPleiades(Float64) tspan64=(t0,t1) prob64 = ODEProblem(f,u064,tspan64); u0128=InitialPleiades(BigFloat) tspan128=(BigFloat(t0),BigFloat(t1)) prob128 = ODEProblem(f,u0128,tspan128); (typeof(u064),typeof(tspan64),typeof(u0128),typeof(tspan128)) q064=copy(u064[1:14]) v064=copy(u064[15:28]) probdyn64 = DynamicalODEProblem(dotq,dotv,q064,v064,tspan64) prob2nd64 = SecondOrderODEProblem(f2nd!,v064,q064,tspan64) q0128=copy(u0128[1:14]) v0128=copy(u0128[15:28]) probdyn128 = DynamicalODEProblem(dotq,dotv,q0128,v0128,tspan128) prob2nd128 = SecondOrderODEProblem(f2nd!,v0128,q0128,tspan128); # - # <a href="#top">Back to the top</a> # # ## Test solution # + setprecision(BigFloat, 256) #sol =solve(prob128,Vern9(),save_everystep=false,abstol=1e-32,reltol=1e-32); #@save "./Data/plei_test_sol.jld2" sol @load "./Data/plei_test_sol.jld2" sol test_sol = TestSolution(sol) sol.destats u0256 =InitialPleiades(BigFloat) Gm256=BigFloat.([1,2,3,4,5,6,7]) E0=NbodyEnergy(u0256,Gm256) (Float32(sol.t[end]),Float32(NbodyEnergy(sol.u[end],Gm256)/E0.-1)) # + ux=sol.u[end] solx=ArrayPartition(ux[1:14], ux[15:28]) test_solx = TestSolution(tspan128,[solx]) ux=sol.u[end] solx2nd=ArrayPartition(ux[15:28],ux[1:14]) test_solx2nd = TestSolution(tspan128,[solx2nd]); # - # <a href="#top">Back to the top</a> # # ## Work-Precision diagrams (Float64) abstols = 1.0 ./ 10.0 .^ (10:17) reltols = 1.0 ./ 10.0 .^ (10:17); # ### Second Order Problem: Vern9, IRKGL16 setups = [ Dict(:alg=>Vern9()), Dict(:alg=>IRKGL16(),:adaptive=>true, :mixed_precision=>false) ] solnames = ["Vern9","IRKGL16-adaptive"] wp2 = 
WorkPrecisionSet(probdyn64,abstols,reltols,setups;appxsol=test_solx,save_everystep=false,numruns=10,maxiters=10^9, names=solnames); plot(wp2) # ### SecondOrderProblem: DPRKN12 setups = [ Dict(:alg=>DPRKN12()) ] solnames=["DPRKN12"] wp3 = WorkPrecisionSet(prob2nd64,abstols,reltols,setups;appxsol=test_solx2nd,save_everystep=false,numruns=10,maxiters=10^9,name=solnames); plot(wp3) # ### All methods in one Plot plot(title="Float64 computations",xlabel="Error", ylabel="Time (s)") k=1 plot!(log10.(wp2.wps[k].errors),log10.(wp2.wps[k].times), seriestype=:scatter, label="Vern9 (Second Order ODE)", color="blue") plot!(log10.(wp2.wps[k].errors),log10.(wp2.wps[k].times), label="", lw=2, color="blue") # k=2 plot!(log10.(wp2.wps[k].errors),log10.(wp2.wps[k].times), seriestype=:scatter, label="IRKGL16 (Second Order ODE)", color="red") plot!(log10.(wp2.wps[k].errors),log10.(wp2.wps[k].times), label="", lw=2, color="red") # k=1 plot!(log10.(wp3.wps[k].errors),log10.(wp3.wps[k].times), seriestype=:scatter, label="DPRKN12 (Second Order ODE)", color="green") plot!(log10.(wp3.wps[k].errors),log10.(wp3.wps[k].times), label="", lw=2, color="green") # <a href="#top">Back to the top</a> # # ## Work-Precision diagrams (Float128) setprecision(BigFloat, 108) abstols = 1.0 ./ 10.0 .^ (12:30) reltols = 1.0 ./ 10.0 .^ (12:30); # ### Second Order ODE: Vern9, IRKGL16 (mixed-precision=true) setups = [ Dict(:alg=>Vern9()) Dict(:alg=>IRKGL16(),:adaptive=>true,:mixed_precision=>false) Dict(:alg=>IRKGL16(),:adaptive=>true,:mixed_precision=>true,:low_prec_type=>Float64) Dict(:alg=>IRKGL16(),:adaptive=>true,:mixed_precision=>true,:low_prec_type=>Float64,:threading=>true) ] solnames = ["Vern9","IRKGL16-adaptive","IRKGL16-adaptive/mix", "IRKGL16-adaptive/mix/threads" ] wp11 = WorkPrecisionSet(prob128,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=10,maxiters=10^9, name=solnames); plot(wp11) # ### Second Order ODE: Vern9, IRKGL16 (mixed-precision=false) setups = [ Dict(:alg=>Vern9()) 
Dict(:alg=>IRKGL16(),:adaptive=>true,:mixed_precision=>false) Dict(:alg=>IRKGL16(),:adaptive=>true,:mixed_precision=>false,:threading=>true) ] solnames=["Vern9", "IRKGL16-adaptive", "IRKGL16-adaptive/thread"] wp12 = WorkPrecisionSet(probdyn128,abstols,reltols,setups;appxsol=test_solx,save_everystep=false,numruns=1,maxiters=10^9,names=solnames); plot(wp12) # ### Second Order ODE: DPRKN12 setups = [ Dict(:alg=>DPRKN12()) ] solnames=["DPRKN12"] wp13 = WorkPrecisionSet(prob2nd128,abstols,reltols,setups;appxsol=test_solx2nd,save_everystep=false,numruns=1,maxiters=10^9,names=solnames); plot(wp13) # ### Plot- Vern9: General Ode vs Second Order ODE k=1 plot(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), seriestype=:scatter, label="Vern9 ( General ODE)") plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), label="") # k=1 plot!(log10.(wp12.wps[k].errors),log10.(wp12.wps[k].times), seriestype=:scatter, label="Vern9 (Second Order ODE)") plot!(log10.(wp12.wps[k].errors),log10.(wp12.wps[k].times), label="") k=2 plot(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), seriestype=:scatter, label="IRKGL16 (General ODE)", color="red") plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), label="", color="red") # k=3 plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), seriestype=:scatter, label="IRKGL16-MIX (General ODE)", color="orange") plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), label="", color="orange") # k=4 plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), seriestype=:scatter, label="IRKGL16-MIX-Thread(General ODE)", color="blue") plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), label="", color="blue") # k=2 plot!(log10.(wp12.wps[k].errors),log10.(wp12.wps[k].times), seriestype=:scatter, label="IRKGL16 (Second Order ODE)", color="green") plot!(log10.(wp12.wps[k].errors),log10.(wp12.wps[k].times), label="", color="green") # k=3 plot!(log10.(wp12.wps[k].errors),log10.(wp12.wps[k].times), seriestype=:scatter, 
label="IRKGL16-Thread (Second Order ODE)", color="black") plot!(log10.(wp12.wps[k].errors),log10.(wp12.wps[k].times), label="", color="black") # ### Plot for JuliaCon2020 # + plot(title="Work-Precision Diagram (Float128)", xlabel="Error", ylabel="Time (s)") #k=1 #plot(log10.(wp12.wps[k].errors),log10.(wp12.wps[k].times), seriestype=:scatter, label="Vern9 (DynamicProblem)", color="blue") #plot!(log10.(wp12.wps[k].errors),log10.(wp12.wps[k].times), label="",color="blue") # k=1 plot!(log10.(wp13.wps[k].errors),log10.(wp13.wps[k].times), seriestype=:scatter, label="", color="green") plot!(log10.(wp13.wps[k].errors),log10.(wp13.wps[k].times), label="DPRKN12", color="green", lw=3) # k=3 plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), seriestype=:scatter, label="", color="red") plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), label="IRKGL16-MIX", color="red", lw=3, linestyle=:dash) # k=4 plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), seriestype=:scatter, label="", color="red") plot!(log10.(wp11.wps[k].errors),log10.(wp11.wps[k].times), label="IRKGL16-MIX (4 threads)", color="red", lw=3) # - # <a href="#top">Back to the top</a> # ### Save/Load Data # + # Float64 experiments #@save "./Data/output/OutPleiades_wp2.jld2" wp2 #@save "./Data/output/OutPleiades_wp3.jld2" wp3 # Float128 experiments #@save "./Data/output/OutPleiades_wp11.jld2" wp11 #@save "./Data/output/OutPleiades_wp12.jld2" wp12 #@save "./Data/output/OutPleiades_wp13.jld2" wp13 # - # Float64 experiments @load "./Data/output/OutPleiades_wp2.jld2" wp2 @load "./Data/output/OutPleiades_wp3.jld2" wp3 # Float128 experiments @load "./Data/output/OutPleiades_wp11.jld2" wp11 @load "./Data/output/OutPleiades_wp12.jld2" wp12 @load "./Data/output/OutPleiades_wp13.jld2" wp13 now()
Juliacon 2020/Pleiades-WorkPrecision.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import requests
from bs4 import BeautifulSoup

# Fetch the example page, supplying a browser-like User-Agent header with the
# request.
response = requests.get(
    "http://www.pyclass.com/example.html",
    headers={
        'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'
    }
)
content = response.content
print(content)

# Parse the raw HTML bytes using Python's built-in parser backend.
soup = BeautifulSoup(content, 'html.parser')
print(soup.prettify())

# .find() finds only the first element; .find_all() returns every match.
divs = soup.find_all('div', {'class': 'cities'})
print(divs)

first_city = divs[0]
print(first_city.find_all('h2'))
print(first_city.find_all('h2')[0].text)

# Print the first <h2> heading inside each matching div.
for city_div in divs:
    print(city_div.find_all('h2')[0].text)
s28-webscraping/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: RISCluster_CUDA # language: python # name: riscluster_cuda # --- # + language="javascript" # IPython.OutputArea.prototype._should_scroll = function(lines) { # return false; # } # - # # Workflow Control # This notebook provides an end-to-end procedure for implementing the method described in "Unsupervised Deep Clustering of Seismic Data: Monitoring the Ross Ice Shelf, Antarctica." # # <img src="RISArrayMap.jpg" alt="RISArrayMap" width="600"/> # <a id="contents"></a> # *** # ## <u>Table of Contents</u> # # 1. [Initialize Project Environment](#section1) # 2. [Seismic Pre-Processing](#section2) # 3. [Set Universal Experiment Parameters](#section3) # 4. [Autoencoder (AEC)](#section4) # 5. [Gaussian Mixtures Model (GMM)](#section5) # 6. [Deep Embedded Clustering (DEC)](#section6) # 7. [Compare GMM & DEC](#section7) # 8. [Environmental Data Exploration](#section8) # # Appendices # A. [Evaluate Optimal Number of Clusters](#appendixA) # + import datetime import os import sys from IPython.display import Markdown as md import numpy as np import pandas as pd import torch from torch.utils.data import DataLoader, Subset from torchinfo import summary from torchvision import transforms from RISCluster import models, plotting, utils from RISCluster.networks import AEC, DEC from RISProcess.io import config # - # <a id="section1"></a> # *** # ## <u>1 Initialize Project Environment</u> # The default project structure is:<br> # `/Project Folder # ├── Config # ├── Data # │ ├── Meteo # │ ├── Ice # │ ├── Seismo # │ │ ├── MSEED # │ │ └── StationXML # └── Outputs # ` # <br>Note that the raw seismic data from 1-Dec-2014 to 1-Dec-2016 is nearly 1 TB. It may be practical to split out the project's `Data` folder onto a disk with more storage. If so, set the path to the data storage below. 
# + # Main project folder to save outputs: project_folder = '.' # Path to configuration files: path_config = f"{project_folder}/Config" # Path to folder containing data, including HDF file for ML workflow: path_data = f"{project_folder}/Data" # Path to raw seismic data: # path_data_seismo = f"{path_data}/Seismo" path_data_seismo = "/zdata2/data/wfjenkin/RIS_Seismic" # Path to save workflow outputs (ML models, figures, results, etc.) path_output = f"{project_folder}/Outputs" # Path to HDF dataset: fname_dataset = f"{path_data}/RISData_20210713.h5" # Path to save paper-ready figures: figure_savepath = f"{path_output}/Figures" utils.init_project_env([path_config, path_data, path_data_seismo, path_output, figure_savepath]) # - # <a href="#contents">Return to Top</a> # <a id="section2"></a> # *** # ## <u>2 Seismic Pre-Processing</u> # Four workflows are provided for obtaining and pre-processing seismic data. The recommended workflow makes use of sections 2.1, 2.3, and 2.4; section 2.2 is not required for the rest of the workflow, but instead provides a pipeline to save pre-processed data to disk. # ### 2.1 Download Data # In this workflow, seismic data is downloaded using the FDSN mass data downloader. Data set parameters are stored in the configuration file to configpath; MSEED data are saved to `datapath/MSEED`; and station XML data are saved to `datapath/StationXML`. 
The MSEED data are saved according to the following convention: # `Network.Station..Channel__YYYYMMDDTHHMMSSZ__YYYYMMDDTHHMMSSZ.mseed` parameters = { 'start': '20141201T0000', 'stop': '20141203T0000', 'mode': 'download', 'datapath': path_data_seismo, 'network': 'XH', 'station': '*', 'channel': 'HH*', } config_file = config('w', path=path_config, parameters=parameters) print("Run the following in Terminal:") md(f"`dlfdsn {config_file}`") # ### 2.2 Pre-process Data # In this workflow, raw seismic data is read from `datapath`, processed, and saved to `writepath` according to the following file naming conventions:<br> # `MSEED/Network/Station/Network.Station.Channel.YYYY.DAY.mseed` # # For the input data, two file formats are available. # <br>**Format 1:** # <br>`Network.Station.Channel.YYYY.DAY.mseed` # <br>**Format 2:** # <br>`Network.Station..Channel__YYYYMMDDTHHMMSSZ__YYYYMMDDTHHMMSSZ.mseed` # + # parameters = { # 'start': '20141201T0000', # 'stop': '20161203T0000', # 'mode': 'preprocess', # 'sourcepath': path_data_seismo, # 'name_format': 2, # 'writepath': f"{path_data_seismo}/Preprocessed", # 'parampath': f"{path_data_seismo}/Preprocessed", # 'network': 'XH', # 'channel': 'HHZ', # 'taper': 60, # 'prefeed': 60, # 'fs2': 50, # 'cutoff': '3, 20', # 'output': 'acc', # 'prefilt': '0.004, 0.01, 500, 1000', # 'waterlevel': 14, # 'detector': 'z', # 'on': 8, # 'off': 4, # 'num_workers': 4 # } parameters = { 'start': '20161119T0000', 'stop': '20161125T0000', 'mode': 'preprocess', 'sourcepath': f"{path_data_seismo}", 'name_format': 2, 'writepath': f"{path_data_seismo}/Preprocessed", 'parampath': f"{path_data_seismo}/Preprocessed", 'network': 'XH', 'channel': 'HHZ', 'taper': 60, 'prefeed': 60, 'fs2': 50, 'cutoff': '3, 20', 'output': 'acc', 'prefilt': '0.004, 0.01, 500, 1000', 'waterlevel': 14, # 'detector': 'z', # 'on': 8, # 'off': 4, 'num_workers': 17 } config_file = config('w', path=path_config, parameters=parameters) print("Run the following in Terminal:") md(f"`process 
{config_file}`") # - # ### 2.3 Detect Events & Build Catalogue # In this workflow, raw seismic data in `datapath` are processed in 24-hour segments, and an event detection algorithm is applied. The results of the event detector are compiled into a catalogue that is saved to disk at `writepath`. This catalogue serves as a useful pointer for follow-on processing of events of interest, rather than continuous data. # #### 2.3.1 Build Unsorted Catalogue # + # parameters = { # 'start': '20141201T0000', # 'stop': '20141203T0000', # 'mode': 'full', # 'sourcepath': path_data_seismo, # 'name_format': 2, # 'writepath': path_data, # 'parampath': path_data, # 'network': 'XH', # 'channel': 'HHZ', # 'taper': 60, # 'prefeed': 60, # 'fs2': 50, # 'cutoff': '3, 20', # 'output': 'acc', # 'prefilt': '0.004, 0.01, 500, 1000', # 'waterlevel': 14, # 'detector': 'z', # 'on': 8, # 'off': 4, # 'num_workers': 4 # } parameters = { 'start': '20141203T0000', 'stop': '20161121T0000', 'mode': 'detect', 'sourcepath': f"{path_data_seismo}/Preprocessed", 'name_format': 1, 'writepath': path_data, 'parampath': path_data, 'network': 'XH', 'station': '*', 'channel': 'HHZ', 'taper': 60, 'prefeed': 60, 'detector': 'recursive', 'STA': 0.5, 'LTA': 30, 'on': 15, 'off': 10, 'num_workers': 32 } config_file = config('w', path=path_config, parameters=parameters) print("Run the following in Terminal:") md(f"`process {config_file}`") # - # #### 2.3.2 Clean Catalogue # Remove duplicate detections, and if desired, detections that occur within a window (s) following an initial detection. window = 5 # !cleancat {path_data + '/catalogue.csv'} --dest {path_data + '/catalogue2.csv'} --window $window # ### 2.4 Build HDF Database from Catalogue # In this workflow, a catalogue of detections at catalogue is used to process raw seismic data in `datapath`. In addition to pre-processing, the traces, spectrograms, and metadata of the detections are saved to an HDF database located at `writepath`. 
Because this workflow is implemented in parallel and results are returned asynchronously, a new catalogue is saved to `writepath.csv` that corresponds to the indexing within the HDF dataset. The index within `writepath.csv` corresponds to the original catalogue at catalogue. parameters = { 'start': '20141203T0000', 'stop': '20161121T0000', 'mode': 'cat2h5', 'sourcepath': path_data_seismo, 'name_format': 2, 'writepath': fname_dataset, 'catalogue': os.path.join(path_data,'catalogue2.csv'), 'parampath': path_data, 'network': 'XH', 'channel': 'HHZ', 'taper': 10, 'prefeed': 10, 'fs2': 50, 'cutoff': '3, 20', 'T_seg': 4, 'NFFT': 256, 'tpersnap': 0.4, 'overlap': 0.9, # 'output': 'acc', # 'prefilt': '0.004, 0.01, 500, 1000', # 'waterlevel': 14, # 'detector': 'z', # 'on': 8, # 'off': 4, 'det_window': 5, 'num_workers': 1 } config_file = config('w', path=path_config, parameters=parameters) print("Run the following in Terminal:") md(f"`process {config_file}`") # <a href="#contents">Return to Top</a> # <a id="section3"></a> # *** # ## <u>3 Set Universal Parameters</u> # + exp_name = "FullArray" # Get the number of samples in the dataset. # !query_H5size $fname_dataset # Image Sample Indexes for Example Waveforms: img_index = [35578, 361499, 328177, 371888] # Generate new sample index for data set? 
genflag = False # - if genflag: M = 50000 # !GenerateSampleIndex $M $fname_dataset $path_data universal = { 'exp_name': exp_name, 'fname_dataset': fname_dataset, 'savepath': path_output, 'indexpath': os.path.join(path_data, 'TraValIndex_M=50000.pkl'), 'configpath': path_config } device_no = 1 device = utils.set_device(device_no) transform = 'vec_norm' # <a href="#contents">Return to Top</a> # <a id="section4"></a> # *** # ## <u>4 Autoencoder (AEC)</u> # ### 4.1 AEC Architecture summary(AEC(), (1, 1, 87, 100)) # ### 4.2 Configure AEC training parameters = { 'model': 'AEC', 'mode': 'train', 'n_epochs': 500, 'show': False, 'send_message': False, 'early_stopping': True, 'patience': 10, 'transform': transform, 'img_index': str(img_index)[1:-1], 'tb': True, 'tbport': 6999, 'workers': 8, 'loadmode': 'ram', 'datafiletype': 'h5' } hyperparameters = { 'batch_size': '64, 128, 256, 512', 'lr': '0.0001, 0.001, 0.01' } init_path = utils.config_training(universal, parameters, hyperparameters) config_AEC = utils.Configuration(init_path) config_AEC.load_config() config_AEC.set_device(device_no) config_AEC.show = True # ### 4.3 View Detection Examples fig = plotting.view_detections(fname_dataset, img_index) fig.savefig(f"{figure_savepath}/DetectionExamples.eps", dpi=300, facecolor='w') # ### 4.4 Train AEC print("Run the following in Terminal:") md(f"`runDEC {init_path}`") # <a id="BestAEC"></a> # ### 4.5 Select Best AEC Run # Use Tensorboard to view outputs from the various hyperparameter runs. 
# + batch_size = 64 LR = 0.001 expserial = 'Exp20210727T192309' runserial = f'Run_BatchSz={batch_size}_LR={LR}' # exp_path = f"{path_output}/Models/AEC/{expserial}/{runserial}" exp_path_AEC = os.path.join(path_output, 'Models', 'AEC', expserial, runserial) weights_AEC = os.path.join(exp_path_AEC, 'AEC_Params_Final.pt') print(weights_AEC) # - # Return to [Section 6.2](#ConfigDCM)<br> # Return to [Section 7](#section7) # ### 4.6 Evaluate AEC Training Performance # #### 4.6.1 Load Data and Model Parameters # + dataset = utils.SeismicDataset(fname_dataset, 'h5') model_AEC = AEC().to(device) model_AEC = utils.load_weights(model_AEC, weights_AEC, device) # - # Return to [Section 5.3](#GMMeval) # #### 4.6.2 Training and Validation History fig = plotting.view_history_AEC(os.path.join(exp_path_AEC, 'AEC_history.csv'), show=True) fig.savefig(os.path.join(figure_savepath, 'AEC_History.eps'), dpi=300, facecolor='w') # #### 4.6.3 Input, Latent Space, and Reconstruction fig = plotting.compare_images(model_AEC, 0, config_AEC) fig.savefig(os.path.join(figure_savepath, 'CompareInOut.eps'), dpi=300, facecolor='w') # ### 4.7 Evaluate all data through AEC # #### 4.7.1 Configure AEC Evaluation parameters = { 'model': 'AEC', 'mode': 'predict', 'show': False, 'send_message': False, 'transform': 'vec_norm', # 'img_index': str(img_index)[1:-1], 'tb': False, 'workers': 8, 'loadmode': 'ram', 'datafiletype': 'h5', 'saved_weights': weights_AEC } init_path = utils.config_training(universal, parameters) # #### 4.7.2 Evaluate all data print("Run the following in Terminal:") md(f"`runDC {init_path}`") # #### 4.7.3 Display Metrics for Entire Dataset # This metric is calculated by measuring the MSE of *all* spectrograms in the data set. 
with open(os.path.join(exp_path_AEC, 'Prediction', 'MSE.txt'), 'r') as f: print(f.read()) # [Return to Top](#contents) # <a id="section5"></a> # *** # ## <u>5 Gaussian Mixtures Model (GMM)</u> # ### 5.1 Configure GMM parameters = { 'model': 'GMM', 'mode': 'fit', 'show': False, 'send_message': False, 'transform': 'vec_norm', 'img_index': str(img_index)[1:-1], 'tb': False, 'workers': 8, 'loadmode': 'ram', 'datafiletype': 'h5', 'saved_weights': weights_AEC } hyperparameters = { 'n_clusters': '5, 6, 7, 8, 9, 10' } init_path = utils.config_training(universal, parameters, hyperparameters) config_GMM = utils.Configuration(init_path) config_GMM.load_config() config_GMM.set_device(device_no) config_GMM.show = True # ### 5.2 Run GMM print("Run the following in Terminal:") md(f"`runDC {init_path}`") # <a id="GMMeval"></a> # ### 5.3 Evaluate GMM Performance # Run [4.5](#BestAEC) and 4.6.1 to load AEC model. # #### 5.3.1 Load Data n_clusters = 8 loadpath_GMM = os.path.join(exp_path_AEC, 'GMM', f'n_clusters={n_clusters}') centroids_GMM = np.load(os.path.join(loadpath_GMM, 'centroids.npy')) labels_GMM = np.load(os.path.join(loadpath_GMM, 'labels.npy')) silh_scores_GMM = np.load(os.path.join(loadpath_GMM, 'silh_scores.npy')) z_AEC = np.load(os.path.join(exp_path_AEC, 'Prediction', 'Z_AEC.npy')) # #### 5.3.2 View Clustering Statistics pd.set_option('display.float_format', lambda x: '%.4e' % x) df_GMM = pd.read_csv(os.path.join(loadpath_GMM, 'cluster_performance.csv')) df_GMM['class'] = df_GMM['class'].fillna(-1).astype(int) df_GMM['N'] = df_GMM['N'].fillna(-1).astype(int) df_GMM # #### 5.3.3 View Clustering Results p = 2 fig = plotting.cluster_gallery( model_AEC, dataset, fname_dataset, device, z_AEC, labels_GMM, centroids_GMM, p, True, True ) fig.savefig(os.path.join(figure_savepath, 'GMM_Gallery.eps'), dpi=300, facecolor='w') # #### 5.3.4 View Silhouette Analysis fig = plotting.view_silhscore(silh_scores_GMM, labels_GMM, n_clusters, 'GMM') 
fig.savefig(os.path.join(figure_savepath, 'GMM_Silh.eps'), dpi=300, facecolor='w')

# #### 5.3.5 View t-SNE Analysis

# +
# Prefer the GPU-accelerated t-SNE from RAPIDS cuML; fall back to scikit-learn
# when cuML is not installed (was a bare `except:`, which would also hide
# unrelated errors raised while importing cuml).
try:
    from cuml import TSNE
except ImportError:
    from sklearn.manifold import TSNE

M = len(z_AEC)
# FIX: the GMM is fit on the autoencoder latent space, so the embedding input
# is z_AEC. The previous `z_GMM` is never defined anywhere in this notebook
# (NameError at runtime); Section 7.3.3 runs this same GMM t-SNE on z_AEC.
results_GMM = TSNE(
    n_components=2,
    perplexity=int(M/50),
    early_exaggeration=2000,
    learning_rate=int(M/25),
    n_iter=3000,
    verbose=0,
    random_state=2009
).fit_transform(z_AEC.astype('float64'))
# -

fig = plotting.view_TSNE(results_GMM, labels_GMM, 't-SNE Results: GMM', True)
fig.savefig(os.path.join(figure_savepath, 'GMM_TSNE.eps'), dpi=300, facecolor='w')

# [Return to Top](#contents)
# <a id="section6"></a>
# ***
# ## <u>6 Deep Embedded Clustering (DEC)</u>

# ### 6.1 DEC Model Architecture

summary(DEC(n_clusters=5), (1, 1, 87, 100))

# <a id="ConfigDCM"></a>
# ### 6.2 Configure Training
# Run [4.5](#BestAEC) first to get AEC weights.

# +
parameters = {
    'model': 'DEC',
    'mode': 'train',
    'n_epochs': 400,
    'show': False,
    'send_message': False,
    'transform': 'vec_norm',
    'tb': True,
    'tbport': 6999,
    'workers': 4,
    'loadmode': 'ram',
    'datafiletype': 'h5',
    'init': 'load',
    'update_interval': -1,
    'saved_weights': weights_AEC
}
hyperparameters = {
    'batch_size': '64',
    'lr': '0.001',
    'n_clusters': '10',
    'gamma': '0.001',
    'tol': 0.003
}
init_path = utils.config_training(universal, parameters, hyperparameters)
config_DEC = utils.Configuration(init_path)
config_DEC.load_config()
config_DEC.set_device(device_no)
config_DEC.show = True
# -

# ### 6.3 Train DEC Model

# Run the following in Terminal:
md(f"`runDC {init_path}`")

# To specify which CUDA device(s) is(are) used, prepend the following:
md(f"`CUDA_VISIBLE_DEVICES=7 runDC {init_path}`")

# <a id="BestDEC"></a>
# ### 6.4 Select Best DEC Run
# Use Tensorboard to view outputs from the various hyperparameter runs.
# + n_clusters = 8 batch_size = 64 LR = 0.001 gamma = 0.001 tol = 0.003 expserial = 'Exp20210730T172829' runserial = f'Run_Clusters={n_clusters}_BatchSz={batch_size}_LR={LR}_gamma={gamma}_tol={tol}' exp_path_DEC = os.path.join(path_output, 'Models', 'DEC', expserial, runserial) weights_DEC = os.path.join(exp_path_DEC, 'DEC_Params_Final.pt') print(weights_DEC) # - # Return to [Section 6.6](#DECeval)<br> # Return to [Section 7](#section7)<br> # Return to [Section 8](#section8) # ### 6.5 Evaluate DEC Training Performance fig = plotting.view_history_DEC([os.path.join(exp_path_DEC, 'DEC_history.csv'), os.path.join(exp_path_DEC, 'Delta_history.csv')], show=True) fig.savefig(os.path.join(figure_savepath, 'DEC_History.eps'), dpi=300, facecolor='w') # ### 6.6 Evaluate all data through DEC parameters = { 'model': 'DEC', 'mode': 'predict', 'show': False, 'send_message': False, 'transform': 'vec_norm', # 'img_index': str(img_index)[1:-1], 'tb': False, 'workers': 16, 'loadmode': 'ram', 'datafiletype': 'h5', 'saved_weights': weights_DEC } init_path = utils.config_training(universal, parameters) print("Run the following in Terminal:") md(f"`runDC {init_path}`") # To specify which CUDA device(s) is(are) used, prepend the following: md(f"`CUDA_VISIBLE_DEVICES=1 runDC {init_path}`") # <a id="DECeval"></a> # ### 6.7 Evaluate DEC Performance # Run [6.4](#BestDEC) first to get DEC weights. 
# #### 6.7.1 Load Data and Model Parameters

dataset = utils.SeismicDataset(fname_dataset, 'h5')
model_DEC = DEC(n_clusters).to(device)
model_DEC = utils.load_weights(model_DEC, weights_DEC, device)

# #### 6.7.2 Load Data

loadpath_DEC = os.path.join(exp_path_DEC, 'Prediction')
centroids_DEC = np.load(os.path.join(loadpath_DEC, 'centroids_DEC.npy'))
labels_DEC = np.load(os.path.join(loadpath_DEC, 'labels_DEC.npy'))
silh_scores_DEC = np.load(os.path.join(loadpath_DEC, 'silh_scores.npy'))
z_DEC = np.load(os.path.join(loadpath_DEC, 'Z_DEC.npy'))

# #### 6.7.3 View Clustering Statistics

pd.set_option('display.float_format', lambda x: '%.4e' % x)
df_DEC = pd.read_csv(os.path.join(loadpath_DEC, 'cluster_performance.csv'))
# Missing class labels/counts are sentinel-coded as -1 so the columns stay integer.
df_DEC['class'] = df_DEC['class'].fillna(-1).astype(int)
df_DEC['N'] = df_DEC['N'].fillna(-1).astype(int)
df_DEC

# #### 6.7.4 View Clustering Results

p = 2
fig = plotting.cluster_gallery(
    model_DEC,
    dataset,
    fname_dataset,
    device,
    z_DEC,
    labels_DEC,
    centroids_DEC,
    p,
    True,
    True
)
# FIX: save under a DEC-specific filename; the previous 'GMM_Gallery.eps'
# overwrote the GMM gallery written in Section 5.3.3.
fig.savefig(os.path.join(figure_savepath, 'DEC_Gallery.eps'), dpi=300, facecolor='w')

# #### 6.7.5 View Silhouette Analysis

fig = plotting.view_silhscore(silh_scores_DEC, labels_DEC, n_clusters, 'DEC')
# FIX: DEC-specific filename (was 'GMM_Silh.eps', overwriting the GMM plot
# from Section 5.3.4).
fig.savefig(os.path.join(figure_savepath, 'DEC_Silh.eps'), dpi=300, facecolor='w')

# #### 6.7.6 View t-SNE Analysis

# +
# Prefer the GPU-accelerated t-SNE from RAPIDS cuML; fall back to scikit-learn
# when cuML is not installed (was a bare `except:`).
try:
    from cuml import TSNE
except ImportError:
    from sklearn.manifold import TSNE

M = len(z_DEC)
results_DEC = TSNE(
    n_components=2,
    perplexity=int(M/50),
    early_exaggeration=2000,
    learning_rate=int(M/25),
    n_iter=3000,
    verbose=0,
    random_state=2009
).fit_transform(z_DEC.astype('float64'))
# -

# FIX: this embedding is computed from z_DEC/labels_DEC, so the title and
# filename say DEC (they previously said GMM, mislabeling the plot and
# clobbering the GMM figure from Section 5.3.5).
fig = plotting.view_TSNE(results_DEC, labels_DEC, 't-SNE Results: DEC', True)
fig.savefig(os.path.join(figure_savepath, 'DEC_TSNE.eps'), dpi=300, facecolor='w')

# [Return to Top](#contents)
# <a id="section7"></a>
# ***
# ## <u>7 Compare GMM & DEC</u>
#
# Run [4.5](#BestAEC) and [6.4](#BestDEC) first to get AEC and DEC weights.
# ### 7.1 Load Data # + dataset = utils.SeismicDataset(fname_dataset, 'h5') n_clusters = 8 batch_size = 64 LR = 0.001 gamma = 0.001 tol = 0.003 pd.set_option('display.float_format', lambda x: '%.3e' % x) # Load AEC/GMM data: model_AEC = AEC().to(device) model_AEC = utils.load_weights(model_AEC, weights_AEC, device) loadpath_GMM = os.path.join(exp_path_AEC, 'GMM', f'n_clusters={n_clusters}') centroids_GMM = np.load(os.path.join(loadpath_GMM, 'centroids.npy')) labels_GMM = np.load(os.path.join(loadpath_GMM, 'labels.npy')) silh_scores_GMM = np.load(os.path.join(loadpath_GMM, 'silh_scores.npy')) z_AEC = np.load(os.path.join(exp_path_AEC, 'Prediction', 'Z_AEC.npy')) MSE_GMM = np.load(os.path.join(loadpath_GMM, 'X_MSE.npy')) ip_GMM = np.load(os.path.join(loadpath_GMM, 'X_ip.npy')) df_GMM = pd.read_csv(os.path.join(loadpath_GMM, 'cluster_performance.csv')) df_GMM['class'] = df_GMM['class'].fillna(-1).astype(int) df_GMM['N'] = df_GMM['N'].fillna(-1).astype(int) # Load DEC data: expserial = 'Exp20210730T172829' runserial = f'Run_Clusters={n_clusters}_BatchSz={batch_size}_LR={LR}_gamma={gamma}_tol={tol}' exp_path_DEC = os.path.join(path_output, 'Models', 'DEC', expserial, runserial) weights_DEC = os.path.join(exp_path_DEC, 'DEC_Params_Final.pt') model_DEC = DEC(n_clusters).to(device) model_DEC = utils.load_weights(model_DEC, weights_DEC, device) loadpath_DEC = os.path.join(exp_path_DEC, 'Prediction') centroids_DEC = np.load(os.path.join(loadpath_DEC, 'centroids_DEC.npy')) labels_DEC = np.load(os.path.join(loadpath_DEC, 'labels_DEC.npy')) silh_scores_DEC = np.load(os.path.join(loadpath_DEC, 'silh_scores.npy')) z_DEC = np.load(os.path.join(loadpath_DEC, 'Z_DEC.npy')) MSE_DEC = np.load(os.path.join(loadpath_DEC, 'X_MSE.npy')) ip_DEC = np.load(os.path.join(loadpath_DEC, 'X_ip.npy')) df_DEC = pd.read_csv(os.path.join(loadpath_DEC, 'cluster_performance.csv')) df_DEC['class'] = df_DEC['class'].fillna(-1).astype(int) df_DEC['N'] = df_DEC['N'].fillna(-1).astype(int) # - # ### 7.2 
Cluster Metrics # #### 7.2.1 Intra-cluster Similarity Metrics # Display metrics for intra-cluster sample similarity, comparing GMM (right) with DEC (left). df = pd.concat([df_GMM, df_DEC], axis=1) df # #### 7.2.2 Label Changes df = utils.measure_label_change(labels_GMM, labels_DEC) df # ### 7.3 Figure Comparisons # #### 7.3.1 Clustering Results p = 2 print('GMM ' + '=' * 75) fig1 = plotting.cluster_gallery( model_AEC, dataset, fname_dataset, device, z_AEC, labels_GMM, centroids_GMM, p, True, True ) print('DEC' + '=' * 75) fig2 = plotting.cluster_gallery( model_DEC, dataset, fname_dataset, device, z_DEC, labels_DEC, centroids_DEC, p, True, True ) fig1.savefig(os.path.join(figure_savepath, 'Results_GMM.pdf'), dpi=300, facecolor='w') fig2.savefig(os.path.join(figure_savepath, 'Results_DEC.pdf'), dpi=300, facecolor='w') # #### 7.3.2 Silhouette Analysis fig1 = plotting.view_silhscore(silh_scores_GMM, labels_GMM, n_clusters, 'GMM') fig2 = plotting.view_silhscore(silh_scores_DEC, labels_DEC, n_clusters, 'DEC') fig1.savefig(os.path.join(figure_savepath, 'Silh_GMM.pdf'), dpi=300, facecolor='w') fig2.savefig(os.path.join(figure_savepath, 'Silh_DEC.pdf'), dpi=300, facecolor='w') # #### 7.3.3 t-SNE Results # + if sys.platform == 'darwin': from sklearn.manifold import TSNE elif sys.platform == 'linux': from cuml import TSNE M = len(z_AEC) results_GMM = TSNE(n_components=2, perplexity=int(M/50), early_exaggeration=2000, learning_rate=int(M/25), n_iter=3000, verbose=0, random_state=2009).fit_transform(z_AEC.astype('float64')) results_DEC = TSNE(n_components=2, perplexity=int(M/50), early_exaggeration=2000, learning_rate=int(M/25), n_iter=3000, verbose=0, random_state=2009).fit_transform(z_DEC.astype('float64')) # - fig1 = plotting.view_TSNE(results_GMM, labels_GMM, 't-SNE Results: GMM', show=True) fig2 = plotting.view_TSNE(results_DEC, labels_DEC, 't-SNE Results: DEC', show=True) fig1.savefig(os.path.join(figure_savepath, 'tSNE_GMM.pdf'), dpi=300, facecolor='w') 
fig2.savefig(os.path.join(figure_savepath, 'tSNE_DEC.pdf'), dpi=300, facecolor='w') # #### 7.3.4 View Latent Space p = 2 fig = plotting.view_latent_space( z_AEC, z_DEC, labels_GMM, labels_DEC, centroids_GMM, centroids_DEC, n_clusters, p, True, True ) fig.savefig(os.path.join(figure_savepath, 'zspace.pdf'), dpi=300, facecolor='w') # #### 7.3.5 Cluster CDFs p = 2 fig = plotting.view_class_cdf( z_AEC, z_DEC, labels_GMM, labels_DEC, centroids_GMM, centroids_DEC, n_clusters, p, True, True ) fig.savefig(os.path.join(figure_savepath, 'CDF.pdf'), dpi=300, facecolor='w') # #### 7.3.6 Cluster PDFs p = 2 fig = plotting.view_class_pdf( z_AEC, z_DEC, labels_GMM, labels_DEC, centroids_GMM, centroids_DEC, n_clusters, p, True, True ) fig.savefig(os.path.join(figure_savepath, 'PDF.pdf'), dpi=300, facecolor='w') # [Return to Top](#contents) # <a id="section8"></a> # *** # ## <u>8 Environmental Data Exploration</u> # ### 8.1 Calculate Dataset Statistics # Run [6.4](#BestDEC) first. # #### 8.1.1 Load Catalogue # + # A = [{'idx': i, 'label': labels_DEC[i]} for i in np.arange(M)] # utils.save_labels(A, os.path.join(exp_path_DEC)) # - pd.reset_option('display.float_format') path_to_catalogue = f"{fname_dataset}.csv" path_to_labels = f"{exp_path_DEC}/Labels.csv" catalogue = utils.LabelCatalogue([path_to_catalogue, path_to_labels]) # #### 8.1.2 Station Statistics # View occurrence frequencies by station and label. catalogue.station_statistics().sort_values(by="N", ascending=False) # #### 8.1.3 Amplitude Statistics # View amplitude characteristics for each class. catalogue.amplitude_statistics() # #### 8.1.4 Seasonal Statistics # Compare occurrence frequencies in austral winter (JFM) to austral summer (JJA). 
catalogue.seasonal_statistics(mode=True) # #### 8.1.5 Peak Frequency Statistics # View average peak frequencies for each class: catalogue.get_peak_freq(fname_dataset, batch_size=2048, workers=12) # ### 8.2 View Environmental Data & Detection Statistics # #### 8.2.1 View Station DR02 station = "DR02" aws = "gil" fig = plotting.view_series( station, aws, path_data, path_to_catalogue, path_to_labels, env_vars=["sea_ice_conc","temp","wind_spd"], freq="hour", maxcounts=20, title=f"Station {station} Inter-annual Scale", show=True ) fig.savefig(os.path.join(figure_savepath, f'{station}.eps'), dpi=300, facecolor='w') # #### 8.2.2 View Station RS09 station = "RS09" aws = "mgt" start = datetime.datetime(2016,6,15) stop = datetime.datetime(2016,7,15) fig1 = plotting.view_series( station, aws, path_data, path_to_catalogue, path_to_labels, env_vars=["temp","wind_spd","tide"], vlines=[start, stop], freq="hour", maxcounts=30, figsize=(12,9), title=f"Station {station} Interannual Scale", show=True ) fig2 = plotting.view_series( station, aws, path_data, path_to_catalogue, path_to_labels, env_vars=["temp","wind_spd","tide"], times=[start, stop], freq="hour", maxcounts=20, figsize=(6,9), title=f"Station {station} Weekly Scale", showlabels=False, show=True ) fig1.savefig(os.path.join(figure_savepath, f'{station}_ia.eps'), dpi=300, facecolor='w') fig2.savefig(os.path.join(figure_savepath, f'{station}_wk.eps'), dpi=300, facecolor='w') # #### 8.2.3 Other Stations station = "RS17" aws = "mgt" start = datetime.datetime(2016,4,1) stop = datetime.datetime(2016,4,15) fig1 = plotting.view_series( station, aws, path_data, path_to_catalogue, path_to_labels, env_vars=["temp","wind_spd","tide"], vlines=[start, stop], freq="hour", maxcounts=30, figsize=(12,9), title=f"Station {station} Interannual Scale", show=True ) fig2 = plotting.view_series( station, aws, path_data, path_to_catalogue, path_to_labels, env_vars=["temp","wind_spd","tide"], times=[start, stop], freq="hour", maxcounts=20, 
figsize=(6,9), title=f"Station {station} Weekly Scale", showlabels=False, show=True ) # <a href="#contents">Return to Top</a> # <a id="appendixA"></a> # *** # ## Appendix A: Test for Optimal Number of Clusters # ### A.1 Load Data # Run <a href="#BestAEC">4.5</a> first to get AEC weights. # + index_tra, _ = utils.load_TraVal_index(fname_dataset, universal['indexpath']) tra_dataset = Subset(dataset, index_tra) dataloader = DataLoader(tra_dataset, batch_size=512, num_workers=16) model = AEC().to(device) model = utils.load_weights(model, AEC_weights, device) # - # ### A.2 Compute K-means Metrics klist = '2, 20' klist = np.arange(int(klist.split(',')[0]), int(klist.split(',')[1])+1) inertia, silh, gap_g, gap_u = models.kmeans_metrics(dataloader, model, device, klist) # ### A.3 Plot Metrics fig = plotting.view_cluster_stats(klist, inertia, silh, gap_g, gap_u, show=True) np.save('kmeans_inertia', inertia)
Workflow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Annotate enhancers by
# - distance to the nearest gene
# - GC content

# +
import sys
import importlib
sys.path.insert(0, '/cndd/fangming/CEMBA/snmcseq_dev')
sys.path.insert(0, '/cndd2/fangming/projects/scf_enhancers/scripts/scf_enhancer_paper')
from __init__ import *
import __init__jupyterlab
importlib.reload(__init__jupyterlab)
from __init__jupyterlab import *

import re
import tqdm
import pickle
import collections
import itertools
import tables
from scipy import sparse
from scipy import stats
from scipy import optimize
import scipy.cluster.hierarchy as sch
from statsmodels.stats.multitest import multipletests

import snmcseq_utils
importlib.reload(snmcseq_utils)
import CEMBA_clst_utils
importlib.reload(CEMBA_clst_utils)
import enhancer_gene_utils
importlib.reload(enhancer_gene_utils)

# +
# Load the three inputs: gene list, enhancer list, candidate enhancer-gene pairs.

# genes
f = '/cndd2/fangming/projects/scf_enhancers/enhancer_metacells_r100_n935/organized_for_jupyter/genes_list.tsv'
genes_list = pd.read_csv(f, sep='\t', index_col=False)
print(genes_list.shape)

# enh list
f = '/cndd2/fangming/projects/scf_enhancers/enhancer_metacells_r100_n935/organized_for_jupyter/enhs_list.tsv'
enh_list = pd.read_csv(f, sep='\t', index_col=False)
print(enh_list.shape)

# pair list
f = '/cndd2/fangming/projects/scf_enhancers/results/200521_to_evals_appended_201212.tsv'
to_evals = pd.read_csv(f, sep='\t', index_col=False)
# FIX: use the builtins int/bool for dtype targets -- the np.int alias was
# deprecated in NumPy 1.20 and removed in 1.24, so np.int here raises
# AttributeError on modern NumPy; np.bool_ -> bool for the same reason
# (identical resulting dtypes: int64 / bool).
to_evals = to_evals.astype({'dist': int, 'enh': int, 'is_in_genebody': bool})
print(to_evals.shape)

# +
# Bidirectional gene-id <-> gene-name lookups; groupby().first() keeps the
# first entry when a gid/name is duplicated.
gid_to_gname = genes_list.groupby('gid').first()['gene_name']
gname_to_gid = genes_list.groupby('gene_name').first()['gid']

def gname_to_gid_nan(name):
    """Map a gene name to its gene id; return NaN for names absent from the table."""
    try:
        return gname_to_gid[name]
    # FIX: narrow the bare `except:` -- a missing name raises KeyError; anything
    # else (e.g. KeyboardInterrupt) should propagate, not become NaN.
    except KeyError:
        return np.nan
# -

# ### nearest gene

# +
# Output of bedtools closest: one nearest transcript per enhancer.
f = '/cndd2/fangming/projects/scf_enhancers/enhancer_metacells_r100_n935/organized_for_jupyter/enhancer_nearest_genes.bed'
regions_info = pd.read_csv(f, sep='\t', header=None, dtype={0: str, 4: str},
                           names=['chr', 'start', 'end',
                                  'trpt_chr', 'trpt_start', 'trpt_end', 'strand',
                                  'trpt_id', 'trpt_name', 'gene_id', 'gene_name',
                                  'dist',
                                  ]
                           )
print(regions_info.shape)
print(enh_list.shape)

# expand enh_list with the nearest-gene annotation (left join keeps all enhancers)
enh_list_expand = pd.merge(enh_list, regions_info, on=['chr', 'start', 'end'], how='left')
print(enh_list_expand.shape)
enh_list_expand.head()
# -

fig, ax = plt.subplots()
sns.distplot(np.log10(enh_list_expand['dist'].values))
ax.set_xlabel('log10(distance to the nearest gene)')
ax.set_ylabel('Density')
plt.show()

# ### GC content
# - get sequences
# - compute number of gc's
# - normalize by length

# get sequences
# !bedtools getfasta \
# -tab \
# -fi "/cndd/fangming/iGenome/mm10/genome.fa" \
# -bed "/cndd2/fangming/projects/scf_enhancers/enhancer_metacells_allresolutions/organized_for_jupyter/enhs_list.bed" \
# -fo "/cndd2/fangming/projects/scf_enhancers/enhancer_metacells_allresolutions/organized_for_jupyter/enhs_sequences.tsv"

# compute the number of sequences
f = '/cndd2/fangming/projects/scf_enhancers/enhancer_metacells_allresolutions/organized_for_jupyter/enhs_sequences.tsv'
enh_seqs = pd.read_csv(f, sep='\t', header=None, names=['enh', 'seq'])
enh_seqs.head()

# get GC content; proper normalization
def get_gc_content(seq):
    """Return (count, fraction) of G/C bases in `seq`, case-insensitively.

    Parameters
    ----------
    seq : str
        Nucleotide sequence (any case).

    Returns
    -------
    (int, float)
        Number of G+C characters and their fraction of len(seq).
    """
    seq = seq.upper()
    num = seq.count('G') + seq.count('C')
    frac = num / len(seq)
    return num, frac

# enh_seqs['GC_num'] = enh_seqs['seq'].apply(lambda x: get_gc_content(x)[0])
enh_seqs['GC_frac'] = enh_seqs['seq'].apply(lambda x: get_gc_content(x)[1])
print(enh_seqs.shape)
enh_seqs.head()

sns.distplot(enh_seqs['GC_frac'].values)

# # Combine both info

print(enh_seqs.shape, enh_list_expand.shape)
enh_seqs.head()
enh_list_expand.head()

# check the two tables agree (row-by-row coordinate match; displayed, not asserted)
coords = (enh_list_expand['chr'] + ":"
          + enh_list_expand['start'].astype(str) + "-"
          + enh_list_expand['end'].astype(str)
          ).values
np.all(enh_seqs['enh'].values == coords)

# +
# combined: nearest-gene columns plus GC fraction, aligned by row order
# (validated by the coordinate check above).
enh_annot = enh_list_expand.copy()
enh_annot['GC_frac'] = enh_seqs['GC_frac']
print(enh_annot.shape)
enh_annot.head()
# -

# save
f = '/cndd2/fangming/projects/scf_enhancers/enhancer_metacells_r100_n935/organized_for_jupyter/enhs_list_annotated.tsv'
enh_annot.to_csv(f, sep='\t', header=True, index=False, na_rep='NA')
.ipynb_checkpoints/annotate_enhancers-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # About: Building a staging environment -- container security updates
#
# ---
#
# Update the packages of the Moodle container.

# ## Overview
#
# Builds a staging (verification) environment for testing package updates
# of the Moodle container before applying them to production.
#
# ![staging environment](images/moodle-121-01.png)

# ### Specify the group name
#
# Specify the UnitGroup name that this notebook operates on.

# +
# (example)
# ugroup_name = 'Moodle'

ugroup_name =

# + [markdown] heading_collapsed=true
# #### Check

# + [markdown] hidden=true
# Confirm that a group_vars file exists for the given `UnitGroup` name.
# If this raises, the UnitGroup name is most likely wrong.

# + hidden=true
from pathlib import Path

if not (Path('group_vars') / (ugroup_name + '.yml')).exists():
    raise RuntimeError(f"ERROR: not exists {ugroup_name + '.yml'}")

# + [markdown] hidden=true
# Confirm that the VC nodes belonging to the UnitGroup can be operated via Ansible.

# + hidden=true
# !ansible {ugroup_name} -m ping
# !ansible {ugroup_name} -b -a 'whoami'
# -

# ### Enter the VCC access token
#
# Enter the token used to access the VCC.

from getpass import getpass
vcc_access_token = getpass()

# + [markdown] heading_collapsed=true
# #### Check

# + [markdown] hidden=true
# Verify the entered access token is valid by actually connecting to the VCC.

# + hidden=true
from vcpsdk.vcpsdk import VcpSDK
vcp = VcpSDK(vcc_access_token)

# + [markdown] heading_collapsed=true
# ### Preparation
#
# Load parameters configured by earlier notebooks, etc.
#

# + [markdown] hidden=true
# Read the parameters saved in the group_vars file.

# + hidden=true
# %run scripts/group.py
gvars = load_group_vars(ugroup_name)

# + [markdown] hidden=true
# Export the parameters needed to reach the VCC Vault server as environment variables.

# + hidden=true
import os
os.environ['VAULT_ADDR'] = vcp.vcc_info()['vault_url']
os.environ['VAULT_TOKEN'] = vcc_access_token
# -

# ## Parameter settings
#
# Configure the target container name and the conditions for reaching the staging environment.

# Specify the container to update. The list of currently running container names
# is shown first for reference.

# !ansible {ugroup_name} -a 'chdir=/opt/moodle docker-compose ps --services' \
# | grep {gvars['project_tag']}

# Specify the name of the container to update.

# +
# (example)
# update_container_name = 'moodle-0'

update_container_name =
# -

# The environment built here switches between staging and production based on
# the client IP address. Specify the IP address (or CIDR) that should be routed
# to the staging environment.

# +
# (example)
# update_test_client = '192.168.10.0/24'
# update_test_client = '192.168.10.10'

update_test_client =
# -

# Specify a tag value (string) used to distinguish the staging environment from
# the current production environment.
#
# > Container and logical-volume names of each environment include this tag, so
# > the tag alone identifies which environment a container or volume belongs to.

# +
# (example)
# update_project_tag = '001'
# update_project_tag = '20200101XXXXXX'

update_project_tag =
# -

# A serial number or timestamp is recommended, but any alphanumeric string works.

# Check that the tag does not collide with an existing one and that the chosen
# container name actually exists.

if (update_project_tag == gvars['project_tag']
        or update_project_tag in [x['project_tag']
                                  for x in gvars.get('previous_info_list', [])
                                  if 'project_tag' in x]):
    raise RuntimeError("既に使用したタグ名です")

# !ansible {ugroup_name} -a 'chdir=/opt/moodle docker-compose ps --services' \
# | grep -q {update_container_name}

# Save the chosen parameters into group_vars.

update_container_target = update_container_name.split('-')[0]
update_group_vars(
    ugroup_name,
    update_project_tag=update_project_tag,
    update_container_target=update_container_target,
)

# + [markdown] heading_collapsed=true
# ## Creating the staging environment
#
# Clone the currently running production environment into a staging environment.
#
# ![creating the staging environment](images/moodle-121-02.png)

# + [markdown] hidden=true
# ### Cloning the logical volumes

# + [markdown] hidden=true
# Create snapshots of the logical volumes currently used by production.

# + hidden=true
# !ansible-playbook -l {ugroup_name} playbooks/snapshot-for-test.yml

# + [markdown] hidden=true
# List the logical volumes to confirm the snapshots were created.

# + hidden=true
# !ansible {ugroup_name} -b -a 'lvs -S "lv_attr=~V" -O lv_name'

# + [markdown] hidden=true
# Mount the created volumes.

# + hidden=true
# One entry per snapshot volume; the device name encodes the tag and, when the
# volume group is encrypted, an "-enc" suffix.
mount_points = [
    {
        'name': 'moodle',
        'vg': 'moodle',
        'mountpoint': f'/opt/moodle/moodletest-{update_project_tag}/data/moodledata',
    },
    {
        'name': 'php',
        'vg': 'moodle',
        'mountpoint': f'/opt/moodle/moodletest-{update_project_tag}/data/php',
    },
    {
        'name': 'db',
        'vg': 'db',
        'mountpoint': f'/opt/moodle/dbtest-{update_project_tag}/data',
    },
]
for item in mount_points:
    snapshot = f'{update_project_tag}_{item["name"]}_test'
    dev = f'/dev/mapper/{item["vg"]}-{snapshot}{"-enc" if gvars[item["vg"] + "_volume_encrypt"] else ""}'
    print(f'path={item["mountpoint"]} src={dev}')
    # !ansible {ugroup_name} -b -m mount -a \
    # 'path={item["mountpoint"]} src={dev} fstype=xfs state=mounted'

# + [markdown] hidden=true
# ### Cloning the configuration files
#
# Copy the production configuration files to create the staging configuration.

# + hidden=true
for name in ['moodle', 'db']:
    src = f'{name}-{gvars["project_tag"]}/conf'
    dst = f'{name}test-{update_project_tag}'
    print(f'cp -a {src} {dst}')
    # !ansible {ugroup_name} -b -a \
    # 'chdir=/opt/moodle cp -a {src} {dst}'

# + [markdown] hidden=true
# List the copied configuration files for confirmation.

# + hidden=true
target_dir = [f'{x}test-{update_project_tag}/conf' for x in ['moodle', 'db']]
# !ansible {ugroup_name} -b -a \
# 'chdir=/opt/moodle tree {" ".join(target_dir)}'

# + [markdown] hidden=true
# ### Container image
#
# Create the base container image for the staging environment from the
# production container that is to be updated.

# + hidden=true
target_image = f'local/{update_container_target}:{update_project_tag}-test'
print(target_image)
# !ansible {ugroup_name} -a \
# 'docker commit {update_container_name} {target_image}'

# + [markdown] hidden=true
# Confirm the container image was created.

# + hidden=true
# !ansible {ugroup_name} -a 'docker images {target_image}'

# + [markdown] hidden=true
# ### Updating docker-compose.yml
#
# Add the staging containers to `docker-compose.yml`.

# + [markdown] hidden=true
# The next cell generates an updated `docker-compose.yml` locally, shows a diff
# against the current file, and ends with a link for editing the result by hand.

# + hidden=true
# %run scripts/edit_conf.py
update_docker_compose(ugroup_name)

# + [markdown] hidden=true
# Deploy the updated `docker-compose.yml` to the VC nodes.

# + hidden=true
upload_docker_compose(ugroup_name)

# + [markdown] heading_collapsed=true
# ## Starting the staging environment
#
# Start the staging containers.
#
# ![starting the staging environment](images/moodle-121-03.png)

# + [markdown] hidden=true
# ### Starting the staging containers
# Start the containers of the staging environment.

# + [markdown] hidden=true
# Check the current container states.

# + hidden=true
# !ansible {ugroup_name} -a 'chdir=/opt/moodle \
# docker-compose ps'

# + [markdown] hidden=true
# Start the staging containers.

# + hidden=true
# !ansible {ugroup_name} -a 'chdir=/opt/moodle \
# docker-compose up -d'

# + [markdown] hidden=true
# Check the container states again after starting the staging containers.

# + hidden=true
# !ansible {ugroup_name} -a 'chdir=/opt/moodle \
# docker-compose ps'

# + [markdown] hidden=true
# ### Updating the packages

# + [markdown] hidden=true
# Update the packages inside the staging container that is the update target.

# + hidden=true
target_container = f'{update_container_target}test-{update_project_tag}'
print(target_container)
# The shell fragment handles both RPM-based (yum) and Debian-based (apt-get) images.
# !ansible {ugroup_name} -a 'chdir=/opt/moodle \
# docker-compose exec -T {target_container} bash -c \
# "( type yum && yum update -y ) || \
# ( type apt-get && apt-get update && apt-get -y upgrade )"'

# + [markdown] hidden=true
# Restart the container so the package updates take effect.

# + hidden=true
# !ansible {ugroup_name} -a 'chdir=/opt/moodle \
# docker-compose restart {target_container}'

# + [markdown] hidden=true
# ### Confirming the updated packages
#
# Fetch and compare the package lists of the production and staging containers
# to see which packages changed.

# + [markdown] hidden=true
# Get the package list of the production container.

# + hidden=true
current_container = f'{update_container_target}-{gvars["project_tag"]}'
# package_current = !ansible {ugroup_name} -m shell -a 'chdir=/opt/moodle \
# docker-compose exec -T {current_container} \
# bash -c "( type rpm > /dev/null 2>&1 && rpm -qa ) || \
# ( type dpkg > /dev/null 2>&1 && dpkg -l )" | sort'

# + [markdown] hidden=true
# Get the package list of the staging container.

# + hidden=true
# package_new = !ansible {ugroup_name} -m shell -a 'chdir=/opt/moodle \
# docker-compose exec -T {target_container} \
# bash -c "( type rpm > /dev/null 2>&1 && rpm -qa ) || \
# ( type dpkg > /dev/null 2>&1 && dpkg -l )" | sort'

# + [markdown] hidden=true
# Show the diff between the two package lists.

# + hidden=true
from difflib import unified_diff

for line in unified_diff(package_current, package_new,
                         fromfile=current_container, tofile=target_container):
    print(line)

# + [markdown] hidden=true
# ### Configuring the reverse proxy
#
# Configure the reverse proxy to switch between production and staging based on
# the client IP address.

# + [markdown] hidden=true
# Inspect the current reverse-proxy configuration file.

# + hidden=true
# !ansible {ugroup_name} -a 'cat /opt/moodle/proxy/conf/moodle-proxy.conf'

# + [markdown] hidden=true
# The next cell generates, locally, an Apache HTTP Server configuration file
# `moodle-proxy.conf` that switches between production and staging, then shows
# a diff against the current configuration. A link for hand-editing the
# generated `moodle-proxy.conf` is shown at the end.

# + hidden=true
# %run scripts/edit_conf.py
update_proxy_conf(
    ugroup_name,
    extra_vars={'update_test_client': update_test_client})

# + [markdown] hidden=true
# Deploy the locally modified `moodle-proxy.conf` to the VC nodes, then restart
# the reverse-proxy container so the new configuration takes effect.

# + hidden=true
apply_proxy_conf(ugroup_name)

# + [markdown] hidden=true
# Check the container states after the restart. Confirm the `proxy` container
# is in the `Up` state.

# + hidden=true
# !ansible {ugroup_name} -a 'chdir=/opt/moodle \
# docker-compose ps'
# -

# ## Confirm Moodle is usable
#
# Access the staging Moodle environment and confirm it works correctly.

# Running the next cell displays the Moodle URL.

from IPython.core.display import HTML
HTML(u'<a href="{0}/admin/index.php" target="_blank">{0}/admin/index.php</a>'.format(gvars['moodle_url']))
Moodle/notebooks/121-検証環境の構築--コンテナのセキュリティアップデート.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Diabetes Dataset

# Load the scikit-learn diabetes regression dataset (10 features, continuous target).
from sklearn import datasets

diabetes = datasets.load_diabetes()
features = diabetes.data
target = diabetes.target

import pandas as pd

# Wrap the feature matrix in a labelled DataFrame for inspection.
feature_frame = pd.DataFrame(features)
print(diabetes.feature_names)
feature_frame.columns = diabetes.feature_names

from sklearn import model_selection

# Hold out a test split (default 75/25, shuffled).
x_train, x_test, y_train, y_test = model_selection.train_test_split(features, target)
for split in (x_train, x_test, y_train, y_test):
    print(split.shape)

from sklearn.linear_model import LinearRegression

# Fit ordinary least squares on the training split and predict the held-out targets.
regressor = LinearRegression()
regressor.fit(x_train, y_train)
predictions = regressor.predict(x_test)

import matplotlib.pyplot as plt

# Scatter of actual vs. predicted target values; a good fit hugs the diagonal.
plt.scatter(y_test, predictions)
plt.show()
Lecture 7 Introduction to ML/Diabetes Dataset-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="Zqj7nzr_excp" import torch import torchvision from torchvision import transforms from torchvision.transforms import ToTensor from PIL import Image from os import listdir import random import torch.optim as optim from torch.autograd import Variable import torch.nn.functional as F import torch.nn as nn import random import numpy as np from scipy import misc from PIL import Image import glob import imageio import os import cv2 import matplotlib.pyplot as plt from google.colab import files # + id="Sfpmw5KwfzUd" colab={"base_uri": "https://localhost:8080/", "height": 131} outputId="853c849d-d357-450f-a38e-0800a351c5f6" from google.colab import drive drive.mount('/content/drive') # + [markdown] id="nblzWScxEwAK" # #Configuration: # # # + id="BkuRu0tHEsda" input_path = 'file for input images' target_path = 'file for target images' content_path = 'your folder for recources' #path where the train/validation tensors, model_weights, losses, validation will be saved batch_size = 8 train_tensor_size = 2000 #number of images per training_tensor (should be: train_tensor_size % batch_size = 0) val_tensor_size = 1000 #number of images per training_tensor (should be: train_tensor_size % batch_size = 0) num_train_tensors = 20 #number of train tensors (should be: train_tensor_size * num_train_tensors + val_tensor_size = |images|) load_model = False #if True model will be loaded from model_weights_path model_weights_path = "your model path here" #path where your model weights will be loaded learn_rate = 0.0001 #learning rate for training sgd_momentum = 0.9 #momentum for stochastic gradient descent sgd_weight_decay=5e-4 #weight_decay for stochastic gradient descent total_epochs = 50 #number of training epochs save_cycle = 5 #save model, loss, validation every save_cycle epochs 
categories = ["white", "black", "green", "red", "yellow"] #for creating rgb pixel to class category (one_hot) dict_val = {(0.0, 0.0, 0.0): (0.0, 1.0, 0.0, 0.0, 0.0), #black (0.0, 0.0, 1.0): (0.0, 1.0, 0.0, 0.0, 0.0), #black (fail) (0.0, 1.0, 0.0): (0.0, 0.0, 1.0, 0.0, 0.0), #green (0.0, 1.0, 1.0): (1.0, 0.0, 0.0, 0.0, 0.0), #white (fail) (1.0, 0.0, 0.0): (0.0, 0.0, 0.0, 1.0, 0.0), #red (1.0, 0.0, 1.0): (1.0, 0.0, 0.0, 0.0, 0.0), #white (fail) (1.0, 1.0, 0.0): (0.0, 0.0, 0.0, 0.0, 1.0), #yellow (1.0, 1.0, 1.0): (1.0, 0.0, 0.0, 0.0, 0.0)} #white #for making model output to real output dict_reverse = {(0.0, 1.0, 0.0, 0.0, 0.0) : (0.0, 0.0, 0.0), #black (0.0, 0.0, 1.0, 0.0, 0.0) : (0.0, 1.0, 0.0), #green (0.0, 0.0, 0.0, 1.0, 0.0) : (1.0, 0.0, 0.0), #red (0.0, 0.0, 0.0, 0.0, 1.0) : (1.0, 1.0, 0.0), #yellow (1.0, 0.0, 0.0, 0.0, 0.0) : (1.0, 1.0, 1.0)} #white #for creating rgb pixel to class category (single value, cross entropyloss only allows single value) dict_train = {(0.0, 0.0, 0.0): 1, #black (0.0, 0.0, 1.0): 1, #black (fail) (0.0, 1.0, 0.0): 2, #green (0.0, 1.0, 1.0): 0, #white (fail) (1.0, 0.0, 0.0): 3, #red (1.0, 0.0, 1.0): 0, #white (fail) (1.0, 1.0, 0.0): 4, #yellow (1.0, 1.0, 1.0): 0} #white # + id="3FVW8Z_EvmTY" class SegNet(nn.Module): """neural network architecture inspired by SegNet""" def __init__(self): super(SegNet, self).__init__() #Encoder self.conv1 = nn.Conv2d(3, 64, (3,3), padding=1) self.conv2 = nn.Conv2d(64, 64, (3,3), padding=1) self.enc1_bn = nn.BatchNorm2d(64) self.maxpool1 = nn.MaxPool2d(2,2) self.conv3 = nn.Conv2d(64, 128, (3,3), padding=1) self.conv4 = nn.Conv2d(128, 128, (3,3), padding=1) self.enc2_bn = nn.BatchNorm2d(128) self.maxpool2 = nn.MaxPool2d((2,2),2) self.conv5 = nn.Conv2d(128, 256, (3,3), padding=1) self.conv6 = nn.Conv2d(256, 256, (3,3), padding=1) self.conv7 = nn.Conv2d(256, 256, (3,3), padding=1) self.enc3_bn = nn.BatchNorm2d(256) self.maxpool3 = nn.MaxPool2d((2,2),2) self.conv8 = nn.Conv2d(256, 512, (3,3), padding=1) 
self.conv9 = nn.Conv2d(512, 512, (3,3), padding=1) self.conv10 = nn.Conv2d(512, 512, (3,3), padding=1) self.enc4_bn = nn.BatchNorm2d(512) self.maxpool4 = nn.MaxPool2d((2,2),2) self.conv11 = nn.Conv2d(512, 512, (3,3), padding=1) self.conv12 = nn.Conv2d(512, 512, (3,3), padding=1) self.conv13 = nn.Conv2d(512, 512, (3,3), padding=1) self.enc5_bn = nn.BatchNorm2d(512) self.maxpool5 = nn.MaxPool2d((2,2),2) #Decoder self.upsample1 = nn.Upsample(scale_factor=2) self.conv14 = nn.Conv2d(512,512, (3,3), padding=1) self.conv15 = nn.Conv2d(512,512, (3,3), padding=1) self.conv16 = nn.Conv2d(512,512, (3,3), padding=1) self.dec1_bn = nn.BatchNorm2d(512) self.upsample2 = nn.Upsample(scale_factor=2) self.conv17 = nn.Conv2d(512,512, (3,3), padding=1) self.conv18 = nn.Conv2d(512,512, (3,3), padding=1) self.conv19 = nn.Conv2d(512,256, (3,3), padding=1) self.dec2_bn = nn.BatchNorm2d(256) self.upsample3 = nn.Upsample(scale_factor=2) self.conv20 = nn.Conv2d(256,256, (3,3), padding=1) self.conv21 = nn.Conv2d(256,256, (3,3), padding=1) self.conv22 = nn.Conv2d(256,128, (3,3), padding=1) self.dec3_bn = nn.BatchNorm2d(128) self.upsample4 = nn.Upsample(scale_factor=2) self.conv23 = nn.Conv2d(128,128, (3,3), padding=1) self.conv24 = nn.Conv2d(128,64, (3,3), padding=1) self.dec4_bn = nn.BatchNorm2d(64) self.upsample5 = nn.Upsample(scale_factor=2) self.conv25 = nn.Conv2d(64,64, (3,3), padding=1) self.conv26 = nn.Conv2d(64,5, (3,3), padding=1) self.softmax = nn.Softmax(dim=1) def forward(self, x): #Encoder x = F.relu(self.enc1_bn(self.conv2(F.relu(self.conv1(x))))) #print(x.size()) x = self.maxpool1(x) #print(x.size()) x = F.relu(self.enc2_bn(self.conv4(F.relu(self.conv3(x))))) #print(x.size()) x = self.maxpool2(x) #print(x.size()) x = F.relu(self.enc3_bn(self.conv7(F.relu(self.conv6(F.relu(self.conv5(x))))))) #print(x.size()) x = self.maxpool3(x) #print(x.size()) x = F.relu(self.enc4_bn(self.conv10(F.relu(self.conv9(F.relu(self.conv8(x))))))) #print(x.size()) x = self.maxpool4(x) #print(x.size()) 
x = F.relu(self.enc5_bn(self.conv13(F.relu(self.conv12(F.relu(self.conv11(x))))))) #print(x.size()) x = self.maxpool5(x) #print(x.size()) #print() #Decoder x = F.relu(self.dec1_bn(self.conv16(F.relu(self.conv15(F.relu(self.conv14(self.upsample1(x)))))))) #print(x.size()) x = F.relu(self.dec2_bn(self.conv19(F.relu(self.conv18(F.relu(self.conv17(self.upsample2(x)))))))) #print(x.size()) x = F.relu(self.dec3_bn(self.conv22(F.relu(self.conv21(F.relu(self.conv20(self.upsample3(x)))))))) #print(x.size()) x = F.relu(self.dec4_bn(self.conv24(F.relu(self.conv23(self.upsample4(x)))))) #print(x.size()) x = self.conv26(F.relu(self.conv25(self.upsample4(x)))) #print(x.size()) return x # + id="HO6IxRmVeiFj" def create_data(data_start, data_size, batch_size, input_path, target_path, target_dict, real_sequence, is_train): """create data for training/validation from img and xml to tensor""" transform = transforms.Compose([transforms.Resize((320, 576)), transforms.ToTensor()]) input_list = [] target_list = [] data = [] weights = [0,0,0,0,0] #weights for cross entropy loss pixel_class = [] #single pixel class inputs = os.listdir(input_path) inputs.sort() targets = os.listdir(target_path) targets.sort() for x in range(data_start, data_size): if(len(real_sequence) == 0): break #print("len sequence",len(real_sequence)) index = random.choice(real_sequence) real_sequence.remove(index) print(x) #if(len(data) == 8 and not is_train): # break #if(len(data) == 4): # break input = Image.open(input_path + inputs[index]) input_list.append(transform(input)) #input_list.append(ToTensor()(input)) target = Image.open(target_path + targets[index]) target_tensor = torch.round(transform(target)) #target_tensor = torch.round(ToTensor()(target)) if (is_train): target_tensor_final = torch.zeros(320,576, dtype=torch.long) #cross entropy loss allowed only torch.long else: target_tensor_final = torch.zeros(5,320,576, dtype=torch.long) for i in range(320): for j in range(576): pixel_class = 
target_dict[tuple(target_tensor[:,i,j].tolist())] #print("pixel class", pixel_class) #print("tensor", torch.tensor(pixel_class, dtype=torch.long)) #print("target size", target_tensor_final.size()) if (is_train): weights[pixel_class] += 1 target_tensor_final[i,j] = torch.tensor(pixel_class, dtype=torch.long) else: target_tensor_final[:,i,j] = torch.tensor(pixel_class, dtype=torch.long) weights[pixel_class.index(1)] += 1 target_list.append(target_tensor_final) if len(input_list) >= batch_size: data.append((torch.stack(input_list), torch.stack(target_list))) input_list = [] target_list = [] print('Loaded batch ', len(data), 'of ', int(len(inputs) / batch_size)) print('Percentage Done: ', 100 * (len(data) / int(len(inputs) / batch_size)), '%') weights = torch.tensor(weights, dtype=torch.float64) #weights = 1/(weights/weights.min()) #press weights in [0,1], with maximum value for each class return data, weights # + id="EviGvOgaX8r_" def train(train_data, model, optimizer, criterion, device): """ Trains/updates the model for one epoch on the training dataset. Parameters: train_data (torch tensor): trainset model (torch.nn.module): Model to be trained optimizer (torch.optim.optimizer): optimizer instance like SGD or Adam criterion (torch.nn.modules.loss): loss function like CrossEntropyLoss device (string): cuda or cpu """ # switch to train mode model.train() # iterate through the dataset loader i = 0 losses = [] for (inp, target) in train_data: # transfer inputs and targets to the GPU (if it is available) inp = inp.to(device) target = target.to(device) # compute output, i.e. 
the model forward output = model(inp) # calculate the loss loss = criterion(output, target) #print("loss", loss) losses.append(loss) print("loss {:.2}".format(loss)) # compute gradient and do the SGD step # we reset the optimizer with zero_grad to "flush" former gradients optimizer.zero_grad() loss.backward() optimizer.step() avg_loss = torch.mean(torch.stack(losses)).item() print("avg.loss {:.2}".format(avg_loss)) return losses # + id="utLbMONSGLCB" def calc_accuracy(output, target): """calculate accuracy from tensor(b,c,x,y) for every category c""" accs = [] acc_tensor = (output == target).int() for c in range(target.size(1)): correct_num = acc_tensor[:,c].sum().item() #item convert tensor in integer #print(correct_num) total_num = acc_tensor[:,c].numel() #print(total_num) accs.append(correct_num/total_num) return accs # + id="OqKInFHXGYNP" def calc_precision(output, target): """calculate precision from tensor(b,c,x,y) for every category c""" precs = [] for c in range(target.size(1)): true_positives = ((output[:,c] - (output[:,c] != 1).int()) == target[:,c]).int().sum().item() #print(true_positives) false_positives = ((output[:,c] - (output[:,c] != 1).int()) == (target[:,c] != 1).int()).int().sum().item() #print(false_positives) if(true_positives == 0): precs.append(1.0) else: precs.append(true_positives / (true_positives + false_positives)) return precs # + id="LK8pziSIGsuY" def calc_recall(output, target): """calculate recall from tensor(b,c,x,y) for every category c""" recs = [] for c in range(target.size(1)): relevants = (target[:,c] == 1).int().sum().item() #print(relevants) true_positives = ((output[:,c] - (output[:,c] != 1).int()) == target[:,c]).int().sum().item() #print(true_positives) if (relevants == 0): recs.append(1.0) else: recs.append(true_positives/relevants) return recs # + id="9RubCUwwG5Vm" def convert_to_one_hot(tensor, device): """converts a tensor from size (b,c,x,y) to (b,c,x,y) one hot tensor for c categorys""" for i in 
range(tensor.size(0)): max_idx = torch.argmax(tensor[i], 0, keepdim=True) one_hot = torch.FloatTensor(tensor[i].shape).to(device) one_hot.zero_() tensor[i] = one_hot.scatter_(0, max_idx, 1) # + id="wy8MoSpHBD3D" def validate(val_dataset, model, device, categories): """ validate the model with some validationfunctions on the test/validation dataset. Parameters: val_data (torch tensor): test/validation dataset model (torch.nn.module): Model to be trained loss (torch.nn.modules.loss): loss function like CrossEntropyLoss device (string): cuda or cpu categories (list): names of categories """ model.eval() # avoid computation of gradients and necessary storing of intermediate layer activations with torch.no_grad(): accs_avg = [0,0,0,0,0] precs_avg = [0,0,0,0,0] recs_avg = [0,0,0,0,0] counter = 0 for (inp, target) in val_dataset: # transfer to device inp = inp.to(device) target = target.to(device) # compute output output = model(inp) #print("before extra softmax") #print(output[:,:,100,100]) output = model.softmax(output) #print("after extra softmax") #print(output[:,:,100,100]) # convert from probabilities to one hot vectors convert_to_one_hot(output, device) #print("after convert to one hot") #print(output[:,:,100,100]) accs = calc_accuracy(output, target) precs = calc_precision(output, target) recs = calc_recall(output, target) #print("loss {:.2} IOU {:.2}".format(loss,iou)) for i in range(len(categories)): print("category {:10} accuracy {:.2} precision {:.2} recall {:.2} ".format(categories[i], accs[i], precs[i], recs[i])) accs_avg[i] += accs[i] precs_avg[i] += precs[i] recs_avg[i] += recs[i] print() counter += 1 for i in range(len(categories)): accs_avg[i] /= counter precs_avg[i] /= counter recs_avg[i] /= counter print("avg.category {:10} accuracy {:.2} precision {:.2} recall {:.2} ".format(categories[i], accs_avg[i], precs_avg[i], recs_avg[i])) return [accs_avg, precs_avg, recs_avg] # + id="zHj4zDuSR7Fv" def create_rgb_output(data, model, device, dict_reverse): 
"""create rgb pictures from model output for data (rgb-image) on device parameter: data: torch.tensor (b,3,x,y) model: torch####################################################################### """ output = model(data.to(device)) final_output = model.softmax(output) convert_to_one_hot(final_output, device) real_output_tensor = torch.zeros(data.size(0),3,data.size(2), data.size(3), dtype=torch.float64) for x in range(data.size(0)): for i in range(data.size(2)): for j in range(data.size(3)): real_output_tensor[x][:,i,j] = torch.tensor(dict_reverse[tuple(final_output[x,:,i,j].tolist())]) return real_output_tensor # + id="jLo8TiB9R_UX" def plot_tensor(tensor): """plot tensor(3,x,y) as rgb-image""" plt.imshow(tensor.permute(1,2,0)) # + id="HLcJmU26-OAs" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="53a2c0c6-3344-4d96-cf38-23d5468e50d5" real_sequence = list(range(len(os.listdir(input_path)))) #create a list from [0,...,number of input pictures-1] !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! indices = [i*train_tensor_size for i in range(num_train_tensors +1)] #size of train tensors always has to be rejusted !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
for i in range(1,len(indices)): train_data, weights = create_data(indices[i-1],indices[i],batch_size, input_path, target_path, dict_train, real_sequence, True) torch.save(train_data, content_path + "Train_Tensor" + str(i) + ".pt") torch.save(weights, content_path + "Train_Weights" + str(i) + ".pt") real_sequence = list(range(len(os.listdir(input_path)))) val_data, _ = create_data(0,val_tensor_size, batch_size, input_path, target_path, dict_val, real_sequence, False) #always has to be rejusted torch.save(val_data, content_path + "Val_Tensor_Test.pt") # + id="f2VEpVq0ZYD-" colab={"base_uri": "https://localhost:8080/", "height": 884} outputId="b1f537a1-31e5-4d92-b994-ff158b26f458" # set a boolean flag that indicates whether a cuda capable GPU is available # we will need this for transferring our tensors to the device and # for persistent memory in the data loader is_gpu = torch.cuda.is_available() print("GPU is available:", is_gpu) print("If you are receiving False, try setting your runtime to GPU") # set the device to cuda if a GPU is available device = torch.device("cuda" if is_gpu else "cpu") #create model model = SegNet().to(device) if(load_model): model.load_state_dict(torch.load(model_weights_path))##################################################################### #define loss function weights = torch.load(content_path + "/Train_Weights_Test1.pt") for i in range(2, num_train_tensors +1): weights += torch.load(content_path + "/drive/My Drive/Train_Weights" + str(i) + ".pt") weights = 1/(weights/weights.min()) #press weights in [0,1], with maximum value for each class weights = weights.type(torch.FloatTensor) weights = weights.to(device) print("weights", weights) criterion = nn.CrossEntropyLoss(weights) #set optimizer for backpropagation optimizer = torch.optim.SGD(model.parameters(), lr= learn_rate, momentum = sgd_momentum, weight_decay= sgd_weight_decay) print(model) # + id="6oML5n20Zc5s" colab={"base_uri": "https://localhost:8080/", "height": 1000} 
outputId="365f17b6-57c3-45be-f04a-7083bf142bde" val_list = [] loss_list = [] val_data = torch.load(content + "Val_Tensor.pt") for epoch in range(0, total_epochs): print("EPOCH:", epoch + 1) print("TRAIN") for i in range(1, num_train_tensors +1): #tensor_number): print("train_data_number:", i) train_data = torch.load(content_path + "Train_Tensor" +str(i) +".pt") loss_list.append(train(train_data, model, optimizer, criterion, device)) print("VALIDATION") val_list.append(validate(val_data, model, device, categories)) if ((epoch) % save_cycle == 0): torch.save(model.state_dict(), content_path + "Model_weights_" + str(epoch) + ".pt") torch.save(val_list, content_path + "val_list.pt") torch.save(loss_list, content_path + "loss_list.pt")
Colab Notebooks (Scripts)/Subwaystation_Segmentation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # LASSO Regression
#
# ### Objective: $$J(\theta) = \mathrm{MSE}(y, \hat{y}; \theta) + \alpha \sum_{i=1}^{n} |\theta_i|$$

import numpy as np
import matplotlib.pyplot as plt

# Synthetic data: a noisy line y = 0.5x + 3 with x uniform on [-3, 3].
np.random.seed(42)
x = np.random.uniform(-3.0, 3.0, size=100)
X = x.reshape(-1, 1)
y = 0.5 * x + 3 + np.random.normal(0, 1, size=100)

plt.scatter(x, y)
plt.show()

# +
from sklearn.model_selection import train_test_split

np.random.seed(666)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)

# +
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression


def PolynomialRegression(degree):
    """Pipeline: polynomial features -> standardization -> ordinary least squares.

    degree: degree of the polynomial feature expansion.
    Returns an unfitted sklearn Pipeline.
    """
    return Pipeline([
        ('poly', PolynomialFeatures(degree=degree)),   # fixed step-name typo: was 'ploy'
        ('std_scaler', StandardScaler()),
        ('lin_reg', LinearRegression())
    ])
# -

from sklearn.metrics import mean_squared_error

# A degree-20 polynomial heavily overfits this data set.
poly_reg = PolynomialRegression(degree=20)
poly_reg.fit(X_train, y_train)
y_poly_predict = poly_reg.predict(X_test)
mean_squared_error(y_test, y_poly_predict)

# +
X_plot = np.linspace(-3, 3, 100).reshape(100, 1)   # fixed typo: was X_polt
y_plot = poly_reg.predict(X_plot)

plt.scatter(x, y)
plt.plot(X_plot[:, 0], y_plot, color='r')
plt.axis([-3, 3, 0, 6])
plt.show()
# -


def plot_model(model):
    """Scatter the data and overlay `model`'s prediction curve on [-3, 3]."""
    X_plot = np.linspace(-3, 3, 100).reshape(100, 1)
    y_plot = model.predict(X_plot)
    plt.scatter(x, y)
    plt.plot(X_plot[:, 0], y_plot, color='r')
    plt.axis([-3, 3, 0, 6])
    plt.show()


plot_model(poly_reg)

# ## LASSO

from sklearn.linear_model import Lasso


def LassoRegression(degree, alpha):
    """Same pipeline as PolynomialRegression, but with an L1-penalized regressor.

    degree: degree of the polynomial feature expansion.
    alpha:  L1 regularization strength passed to Lasso.
    Returns an unfitted sklearn Pipeline.
    """
    return Pipeline([
        ('poly', PolynomialFeatures(degree=degree)),   # fixed step-name typo: was 'ploy'
        ('std_scaler', StandardScaler()),
        ('lasso_reg', Lasso(alpha=alpha))
    ])


# + A larger alpha means a stronger L1 penalty and a smoother fitted curve.
lasso1_reg = LassoRegression(20, 0.01)
lasso1_reg.fit(X_train, y_train)

y1_predict = lasso1_reg.predict(X_test)
mean_squared_error(y_test, y1_predict)
# -

plot_model(lasso1_reg)

# +
lasso2_reg = LassoRegression(20, 0.1)
lasso2_reg.fit(X_train, y_train)

y2_predict = lasso2_reg.predict(X_test)
mean_squared_error(y_test, y2_predict)
# -

plot_model(lasso2_reg)

# +
lasso3_reg = LassoRegression(20, 1)
lasso3_reg.fit(X_train, y_train)

y3_predict = lasso3_reg.predict(X_test)
mean_squared_error(y_test, y3_predict)
# -

plot_model(lasso3_reg)
data/20-LASSO.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Analysis of Student Performance Data Set # # Education is a key factor in the development of social society. The study on improving education quality has never been stopped. Nowadays, with more structured data collected from students, a qualitative analysis using machine learning can give insights into the factors that have a strong influence on students' performance. Here in this report, we analyzed the _Student Performance Data Set[1]_ from [UCI](http://archive.ics.uci.edu/ml/datasets/Student+Performance) using a few supervised machine learning algorithms. # # This data collect student achievement in secondary education of two Portuguese schools. The math and Portuguese exam grades are recorded as the measure of performance. In this report, we focus on the math data set. There are 395 students has been surveyed on 30 school, demographic related features, such as gender, age, study time, etc. in this math dataset. We would like to use these features to build a model that can predict the performance of a new student. In addition, we also did feature selection to see the importance (correlation) of each feature on students' performance. # # There are three grades recorded as `G1, G2, G3` in three exams. They are highly correlated as expected. Here we focus on the prediction on `G3`, the final grade in the range [0, 20]. The following is a distribution of the grades # # ![](../results/images/G3.png) # # # # # # # [1]<NAME> and <NAME>. Using Data Mining to Predict Secondary School Student Performance. In <NAME> and <NAME> Eds., Proceedings of 5th FUture BUsiness TEChnology Conference (FUBUTEC 2008) pp. 5-12, Porto, Portugal, April, 2008, EUROSIS, ISBN 978-9077381-39-7.
doc/.ipynb_checkpoints/Analysis_student_performance-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Creating instance objects

class name():
    def __init__(self):  # fixed: was `_init_` (single underscores), which Python never calls
        # instance attributes are defined here through `self`
        pass             # fixed: the original cell had no body, which is a syntax error


objective = name()


class Point:
    def __init__(self):  # fixed: was `_init_`; `self` refers to the newly created instance
        self.x = 3
        self.y = 4
        # instance attributes include x and y


p = Point()


class Point:
    def __init__(self, x, y):  # fixed: was `_init_`; the initializer can take custom parameters
        self.x = x
        self.y = y             # fixed: was `self.y = x`, which silently discarded the y argument
        # instance attributes include x and y


p = Point(1, 5)  # 1 and 5 go into x and y respectively

# Basic instance-object syntax
class Point:
    def __init__(self, x, y):  # fixed: was `_init_`
        self.x = x
        self.y = y             # fixed: was `self.y = x`
        # instance attributes include x and y


p = Point(1, 5)
print(p.x + p.y)  # 6 (the original bug made this print 2)

# +
# Designing instance objects
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y


p1 = Point(x=1, y=3)  # create an instance object and store it in a variable
print(p1.x, p1.y)

p2 = Point(x=7, y=9)  # any number of independent instances can be created
print(p2.x, p2.y)

# +
class FullName:
    def __init__(self, first, last):
        self.first = first
        self.last = last


name1 = FullName(first='J.H', last='Goddric')
print(name1.first, name1.last)
name2 = FullName(first='St.', last='john')
print(name2.first, name2.last)

# +
# Instance methods: functions encapsulated in an instance object.
# Template (kept as comments because it is pseudo-code, not runnable Python):
#
#   class ClassName:
#       def __init__(self, ...):     # define the initializer
#           ...
#       def method_name(self, ...):  # define a method; use `self` to reach the instance
#           ...
# -

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def show(self):
        print(self.x, self.y)


p = Point(1, 5)  # create the instance object
p.show()         # call the instance method

# +
# Point instance object
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    # define an instance method
    def show(self):
        print(self.x, self.y)

    # Euclidean distance from this point to (targetX, targetY)
    def distance(self, targetX, targetY):
        return (((self.x - targetX) ** 2) + ((self.y - targetY) ** 2)) ** 0.5


p = Point(3, 4)
p.show()  # call the instance method / function

result = p.distance(0, 0)  # compute the distance and store it in result
print(result)
# -

# File instance object design: wraps reading a file.
class File:
    def __init__(self, name):
        self.name = name
        self.file = None  # the file is not open yet

    def open(self):
        self.file = open(self.name, mode='r', encoding='utf-8')

    def read(self):
        # assumes open() was called first; otherwise self.file is None
        return self.file.read()


f1 = File('data.txt')
f1.open()
data = f1.read()
Instance-Attributes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="qYamHQ5gtcJl"
# ## Truncation of $sin(x)$ at different orders:

# + id="J-BIF2g7xXL1" outputId="cd4e8ad8-5409-43ea-c5da-02e1ea29e503" colab={"base_uri": "https://localhost:8080/", "height": 132}
# Fixed: the original cell evaluated `numpy.math.factorial` without importing
# numpy, and `numpy.math` has been removed from modern NumPy releases anyway.
from math import factorial
factorial

# + [markdown] id="UGcIEvABtcJl"
# $$\sin x = x - \frac{x^3}{3!}+\frac{x^5}{5!}-\frac{x^7}{7!}+\frac{x^9}{9!}-\frac{x^{11}}{11!}+\frac{x^{13}}{13!}-\frac{x^{15}}{15!}+ O(x^{17})$$

# + [markdown] id="dwnKiiLbtcJm"
# $$\sin x = \sum_{n=0}^{\infty} \frac{(-1)^n x^{2n+1}}{(2n+1)!} $$

# + id="yjeN9dejtcJm" outputId="52dfd24e-8767-43ae-86ea-f5ca4298ce9a" colab={"base_uri": "https://localhost:8080/"}
from math import pi, sqrt, factorial
print(pi)

# + id="p9AzmUYstcJq"
def mysin(x, orden):
    """Taylor approximation of sin(x) truncated after `orden` terms.

    Sums (-1)^i * x^(2i+1) / (2i+1)! for i = 0 .. orden-1 and returns the
    partial sum as a float. `orden` must be a positive integer.
    """
    y = 0
    for i in range(orden):
        y += ((-1) ** i) * (x ** (1 + 2 * i)) / factorial(1 + 2 * i)
    return y


# + id="Q4_3_plutcJt" outputId="01940184-a5d2-435f-ce09-63047be51b4c" colab={"base_uri": "https://localhost:8080/"}
mysin(pi/6, 7)

# + id="6ZIM3bsqtcJv" outputId="1bd4ae1d-669e-441d-94e5-3ecb845a2e38" colab={"base_uri": "https://localhost:8080/"}
mysin(pi/6, 3)

# + id="hQ8JS_cttcJx" outputId="00ed5ae6-fb01-4ddf-e686-07efce102b96" colab={"base_uri": "https://localhost:8080/"}
mysin(pi/6, 2)

# + id="0vWKr3H2tcJz" outputId="dbc10372-21de-4e65-ab9d-bec5ba0c3e89" colab={"base_uri": "https://localhost:8080/"}
mysin(pi/6, 1)

# + id="Wb4GqDzotcJ1" outputId="3595e4f9-45af-4b33-c48a-076dd37eeb75" colab={"base_uri": "https://localhost:8080/"}
# Fixed: the original cell evaluated `vmysin` here, two cells before it is
# defined, which raises NameError when the notebook runs top to bottom.
# vmysin

# + id="XXSWbt3ntcJ4" outputId="1c98012d-7bf5-4263-fb38-3814d3270021" colab={"base_uri": "https://localhost:8080/", "height": 295}
import numpy as np

# Vectorize mysin over the array argument.
# Fixed: the excluded argument name must match the parameter ('orden', not 'order').
vmysin = np.vectorize(mysin, excluded=['orden'])

# Other ranges tried during exploration: +-pi/2, +-pi, +-2*pi.
x = np.linspace(-4*pi, 4*pi, 500)
y2 = vmysin(x, 2)
y3 = vmysin(x, 3)
y4 = vmysin(x, 4)
y5 = vmysin(x, 5)
y6 = vmysin(x, 6)
y7 = vmysin(x, 7)
y8 = vmysin(x, 8)
y = np.sin(x)

import matplotlib.pyplot as plt

plt.plot(x, y, label='sin(x)')
plt.plot(x, y2, label='orden 2')
plt.plot(x, y3, label='orden 3')
plt.plot(x, y4, label='orden 4')
plt.plot(x, y5, label='orden 5')
plt.plot(x, y6, label='orden 6')
plt.plot(x, y7, label='orden 7')
plt.plot(x, y8, label='orden 8')
plt.title('Aproximación de sen(x) en distintos órdenes')
plt.xlabel('x: ángulo (radianes)')
plt.ylabel('sin(x)')
plt.ylim([-3, 3])
plt.legend()
plt.savefig('plot1.png')
#plt.show()

# + id="Q6KSi8zztcJ6" outputId="ef4e3424-b18c-4d04-e3ee-2ef692edba9c" colab={"base_uri": "https://localhost:8080/", "height": 295}
# Truncation error (exact minus approximation) for each order.
plt.plot(x, y-y2, label='err. ord. 2')
plt.plot(x, y-y3, label='err. ord. 3')
plt.plot(x, y-y4, label='err. ord. 4')
plt.plot(x, y-y5, label='err. ord. 5')
plt.plot(x, y-y6, label='err. ord. 6')
plt.plot(x, y-y7, label='err. ord. 7')
plt.plot(x, y-y8, label='err. ord. 8')
plt.title('Errores de aproximación de sen(x) en distintos órdenes')
plt.xlabel('x: ángulo (radianes)')
plt.ylabel('sin(x)')
plt.ylim([-3, 3])
plt.legend()
plt.show()

# + id="NqHl5M7gtcJ8" outputId="2ab76283-33ec-456b-d8cc-04e02b2b1e08" colab={"base_uri": "https://localhost:8080/", "height": 295}
# Same error curves, zoomed in near the left end of the range.
plt.plot(x, y-y2, label='err. ord. 2')
plt.plot(x, y-y3, label='err. ord. 3')
plt.plot(x, y-y4, label='err. ord. 4')
plt.plot(x, y-y5, label='err. ord. 5')
plt.plot(x, y-y6, label='err. ord. 6')
plt.plot(x, y-y7, label='err. ord. 7')
plt.plot(x, y-y8, label='err. ord. 8')
plt.title('Errores de aproximación de sen(x) en distintos órdenes')
plt.xlabel('x: ángulo (radianes)')
plt.ylabel('sin(x)')
plt.ylim([-0.5, 0.5])
plt.xlim([-6, 0])
#plt.legend()
plt.show()
Notebooks/Taller 4/Taller4_errores.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # MadMiner particle physics tutorial # # # Part 3a: Training a likelihood ratio estimator # # <NAME>, <NAME>, <NAME>, and <NAME> 2018-2019 # In part 3a of this tutorial we will finally train a neural network to estimate likelihood ratios. We assume that you have run part 1 and 2a of this tutorial. If, instead of 2a, you have run part 2b, you just have to load a different filename later. # ## Preparations # + from __future__ import absolute_import, division, print_function, unicode_literals import logging import numpy as np import matplotlib from matplotlib import pyplot as plt # %matplotlib inline from madminer.sampling import SampleAugmenter from madminer import sampling from madminer.ml import ParameterizedRatioEstimator # + # MadMiner output logging.basicConfig( format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s', datefmt='%H:%M', level=logging.INFO ) # Output of all other modules (e.g. matplotlib) for key in logging.Logger.manager.loggerDict: if "madminer" not in key: logging.getLogger(key).setLevel(logging.WARNING) # - # ## 1. Make (unweighted) training and test samples with augmented data # At this point, we have all the information we need from the simulations. But the data is not quite ready to be used for machine learning. The `madminer.sampling` class `SampleAugmenter` will take care of the remaining book-keeping steps before we can train our estimators: # # First, it unweights the samples, i.e. for a given parameter vector `theta` (or a distribution `p(theta)`) it picks events `x` such that their distribution follows `p(x|theta)`. The selected samples will all come from the event file we have so far, but their frequency is changed -- some events will appear multiple times, some will disappear. 
# # Second, `SampleAugmenter` calculates all the augmented data ("gold") that is the key to our new inference methods. Depending on the specific technique, these are the joint likelihood ratio and / or the joint score. It saves all these pieces of information for the selected events in a set of numpy files that can easily be used in any machine learning framework. #sampler = SampleAugmenter('data/lhe_data_shuffled.h5') sampler = SampleAugmenter('data/delphes_data_shuffled.h5') # The `SampleAugmenter` class defines five different high-level functions to generate train or test samples: # - `sample_train_plain()`, which only saves observations x, for instance for histograms or ABC; # - `sample_train_local()` for methods like SALLY and SALLINO, which will be demonstrated in the second part of the tutorial; # - `sample_train_density()` for neural density estimation techniques like MAF or SCANDAL; # - `sample_train_ratio()` for techniques like CARL, ROLR, CASCAL, and RASCAL, when only theta0 is parameterized; # - `sample_train_more_ratios()` for the same techniques, but with both theta0 and theta1 parameterized; # - `sample_test()` for the evaluation of any method. # # For the arguments `theta`, `theta0`, or `theta1`, you can (and should!) use the helper functions `benchmark()`, `benchmarks()`, `morphing_point()`, `morphing_points()`, and `random_morphing_points()`, all defined in the `madminer.sampling` module. # # Here we'll train a likelihood ratio estimator with the ALICES method, so we focus on the `extract_samples_train_ratio()` function. We'll sample the numerator hypothesis in the likelihood ratio with 1000 points drawn from a Gaussian prior, and fix the denominator hypothesis to the SM. # # Note the keyword `sample_only_from_closest_benchmark=True`, which makes sure that for each parameter point we only use the events that were originally (in MG) generated from the closest benchmark. This reduces the statistical fluctuations in the outcome quite a bit. 
lll = np.load("./data/samples/train_ratioJB.npy") # # load saved version from next time!!! x, theta0, theta1, y, r_xz, t_xz = [np.load("./data/samples/{}_train_ratioJB.npy".format(var)) for var in ["x", "theta0", "theta1", "y", "r_xz", "t_xz"]] x, theta0, theta1, y, r_xz, t_xz, n_effective = sampler.sample_train_ratio( theta0=sampling.random_morphing_points(100, [('flat', 0., 2.)]), theta1=sampling.benchmark('sm'), n_samples=10000000, folder='./data/samples', filename='train_ratioJB', sample_only_from_closest_benchmark=True, #sample_only_from_closest_benchmark=False, return_individual_n_effective=True, ) # Let's also make a validation sample: # + active="" # _ = sampler.sample_train_ratio( # theta0=sampling.random_morphing_points(100, [('flat', 0., 2.)]), # theta1=sampling.benchmark('sm'), # n_samples=100000, # folder='./data/samples', # filename='val_ratioJB', # sample_only_from_closest_benchmark=True, # return_individual_n_effective=True, # partition="validation" # ) # - # For the evaluation we'll need a few test samples: # + active="" # _ = sampler.sample_test( # theta=sampling.morphing_point([0.]), # n_samples=10000, # folder='./data/samples', # filename='test00' # ) # # _ = sampler.sample_test( # theta=sampling.morphing_point([0.3]), # n_samples=10000, # folder='./data/samples', # filename='test08' # ) # # _ = sampler.sample_test( # theta=sampling.morphing_point([1.0]), # n_samples=10000, # folder='./data/samples', # filename='test10' # ) # # _ = sampler.sample_test( # theta=sampling.morphing_point([1.2]), # n_samples=10000, # folder='./data/samples', # filename='test12' # ) # # _ = sampler.sample_test( # theta=sampling.morphing_point([1.7]), # n_samples=10000, # folder='./data/samples', # filename='test20' # ) # # _ = sampler.sample_test( # theta=sampling.morphing_point([2.]), # n_samples=10000, # folder='./data/samples', # filename='test40' # ) # - # You might notice the information about the "effective number of samples" in the output. 
This is defined as `1 / max_events(weights)`; the smaller it is, the bigger the statistical fluctuations from too large weights. Let's plot this over the parameter space: # cut = (y.flatten()==0) # # x_ = theta0[cut][:,0] # y_ = n_effective[cut] # # order = np.argsort(x_) # y_ = y_[order] # x_ = x_[order] # # fig = plt.figure(figsize=(5,5)) # # plt.plot(x_[x_<0.5], y_[x_<0.5], lw=1.5) # plt.plot(x_[(x_>0.5)*(x_<3)], y_[(x_>0.5)*(x_<3)], lw=1.5) # plt.plot(x_[x_>3.], y_[x_>3.], lw=1.5) # # plt.xlabel("theta") # plt.ylabel('Effective number of samples') # plt.xlim(-0.2,2.2) # plt.ylim(0.,None) # #plt.yscale("log") # # plt.tight_layout() # plt.savefig("effective_samplesize.pdf") # # + cut = (y.flatten()==0) x_ = theta0[cut][:,0] y_ = n_effective[cut] order = np.argsort(x_) y_ = y_[order] x_ = x_[order] fig = plt.figure(figsize=(5,5)) plt.plot(x_[x_<0.4], y_[x_<0.4], lw=1.5) plt.plot(x_[(x_>0.4)*(x_<0.9)], y_[(x_>0.4)*(x_<0.9)], lw=1.5) plt.plot(x_[(x_>0.9)*(x_<1.1)], y_[(x_>0.9)*(x_<1.1)], lw=1.5) plt.plot(x_[(x_>1.1)*(x_<1.275)], y_[(x_>1.1)*(x_<1.275)], lw=1.5) plt.plot(x_[(x_>1.275)*(x_<1.425)], y_[(x_>1.275)*(x_<1.425)], lw=1.5) plt.plot(x_[x_>1.425], y_[x_>1.425], lw=1.5) plt.xlabel("theta") plt.ylabel('Effective number of samples') plt.xlim(-0.2,2.2) plt.ylim(0.,None) #plt.yscale("log") plt.tight_layout() plt.savefig("effective_samplesize.pdf") # + cut = (y.flatten()==0) x_ = theta0[cut][:,0] y_ = n_effective[cut] order = np.argsort(x_) y_ = y_[order] x_ = x_[order] fig = plt.figure(figsize=(5,5)) plt.plot(x_, y_, lw=1.5) # plt.plot(x_[x_<0.4], y_[x_<0.4], lw=1.5) # plt.plot(x_[(x_>0.4)*(x_<0.9)], y_[(x_>0.4)*(x_<0.9)], lw=1.5) # plt.plot(x_[(x_>0.9)*(x_<1.25)], y_[(x_>0.9)*(x_<1.25)], lw=1.5) # plt.plot(x_[x_>1.25], y_[x_>1.25], lw=1.5) plt.xlabel("theta") plt.ylabel('Effective number of samples') plt.xlim(-0.2,2.2) plt.ylim(0.,None) #plt.yscale("log") plt.tight_layout() #plt.savefig("effective_samplesize.pdf") # + cut = (y.flatten()==0) x_ = theta0[cut][:,0] 
y_ = n_effective[cut] order = np.argsort(x_) y_ = y_[order] x_ = x_[order] fig = plt.figure(figsize=(5,5)) plt.plot(x_, y_, lw=1.5) # plt.plot(x_[x_<0.4], y_[x_<0.4], lw=1.5) # plt.plot(x_[(x_>0.4)*(x_<0.9)], y_[(x_>0.4)*(x_<0.9)], lw=1.5) # plt.plot(x_[(x_>0.9)*(x_<1.25)], y_[(x_>0.9)*(x_<1.25)], lw=1.5) # plt.plot(x_[x_>1.25], y_[x_>1.25], lw=1.5) plt.xlabel("theta") plt.ylabel('Effective number of samples') plt.xlim(-0.2,2.2) plt.ylim(0.,None) #plt.yscale("log") plt.tight_layout() #plt.savefig("effective_samplesize.pdf") # - # ## 2. Plot cross section over parameter space # This is not strictly necessary, but we can also plot the cross section as a function of parameter space: # + thetas_benchmarks, xsecs_benchmarks, xsec_errors_benchmarks = sampler.cross_sections( theta=sampling.benchmarks(list(sampler.benchmarks.keys())) ) thetas_morphing, xsecs_morphing, xsec_errors_morphing = sampler.cross_sections( theta=sampling.random_morphing_points(100, [('flat', 0., 2.5)]) ) # + fig = plt.figure(figsize=(5,5)) sc = plt.scatter(thetas_morphing[:,0], xsecs_morphing, s=5., marker='o') plt.scatter(thetas_benchmarks[:,0], xsecs_benchmarks, s=50., marker='s', c="black") #plt.xlabel("theta") plt.xlabel(r"$\kappa$") plt.ylabel('xsec [pb]') #plt.ylim(0., None) plt.ylim(0., 0.001) plt.tight_layout() plt.savefig("xsec.pdf") # - # What you see here is a morphing algorithm in action. We only asked MadGraph to calculate event weights (differential cross sections, or basically squared matrix elements) at six fixed parameter points (shown here as squares with black edges). But with our knowledge about the structure of the process we can interpolate any observable to any parameter point without loss (except that statistical uncertainties might increase)! # ## 3. Train likelihood ratio estimator # It's now time to build the neural network that estimates the likelihood ratio. The central object for this is the `madminer.ml.ParameterizedRatioEstimator` class. 
It defines functions that train, save, load, and evaluate the estimators. # # In the initialization, the keywords `n_hidden` and `activation` define the architecture of the (fully connected) neural network: estimator = ParameterizedRatioEstimator( n_hidden=(100,100,), activation="tanh", dropout_prob=0.2, ) # To train this model we will minimize the ALICES loss function described in ["Likelihood-free inference with an improved cross-entropy estimator"](https://arxiv.org/abs/1808.00973). Many alternatives, including RASCAL, are described in ["Constraining Effective Field Theories With Machine Learning"](https://arxiv.org/abs/1805.00013) and ["A Guide to Constraining Effective Field Theories With Machine Learning"](https://arxiv.org/abs/1805.00020). There is also SCANDAL introduced in ["Mining gold from implicit models to improve likelihood-free inference"](https://arxiv.org/abs/1805.12244). # + estimator.train( method='alices', theta='data/samples/theta0_train_ratio.npy', x='data/samples/x_train_ratio.npy', y='data/samples/y_train_ratio.npy', r_xz='data/samples/r_xz_train_ratio.npy', t_xz='data/samples/t_xz_train_ratio.npy', theta_val='data/samples/theta0_val_ratio.npy', x_val='data/samples/x_val_ratio.npy', y_val='data/samples/y_val_ratio.npy', r_xz_val='data/samples/r_xz_val_ratio.npy', t_xz_val='data/samples/t_xz_val_ratio.npy', alpha=1., n_epochs=5, batch_size=512, n_workers=4, ) estimator.save('models/alices') # - # ## 4. Evaluate likelihood ratio estimator # `estimator.evaluate_log_likelihood_ratio(theta,x)` estimated the log likelihood ratio and the score for all combination between the given phase-space points `x` and parameters `theta`. That is, if given 100 events `x` and a grid of 25 `theta` points, it will return 25\*100 estimates for the log likelihood ratio and 25\*100 estimates for the score, both indexed by `[i_theta,i_x]`. 
# + theta_each = np.linspace(-20.,20.,21) theta0, theta1 = np.meshgrid(theta_each, theta_each) theta_grid = np.vstack((theta0.flatten(), theta1.flatten())).T np.save('data/samples/theta_grid.npy', theta_grid) theta_denom = np.array([[0.,0.]]) np.save('data/samples/theta_ref.npy', theta_denom) # + estimator.load('models/alices') log_r_hat, _ = estimator.evaluate_log_likelihood_ratio( theta='data/samples/theta_grid.npy', x='data/samples/x_test.npy', evaluate_score=False ) # - # Let's look at the result: # + bin_size = theta_each[1] - theta_each[0] edges = np.linspace(theta_each[0] - bin_size/2, theta_each[-1] + bin_size/2, len(theta_each)+1) fig = plt.figure(figsize=(6,5)) ax = plt.gca() expected_llr = np.mean(log_r_hat,axis=1) best_fit = theta_grid[np.argmin(-2.*expected_llr)] cmin, cmax = np.min(-2*expected_llr), np.max(-2*expected_llr) pcm = ax.pcolormesh(edges, edges, -2. * expected_llr.reshape((21,21)), norm=matplotlib.colors.Normalize(vmin=cmin, vmax=cmax), cmap='viridis_r') cbar = fig.colorbar(pcm, ax=ax, extend='both') plt.scatter(best_fit[0], best_fit[1], s=80., color='black', marker='*') plt.xlabel(r'$\theta_0$') plt.ylabel(r'$\theta_1$') cbar.set_label(r'$\mathbb{E}_x [ -2\, \log \,\hat{r}(x | \theta, \theta_{SM}) ]$') plt.tight_layout() plt.show() # - # Note that in this tutorial our sample size was very small, and the network might not really have a chance to converge to the correct likelihood ratio function. So don't worry if you find a minimum that is not at the right point (the SM, i.e. the origin in this plot). Feel free to dial up the event numbers in the run card as well as the training samples and see what happens then!
examples/tutorial_h4l/JB2_3a_likelihood_ratio.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## We extract only TXO and TX from 2020/03/13 to 2020/04/24
# * Data resource:
#   * Futures: http://www.taifex.com.tw/cht/3/dlFutPrevious30DaysSalesData
#   * Options: http://www.taifex.com.tw/cht/3/dlOptPrevious30DaysSalesData
# * Data exclude:
#   * Options: Only include the open 5 mins trading information, which is, 84500 ~ 85000
#   * Futures: Include every transaction data

import pandas as pd
import numpy as np
import os, glob
import datetime as dt

# All paths are resolved relative to the current working directory.
work_dir = os.getcwd()
work_dir

Option_path = os.path.join(work_dir, 'ParseData', 'Option')
Future_path = os.path.join(work_dir, 'ParseData', 'Futures')
Future_path

# Business days in the sample window, minus Taiwan holidays.
# NOTE(review): this range (2020/04/27 - 2020/05/07) does not match the
# 2020/03/13 - 2020/04/24 span stated in the header above — confirm which is current.
vacations = [dt.date(2020, 5, 1)] # Taiwan vacations
dates = pd.bdate_range(start='4/27/2020', end='5/7/2020')
dates = dates.drop(vacations)
dates.shape

# ## Collect all the csv name
# NOTE: os.chdir changes the process-wide working directory so glob sees the
# option folder; later cells rely on the absolute paths built above instead.
extension = 'csv'
os.chdir(Option_path)
Opt_csv_List = glob.glob('*.{}'.format(extension))

# ## Read the Option csv and data cleaning
def output_csv(dataCsv):
    """Filter one raw option CSV down to TXO trades in the open/close windows.

    Keeps rows whose product is ' TXO ' and whose trade time falls in
    84500-85000 (open) or 134000-134500 (close), drops the trailing 'E'
    column, and writes the result back over the same file.
    NOTE(review): the rewrite is destructive and not idempotent — running this
    cell twice on the same file will mis-assign the column names.
    """
    # The product name needs to be careful with space
    df = pd.read_csv(os.path.join(Option_path, dataCsv), encoding = 'cp950')
    columns_name = ['TransDate', 'Product', 'ExcercisePrice', 'Maturity', 'Right', 'TransTime', 'TransPrice', 'Volumn', 'E']
    df.columns = columns_name
    df_new = df[(df.Product == ' TXO ') & ((df.TransTime.between(left=84500, right=85000)) | (df.TransTime.between(left=134000, right=134500)))]
    df_new = df_new.drop('E', axis = 1)
    df_new.to_csv(os.path.join(Option_path, dataCsv), index = False)
    print("writing file to {}".format(dataCsv))


for dataCsv in Opt_csv_List:
    output_csv(dataCsv)

# ## Read the Future csv and data cleaning
extension = 'csv'
os.chdir(Future_path)
Fut_csv_List = glob.glob('*.{}'.format(extension))

# Cleaned futures files go to a separate output folder (unlike the options above).
New_Future_path = os.path.join(work_dir, 'Data', 'Futures')


def output_csv2(dataCsv):
    """Filter one raw futures CSV down to TX trades during the trading day.

    Keeps rows whose product is 'TX ' with trade time in 84500-134500, drops
    the unused 'A', 'B', 'E' columns, and writes the result to New_Future_path.
    """
    df = pd.read_csv(os.path.join(Future_path, dataCsv), encoding = 'cp950')
    columns_name = ['TransDate', 'Product', 'Maturity', 'TransTime', 'TransPrice', 'Volumn', 'A', 'B', 'E']
    df.columns = columns_name
    df_new = df[(df.Product == 'TX ') & (df.TransTime.between(left=84500, right=134500))] # The product name needs to be careful with space
    df_new = df_new.drop(['A', 'B', 'E'], axis = 1)
    df_new.to_csv(os.path.join(New_Future_path, dataCsv), index = False)
    print("writing file to {}".format(dataCsv))


for dataCsv in Fut_csv_List:
    output_csv2(dataCsv)

# ## Make sure the new csv is correct
# Spot-check one cleaned option file and one futures file.
# NOTE(review): the hard-coded index [7] assumes at least 8 option files exist.
df = pd.read_csv(os.path.join(Option_path, Opt_csv_List[7]), encoding = 'cp950')
df.head()

type(df.ExcercisePrice[0])

df = pd.read_csv(os.path.join(Future_path, Fut_csv_List[0]), encoding = 'cp950')
len(df)

# ## Slim down from all info to the vital info (Futures)
# * For Futures, Only keep
#   * the nearby month contract
#   * There are multiple transaction within 1 sec, we use groupby to acquire the mean of the transaction Price as the representitive price
#
def futures_keep_info(dataCsv):
    'Slim the Size'
    # Average price per second across all trades at that timestamp.
    df = pd.read_csv(os.path.join(Future_path, dataCsv), encoding = 'cp950')
    meanPrice = round(df.groupby('TransTime').TransPrice.transform('mean'), 2)
    df['meanPrice'] = meanPrice
    # The nearby-month contract is assumed to be the maturity of the first row.
    nearbyMonth = df.Maturity[0]
    df_new = df[df.Maturity == nearbyMonth]
    # Keep one row per second (the first), carrying the per-second mean price.
    df_new = df_new.drop_duplicates(subset = ['TransTime'], keep ='first')
    # NOTE(review): overwrites the source file in Future_path in place.
    df_new.to_csv(os.path.join(Future_path, dataCsv), index = False)
    print("writing file to {}".format(dataCsv))


for dataCsv in Fut_csv_List:
    futures_keep_info(dataCsv)

df = pd.read_csv(os.path.join(Future_path, Fut_csv_List[2]), encoding = 'cp950')
df.head()

df.groupby('Maturity').Volumn.sum()

type(df.TransDate[0])

# ## Divided the Option data into Open interval and close interval
OptionOpen_path = os.path.join(work_dir, 'Data', 'OptionOpen')
OptionClose_path = os.path.join(work_dir, 'Data', 'OptionClose')


def divide_csv(dataCsv):
    """Split one cleaned option CSV into open-window and close-window files.

    Writes the 84500-85000 rows to OptionOpen_path and the 134000-134500 rows
    to OptionClose_path, keeping the same file name.
    """
    df = pd.read_csv(os.path.join(Option_path, dataCsv), encoding = 'cp950')
    df_open = df[(df.TransTime.between(left=84500, right=85000))]
    df_close = df[(df.TransTime.between(left=134000, right=134500))]
    df_open.to_csv(os.path.join(OptionOpen_path, dataCsv), index = False)
    df_close.to_csv(os.path.join(OptionClose_path, dataCsv), index = False)
    print("writing file to {}".format(dataCsv))


OptionOpen_path
Data_cleaning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # python collections库 # collections库是python内置的集合库,本文主要讲解以下5种数据结构的用法: # * namedtuple 命名元组,是tuple的子类 # * deque 双向列表 # * defaultdict 有默认值的字典,是dict的子类 # * OrderedDict key有序的字典,是dict的子类 # * Counter 计数器,是dict的子类 # # 准备工作 from collections import namedtuple,deque,defaultdict,OrderedDict,Counter # # namedtuple (python 2.6+) # 用法:namedtuple('名称',[属性列表]) Point = namedtuple('Point',['x','y']) p = Point(1,2) print '【Output】' print p print p.x,p.y print p.count,p.index print isinstance(p,Point) print isinstance(p,tuple) # # deque (python 2.4+) # 适用于队列和栈,插入和删除元素很高效。 lst = ['a','b','c'] dq = deque(lst) dq.append('d') print dq dq.pop() print dq dq.appendleft('-1') print dq dq.popleft() print dq # # defaultdict (python 2.5+) # 当key不存在的时候可返回一个默认值,默认值由传入的函数对象决定。 dd = defaultdict(lambda:'N/A') dd['key1'] = 'aa'; print dd['key1'] print dd['key2'] # # OrderedDict(python 2.7+) # key值有序的字典,顺序按照插入的顺序排序。 data = [('a',1),('b',2),('c',3)] d = dict(data) print d od = OrderedDict(data) print od # # Counter (python 2.7+) # ### 用序列生成Counter对象 s = 'abcdeabcdabcaba' c = Counter(s) print c print c.most_common(3) print sorted(c) print ''.join(sorted(c.elements())) print c.values() print c.elements() # ### 更新Counter对象 d = Counter('bbb') c.update(d) print c.most_common() # ### 用字典生成Counter对象 d = {'a':1,'b':2,'c':3} c = Counter(d) print c # ### value值为字符串时,按照字典序排序 d = {'a':'aa1','b':'ba1','c':'ca2'} c = Counter(d) print c
collections/handout.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This demonstrates all the steps in my candidate selection before conducting visual inspection # + import numpy as np import splat import wisps.data_analysis as wispd from wisps.data_analysis import selection_criteria as sel_crt import shapey import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from scipy import stats import wisps import matplotlib as mpl from tqdm import tqdm import random import matplotlib.pyplot as plt # %matplotlib inline # + #some functions def get_indices(x): if x is None : return pd.Series({}) else: return pd.concat([pd.Series(x.indices), pd.Series(x.mags), pd.Series(x.snr)]) def get_spt(x): if x is None: return np.nan else: return x.spectral_type[0] #change f-test definition def f_test_fx(x, df1, df2): return stats.f.cdf(x, df1, df2) def box_parameters(idx, spt_range): bs=idx.shapes b=[x for x in bs if x.shape_name==spt_range][0] print ('{} {} m: {} b: {} s:{}, comp : {}, cont: {}'.format(spt_range, idx, round(b.coeffs[0], 2), round(b.coeffs[1], 2), round(b.scatter, 2), round(idx.completeness[spt_range], 2), round(idx.contamination[spt_range], 3))) # - cands=pd.read_pickle(wisps.LIBRARIES+'/new_real_ucds.pkl') # + #use the same columns for all data sets alldata=wisps.get_big_file() spex=wisps.Annotator.reformat_table(wisps.datasets['spex']) cands['line_chi']=cands.spectra.apply(lambda x : x.line_chi) cands['spex_chi']=cands.spectra.apply(lambda x: x.spex_chi) cands['f_test']=cands.spectra.apply(lambda x: x.f_test) spex_df=wisps.Annotator.reformat_table(wisps.datasets['spex']).reset_index(drop=True) manj=wisps.Annotator.reformat_table(wisps.datasets['manjavacas']).reset_index(drop=True) schn=wisps.Annotator.reformat_table(wisps.datasets['schneider']).reset_index(drop=True) 
# Y dwarfs: Manjavacas objects later than spt 38 combined with the Schneider sample.
ydwarfs=(manj[manj['spt'].apply(wisps.make_spt_number)>38].append(schn)).reset_index(drop=True)

# Spectral types come back as (value, uncertainty) pairs; keep the value only.
spex_df['spt']=np.vstack(spex_df['spt'].values)[:,0]
manj['spt']=np.vstack(manj['spt'].values)[:,0]
schn['spt']=np.vstack(schn['spt'].values)[:,0]

cands.grism_id=cands.grism_id.apply(lambda x: x.lower())
cands['spt']=np.vstack(cands['spt'].values)

# +
# add x values: x is the template-fit / line-fit chi-square ratio; its
# F-distribution CDF is the f-test statistic.
spex['x']=spex.spex_chi/spex.line_chi
alldata['x']=alldata.spex_chi/alldata.line_chi
cands['x']=cands.spex_chi/cands.line_chi

spex['f_test']=f_test_fx(spex.x.values, spex.dof.values-1, spex.dof.values-2)
alldata['f_test']=f_test_fx(alldata.x.values, alldata.nG141.values-1, alldata.nG141.values-2)

alldata=alldata.sort_values('x')
spex=spex.sort_values('x')
cands=cands.sort_values('x')
# -

alldata['datalabel']='alldata'
spex['datalabel']='spex'
cands['datalabel']='ucds'

combined_ftest_df=pd.concat([cands, spex, alldata[(alldata.snr1>=3.) & (alldata.mstar_flag !=0)]])

# Fractions of templates / candidates above and below the f-test cuts
# (restricted to spt >= 17, i.e. M7 and later).
len(spex[np.logical_and(spex.f_test.values > 0.9, np.vstack(spex.spt.values)[:,0] >=17.)])/len(spex)
len(spex[np.logical_and(spex.f_test.values < 0.02, np.vstack(spex.spt.values)[:,0] >=17.)])/len(spex)
len(cands[np.logical_and(cands.f_test.values > 0.9, np.vstack(cands.spt.values)[:,0] >=17.)])/len(cands)
len(cands[np.logical_and(cands.f_test.values < 0.02, np.vstack(cands.spt.values)[:,0] >=17.)])/len(cands)

plt.plot(cands.x[cands.x<1.], '.')

# dt: the "contaminant" pool — point sources passing the SNR and f-test cuts.
dt=alldata[(alldata.f_test<0.02) & (alldata.snr1>=3.) & (alldata.mstar_flag !=0)].reset_index(drop=True)
dt['spt']=(dt['spt']).apply(wisps.make_spt_number).apply(float)
dt=wisps.Annotator.reformat_table(dt).reset_index(drop=True)

len(alldata[(alldata.f_test<0.02) & (alldata.snr1>=3.) & (alldata.mstar_flag !=0)])

wisps.datasets.keys()

# +
# get criteria
# only run this if new data
gbhio=sel_crt.save_criteria(conts=dt)
crts=sel_crt.crts_from_file()

# Per-index contamination and completeness tables, indexed by index name.
contamns=pd.DataFrame([ x.contamination for x in crts.values()])
compls=pd.DataFrame([ x.completeness for x in crts.values()])
contamns.index=[x for x in crts.keys()]
compls.index=[x for x in crts.keys()]
# -


def get_toplowest_contam(subtype, n):
    """Return {subtype: names of the n indices with the lowest contamination
    for that subtype}.

    BUG FIX: the ranking previously sorted by the hard-coded 'L5-T0' column,
    ignoring the ``subtype`` argument entirely; it now sorts by the requested
    subtype's own contamination column.
    """
    top=contamns.sort_values(subtype)[:n]
    return {subtype: [x for x in top.index]}


ordered={}
for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs', 'subdwarfs']:
    ordered.update(get_toplowest_contam(k, 6))

to_use= ordered

spex['spt']=np.vstack(spex.spt.values)[:,0]

from tqdm import tqdm


def multiplte_indices_selection(k):
    # NOTE(review): name keeps the original 'multiplte' typo so existing
    # (commented-out) callers elsewhere still resolve.
    """Apply ALL candidate index-index boxes for class ``k`` jointly and print
    how many UCDs / templates / contaminants survive the intersection."""
    indices= [crts[index_name] for index_name in to_use[k]]
    # Per-index selection masks for candidates, templates, and contaminants.
    cand_bools=[]
    spex_bools=[]
    trash_bools=[]
    for idx in indices:
        xkey=idx.xkey
        ykey=idx.ykey
        bx=[x for x in idx.shapes if x.shape_name==k][0]
        _, cbools=bx._select(np.array([cands[xkey].values,cands[ykey].values]))
        _, spbools=bx._select(np.array([spex[xkey].values,spex[ykey].values]))
        _, trbools=bx._select(np.array([dt[xkey].values, dt[ykey].values]))
        cand_bools.append(cbools)
        spex_bools.append(spbools)
        trash_bools.append(trbools)
    # Also require membership in the spectral-type class itself.
    cands_in_that_class_bool=cands.spt.apply(lambda x: wisps.is_in_that_classification(x, k))
    spex_in_that_class_bool=spex.spt.apply(lambda x: wisps.is_in_that_classification(x, k))
    cand_bools.append(cands_in_that_class_bool)
    spex_bools.append(spex_in_that_class_bool)
    cands_selected=cands[np.logical_and.reduce(cand_bools, axis=0)]
    spexs_selected=spex[np.logical_and.reduce(spex_bools, axis=0)]
    print (' {} selected {} out of {} UCDS'.format(k, len( cands_selected), len(cands[cands_in_that_class_bool])))
    print ('overall completeness {}'.format( len(spexs_selected)/len(spex[spex_in_that_class_bool])))
    print ('total contaminants {}'.format(len(dt[np.logical_and.reduce(trash_bools)])))
    print ('-------------------------------------------')


contamns.idxmin(axis=0)

from collections import OrderedDict

# For each class keep only the single lowest-contamination index,
# as (index_name, class) pairs.
ordered=[(k, contamns.idxmin(axis=0)[k]) for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs', 'subdwarfs']]
to_use= [ (y, x) for x, y in ordered]

to_use

import pickle

# Persist the chosen (index, class) pairs for downstream notebooks.
output_file=wisps.OUTPUT_FILES+'/best_indices_to_use.pkl'
with open(output_file, 'wb') as file:
    pickle.dump(to_use,file)

fp={}  # false-positive rate per class, filled in by plot_index_box

cands=cands[cands.grism_id.isin(dt.grism_id)]


# +
def plot_index_box(index_name, box_name, ax):
    """Draw one index-index selection box on ``ax``: contaminant density as a
    2D histogram, templates and discovered UCDs as scatter points, plus the
    box outline. Records the class's false-positive rate in ``fp`` and
    returns {box_name: box} for later reuse."""
    # get the index and the box
    idx=crts[index_name]
    bx=[x for x in idx.shapes if x.shape_name==box_name][0]
    xkey=idx.xkey
    ykey=idx.ykey
    # Template sample depends on the class being plotted.
    to_use_df=spex_df
    if box_name.lower()=='y dwarfs':
        to_use_df=ydwarfs
    if box_name.lower()=='subdwarfs':
        to_use_df=wisps.Annotator.reformat_table(idx.subdwarfs)
        to_use_df['spt']=17
    # Pad axis limits by half the box span (20% for the narrow T5-T9 box).
    xlim=[ bx.xrange[0]-.5*abs(np.ptp(bx.xrange)), bx.xrange[1]+.5*abs(np.ptp(bx.xrange))]
    ylim=[ bx.yrange[0]-.5*abs(np.ptp(bx.yrange)), bx.yrange[1]+.5*abs(np.ptp(bx.yrange))]
    if box_name.upper()=='T5-T9':
        print ('changin scale')
        print (bx.xrange[1])
        xlim=[ bx.xrange[0]-0.2*abs(np.ptp(bx.xrange)), np.round(bx.xrange[1]+0.2*abs(np.ptp(bx.xrange)))]
    # remove nans/infs from the background, then clip to the plot window
    bckgrd= dt[[xkey, ykey]].replace(-np.inf, np.nan).replace(np.inf, np.nan).dropna()
    bckgrd=bckgrd[(bckgrd[xkey].between(xlim[0], xlim[1])) & (bckgrd[ykey].between(ylim[0], ylim[1]))]
    h=ax.hist2d(bckgrd[xkey].apply(float).values, bckgrd[ykey].apply(float).values,
                cmap='gist_yarg', vmin=50, vmax=1000)

    cands_slctd, cands_bools=bx._select(np.array([cands[xkey].values,cands[ykey].values]))
    trash_slctd, trsh_bools=bx._select(np.array([dt[xkey].values, dt[ykey].values]))
    print (len(cands_slctd[0]), len((cands)))

    cands_in_that_class_bool=(cands).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name))
    spexs_slctd_in_that_class_bool= (to_use_df).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name))
    if box_name.lower()=='subdwarfs':
        # all subdwarf templates count as in-class
        spexs_slctd_in_that_class_bool=np.ones(len(to_use_df), dtype=bool)

    cands_in_that_class=np.array([cands_slctd[0], cands_slctd[1]])
    spexs_slctd_in_that_class=np.array([to_use_df[xkey][spexs_slctd_in_that_class_bool],
                                        to_use_df[ykey][spexs_slctd_in_that_class_bool]])

    ax.scatter(spexs_slctd_in_that_class[0], spexs_slctd_in_that_class[1], facecolors='none',
               edgecolors='#0074D9', label='Templates', s=50.)
    ax.scatter( cands_in_that_class[0], cands_in_that_class[1], marker ='+', s=150., alpha=1.,
               facecolors='#FF851B', label='Discovered UCDs')
    # faint markers for ALL candidates, not just the in-box ones
    ax.scatter(cands[xkey].values, cands[ykey].values, marker='+', s=150., alpha=0.3, facecolors='#FF851B')

    # draw the box outline only
    bx.color='None'
    bx.alpha=1.
    bx.linewidth=3
    bx.linestyle='-'
    bx.edgecolor='#0074D9'
    bx.plot(ax=ax, only_shape=True, highlight=False)

    plt.tight_layout()
    ax.set_xlabel(r'$'+str(idx.name.split(' ')[0])+'$', fontsize=14)
    ax.set_ylabel(r'$'+str(idx.name.split(' ')[1])+'$', fontsize=14)
    ax.set_title(box_name, fontsize=18)
    ax.minorticks_on()

    # False-positive rate: fraction of in-box contaminants that are not UCDs.
    if (trash_slctd.shape[1])==0:
        fprate=0.0
    else:
        fprate=(trash_slctd.shape[1]- cands_slctd.shape[1])/trash_slctd.shape[1]
    if box_name.lower()=='subdwarfs':
        fprate=1.
    fp[box_name]= fprate

    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    plt.tight_layout()
    print (' {} selected {}'.format(box_name, len(bx.select( bckgrd))))
    return {str(box_name): bx}
# -

to_use

idx=crts[to_use[1][0]]

import matplotlib

# +
# One panel per (index, class) pair in a 3x3 grid; the two unused axes are removed.
fig, ax=plt.subplots(nrows=3, ncols=3, figsize=(12, 14))
bxs=[]
for idx, k in enumerate(to_use):
    print (idx, k)
    b=plot_index_box( k[0], k[1], np.concatenate(ax)[idx])
    bxs.append(b)

plt.tight_layout()

# Shared horizontal colorbar for the background histograms.
cax = fig.add_axes([0.5, 0.1, .3, 0.03])
norm= matplotlib.colors.Normalize(vmin=50,vmax=1000)
mp=matplotlib.cm.ScalarMappable(norm=norm, cmap='gist_yarg')
cbar=plt.colorbar(mp, cax=cax, orientation='horizontal')
cbar.ax.set_xlabel(r'Number of Contaminants', fontsize=18)

fig.delaxes(np.concatenate(ax)[-1])
fig.delaxes(np.concatenate(ax)[-2])
np.concatenate(ax)[-4].set_title(r'$\geq$ T9 ', fontsize=18)

np.concatenate(ax)[-3].legend(loc='center left', bbox_to_anchor=(1, 0.5))

plt.savefig(wisps.OUTPUT_FIGURES+'/index_index_plots.pdf', bbox_inches='tight', rasterized=True, dpi=150)
# -

# Collect the drawn boxes and apply each class's box back to the contaminant
# pool to obtain the index-selected candidate list.
bx_dict={}
for b in bxs:
    bx_dict.update(b)

# invert to_use: class name -> index name
inv_to_use = {v: k for k, v in to_use}

ncandidates=[]
for spt_range in bx_dict.keys():
    idx_name=inv_to_use[spt_range]
    idx=crts[idx_name]
    s, bools=(bx_dict[spt_range])._select(np.array([dt[idx.xkey].values, dt[idx.ykey].values]))
    ncandidates.append(dt[bools])

candsss=(pd.concat(ncandidates).drop_duplicates(subset='grism_id'))

cands.grism_id=cands.grism_id.apply(lambda x: x.lower().strip())

good_indices=[crts[x] for x in inv_to_use.values()]

# Sanity-check counts: overlap with the confirmed UCDs, selection fractions.
len(candsss), len(candsss[candsss.grism_id.isin(cands.grism_id.apply(lambda x: x.lower().strip())) & (candsss.spt.apply(wisps.make_spt_number)>16)])
len(candsss.drop_duplicates('grism_id'))/len(alldata)
len(candsss[candsss.grism_id.isin(cands.grism_id) & (candsss.spt.apply(wisps.make_spt_number).between(35, 40))])
len(candsss), len(dt), len(alldata[alldata.mstar_flag !=0])
len(dt)/len(alldata)

candsss.to_pickle(wisps.OUTPUT_FILES+'/selected_by_indices.pkl')


# +
# print out LaTeX table of the selection boxes
def round_tuple(tpl, n=2):
    """Round both elements of a 2-tuple to n decimal places."""
    return round(tpl[0], n), round(tpl[1],n)


for index, k in to_use:
    spt_range=k
    sindex=crts[index]
    bs=sindex.shapes
    bs=[x for x in bs if x.shape_name==spt_range]
    bx=bs[0]
    print (" {} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\ ".format(spt_range,sindex.xkey, sindex.ykey,
                                                                round_tuple(bx.vertices[0]),
                                                                round_tuple(bx.vertices[1]) ,
                                                                round_tuple(bx.vertices[2]),
                                                                round_tuple(bx.vertices[3]),
                                                                round(sindex.completeness[spt_range], 2),
                                                                round(sindex.contamination[spt_range], 7),
                                                                round(fp[spt_range],6)))
# -

len(candsss)

stars= alldata[alldata.mstar_flag !=0]

# +
# SNR diagnostic figure: J-SNR vs H-SNR and J-SNR vs median SNR (log10),
# point-source density + templates + UCDs, with the snr>=3 cut lines.
cands_dff=(cands[np.logical_and(cands['snr1'] >=3., cands['spt'] >=17)]).sort_values('spt')
spex_df=spex_df.sort_values('spt')

star_snr=stars[['snr1', 'snr2', 'snr3', 'snr4']].apply(np.log10).dropna()
star_snr=(star_snr[star_snr.snr1.between(-1, 4) & star_snr.snr3.between(-1, 4) & star_snr.snr4.between(-1, 4)]).reset_index(drop=True)

fig, (ax, ax1)=plt.subplots(ncols=2, figsize=(12, 6))

h=ax.hist2d(star_snr['snr1'], star_snr['snr3'], cmap='gist_yarg', bins=10, label='Point Sources')
cb = plt.colorbar(h[3], ax=ax, orientation='horizontal')
cb.set_label('Counts in bin', fontsize=16)
plt.tight_layout()

ax.scatter(spex_df['snr1'].apply(np.log10), spex_df['snr3'].apply(np.log10), s=10, c=spex_df.spt,
           cmap='coolwarm', marker='o', alpha=0.1, vmin=15, vmax=40)
# one opaque point so the legend entry is visible
ax.scatter(spex_df['snr1'].apply(np.log10)[0], spex_df['snr3'].apply(np.log10)[0], s=10, c=spex_df.spt[0],
           cmap='coolwarm', label='Templates', marker='o', alpha=1., vmin=15, vmax=40)
ax.scatter(cands_dff['snr1'].apply(np.log10), cands_dff['snr3'].apply(np.log10), c=cands_dff['spt'], s=40,
           marker='*', cmap='coolwarm', label='UCDs' , vmin=15, vmax=40)

ax.set_xlim([-0.5, 4])
ax.set_ylim([-0.5, 4])
ax.set_xlabel('Log J-SNR', fontsize=18)
ax.set_ylabel('Log H-SNR', fontsize=18)
ax.legend(fontsize=18, loc='upper left')
ax.axhline(np.log10(3), c='k', xmin=np.log10(3)-0.2, linestyle='--')
ax.axvline(np.log10(3), c='k', ymin=np.log10(3)-0.2, linestyle='--')

h1=ax1.hist2d(star_snr['snr1'], star_snr['snr4'], cmap='gist_yarg', bins=10, label='Point Sources')
mp=ax1.scatter(spex_df['snr1'].apply(np.log10), spex_df['snr4'].apply(np.log10), s=10, c=spex_df.spt,
               cmap='coolwarm', label='Templates', marker='o', alpha=0.1, vmin=15, vmax=40)
ax1.scatter(cands_dff['snr1'].apply(np.log10), cands_dff['snr4'].apply(np.log10), c=cands_dff['spt'], s=40,
            marker='*', cmap='coolwarm', label='UCDs', vmin=15, vmax=40)

ax1.set_xlim([-0.5, 4])
ax1.set_ylim([-0.5, 4])
ax1.set_xlabel(' Log J-SNR', fontsize=18)
ax1.set_ylabel('Log MEDIAN-SNR', fontsize=18)
ax1.axhline(np.log10(3), c='k', xmin=np.log10(3)-0.2, linestyle='--')
ax1.axvline(np.log10(3), c='k', ymin=np.log10(3)-0.2, linestyle='--')

cb1 = plt.colorbar(h1[3], ax=ax1, orientation='horizontal')
cb1.set_label('Counts in bin', fontsize=16)

import matplotlib
# Shared vertical colorbar mapping color to spectral type.
cax = fig.add_axes([1.01, 0.21, .03, 0.7])
norm= matplotlib.colors.Normalize(vmin=15,vmax=40)
mp=matplotlib.cm.ScalarMappable(norm=norm, cmap='coolwarm')
cbar=plt.colorbar(mp, cax=cax, orientation='vertical')
cbar.ax.set_ylabel(r'Spectral Type', fontsize=18)
ax.minorticks_on()
ax1.minorticks_on()
cbar.ax.set_yticks([ 17, 20, 25, 30, 35, 40])
cbar.ax.set_yticklabels(['M5', 'L0', 'L5', 'T0', 'T5', 'Y0'])

plt.tight_layout()
plt.savefig(wisps.OUTPUT_FIGURES+'/snr_cutplots.pdf', \
            bbox_inches='tight',rasterized=True, dpi=100)
# -

big=wisps.get_big_file()

bigsnr=big[big.snr1>=3.]

# SNR distributions: everything, point sources, and the snr>3 selection.
fig, ax=plt.subplots(figsize=(10, 6))
h=ax.hist(big.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linestyle=':',
          label='All', log=True, linewidth=3)
h=ax.hist(stars.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3,
          label='Point Sources', linestyle='--', log=True)
h=ax.hist(stars[stars.snr1>3].snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step',
          linewidth=3, label='Selected', log=True)
ax.minorticks_on()
plt.xlabel('Log SNR')
plt.ylabel('Number')
plt.legend()
plt.savefig(wisps.OUTPUT_FIGURES+'/snr_distribution.pdf', bbox_inches='tight', facecolor='white', transparent=False)

bools=np.logical_and(stars.snr1.between(3, 1000), stars.f_test.between(1e-3, 1))

compls.keys()

# +
# Completeness vs contamination for every index, one marker style per class.
fig, ax=plt.subplots(figsize=(8,6))
ax.scatter(compls['M7-L0'].values, contamns['M7-L0'].values, facecolors='none',
           edgecolors='#0074D9', label='M7-L0')
ax.scatter(compls['L0-L5'].values, contamns['L0-L5'].values, marker='^', facecolors='none',\
           edgecolors='#FF851B', label='L0-L5')
ax.scatter(compls['L5-T0'].values, contamns['L5-T0'].values, marker='s', facecolors='none',
           edgecolors='#2ECC40', label='L5-T0')
ax.scatter(compls['T0-T5'].values, contamns['T0-T5'].values, marker='$...$', facecolors='none',
           edgecolors='#FF4136', label='T0-T5')
ax.scatter(compls['T5-T9'].values, contamns['T5-T9'].values, marker='X', facecolors='none',
           edgecolors='#111111', label='T5-T9')
ax.set_xlabel('Completeness')
ax.set_ylabel('Contamination')
plt.legend()
ax.set_yscale('log')
plt.savefig(wisps.OUTPUT_FIGURES+'/completeness_contam.pdf', bbox_inches='tight', facecolor='white', transparent=False)
# -

# (completeness, contamination) pairs for the LaTeX summary table.
compl_contam_table=pd.DataFrame(columns=contamns.columns, index=contamns.index)

for k in compl_contam_table.columns:
    for idx in compl_contam_table.index:
        compl_contam_table.loc[idx, k]=(round(compls.loc[idx, k], 2), \
                                        round(contamns.loc[idx, k], 3))

(compl_contam_table[['M7-L0', 'L0-L5', 'T0-T5',\
                     'T5-T9', 'Y dwarfs', 'subdwarfs']]).to_latex()
notebooks/.ipynb_checkpoints/completeness_and_contamination-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # language: python # name: python3 # --- from main import * import requests import re logs._debug = True module = updater.Updater() module.update() module.download() r = requests.get('https://dmhy.anoneko.com/topics/list/page/1', proxies={'https':'http://127.0.0.1:7890'}, timeout = 15) r.text
live_update/debug.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/jrhumberto/cd/blob/main/001_NLP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="6p1A10DWeswB" # Fontes: # - https://medium.com/@alegeorgelustosa/an%C3%A1lise-de-sentimentos-em-python-2a7d04a836e0 # - https://github.com/AleLustosa/An-lise_Sentimento_NLTK/blob/master/Analise%20de%20Sentimentos%20com%20NLTK.ipynb # + id="xcZbmBiFevNw" #1 import pandas as pd import matplotlib.pyplot as plt import numpy as np import nltk # %matplotlib inline # + id="1YCS3Aq7evcL" #2 base_treinamento = [ ('este trabalho e agradável','alegria'), ('gosto de ficar no seu aconchego','alegria'), ('fiz a adesão ao curso hoje porque eu gostei','alegria'), ('eu sou admirada por muitos','alegria'), ('adoro como você e','alegria'), ('adoro seu cabelo macio','alegria'), ('adoro a cor dos seus olhos','alegria'), ('somos tão amáveis um com o outro','alegria'), ('sinto uma grande afeição por ele','alegria'), ('quero agradar meus filhos','alegria'), ('me sinto completamente amado','alegria'), ('eu amo você','alegria'), ('que grande alivio','alegria'), ('fel<NAME> novo', 'alegria'), ('a dor esta amenizando finalmente','alegria'), ('acho que me apaixonei','alegria'), ('amar e maravilhoso','alegria'), ('estou me sentindo muito animada','alegria'), ('me sinto muito bem hoje','alegria'), ('como o luar e belo','alegria'), ('o dia esta muito bonito','alegria'), ('nossa como sou afortunado','alegria'), ('as maravilhas do mundo','alegria'), ('recebi muito carinho hoje do meus colegas','alegria'), ('estou me sentindo reconfortada hoje','alegria'), ('e muito bom estar com os amigos','alegria'), ('estou muito contente com o 
resultado dos testes','alegria'), ('essa pintura esta bem brilhante otimo trabalho','alegria'), ('temos água em abundancia','alegria'), ('que roupa delicada','alegria'), ('você e um grande comediante','alegria'), ('que bondade a sua em vir aqui','alegria'), ('o amor e lindo','alegria'), ('nossa amizade vai durar para sempre','alegria'), ('estou eufórica com a noticia','alegria'), ('ele e realmente fiel a mim','alegria'), ('vou dar uma grande festa para comemorar meu aniversário','alegria'), ('graças a deus que eu enxerguei o certo','alegria'), ('essa e a melhor escolhas de todas','alegria'), ('o mais incrível e você minha bela','alegria'), ('e tão engraçado tentar explicar','alegria'), ('e emocionante estar neste lugar','alegria'), ('estou cativada pelo seu olhar','alegria'), ('estou loucamente apaixonada','alegria'), ('eu nunca tive duvidas','alegria'), ('estou rodeada pelo seu abraço','alegria'), ('eu vejo estrelas pelo caminho','alegria'), ('eu sinto o sol sempre que você esta por perto','alegria'), ('eu estou sorrindo de orelha a orelha','alegria'), ('isso vale a pena','alegria'), ('finalmente você colocou meu amor em primeiro lugar','alegria'), ('nós dançamos noite adentro','alegria'), ('seu amor e brilhante','alegria'), ('toquei muitos corações durante o meu caminho','alegria'), ('eu serei sua amiga e companheira','alegria'), ('você me traz de volta a vida','alegria'), ('você e como um sonho doce','alegria'), ('adoro este doce de frutas','alegria'), ('meu suco favorito','alegria'), ('estou agradecida pela ajuda','alegria'), ('e um enorme prazer ter você em nossa equipe','alegria'), ('trabalhar em equipe e o melhor','alegria'), ('me sinto flutuando no ar','alegria'), ('a brisa esta agradável hoje','alegria'), ('ótimo e compatível','alegria'), ('somos compatíveis um com o outro','alegria'), ('o órgão e compatível com o paciente','alegria'), ('estou contente fui aceita na faculdade','alegria'), ('fui aprovada no meu exame','alegria'), ('fui beneficiada pela 
minha empresa','alegria'), ('eu sou muito cativante','alegria'), ('estou contente com o apoio','alegria'), ('como este lugar e confortável','alegria'), ('e bom estar quente neste frio','alegria'), ('um elogio nunca e demais','alegria'), ('vou te chamar para comemorar','alegria'), ('e desejável a sua presença em nossa apresentação','alegria'), ('sou muito grata a você','alegria'), ('me dedico muito naquilo que faço','alegria'), ('estou completamente apaixonada ','alegria'), ('vamos agitar essa noite ','alegria'), ('você significa muito para mim','alegria'), ('vamos agir sem preconceitos e julgamentos','alegria'), ('finalmente completei a minha coleção, maravilhoso','alegria'), ('eu sou sua rainha ','alegria'), ('satisfatoriamente eu anuncio o vencedor dos jogos','alegria'), ('você me atrai facilmente ','alegria'), ('aquele rapaz e extremamente atraente','alegria'), ('sinto-me viva ','alegria'), ('sinto-me em paz ','alegria'), ('estamos tendo muito lucro','alegria'), ('muito bem esta tudo em ordem agora ','alegria'), ('podemos arrumar um emprego juntos ','alegria'), ('a arrumação esta terminada, que alívio','alegria'), ('o câncer e benigno ','alegria'), ('o amor e abundante','alegria'), ('vamos ser caridosos este natal','alegria'), ('com todo esse charme você irá atrair a todos','alegria'), ('nossa como você e charmoso querido ','alegria'), ('sou querida pelos meu amigos','alegria'), ('seja cuidadoso com os meus sentimentos','alegria'), ('estou comovido com tamanha caridade','alegria'), ('um chá quente e reconfortante','alegria'), ('que alegria ter vocês aqui ','alegria'), ('vamos aplaudir o vencedor ','alegria'), ('palmas para a aniversariante','alegria'), ('desejo a você tudo de bom','alegria'), ('hora de apreciar um bom vinho','alegria'), ('aprecio sua presença em minha escola','alegria'), ('anseio por seus próximos trabalhos','alegria'), ('maravilhoso jogo amistoso','alegria'), ('e ótimo que os ânimos tenham se apaziguado','alegria'), ('concretizei finalmente meu 
sonho','alegria'), ('você e abominável','nojo'), ('nossa, que vontade de vomitar', 'nojo'), ('que banheiro porco', 'nojo'), ('eu caguei nas calcas', 'nojo'), ('bebi demais e preciso vomitar', 'nojo'), ('esta saindo pus do ferimento', 'nojo'), ('ele tem frieria e micose no pe', 'nojo'), ('o povo da praia ao lado joga tudo no chao, bando de porco', 'nojo'), ('abomino a maneira como você age','nojo'), ('estou adoentado','nojo'), ('meu pai esta adoentado','nojo'), ('estamos todos doentes','nojo'), ('essa situação e muito amarga','nojo'), ('disse adeus amargamente','nojo'), ('tenho antipatia por aquela pessoa','nojo'), ('como pode ser tão antipática!','nojo'), ('que horrível seu asqueroso','nojo'), ('tenho aversão agente como você','nojo'), ('isso tudo e só chateação','nojo'), ('estou muito chateada com suas mentiras','nojo'), ('tão desagradável','nojo'), ('isso me desagrada completamente','nojo'), ('te desagrada isso','nojo'), ('estou com enjôos terríveis','nojo'), ('todos estão enfermos','nojo'), ('foi uma enfermidade terrível','nojo'), ('isso e muito grave','nojo'), ('não seja tão grosseiro','nojo'), ('você fez uma manobra ilegal','nojo'), ('sua indecente, não tem vergonha?','nojo'), ('você e malvado com as crianças','nojo'), ('que comentário maldoso','nojo'), ('sem escrúpulos você manipula a tudo','nojo'), ('sinto repulsa por você','nojo'), ('e repulsivo a maneira como olha para as pessoas','nojo'), ('estou indisposta','nojo'), ('a indisposição me atacou hoje','nojo'), ('acho que vou vomitar','nojo'), ('tem muito vomito lá','nojo'), ('que incomodo essa dor','nojo'), ('não me incomode nunca mais','nojo'), ('suas bobagens estão nos incomodando','nojo'), ('que nojo olha toda essa sujeira','nojo'), ('como isso está sujo','nojo'), ('tenho náuseas só de lembrar','nojo'), ('me sinto nauseada com o cheiro desta comida','nojo'), ('você esta obstruindo a passagem de ar','nojo'), ('você esta terrivelmente doente','nojo'), ('olhe que feia esta roupa','nojo'), ('que atitude 
deplorável','nojo'), ('nossa como você e feio','nojo'), ('muito mau tudo isso','nojo'), ('estou nojoso com você','nojo'), ('você cortou o meu assunto','nojo'), ('para que tanta chateação?','nojo'), ('esse perfume e enjoativo','nojo'), ('ser perigoso não nada bom','nojo'), ('você e perigoso demais para minha filhas','nojo'), ('que fetido este esgoto','nojo'), ('que fedido você esta','nojo'), ('que cachorro malcheiroso','nojo'), ('hora que ultraje','nojo'), ('e ultrajante da sua parte','nojo'), ('situação desagradável essa','nojo'), ('você só me da nojo','nojo'), ('tenho aversão a pessoas assim','nojo'), ('antipatia e um mal da sociedade','nojo'), ('que criatura abominável','nojo'), ('e depressiva a maneira como você vê o mundo','nojo'), ('me desagrada sua presença na festa','nojo'), ('sinto asco dessa coisa','nojo'), ('que hediondo!','nojo'), ('vou golfar o cafe fora','nojo'), ('hora que garota detestável!','nojo'), ('estou nauseada','nojo'), ('isso que você disse foi muito grave','nojo'), ('não seja obsceno na frente das crianças','nojo'), ('não seja rude com as visitas','nojo'), ('esse assunto me da repulsa','nojo'), ('que criança terrivelmente travessa','nojo'), ('que criança mal educada','nojo'), ('estou indisposta te dar o divorcio','nojo'), ('tão patetico, não tem nada mais rude para dizer?','nojo'), ('por motivo torpe, com emprego de meio cruel e com impossibilidade de defesa para a vítima','nojo'), ('a inveja e tão vil e vergonhosa que ninguem se atreve a confessá-la','nojo'), ('o miserável receio de ser sentimental e o mais vil de todos os receios modernos','nojo'), ('travesso gato quando fica com saudades do dono mija no sapato','nojo'), ('isso e um ato detestável e covarde','nojo'), ('revelam apenas o que e destrutivo e detestável para o povo','nojo'), ('não sei como e a vida de um patife, mais a de um homem honesto e abominável','nojo'), ('há coisas que temos que suportar para não acharmos a vida insuportável','nojo'), ('as injurias do tempo e as 
injustiças do homem','nojo'), ('odioso e desumano','nojo'), ('você não publicará conteúdo odiento, pornográfico ou ameaçador','nojo'), ('rancoroso e reprimido','nojo'), ('não há animal mais degradante, estúpido, covarde, lamentável, egoísta, rancoroso e invejoso do que o homem','nojo'), ('o virulento debate ente políticos','nojo'), ('eu imploro, não me matem!','medo'), ('tem certeza que não e perigoso?','medo'), ('ele apontou uma arma para minha cabeca', 'medo'), ('nao quero perder meu emprego, irei ficar pobre', 'medo'), ('engordei dez quilos, nao quero ser gordo', 'medo'), ('sera que aqui tem fantasma?', 'medo'), ('eu nao entro no cemiterio nem morto', 'medo'), ('encosto e esperito exitem', 'medo'), ('fantasma e do mau', 'medo'), ('o guima e cagao', 'medo'), ('fantasma me da medo', 'medo'), ('ferrou, o aviao esta caindo e vamos morrer', 'medo'), ('eu me cago de medo de entrar na favela sozinho', 'medo'), ('tenho pavor de parque de diversoes, os brinquedos sao muito altos', 'medo'), ('socorro, tem ladroes entrando em casa', 'medo'), ('uma facada deve doer mais que um tiro', 'medo'), ('tenho medo de ficar pobre e nao ter seguranca para minha familia', 'medo'), ('qual a necessidade dessa violencia, alguem pode se machucar fortemente', 'medo'), ('estranho alguem ter medo de chachorro, eles nao sao bravos', 'medo'), ('eu nao pulo de para-quedas nem morto, morro de medo', 'medo'), ('os presos la no pais Afeganistao sao torturados e sao quase mortos', 'medo'), ('os bandidos estao morando aqui no bairro, proximo a favela', 'medo'), ('eu nao posso perder ela, eu amo demais para ficar longe', 'medo'), ('socorro, eu vi um fantasma', 'medo'), ('eu tenho pavor de mar,morro de medo de me afogar', 'medo'), ('beira de abismo e loucura', 'medo'), ('acabou a luz e fiquei com medo', 'medo'), ('existe muitas pessoas ruins no mundo', 'medo'), ('no mato ao lado de casa tem cobra e aranha, e perigoso', 'medo'), ('eu nao saio sozinha de noite, so tem louco na rua', 'medo'), ('arma de 
fogo e faca sao perigosos', 'medo'), ('não tenho certeza se e seguro','medo'), ('tenho que correr pra não me pegarem','medo'), ('socorro! ele queria roubar os meus doces!','medo'), ('esse cara está me perseguindo','medo'), ('não entro lá, e um lugar muito perigoso','medo'), ('este lugar continua assustador','medo'), ('na selva tem muitos animais perigosos','medo'), ('avancem com cautela','medo'), ('este lugar está silencioso de mais, cuidado!','medo'), ('por favor, deixe-me viver!','medo'), ('vou ficar sem mesada se tirar nota baixa','medo'), ('parece que tem olhos nos vigiando','medo'), ('eu temo que a sentença do juiz possa ser negativa','medo'), ('mas essa missão e arriscada','medo'), ('salvem-se quem puder!','medo'), ('meu plano pode ser descoberto','medo'), ('não tive culpa, juro não fui eu','medo'), ('tenho que tomar cuidado com o lobisomem','medo'), ('se eu não achar, ele vai descobrir a verdade','medo'), ('meu deus, ele desapareceu!','medo'), ('tomara que eles não me vejam daqui!','medo'), ('mantenha isso em segredo, se descobrirem estaremos ferrados','medo'), ('por favor, me soltem, eu sou inocente','medo'), ('estou ouvindo passos atrás de mim','medo'), ('eu vou pedir socorro!','medo'), ('cuidado com as curvas na estrada','medo'), ('não sei não, parece perigoso','medo'), ('estou tremendo de medo!','medo'), ('socorro, eu vou cair!','medo'), ('eu não vou ate a floresta negra, e muito perigoso','medo'), ('ouço passos na minha direção','medo'), ('acho que está arriscado de mais','medo'), ('vamos voltar, e muito perigoso','medo'), ('fuja, se não acabaremos mortos','medo'), ('receio por não me livrar desta situação','medo'), ('socorro! 
ele está armado!','medo'), ('ei cuidado, você vai bater no poste!','medo'), ('socorro, nós estamos afundando','medo'), ('e serio, cuidado com essa arma!','medo'), ('os tubarões estão atacando!','medo'), ('sinto arrepios quando fico sozinho no escuro','medo'), ('calma, eu não estou com o dinheiro','medo'), ('eu acho que estou sendo enganado','medo'), ('ligeiro, temos que fugir depressa','medo'), ('tem um crocodilo selvagem vindo para cá','medo'), ('se ficarmos quietos eles não vão nos achar','medo'), ('fuja! o tigre parece faminto','medo'), ('estou sem saída, preciso de um milagre','medo'), ('tire isso de mim! socorro!','medo'), ('não sei nadar, vou me afogar!','medo'), ('não tenho certeza se e seguro','medo'), ('vou apanhar se meus pais verem meu boletim','medo'), ('não consigo sair daqui!','medo'), ('se sair tão tarde, poderei ser assaltada','medo'), ('não me deixe por favor!','medo'), ('espere, não pode me largar aqui sozinho','medo'), ('temo pela sua segurança','medo'), ('eu te entrego o dinheiro, por favor não me mate!','medo'), ('ele vai levar todo o meu dinheiro','medo'), ('não dirija tão rápido assim','medo'), ('me descobriram, irão me prender!','medo'), ('só espero que não me façam nenhum mal','medo'), ('vou me afogar, me ajudem a sair da água','medo'), ('não estaremos a salvo aqui','medo'), ('não quero nem pensar no que pode acontecer','medo'), ('nessa cidade e uma desgraça atrás da outra','medo'), ('alguem esta me ligando, estou assustado','medo'), ('isso não e remedio, não me matem','medo'), ('eu não confio nele, tenho que ter cautela','medo'), ('muita cautela','medo'), ('vou ser descoberto, meu deus','medo'), ('receio que terei de ir','medo'), ('a noite e muito perigosa','medo'), ('estou estremecendo com essa casa','medo'), ('olha aquela criatura se movendo monstruosamente','medo'), ('não agüento este suspense','medo'), ('afugente os cães','medo'), ('estou chocado e amedrontado com este assassinato brutal','medo'), ('e preciso afugenta com ímpeto este 
medo do inferno','medo'), ('seu políticos usam suas forças para afugentar e amedrontar o povo','medo'), ('o objetivo disso e apenas me amedrontar mais','medo'), ('isso me apavora','medo'), ('ele a feriu profundamente','raiva'), ('eu vou matar ele', 'raiva'), ('que droga, eu nao passei de ano na escola, pqp', 'raiva'), ('pqp, tomei dez multas de transito', 'raiva'), ('que droga, perdi tudo no cassino', 'raiva'), ('me roubaram na rua', 'raiva'), ('esse trouxa me deixou esperando', 'raiva'), ('vou despejar minha cólera em você','raiva'), ('me sinto atormentado','raiva'), ('não me contrarie','raiva'), ('meu time perdeu ontem', 'raiva'), ('so chove nesse lugar, que merda', 'raiva'), ('tirei ferias e fui despedido', 'raiva'), ('fui despedido por justa causa', 'raiva'), ('vou despedir aquele imcompetente', 'raiva'), ('bando de filho da puta, que raiva disso tudo', 'raiva'), ('nada mais faz sentido, que droga', 'raiva'), ('que lerdeza isso', 'raiva'), ('demora demais a fila', 'raiva'), ('nunca vai rapido a fila, sempre demora', 'raiva'), ('perdi todo meu dinheiro', 'raiva'), ('so tem gente feia nessa festa, vamos embora', 'raiva'), ('vou destruir tudo que foi construído','raiva'), ('não consigo terminar este trabalho, e muito frustrante','raiva'), ('me frustra a sua presença aqui','raiva'), ('esta comida me parece muito ruim','raiva'), ('você me destrói','raiva'), ('estamos separados','raiva'), ('estou odiando este vestido','raiva'), ('não pude comprar meu celular hoje','raiva'), ('ela e uma garota ruim','raiva'), ('estivemos em um show horroroso','raiva'), ('o ingresso estava muito caro','raiva'), ('se eu estragar tudo vai por água a baixo','raiva'), ('não possuo dinheiro algum','raiva'), ('sou muito pobre','raiva'), ('vai prejudicar a todos esta nova medida','raiva'), ('ficou ridículo','raiva'), ('este sapato esta muito apertado','raiva'), ('a musica e uma ofensa aos meus ouvidos','raiva'), ('não consigo terminar uma tarefa muito difícil','raiva'), ('reprovei em minha 
graduação','raiva'), ('estou muito chateado com tudo','raiva'), ('eu odeio em você','raiva'), ('e um desprazer conhecê-lo','raiva'), ('estou desperdiçando minhas ferias','raiva'), ('e muito ruim este jogo','raiva'), ('vamos ter muito rancor pela frente','raiva'), ('não achei que seria tão terrível','raiva'), ('vou vetar o orçamento ao cliente','raiva'), ('meus pais não consentiram nosso casamento','raiva'), ('eu odiei este perfume','raiva'), ('seu descaso e frustrante','raiva'), ('me sinto completamente amarga','raiva'), ('desprezo muito o seu trabalho','raiva'), ('estamos descontentes por nossa família','raiva'), ('vou infernizar a sua empresa','raiva'), ('estou furioso com estes valores','raiva'), ('obrigaram o rapaz a sair','raiva'), ('como ele pode deixar de lado?','raiva'), ('são apenas injurias sobre mim','raiva'), ('estou enfurecido com a situação dessa empresa','raiva'), ('estou com o diabo no corpo','raiva'), ('isso foi diabólico','raiva'), ('tenho aversão à gente chata','raiva'), ('não vou perdoar sua traição','raiva'), ('esse dinheiro sujo e corrupto','raiva'), ('eles me crucificam o tempo todo','raiva'), ('eu vou enlouquecer com todo este barulho','raiva'), ('não agüento todo esse assedio','raiva'), ('cólera do dragão','raiva'), ('isso e ridículo!','raiva'), ('da próxima vez, vou inventar tudo sozinho','raiva'), ('seus tolos! deixaram ele escapar!','raiva'), ('jamais te perdoarei','raiva'), ('o que e isso? 
outra multa','raiva'), ('você passou dos limites!','raiva'), ('sente-se e cale a boca','raiva'), ('ingratosvermesvocês me pagam!','raiva'), ('saiam da dai, se não arranco vocês dai!','raiva'), ('você já me causou problemas suficientes','raiva'), ('que transito maldito, perdi muito tempo parado aqui nessa porra', 'raiva'), ('porra, eu nao quero esperar nada, sai da minha frente', 'raiva'), ('a bebida esquentou demais, estou muito puto com essa porra', 'raiva'), ('ele me traiu com aquela vadia, filho da puta', 'raiva'), ('uma palavra a mais e eu ja mandaria calar a boca', 'raiva'), ('eu nao tenho paciencia com gente lerda e burra', 'raiva'), ('preconceito me da muita raiva, que povo mais escroto', 'raiva'), ('estou nervoso com esta situacao, eu nao mereco passar esta barra sozinho, estou para matar alguem', 'raiva'), ('eu estou bastante e mesma assim nao passei no teste, que merda', 'raiva'), ('eu faco dieta e constinuo engordando', 'raiva'), ('isso foi a gota d’agua','raiva'), ('o que você tem com isso?','raiva'), ('não vejo a hora de me livrar de você','raiva'), ('já entendi a jogada seus safados!','raiva'), ('você não merece piedade','raiva'), ('saia de perto de mim','raiva'), ('suma daqui, ou arranco seu couro!','raiva'), ('estou revoltado com essa situação','raiva'), ('seu idiota!','raiva'), ('não, eu não vou te emprestar dinheiro!','raiva'), ('você não passa de um cafajeste! vai embora','raiva'), ('pare de frescura e vá trabalhar','raiva'), ('eles merecem uma lição','raiva'), ('ainda estou muito bravo com você','raiva'), ('eu preciso surrar aquela chantagista','raiva'), ('olha o que você fez! 
derramou!','raiva'), ('você está pedindo pra apanhar!','raiva'), ('me deixa em paz!','raiva'), ('morra maldito, morra!','raiva'), ('você e mais irritante de perto','raiva'), ('e bom fechar o bico','raiva'), ('magicamente você me surpreendeu','surpresa'), ('e imenso esse globo','surpresa'), ('uau que noticia bacana', 'surpresa'), ('uau, eu tambem quero um carro desse para mim', 'surpresa'), ('eu nao esperava ele me pedir em casamento, que noticia surpreendente', 'surpresa'), ('sera que iremos conseguir fugir?', 'surpresa'), ('descobri que estou gravida', 'surpresa'), ('qual o resultado do teste?', 'surpresa'), ('fascinante esse filme, eu adorei', 'surpresa'), ('que susto, eu nao esperava', 'surpresa'), ('ue, eu achei que iria chover hoje', 'surpresa'), ('nunca se sabe', 'surpresa'), ('quem sera o assassino', 'surpresa'), ('voce nunca sabera se e homem ou mulher', 'surpresa'), ('isso e tremendamente interessante','surpresa'), ('meu bilhete for sorteado, inacreditável!','surpresa'), ('um assalto a mão armada!','surpresa'), ('incrível, cabe em qualquer lugar!','surpresa'), ('você por aqui?','surpresa'), ('não dá pra acreditar no que ela me contou','surpresa'), ('os convidados já estão chegando!','surpresa'), ('puxa vida! nunca nos livramos de alguem tão depressa','surpresa'), ('<NAME> sumiu, eu estava com ela na mão','surpresa'), ('oh! um disco voador','surpresa'), ('amigos, que bela surpresa!','surpresa'), ('nunca pensei que veria isso e perto','surpresa'), ('nem acredito que comi tanto','surpresa'), ('não acredito que veio me ver','surpresa'), ('não acredito que e tão descarado','surpresa'), ('me surpreende sua falta de tato','surpresa'), ('o predio onde eles moravam desabou!','surpresa'), ('inacreditável um bolo tão grande','surpresa'), ('e serio mesmo? 
não dá pra acreditar','surpresa'), ('como assim não vai ao nosso encontro?','surpresa'), ('como assim não tem ninguem em casa?','surpresa'), ('ue, mas para onde ele foi?!','surpresa'), ('por essa eu não esperava','surpresa'), ('nossa, olha só que mergulho','surpresa'), ('minha esposa está grávida!','surpresa'), ('meu dinheiro sumiu!','surpresa'), ('e verdade que os dois terminaram?!?','surpresa'), ('caramba, nem vi você chegar','surpresa'), ('nossa, como pode alguem cozinhar tão mal?','surpresa'), ('nossa que incrível','surpresa'), ('a fórmula sumiu!','surpresa'), ('eu nem acredito que já estou terminando o curso','surpresa'), ('não acredito que esta aqui comigo novamente','surpresa'), ('está escondendo algo de nós!','surpresa'), ('como assim, ainda não terminou a tarefa','surpresa'), ('pensei que já estivesse pronta!','surpresa'), ('opa! quem apagou a luz?','surpresa'), ('caramba! aonde vai tão rápido?','surpresa'), ('estamos seguindo o caminho errado!','surpresa'), ('quatro reais o litro da gasolina!','surpresa'), ('me assustei ao vê-lo desse jeito!','surpresa'), ('minha mãe está grávida, acredita nisso?','surpresa'), ('parece mentira você ter crescido tanto','surpresa'), ('me surpreende sua imaginação','surpresa'), ('suas roupas são realmente lindas','surpresa'), ('com consegue ser tão bela?','surpresa'), ('essa e realmente uma casa deslumbrante','surpresa'), ('superou minhas expectativas','surpresa'), ('e admirável a maneira como se comporta','surpresa'), ('isso e realmente chocante','surpresa'), ('algumas noticias me surpreenderam no noticiário','surpresa'), ('surpreendente sua festa','surpresa'), ('nossa, que top esse filme!, eu nao esperava', 'surpresa'), ('cheguei atrasado e me assim consegui entregar o resultado, ufa!', 'surpresa'), ('ufa, essa foi por pouco, quase caimos fora', 'surpresa'), ('quem sabe na proxima vida', 'surpresa'), ('estou tremendo de alegria','surpresa'), ('chocou grande parte do mundo','surpresa'), ('eu ficaria muito espantado com a 
sua vinda','surpresa'), ('ele e admirável','surpresa'), ('sua beleza me surpreendeu','surpresa'), ('seus olhos são surpreendentemente verdes','surpresa'), ('os políticos se surpreendem quando alguem acredita neles','surpresa'), ('estou perplexa com essas denuncias','surpresa'), ('fiquei perplexo com suas palavras','surpresa'), ('estou abismado com sua prosa','surpresa'), ('eu ficaria realmente abismado se me dissessem isso','surpresa'), ('o grupo foi surpreendido enquanto lavava o carro','surpresa'), ('estou boquiaberto com as imagens','surpresa'), ('estou boquiaberto com essas suas palavras','surpresa'), ('esse quadro e maravilhoso','surpresa'), ('este carro me deixou maravilhado','surpresa'), ('estou maravilhada','surpresa'), ('pqp, voce e gay?', 'surpresa'), ('qual sera o meu salario ano que vem?', 'surpresa'), ('essa expectativa esta me matando','surpresa'), ('vou caminhar sempre na expectativa de encontrá-lo','surpresa'), ('você emudece minhas palavras','surpresa'), ('minhas palavras vão emudecer se não parar de me surpreender','surpresa'), ('a mulher e um efeito deslumbrante da natureza','surpresa'), ('estou deslumbrada com essas jóias','surpresa'), ('isso e romântico e deslumbrante','surpresa'), ('isso pode ser surpreendentemente deslumbrante','surpresa'), ('trabalho deslumbrante','surpresa'), ('essas pessoas são esplêndida','surpresa'), ('e esplendido como o ceu se encontra no momento','surpresa'), ('e um carro fantástico','surpresa'), ('um edifício realmente fantástico','surpresa'), ('por favor não me abandone','tristeza'), ('não quero ficar sozinha','tristeza'), ('não me deixe sozinha','tristeza'), ('estou abatida','tristeza'), ('ele entrou em coma para sempre, que dó', 'tristeza'), ('uma bacteria matou a populacao inteira', 'tristeza'), ('a miseria tomou conta do pais todo', 'tristeza'), ('pobreza e miseria matam todos de fome no mundo', 'tristeza'), ('eu nao aguento mais viver sem ele', 'tristeza'), ('eu trabalho muito e nao ganho nada, e triste', 
'tristeza'), ('cancer e triste demais', 'tristeza'), ('qualquer doenca e terrivel', 'tristeza'), ('sofrer acidente com trauma e muito dramatico', 'tristeza'), ('saber que funkeiro ganha mais que professor e de doer o coracao', 'tristeza'), ('bati o carro no poste, me ferrei', 'tristeza'), ('acabaram minhas ferias', 'tristeza'), ('ficar doente e muito ruim', 'tristeza'), ('ele me traiu com um vadia', 'tristeza'), ('meus pais estao doentes', 'tristeza'), ('depressao e uma doenca silenciosa e mortal', 'tristeza'), ('ela perdeu o emprego e entrou em depressao profunda', 'tristeza'), ('amputar um membro devido a doenca e triste demais', 'tristeza'), ('filhos pequenos foram separados dos pais na fronteira, que tristeza', 'tristeza'), ('os animais sao cacados e mortos a toa, sem razao nenhuma, eu morro de dó', 'tristeza'), ('sacrificar os animais devido a ganancia e muito triste', 'tristeza'), ('e bizarro o quanto morre de fome na africa', 'tristeza'), ('qualquer doenca que cause sequelas e terrivel', 'tristeza'), ('a guerra so traz destruicao e pobreza', 'tristeza'), ('saudade faz muito mal para o coracao', 'tristeza'), ('ele esta todo abatido','tristeza'), ('tão triste suas palavras','tristeza'), ('seu amor não e mais meu','tristeza'), ('estou aborrecida','tristeza'), ('isso vai me aborrecer','tristeza'), ('estou com muita aflição','tristeza'), ('me aflige o modo como fala','tristeza'), ('estou em agonia com meu intimo','tristeza'), ('não quero fazer nada','tristeza'), ('me sinto ansiosa e tensa','tristeza'), ('não consigo parar de chorar','tristeza'), ('não consigo segurar as lagrimas','tristeza'), ('e muita dor perder um ente querido','tristeza'), ('estou realmente arrependida','tristeza'), ('acho que o carma volta, pois agora sou eu quem sofro','tristeza'), ('você não cumpriu suas promessas','tristeza'), ('me sinto amargurada','tristeza'), ('coitado esta tão triste','tristeza'), ('já e tarde de mais','tristeza'), ('nosso amor acabou','tristeza'), ('essa noite machuca 
só para mim','tristeza'), ('eu não estou mais no seu coração','tristeza'), ('você mudou comigo','tristeza'), ('quando eu penso em você realmente dói','tristeza'), ('como se fosse nada você vê minhas lagrimas','tristeza'), ('você disse cruelmente que não se arrependeu','tristeza'), ('eu nunca mais vou te ver','tristeza'), ('ela esta com depressão','tristeza'), ('a depressão aflige as pessoas','tristeza'), ('estar depressivo e muito ruim','tristeza'), ('estou derrotada e deprimida depois deste dia','tristeza'), ('e comovente te ver dessa maneira','tristeza'), ('e comovente ver o que os filhos do brasil passam','tristeza'), ('como me sinto culpada','tristeza'), ('estou abatida','tristeza'), ('a ansiedade tomou conta de mim','tristeza'), ('as pessoas não gostam do meu jeito','tristeza'), ('adeus passamos bons momentos juntos','tristeza'), ('sinto sua falta','tristeza'), ('ele não gostou da minha comida','tristeza'), ('estou sem dinheiro para a comida','tristeza'), ('queria que fosse o ultimo dia da minha vida','tristeza'), ('você está com vergonha de mim','tristeza'), ('ela não aceitou a minha proposta','tristeza'), ('era o meu ultimo centavo','tristeza'), ('reprovei de ano na faculdade','tristeza'), ('afinal você só sabe me desfazer','tristeza'), ('eu falhei em tudo nessa vida','tristeza'), ('eu fui muito humilhado','tristeza'), ('e uma história muito triste','tristeza'), ('ninguem acredita em mim','tristeza'), ('eu não sirvo para nada mesmo','tristeza'), ('droga, não faço nada direito','tristeza'), ('sofrimento em dobro na minha vida','tristeza'), ('fui demitida essa semana','tristeza'), ('as crianças sofrem ainda mais que os adultos','tristeza'), ('pra mim um dia e ruim, o outro e pior','tristeza'), ('de repente perdi o apetite','tristeza'), ('oh que dia infeliz','tristeza'), ('estamos afundados em contas','tristeza'), ('nem um milagre pode nos salvar','tristeza'), ('só me resta a esperança','tristeza'), ('pior que isso não pode ficar','tristeza'), ('meu salário e 
baixo','tristeza'), ('não passei no vestibular','tristeza'), ('ninguem se importa comigo','tristeza'), ('ninguem lembrou do meu aniversário','tristeza'), ('tenho tanto azar','tristeza'), ('o gosto da vingança e amargo','tristeza'), ('sou uma mulher amargurada depois de que você me deixou','tristeza'), ('estou desanimada com a vida','tristeza'), ('e um desanimo só coitadinha','tristeza'), ('a derrota e depressiva','tristeza'), ('discriminar e desumano','tristeza'), ('que desanimo','tristeza'), ('e uma desonra para o pais','tristeza'), ('a preocupação deveria nos levar a ação não a depressão','tristeza'), ('passamos ao desalento e a loucura','tristeza'), ('aquele que nunca viu a tristeza nunca reconhecerá a alegria','tristeza'), ('cuidado com a tristeza ela e um vicio','tristeza')] # + id="QcrDgsn3evta" #3 exemplo_base = pd.DataFrame(base_treinamento) exemplo_base.columns =['Frase','Sentimento'] # + id="C5MpVeDuev74" colab={"base_uri": "https://localhost:8080/"} outputId="00d7ceaa-b0d1-478e-d422-44b8cf01fa17" #4 print("tamanho da base de treinamento {}".format(exemplo_base.shape[0])) exemplo_base.Sentimento.value_counts() # + id="LXYcxi2eewWV" colab={"base_uri": "https://localhost:8080/"} outputId="9ad0f480-85fd-4a86-c353-fbf904d7e0b9" #5 print((exemplo_base.Sentimento.value_counts() / exemplo_base.shape[0])*100) # + id="SNn5knIvewij" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="b03ce2ad-30e3-486d-a5a4-e1c1c91acb48" #6 exemplo_base.sample(n=20) # + id="h3DkcArKewva" #7 base_teste =[('não precisei pagar o ingresso','alegria'), ('se eu ajeitar tudo fica bem','alegria'), ('minha fortuna ultrapassa a sua','alegria'), ('sou muito afortunado','alegria'), ('e benefico para todos esta nova medida','alegria'), ('ficou lindo','alegria'), ('achei esse sapato muito simpático','alegria'), ('estou ansiosa pela sua chegada','alegria'), ('congratulações pelo seu aniversário','alegria'), ('delicadamente ele a colocou para dormir','alegria'), ('a musica e 
linda','alegria'), ('sem musica eu não vivo','alegria'), ('conclui uma tarefa muito difícil','alegria'), ('conclui minha graduação','alegria'), ('estou muito contente com tudo','alegria'), ('eu confio em você','alegria'), ('e um prazer conhecê-lo','alegria'), ('o coleguismo de vocês e animador','alegria'), ('estou aproveitando as ferias','alegria'), ('vamos aproveitar as ferias','alegria'), ('e muito divertido este jogo','alegria'), ('vamos ter muita diversão','alegria'), ('não achei que me divertiria tanto assim','alegria'), ('vou consentir o orçamento ao cliente','alegria'), ('com o consentimento dos meus pais podemos nos casar','alegria'), ('eu adorei este perfume','alegria'), ('sua bondade e cativante','alegria'), ('estou despreocupada','alegria'), ('não me preocupo com o que aconteceu','alegria'), ('me sinto completamente segura','alegria'), ('estimo muito o seu trabalho','alegria'), ('somos estimados por nossa família','alegria'), ('concretizamos nossa ideia','alegria'), ('nosso ideal foi alcançado','alegria'), ('estamos muito felizes juntos','alegria'), ('estou tão animada com os preparativos para o casamento','alegria'), ('você será muito amado meu filho','alegria'), ('os apaixonados são maravilhosos','alegria'), ('agradeço imensamente o seu apoio nestes dias','alegria'), ('esta comida me parece muito atraente','alegria'), ('você me completa','alegria'), ('poderemos completar o projeto hoje!','alegria'), ('estamos namorando','alegria'), ('estou namorando este vestido a um tempo','alegria'), ('pude comprar meu celular hoje','alegria'), ('e um deleite poder compartilhar minhas vitórias','alegria'), ('ela e um boa garota','alegria'), ('estivemos em um ótimo show','alegria'), ('o mundo e feio como o pecado','nojo'), ('a coisa mais difícil de esconder e aquilo que não existe','nojo'), ('você errou feio aquele gol','nojo'), ('nunca vou me casar sou muito feia','nojo'), ('os golpes da adversidade são terrivelmente amargos','nojo'), ('os homem ficam terrivelmente 
chatos','nojo'), ('abominavelmente convencido','nojo'), ('terrivelmente irritado','nojo'), ('eu bebi demais e vomitei', 'nojo'), ('eu nao limpo banheiro nem morto', 'nojo'), ('nossa, que cheiro de bosta! esta muito fedido', 'nojo'), ('esta fedido demais aqui', 'nojo'), ('esse peixe esta podre, nao coma', 'nojo'), ('ele foi jantar sem lavar as maos, que porco', 'nojo'), ('nao tem hijeine nenhuma aqui, lixo!', 'nojo'), ('as instituições publicas estão terrivelmente decadentes','nojo'), ('a população viveu em isolamento por muito tempo','nojo'), ('estou terrivelmente preocupada','nojo'), ('o nacionalismo e uma doença infantil','nojo'), ('se me es antipático a minha negação esta pronta','nojo'), ('muitos documentários sobre esse casal antipático','nojo'), ('sua beleza não desfaça sua antipatia','nojo'), ('esta e uma experiência desagradável','nojo'), ('o homem cuspiu na rua', 'nojo'), ('a comida estava estragada', 'nojo'), ('comer inseto e nojento', 'nojo'), ('desagradável estrago nos banheiros','nojo'), ('o mais irritante no amor e que se trata de um crime que precisa de um cúmplice','nojo'), ('a situação nos causa grande incomodo','nojo'), ('estou preocupado com o incomodo na garganta','nojo'), ('simplesmente não quero amolação da policia','nojo'), ('você e uma criaturinha muito impertinente','nojo'), ('o peso e a dor da vida','nojo'), ('me arrependo amargamente de minhas ações','nojo'), ('o destino e cruel e os homens não são dignos de compaixão','nojo'), ('o ódio conduz ao isolamento cruel e ao desespero','nojo'), ('encerrou com o massacre mais repudiável e asqueroso que se conhece','nojo'), ('de mal gosto e asqueroso','nojo'), ('tudo e inserto neste mundo hediondo','nojo'), ('o crime de corrupção e um crime hediondo','nojo'), ('o rio esta fetido e de cor escura','nojo'), ('muito lixo no rio o deixa malcheiroso','nojo'), ('existe uma laranja podre no grupo e já desconfiamos quem e','nojo'), ('foi de repente estou machucado e me sentindo enjoado','nojo'), ('eu 
fiquei enojado','nojo'), ('daqui alguns meses vou embora deste pais que já estou nauseado','nojo'), ('que abominável esse montro!','medo'), ('vamos alarmar a todos sobre a situação','medo'), ('estou amedrontada','medo'), ('estou com muito medo da noite','medo'), ('ele esta me ameaçando a dias','medo'), ('quanta angustia','medo'), ('eu nao gosto de arma, e perigoso', 'medo'), ('qualquer um tem medo de alguem armado', 'medo'), ('arrumar briga hoje em dia e muito perigoso', 'medo'), ('eu nao reajo em nenhum assalto, to fora!', 'medo'), ('esses bichos: cobra, aranha, escorpiao sao horriveis, eu tenho muito medo', 'medo'), ('nossa que alto aqui, eu nao gosto de altura', 'medo'), ('montanha-russa da muito medo, geralmente eu nao tenho coragem', 'medo'), ('estou angustiada','medo'), ('angustiadamente vou sair e casa','medo'), ('isso me deixa apavorada','medo'), ('você esta me apavorando','medo'), ('estou desconfiada de você','medo'), ('não confio em você','medo'), ('ate o cachorro está apavorado','medo'), ('estou assustado com as ações do meu colega','medo'), ('agora se sente humilhado, apavorado','medo'), ('assustou a população e provocou mortes','medo'), ('estou com dificuldades para respirar e muito assustado','medo'), ('os policiais se assustaram quando o carro capotou','medo'), ('o trabalhador e assombrado pelo temor do desemprego','medo'), ('este lugar e mal assombrado','medo'), ('estou assombrado pela crise financeira','medo'), ('mesmo aterrorizado lembro de você','medo'), ('aterrorizado e suando frio','medo'), ('um grupo de elefantes selvagens tem aterrorizado vilas','medo'), ('me sinto intimidada pela sua presença','medo'), ('tenho medo de ser advertida novamente','medo'), ('estou correndo o risco de ser advertido','medo'), ('estou correndo riscos de saúde','medo'), ('os riscos são reais','medo'), ('podemos perder muito dinheiro com essa investida','medo'), ('socorro, fui intimado a depor','medo'), ('fui notificado e estou com medo de perde a guarda da minha 
filha','medo'), ('estou angustiada com meus filhos na rua','medo'), ('e abominável o que fazem com os animais','medo'), ('foi terrível o tigre quase o matou','medo'), ('me advertiram sobre isso','medo'), ('ate que enfim, não agüentava mais te esperar','raiva'), ('eu quero meu dinheiro de volta agora!','raiva'), ('eu odeio a escola!','raiva'), ('pqp, que merda de festa', 'raiva'), ('eu nunca tiro notas altas, que droga', 'raiva'), ('fiquei chapado e bati o carro, pqp que raiva', 'raiva'), ('nao passei no vestibular por pouco, ridiculo esse teste!', 'raiva'), ('vou fazer picadinho de você','raiva'), ('detesto trabalhar no verão','raiva'), ('quero minha comida, e quero agora!','raiva'), ('melhor você recolher minhas compras agora!','raiva'), ('quero descer agora sua maluca','raiva'), ('vou reclamar com o gerente!','raiva'), ('vai engolir o que disse!','raiva'), ('ele me ridiculariza diante de todos','raiva'), ('não quero mais saber de você','raiva'), ('vejo você na cadeia safado!','raiva'), ('agora vou ter que pagar mais isso ainda!','raiva'), ('saia logo do banheiro!','raiva'), ('suba já para o seu quarto!','raiva'), ('eu falei para calar a boca seu idiota!','raiva'), ('eu disse para você cair fora!','raiva'), ('não agüento mais que fiquem me culpando sem motivo!','raiva'), ('não suporto olhar na sua cara!','raiva'), ('eu não sou um elefante','raiva'), ('juro que se olhar pra mim eu o mato!','raiva'), ('chega, não quero saber mais deste assunto','raiva'), ('como pode ser tão burro?','raiva'), ('não me aborreça seu moleque','raiva'), ('não quero me aborrecer com estas bobagens','raiva'), ('ele me agrediu!','raiva'), ('eu amaldiçôo você e a sua família','raiva'), ('não me amole','raiva'), ('não venha me amolar','raiva'), ('isso tudo e uma tormenta','raiva'), ('eu vou matar você','raiva'), ('para que simplificar se você pode sempre complicar','raiva'), ('isso esta me enlouquecendo','raiva'), ('estou furiosa com você','raiva'), ('isso mesmo fique furioso','raiva'), 
('esses livros são magníficos','surpresa'), ('esse vinho e magnífico','surpresa'), ('seria magnífico ver o esperaculo','surpresa'), ('o casamento foi estupendo','surpresa'), ('e um jogador bárbaro estupendo','surpresa'), ('esse dia esta excelente','surpresa'), ('o cantor estava excelente','surpresa'), ('o universo e assombroso','surpresa'), ('o amor e sublime','surpresa'), ('sua sublime atuação','surpresa'), ('e formidável meu caro walter','surpresa'), ('eu nao acredito! voce e gay?', 'surpresa'), ('nossa, que noticia explendida!', 'surpresa'), ('eu nao imaginava isso, que noticia boa', 'surpresa'), ('como e formidável a presença de todos','surpresa'), ('e formidável ter a quem dizer adeus','surpresa'), ('e um conselheiro formidável o seu','surpresa'), ('o artigo foi formidável','surpresa'), ('pica pau e um destaque no imaginário brasileiro','surpresa'), ('ah! o absoluto do imaginário','surpresa'), ('isso foi surreal','surpresa'), ('uma historia completamente surreal','surpresa'), ('essas pinturas beiram o surreal','surpresa'), ('você nem acreditam de tão surreal','surpresa'), ('incrível!','surpresa'), ('fiquei pasma com tudo isso','surpresa'), ('você me deixa pasmo','surpresa'), ('estou admirado com a sua astucia','surpresa'), ('que bela surpresa você me fez','surpresa'), ('não acredito que fez isso!','surpresa'), ('isso foi apavorante','surpresa'), ('isso tão de repente','surpresa'), ('estou chocada com isso','surpresa'), ('estou surpresa e desconsertada','surpresa'), ('esta realmente deslumbrante querida','surpresa'), ('fiquei completamente sem plavras','surpresa'), ('e espantoso o modo como ele nos olha','surpresa'), ('incrivel você estar aqui','surpresa'), ('que fantástica festa minha querida','surpresa'), ('isso tudo e um erro','tristeza'), ('eu sou errada eu sou errante','tristeza'), ('tenho muito dó do cachorro','tristeza'), ('e dolorida a perda de um filho','tristeza'), ('essa tragedia vai nos abalar para sempre','tristeza'), ('perdi meus 
filhos','tristeza'), ('ela nao para de chorar a morte do marido', 'tristeza'), ('ela perdeu um filho no aborto', 'tristeza'), ('os pais deles morreram em um acidente', 'tristeza'), ('o aviao caiu no mar e nao tiveram mais noticias', 'tristeza'), ('perdi meu curso','tristeza'), ('sou só uma chorona','tristeza'), ('você e um chorão','tristeza'), ('se arrependimento matasse','tristeza'), ('me sinto deslocado em sala de aula','tristeza'), ('foi uma passagem fúnebre','tristeza'), ('nossa condolências e tristeza a sua perda','tristeza'), ('desanimo, raiva, solidão ou vazies, depressão','tristeza'), ('vivo te desanimando','tristeza'), ('estou desanimado','tristeza'), ('imperador sanguinário, depravado e temeroso','tristeza'), ('meu ser esta em agonia','tristeza'), ('este atrito entre nos tem que acabar','tristeza'), ('a escuridão desola meu ser','tristeza'), ('sua falsa preocupação','tristeza'), ('sua falsidade me entristece','tristeza'), ('quem esta descontente com os outros esta descontente consigo próprio','tristeza'), ('a torcida esta descontente com a demissão do tecnico','tristeza'), ('estou bastante aborrecido com o jornal','tristeza'), ('me sinto solitário e entediado','tristeza'), ('a vida e solitária para aqueles que não são falsos','tristeza'), ('como com compulsão depois da depressão','tristeza'), ('estou me desencorajando a viver','tristeza'), ('ele desencoraja minhas vontades','tristeza'), ('isso vai deprimindo por dentro','tristeza'), ('acho que isso e defeituoso','tristeza'), ('os remedios me derrubam na cama','tristeza'), ('a depressão vai me derrubar','tristeza'), ('suas desculpas são falsas','tristeza'), ('não magoe as pessoas','tristeza')] # + id="smuchIoOew6s" colab={"base_uri": "https://localhost:8080/"} outputId="8c7166b0-250b-442b-df5e-3218e8959070" #8 exemplo_base_teste = pd.DataFrame(base_teste) exemplo_base_teste.columns = ['frase','Sentimento'] print('Tamanho da base de teste: '.format(exemplo_base_teste.shape[0])) 
exemplo_base_teste.Sentimento.value_counts()

# +
#9
# Class distribution of the test base, as percentages.
print((exemplo_base_teste.Sentimento.value_counts() / exemplo_base_teste.shape[0])*100)

# +
#10
# Fetch the Portuguese stop-word list and the RSLP stemmer resources.
nltk.download('stopwords')
nltk.download('rslp')
lista_stop = nltk.corpus.stopwords.words('portuguese')
np.transpose(lista_stop)

# +
#11
# Domain-specific additions to the stop-word list.
lista_stop.append('tipo')
lista_stop.append('tão')
lista_stop.append('tudo')
lista_stop.append('vai')

# +
#12
#########################################################

# +
#13
def removeStopWords(texto):
    """Drop stop words from each labelled phrase.

    texto: iterable of (phrase, sentiment) pairs, phrase being a raw string.
    Returns a list of (token_list, sentiment) pairs with stop words removed.
    """
    frases = []
    for (palavras, sentimento) in texto:
        semStop = [p for p in palavras.split() if p not in lista_stop]
        frases.append((semStop, sentimento))
    return frases

# +
#14
def aplica_stemmer(texto):
    """Tokenise, drop stop words and RSLP-stem each labelled phrase.

    texto: iterable of (phrase, sentiment) pairs, phrase being a raw string.
    Returns a list of (stemmed_token_list, sentiment) pairs.
    """
    stemmer = nltk.stem.RSLPStemmer()
    # Renamed from `frases_sem_stemming`: the list holds the *stemmed* phrases.
    frases_com_stemming = []
    for (palavras, sentimentos) in texto:
        com_stemming = [str(stemmer.stem(p)) for p in palavras.split() if p not in lista_stop]
        frases_com_stemming.append((com_stemming, sentimentos))
    return frases_com_stemming

# +
#15
frases_com_stem_treinamento = aplica_stemmer(base_treinamento)

# +
#16
pd.DataFrame(frases_com_stem_treinamento, columns=['Frase','Sentimento']).sample(10)

# +
#17
frases_com_stem_teste = aplica_stemmer(base_teste)

# +
#18
def busca_palavras(frases):
    """Flatten a list of (token_list, sentiment) pairs into one flat token list."""
    todas_palavras = []
    for (palavras, sentimento) in frases:
        todas_palavras.extend(palavras)
    return todas_palavras

# +
#19
palavras_treinamento = busca_palavras(frases_com_stem_treinamento)
palavras_teste = busca_palavras(frases_com_stem_teste)

# +
#20
print("Quantidade de palavras na base de treinamento: {}".format(pd.DataFrame(palavras_treinamento).count()))

# +
#21
def busca_frequencia(palavras):
    """Return an nltk.FreqDist (token -> count) over a flat token list."""
    palavras = nltk.FreqDist(palavras)
    return palavras

# +
#22
frequencia_treinamento = busca_frequencia(palavras_treinamento)

# +
#23
frequencia_treinamento.most_common(20)

# +
#24
frequencia_teste = busca_frequencia(palavras_teste)

# +
#25
def busca_palavras_unicas(frequencia):
    """Return the vocabulary (unique tokens) of a FreqDist."""
    frequencia = frequencia.keys()
    return frequencia

palavras_unicas_treinamento = busca_palavras_unicas(frequencia_treinamento)
palavras_unicas_teste = busca_palavras_unicas(frequencia_teste)

# +
#26
def _extrai_caracteristicas(documento, vocabulario):
    """Binary bag-of-words features: vocabulary word -> present in `documento`?

    Shared helper for the two public extractors below (they previously
    duplicated this body verbatim).
    """
    doc = set(documento)
    caracteristicas = {}
    for palavra in vocabulario:
        caracteristicas['%s' % palavra] = (palavra in doc)
    return caracteristicas


def extrator_palavras(documento):
    """Features over the TRAINING vocabulary."""
    return _extrai_caracteristicas(documento, palavras_unicas_treinamento)

# +
#27
def extrator_palavras_teste(documento):
    """Features over the TEST vocabulary.

    NOTE(review): building test features from the *test* vocabulary means the
    feature keys differ from those the classifier was trained on; the usual
    approach is to reuse `extrator_palavras` for the test set as well —
    confirm this is intentional before trusting the accuracy figure.
    """
    return _extrai_caracteristicas(documento, palavras_unicas_teste)

# +
#28
base_completa_treinamento = nltk.classify.apply_features(extrator_palavras, frases_com_stem_treinamento)
base_completa_teste = nltk.classify.apply_features(extrator_palavras_teste, frases_com_stem_teste)

# +
#29
###############################################################

# +
#30
classificador = nltk.NaiveBayesClassifier.train(base_completa_treinamento)

# +
#31
print(classificador.labels())

# +
#32
print(classificador.show_most_informative_features(10))

# +
#33
print(nltk.classify.accuracy(classificador, base_completa_teste))

# +
#34
# Collect the misclassified test examples as (expected, predicted, features).
erros = []
for (frase, classe) in base_completa_teste:
    resultado = classificador.classify(frase)
    if resultado != classe:
        erros.append((classe, resultado, frase))

# +
#35
from nltk.metrics import ConfusionMatrix

esperado = []
previsto = []
for (frase, classe) in base_completa_teste:
    resultado = classificador.classify(frase)
    previsto.append(resultado)
    esperado.append(classe)
matriz = ConfusionMatrix(esperado, previsto)
print(matriz)

# +
#36
# BUG FIX: the original loop variable was called `palavras_treinamento`,
# silently clobbering the module-level training word list built in cell #19.
teste = "Nossa, que notícia maravilhosa!"
stemmer = nltk.stem.RSLPStemmer()
teste_stemming = [str(stemmer.stem(token)) for token in teste.split()]

novo = extrator_palavras(teste_stemming)
distribuicao = classificador.prob_classify(novo)
for classe in distribuicao.samples():
    print("%s : %f"%(classe, distribuicao.prob(classe)))

# +
#37
teste = "Pqp, que Trânsito chato da porra!"
stemmer = nltk.stem.RSLPStemmer()
teste_stemming = [str(stemmer.stem(token)) for token in teste.split()]

novo = extrator_palavras(teste_stemming)
distribuicao = classificador.prob_classify(novo)
for classe in distribuicao.samples():
    print("%s : %f"%(classe, distribuicao.prob(classe)))
001_NLP.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

#hide
# %load_ext autoreload
# %autoreload 2

# +
# default_exp conjugates
# -

# # Conjugates
#
# > This module contains scripts to pre-calculate the variational Bayes step, which is the most computationally demanding step in DGLM updating and forecasting. These functions are called automatically and do not need to be directly referenced by a user.

# +
#exporti
import numpy as np
from scipy.special import digamma
from scipy import optimize as opt
from functools import partial

from pybats.shared import trigamma, load_interpolators, load_sales_example

import pickle
import zlib
import os
# -


#export
def beta_approx(x, ft, qt):
    """Residuals of the beta moment-matching conditions.

    `x` holds square-rooted (alpha, beta), squared here so the root finder can
    work on an unconstrained scale while the parameters stay positive.
    """
    a, b = x ** 2
    mean_resid = digamma(a) - digamma(b) - ft
    var_resid = trigamma(x=a) + trigamma(x=b) - qt
    return np.array([mean_resid, var_resid]).reshape(-1)


#export
def gamma_approx(x, ft, qt):
    """Residuals of the gamma moment-matching conditions (same sqrt trick)."""
    a, b = x ** 2
    return np.array([digamma(a) - np.log(b) - ft,
                     trigamma(x=a) - qt]).reshape(-1)


#export
def gamma_alpha_approx(x, qt):
    """Residual in alpha alone: trigamma(alpha) should equal qt."""
    a = x ** 2
    return np.array([trigamma(x=a[0]) - qt]).reshape(-1)


#export
def pois_alpha_param(qt, alpha=1.):
    """Solve trigamma(alpha) = qt for alpha via Levenberg-Marquardt."""
    objective = partial(gamma_alpha_approx, qt=qt)
    solution = opt.root(objective, x0=np.sqrt(np.array([alpha])), method='lm')
    return solution.x ** 2


#export
def gamma_solver(ft, qt, alpha=1., beta=1.):
    """Gamma (alpha, beta) matching mean/variance targets ft, qt."""
    # For very small qt a closed-form approximation is accurate enough.
    if qt < 0.0001:
        alpha = 1/qt
        beta = np.exp(digamma(alpha) - ft)
        return np.array([alpha, beta])

    # Otherwise optimize for alpha, then beta follows exactly.
    alpha = pois_alpha_param(qt)[0]
    beta = np.exp(digamma(alpha) - ft)
    return np.array([alpha, beta])


#export
def beta_solver(ft, qt, alpha=1., beta=1.):
    """Beta (alpha, beta) matching mean/variance targets ft, qt."""
    # Small qt is consistent with large alpha, beta - use the approximation.
    # Ref: West & Harrison, pg. 530
    alpha = (1 / qt) * (1 + np.exp(ft))
    beta = (1 / qt) * (1 + np.exp(-ft))
    if qt < 0.0025:
        return np.array([alpha, beta])

    # All else fails: root-find, seeded with the approximation above.
    solution = opt.root(partial(beta_approx, ft=ft, qt=qt),
                        x0=np.sqrt(np.array([alpha, beta])),
                        method='lm')
    return solution.x ** 2


#export
# generic conjugate-parameter dispatcher
def conj_params(ft, qt, alpha=1., beta=1., interp=False, solver_fn=None, interp_fn=None):
    """Return conjugate parameters for targets (ft, qt).

    Uses the pre-computed interpolator when requested and in range, otherwise
    falls back to the exact solver.
    """
    # The shape of ft/qt can vary a lot, so standardize to scalars here.
    ft, qt = np.ravel(ft)[0], np.ravel(qt)[0]

    if interp and interp_fn is not None:
        # The requested point may lie outside the interpolation range.
        ft_ok = interp_fn.ft_lb < ft < interp_fn.ft_ub
        qt_ok = interp_fn.qt_lb**2 < qt < interp_fn.qt_ub**2
        if ft_ok and qt_ok:
            return interp_fn(ft, qt)

    # All else fails: do the optimization.
    return solver_fn(ft, qt, alpha, beta)


#exporti
interp_beta, interp_gamma = load_interpolators()
assert interp_beta is not None
assert interp_gamma is not None

#export
# specific conjugate params functions
bern_conjugate_params = partial(conj_params, solver_fn=beta_solver, interp_fn=interp_beta, interp=True)
pois_conjugate_params = partial(conj_params, solver_fn=gamma_solver, interp_fn=interp_gamma, interp=True)
bin_conjugate_params = partial(conj_params, solver_fn=beta_solver, interp_fn=interp_beta, interp=True)

#hide
from nbdev.export import notebook2script
notebook2script()
nbs/06_conjugates.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .groovy
//       format_name: light
//       format_version: '1.5'
//     jupytext_version: 1.14.4
//   kernelspec:
//     display_name: Groovy
//     language: groovy
//     name: groovy
// ---

// ## Median Filter

// +
//load ImageJ
// %classpath config resolver imagej.public https://maven.imagej.net/content/groups/public
// %classpath add mvn net.imagej imagej 2.0.0-rc-67

//create ImageJ object
ij = new net.imagej.ImageJ()
// -

// This `Op`, provided a radius `r`, assigns every pixel in the output image with the median value of its neighborhood (of radius `r`) in the input. Let's see how this `Op` is called:

ij.op().help("filter.median")

// Let's take a look at each of the parameters:
//
// * `IterableInterval out`: the output image
// * `RandomAccessibleInterval in`: the input image
// * `Shape shape`: The [`Shape`](http://javadoc.scijava.org/ImgLib2/net/imglib2/algorithm/neighborhood/Shape.html) that defines the neighborhood that the `Op` will search to find the maximum value. Note that there are a lot of different shapes to choose from, and this is also where the radius is defined.
// * `OutOfBoundsFactory outOfBoundsFactory`: An **optional** parameter that tells the `Op` what to do along the edges of the image. For the purposes of this tutorial we will ignore this parameter.
//
// Let's get an image to apply this filter on. For the purposes of this tutorial we will only take the first channel of the image:

// +
input = ij.scifio().datasetIO().open("http://imagej.net/images/clown.jpg")

//grab the first slice (index 0) from the channel dimension (index 2)
imageToFilter = ij.op().run("hyperSliceView", input, 2, 0)

ij.notebook().display(imageToFilter)
// -

// Now we need to create a `Shape`. Let's create a [`HyperSphereShape`](http://javadoc.scijava.org/ImgLib2/net/imglib2/algorithm/neighborhood/HyperSphereShape.html) with radius `3`. We also need to create an output image. Once we have these we can run the filter:

// +
import net.imglib2.algorithm.neighborhood.HyperSphereShape

// a spherical neighborhood of radius 3 around each pixel
filterRadius = 3 as int
neighborhood = new HyperSphereShape(filterRadius)

// allocate an output image shaped like the input, then apply the median filter
output = ij.op().run("create.img", imageToFilter)
ij.op().filter().median(output, imageToFilter, neighborhood)

ij.notebook().display(output)
notebooks/1-Using-ImageJ/Ops/filter/median.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [py3] # language: python # name: Python [py3] # --- # # Publications markdown generator for academicpages # # Takes a set of bibtex of publications and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). # # The core python code is also in `pubsFromBibs.py`. # Run either from the `markdown_generator` folder after replacing updating the publist dictionary with: # * bib file names # * specific venue keys based on your bib file preferences # * any specific pre-text for specific files # * Collection Name (future feature) # # TODO: Make this work with other databases of citations, # TODO: Merge this with the existing TSV parsing solution from pybtex.database.input import bibtex import pybtex.database.input.bibtex from time import strptime import string import html import os import re #todo: incorporate different collection types rather than a catch all publications, requires other changes to template publist = { "proceeding": { "file" : "proceedings.bib", "venuekey": "booktitle", "venue-pretext": "In the proceedings of ", "collection" : {"name":"publications", "permalink":"/publication/"} }, "journal":{ "file": "pubs.bib", "venuekey" : "journal", "venue-pretext" : "", "collection" : {"name":"publications", "permalink":"/publication/"} } } # + @Article{DuncanKetzInati11, author = {<NAME> <NAME> Inati, <NAME> <NAME>}, title = {Evidence for area CA1 as a match/mismatch detector: A high-resolution fMRI study of the human hippocampus}, journal = {Hippocampus}, volume = {22}, number = {3}, publisher = {Wiley Subscription Services, Inc., A Wiley Company}, issn = {1098-1063}, url = {http://dx.doi.org/10.1002/hipo.20933}, doi = 
{10.1002/hipo.20933}, pages = {389--398}, keywords = {hippocampus, area CA1, mismatch/mismatch signal, comparator, human, fMRI}, year = {2012}, } % 20152133 @Article{TambiniKetzDavachi10, Author="<NAME>. and <NAME>. and <NAME>. ", Title="{{E}nhanced brain correlations during rest are related to memory for recent experiences}", Journal="Neuron", Year="2010", Volume="65", Pages="280--290", Month="Jan", Abstract={Long-term storage of episodic memories is hypothesized to result from the off-line transfer of information from the hippocampus to neocortex, allowing a hippocampal-independent cortical representation to emerge. However, off-line hippocampal-cortical interactions have not been demonstrated to be linked with long-term memory. Here, using functional magnetic resonance imaging, we examined if hippocampal-cortical BOLD correlations during rest following an associative encoding task are related to later associative memory performance. Our data show enhanced functional connectivity between the hippocampus and a portion of the lateral occipital complex (LO) during rest following a task with high subsequent memory compared to pretask baseline resting connectivity. This effect is not seen during rest following a task with poor subsequent memory. Furthermore, the magnitude of hippocampal-LO correlations during posttask rest predicts individual differences in later associative memory. These results demonstrate the importance of postexperience resting brain correlations for memory for recent experiences.} } % 19744565 @Article{MarsolekDeasonKetz10, Author="<NAME>. and <NAME>. and <NAME>. and <NAME>. and <NAME>. and <NAME>. and <NAME>. and <NAME>. and <NAME>. 
", Title="{{I}dentifying objects impairs knowledge of other objects: a relearning explanation for the neural repetition effect}", Journal="Neuroimage", Year="2010", Volume="49", Pages="1919--1932", Month="Jan", Abstract={Different items in long-term knowledge are stored in the neocortex as partially overlapping representations that can be altered slightly with usage. This encoding scheme affords well-documented benefits, but potential costs have not been well explored. Here we use functional magnetic resonance imaging (fMRI), neurocomputational modeling, and electrophysiological measures to show that strengthening some visual object representations not only enhances the subsequent ability to identify those (repeated) objects-an effect long known as repetition priming-but also impairs the ability to identify other (non-repeated) objects-a new effect labeled antipriming. As a result, the non-repeated objects elicit increased neural activity likely for the purpose of reestablishing their previously weakened representations. These results suggest a novel reevaluation of the ubiquitously observed repetition effect on neural activity, and they indicate that maintenance relearning may be a crucial aspect of preserving overlapping neural representations of visual objects in long-term memory.} } @ARTICLE{OReillyBhattacharyyaHowardEtAl11, author = {O'Reilly, <NAME>. and <NAME> Howard, <NAME>. 
and <NAME>}, title = {Complementary Learning Systems}, journal = {Cognitive Science}, publisher = {Blackwell Publishing Ltd}, issn = {1551-6709}, url = {http://dx.doi.org/10.1111/j.1551-6709.2011.01214.x}, doi = {10.1111/j.1551-6709.2011.01214.x}, keywords = {Hippocampus, Neocortex, Learning, Memory, Consolidation, Neural network models}, year = {2011} } @ARTICLE{KetzMorkondaOReilly13, author = {Ketz, , <NAME> Morkonda, , <NAME> O'Reilly, , <NAME>.}, journal = {PLoS Comput Biol}, publisher = {Public Library of Science}, title = {Theta Coordinated Error-Driven Learning in the Hippocampus}, year = {2013}, month = {06}, volume = {9}, url = {http://dx.doi.org/10.1371%2Fjournal.pcbi.1003067}, abstract = {<title>Author Summary</title><p>We present a novel hippocampal model based on the oscillatory dynamics of the theta rhythm, which enables the network to learn much more efficiently than the Hebbian form of learning that is widely assumed in most models. Specifically, two pathways, Tri-Synaptic and Mono-Synaptic, alternate in strength during theta oscillations to provide an alternation of encoding vs. recall bias in area CA1. The difference between these two states and the unaltered cortical input representation creates an error signal, which can drive powerful error-driven learning in both Tri-Synaptic and Mono-Synaptic pathways. Furthermore, the presence of these alternating modes of network behavior (encoding vs. recall) provide an intriguing target for future work examining how prefrontal control mechanisms can manipulate the behavior of the hippocampus.</p>}, number = {6}, doi = {10.1371/journal.pcbi.1003067} } @ARTICLE{DepueKetzMollisonEtAl13, author = {Depue, <NAME> and Ketz, Nick and Mollison, <NAME>. and Nyhus, Erika and Banich, <NAME>. and <NAME>}, title = {ERPs and neural oscillations during volitional suppression of memory retrieval}, journal = {J. 
Cognitive Neuroscience}, issue_date = {October 2013}, volume = {25}, number = {10}, month = oct, year = {2013}, issn = {0898-929X}, pages = {1624--1633}, numpages = {10}, url = {http://dx.doi.org/10.1162/jocn_a_00418}, doi = {10.1162/jocn_a_00418}, acmid = {2518118}, publisher = {MIT Press}, address = {Cambridge, MA, USA}, } @ARTICLE{KetzOReillyCurran13, title = "Classification aided analysis of oscillatory signatures in controlled retrieval ", journal = "NeuroImage ", volume = "85", number = "2", pages = "749-60", year = "2014", note = "", issn = "1053-8119", doi = "http://dx.doi.org/10.1016/j.neuroimage.2013.06.077", url = "http://www.sciencedirect.com/science/article/pii/S1053811913007209", author = "<NAME> and <NAME> and <NAME>" } @ARTICLE{KetzJensenOReilly14, title = "Thalamic Pathways Underlying PFC-MTL Oscillatory Interactions ", journal = "Trends in Neurosciences ", volume = "38", number = "1", pages = "3-12", year = "2015", note = "", author = "<NAME> and <NAME> and <NAME>" } @article{KetzJonesBryantEtAl18, title={Closed-Loop Slow-Wave tACS Improves Sleep-Dependent Long-Term Memory Generalization by Modulating Endogenous Oscillations}, author={Ketz, <NAME> Jones, <NAME> and Bryant, <NAME> and Clark, <NAME> and <NAME>}, journal={Journal of Neuroscience}, volume={38}, number={33}, pages={7314--7326}, year={2018}, publisher={Soc Neuroscience} } @article{JonesChoeBryantEtAl18, title={Dose-Dependent Effects of Closed-Loop tACS Delivered During Slow-Wave Oscillations on Memory Consolidation}, author={Jones, <NAME> and Choe, Jaehoon and Bryant, <NAME> and Robinson, <NAME> and Ketz, <NAME> and Skorheim, <NAME> and Combs, Angela and Lamphere, <NAME> and <NAME> and <NAME> and others}, journal={Frontiers in Neuroscience}, volume={12}, pages={867}, year={2018}, publisher={Frontiers} } @article{lerner2019transcranial, title={transcranial Current stimulation During sleep Facilitates Insight into temporal Rules, but does not Consolidate Memories of Individual sequential 
experiences}, author={<NAME> Ketz, <NAME> and Jones, <NAME> and Bryant, <NAME> and <NAME> and Skorheim, <NAME> and Hartholt, Arno and Rizzo, <NAME> and Gluck, <NAME> and Clark, <NAME> and others}, journal={Scientific reports}, volume={9}, number={1}, pages={1516}, year={2019}, publisher={Nature Publishing Group} } @article{ketz2019using, title={Using World Models for Pseudo-Rehearsal in Continual Learning}, author={<NAME> and Kolouri, Soheil and <NAME>}, journal={arXiv preprint arXiv:1903.02647}, year={2019} } @article{kolouri2019attention, title={Attention-Based Structural-Plasticity}, author={<NAME> Ketz, <NAME> and Krichmar, Jeffrey and <NAME>}, journal={arXiv preprint arXiv:1903.06070}, year={2019} } @article{ladosz2019deep, title={Deep Reinforcement Learning with Modulated Hebbian plus Q Network Architecture}, author={<NAME> and Ben-Iwhiwhu, Eseoghene and <NAME> and Ketz, Nicholas and Kolouri, Soheil and Krichmar, <NAME> and <NAME> and Soltoggio, Andrea}, journal={arXiv preprint arXiv:1909.09902}, year={2019} } @article{pilly2019one, title={One-shot tagging during wake and cueing during sleep with spatiotemporal patterns of transcranial electrical stimulation can boost long-term metamemory of individual episodes in humans}, author={<NAME> and Skorheim, <NAME> and Hubbard, <NAME> and Ketz, <NAME> and Roach, <NAME> and Jones, <NAME> and <NAME> and Bryant, <NAME> and <NAME> and <NAME> and others}, journal={bioRxiv}, pages={672378}, year={2019}, publisher={Cold Spring Harbor Laboratory} } # + html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;" } def html_escape(text): """Produce entities within text.""" return "".join(html_escape_table.get(c,c) for c in text) # - for pubsource in publist: parser = bibtex.Parser() bibdata = parser.parse_file(publist[pubsource]["file"]) #loop through the individual references in a given bibtex file for bib_id in bibdata.entries: #reset default date pub_year = "1900" pub_month = "01" pub_day = "01" b = 
bibdata.entries[bib_id].fields try: pub_year = f'{b["year"]}' #todo: this hack for month and day needs some cleanup if "month" in b.keys(): if(len(b["month"])<3): pub_month = "0"+b["month"] pub_month = pub_month[-2:] elif(b["month"] not in range(12)): tmnth = strptime(b["month"][:3],'%b').tm_mon pub_month = "{:02d}".format(tmnth) else: pub_month = str(b["month"]) if "day" in b.keys(): pub_day = str(b["day"]) pub_date = pub_year+"-"+pub_month+"-"+pub_day #strip out {} as needed (some bibtex entries that maintain formatting) clean_title = b["title"].replace("{", "").replace("}","").replace("\\","").replace(" ","-") url_slug = re.sub("\\[.*\\]|[^a-zA-Z0-9_-]", "", clean_title) url_slug = url_slug.replace("--","-") md_filename = (str(pub_date) + "-" + url_slug + ".md").replace("--","-") html_filename = (str(pub_date) + "-" + url_slug).replace("--","-") #Build Citation from text citation = "" #citation authors - todo - add highlighting for primary author? for author in bibdata.entries[bib_id].persons["author"]: citation = citation+" "+author.first_names[0]+" "+author.last_names[0]+", " #citation title citation = citation + "\"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + ".\"" #add venue logic depending on citation type venue = publist[pubsource]["venue-pretext"]+b[publist[pubsource]["venuekey"]].replace("{", "").replace("}","").replace("\\","") citation = citation + " " + html_escape(venue) citation = citation + ", " + pub_year + "." 
## YAML variables md = "---\ntitle: \"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + '"\n' md += """collection: """ + publist[pubsource]["collection"]["name"] md += """\npermalink: """ + publist[pubsource]["collection"]["permalink"] + html_filename note = False if "note" in b.keys(): if len(str(b["note"])) > 5: md += "\nexcerpt: '" + html_escape(b["note"]) + "'" note = True md += "\ndate: " + str(pub_date) md += "\nvenue: '" + html_escape(venue) + "'" url = False if "url" in b.keys(): if len(str(b["url"])) > 5: md += "\npaperurl: '" + b["url"] + "'" url = True md += "\ncitation: '" + html_escape(citation) + "'" md += "\n---" ## Markdown description for individual page if note: md += "\n" + html_escape(b["note"]) + "\n" if url: md += "\n[Access paper here](" + b["url"] + "){:target=\"_blank\"}\n" else: md += "\nUse [Google Scholar](https://scholar.google.com/scholar?q="+html.escape(clean_title.replace("-","+"))+"){:target=\"_blank\"} for full citation" md_filename = os.path.basename(md_filename) with open("../_publications/" + md_filename, 'w') as f: f.write(md) print(f'SUCESSFULLY PARSED {bib_id}: \"', b["title"][:60],"..."*(len(b['title'])>60),"\"") # field may not exist for a reference except KeyError as e: print(f'WARNING Missing Expected Field {e} from entry {bib_id}: \"', b["title"][:30],"..."*(len(b['title'])>30),"\"") continue
markdown_generator/.ipynb_checkpoints/PubsFromBib-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tf1
#     language: python
#     name: tf1
# ---

# # Image Classification
#
# 1. Examine and understand data
# 2. Build an input pipeline
# 3. Build the model
# 4. Train the model
# 5. Test the model
# 6. Improve the model and repeat the process

# ### Import TensorFlow and other libraries

# +
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf

from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.applications import ResNet50

import pathlib
# -

# ### Load and explore the dataset

# Training images live in one sub-directory per class (bargraph/, doctable/).
train_data_dir = "../dataset/image_classifier/tf_ssldoctable/train"
train_data_dir = pathlib.Path(train_data_dir)

# Report how many PNGs are available for training.
if not train_data_dir.exists():
    print("Image directory not found !")
else:
    image_count = len(list(train_data_dir.glob('*/*.png')))
    print(image_count)

# Peek at one example per class.
bargraph = list(train_data_dir.glob('bargraph/*'))
PIL.Image.open(str(bargraph[0]))

doctable = list(train_data_dir.glob('doctable/*'))
PIL.Image.open(str(doctable[0]))

test_data_dir = "../dataset/image_classifier/tf_ssldoctable/test"
test_data_dir = pathlib.Path(test_data_dir)

if not test_data_dir.exists():
    print("Image directory not found !")
else:
    image_count = len(list(test_data_dir.glob('*/*.png')))
    print(image_count)

bargraph = list(test_data_dir.glob('bargraph/*'))
PIL.Image.open(str(bargraph[0]))

doctable = list(test_data_dir.glob('doctable/*'))
PIL.Image.open(str(doctable[0]))

# ### Load data using a Keras utility
#
# Train and Validation Split: 80 % and 20%

batch_size = 8
img_height = 224
img_width = 224

train_dataset = tf.keras.utils.image_dataset_from_directory(
    train_data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

val_dataset = tf.keras.utils.image_dataset_from_directory(
    train_data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

test_dataset = tf.keras.utils.image_dataset_from_directory(
    test_data_dir,
    shuffle=True,
    image_size=(img_height, img_width),
    batch_size=batch_size)

# Find the class names in the **class_names** attribute on these datasets

class_names = train_dataset.class_names
print(class_names)

# ### Visualize data
# * Visualize first batch of size 8
# * Print shape of batch size - **(Batch_size, Height, Width, RGB)**

# +
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
    for idx in range(8):
        ax = plt.subplot(2, 4, idx + 1)
        plt.imshow(images[idx].numpy().astype("uint8"))
        plt.title(class_names[labels[idx]])
        plt.axis("off")
# -

for image_batch, labels_batch in train_dataset:
    print(image_batch.shape)
    print(labels_batch.shape)
    break

# ### Configure dataset for performance
# * Two important methods to use when loading data
# * Use buffered prefetching to yield data from disk without having I/O become blocking
#
#
# * **Dataset.cache** - keeps the images in memory after they're loaded off disk during the first epoch. This will ensure the dataset does not become a bottleneck while training your model. If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache.
# * **Dataset.prefetch** - overlaps data preprocessing and model execution while training.
# +
AUTOTUNE = tf.data.AUTOTUNE

# train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
train_dataset = train_dataset.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_dataset = val_dataset.cache().prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
# -

# ### Data augmentation and standardize data
# * RGB channel values arrive in the [0, 255] range; tf.keras.layers.Rescaling
#   maps them into [0, 1]
# * With a small image dataset it is good practice to add sample diversity via
#   random but realistic transformations (horizontal flips, rotations), which
#   exposes the model to more aspects of the data and reduces overfitting
# * NOTE(review): the commented-out Rescaling(1./127.5, offset=-1) would map to
#   [-1, 1] (the MobileNetV2 convention); this notebook uses [0, 1] throughout —
#   confirm that is the intended preprocessing for the ResNet50 weights

# +
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.Resizing(img_height, img_width),
    tf.keras.layers.Rescaling(1./255),
    #tf.keras.layers.Rescaling(1./127.5, offset=-1),
    tf.keras.layers.RandomFlip('horizontal'),
    tf.keras.layers.RandomRotation(0.2),
])
# -

# Preview the augmentation pipeline on a single training image
for image, _ in train_dataset.take(1):
    plt.figure(figsize=(10, 10))
    first_image = image[1]
    for i in range(8):
        ax = plt.subplot(2, 4, i + 1)
        augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
        #plt.imshow(augmented_image[0] / 255)
        plt.imshow(augmented_image[0])
        plt.axis('off')

# # \*\*\*\*\* Feature Extractor \*\*\*\*\*
# ## Feature extraction
# * Freeze the convolutional base and use it as a feature extractor
# * Add a classifier on top of it and train only the top-level classifier
#
# ### Create base model from the pre-trained convnets
# * Base model: ResNet50, pre-trained on ImageNet (1.4M images, 1000 classes)
# * include_top=False drops the ImageNet classification head so the network can
#   be reused as a feature extractor for the bargraph/doctable dataset
# * weights=None would initialize randomly (i.e. no transfer learning)

# Full ResNet50 (with classification head) — instantiated for inspection only
fc_resnet_50 = ResNet50(weights='imagenet', input_shape=(img_height, img_width, 3))
print(fc_resnet_50.summary())
print(fc_resnet_50.input_shape)

# Headless ResNet50 used as the feature extractor
resnet_50 = ResNet50(weights='imagenet',
                     input_shape=(img_height, img_width, 3),
                     include_top=False,)
print(resnet_50.summary())

# The feature extractor converts each 224x224x3 image into a 7x7x2048 block of
# features; check what it does to an example batch of images:
image_batch, label_batch = next(iter(train_dataset))
feature_batch = resnet_50(image_batch)
print(feature_batch.shape)

# #### Freeze the convolutional base
# * Freeze before compile/train: setting resnet_50.trainable = False keeps the
#   original ImageNet weights constant
#
# ### Important note about BatchNormalization layers
# * With layer.trainable = False, BatchNormalization runs in inference mode and
#   does not update its mean/variance statistics
# * When unfreezing a model containing BatchNormalization layers for
#   fine-tuning, keep those layers in inference mode by passing training=False
#   when calling the base model; otherwise the updates applied to the
#   non-trainable weights destroy what the model has learned
# * A sigmoid activation is used because this is a two-class problem
#   (units = 1, activation = "sigmoid"; softmax/num_classes would be used for >2)
#
# Reference:
https://keras.io/examples/vision/image_classification_from_scratch/ # # # if num_classes == 2: # # activation = "sigmoid" # # units = 1 # # else: # # activation = "softmax" # # units = num_classes # ### Important Note: # # Reference: https://stackoverflow.com/questions/47312219/what-is-the-definition-of-a-non-trainable-parameter # # In keras, non-trainable parameters (as shown in model.summary()) means the number of weights that are not updated during training with backpropagation. # # There are mainly two types of non-trainable weights: # # * The ones that you have chosen to keep constant when training. This means that keras won't update these weights during training at all. # * The ones that work like statistics in BatchNormalization layers. They're updated with mean and variance, but they're not "trained with backpropagation". # # Weights are the values inside the network that perform the operations and can be adjusted to result in what we want. The backpropagation algorithm changes the weights towards a lower error at the end. # # **By default, all weights in a keras model are trainable.** # # When you create layers, internally it creates its own weights and they're trainable. (The backpropagation algorithm will update these weights) # # When you make them untrainable, the algorithm will not update these weights anymore. This is useful, for instance, when you want a convolutional layer with a specific filter, like a Sobel filter, for instance. You don't want the training to change this operation, so these weights/filters should be kept constant. # # There is a lot of other reasons why you might want to make weights untrainable. # # Changing parameters: # # For deciding whether weights are trainable or not, you take layers from the model and set trainable: # # **model.get_layer(layerName).trainable = False #or True** # # This must be done before compilation. 
# +
# Freeze the convolutional base so the ImageNet weights stay constant
resnet_50.trainable = False

# Set the input layer
inputs = tf.keras.Input(shape=(img_height, img_width, 3))
x = data_augmentation(inputs)

# Feature extractor; training=False keeps BatchNormalization in inference mode
x = resnet_50(x, training=False)

# Set the pooling layer and Dropout layer
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)

# Final single-unit head with sigmoid activation (binary classification)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)

# Create the new model object
model = tf.keras.Model(inputs=inputs, outputs=outputs, name="tf_uos_model")
# -

# ### Compile the model
# * The Dense head already applies a sigmoid, so the model outputs
#   probabilities, not logits

# Compile it
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
              # BUGFIX: was from_logits=True, but the sigmoid head emits
              # probabilities, so BinaryCrossentropy must use from_logits=False
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
              metrics=['accuracy'])
print(model.summary())

# ### Parameters are divided between two tf.Variable objects, the weights and biases.
print(len(model.trainable_variables))

# ## Train the model
# * Training on CPU
initial_epochs = 10

loss0, accuracy0 = model.evaluate(val_dataset)
print("initial loss: {:.2f}".format(loss0))
print("initial accuracy: {:.2f}".format(accuracy0))

history = model.fit(train_dataset, epochs=initial_epochs, validation_data=val_dataset)

# ### Learning curves
# * Training/validation accuracy and loss with ResNet50 as a fixed feature extractor

# +
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

print("acc: ", acc)
print("\n val_acc: ", val_acc)
print("\n loss: ", loss)
print("\n val_loss: ", val_loss)
# +
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# -

# ### Evaluation and prediction
# * Verify the performance of the model on new data using the test set
loss, accuracy = model.evaluate(test_dataset)
print('Test accuracy :', accuracy)

# +
# Retrieve a batch of images from the test set
image_batch, label_batch = test_dataset.as_numpy_iterator().next()
predictions = model.predict_on_batch(image_batch).flatten()

# BUGFIX: the model already outputs sigmoid probabilities. The previous code
# applied tf.nn.sigmoid a second time, mapping every value into (0.5, 0.73),
# so the 0.5 threshold classified everything as class 1. Threshold directly.
predictions = tf.where(predictions < 0.5, 0, 1)
print('Predictions:\n', predictions.numpy())
print('Labels:\n', label_batch)
# -

plt.figure(figsize=(10, 10))
for i in range(8):
    ax = plt.subplot(2, 4, i + 1)
    plt.imshow(image_batch[i].astype("uint8"))
    plt.title(class_names[predictions[i]])
    plt.axis("off")

# # \*\*\*\*\* Fine-tuning \*\*\*\*\*
# ## Fine-tuning
# * In the feature-extraction experiment only a few layers on top of the frozen
#   ResNet50 base were trained; the pre-trained weights were never updated
# * Fine-tuning also trains the weights of the TOP layers of the pre-trained
#   model alongside the classifier, adapting generic feature maps to this dataset
# * Only a small number of top layers should be unfrozen: the first layers learn
#   simple generic features, higher layers are more dataset-specific

# ### Un-freeze the top layers of the model

# Initialize the pretrained (headless) model for fine-tuning
ft_resnet_50 = ResNet50(weights='imagenet',
                        input_shape=(img_height, img_width, 3),
                        include_top=False,)
print(ft_resnet_50.summary())

# Converts each 224x224x3 image into a 7x7x2048 block of features; check what
# it does to an example batch of images:
image_batch, label_batch = next(iter(train_dataset))
feature_batch = ft_resnet_50(image_batch)
print(feature_batch.shape)

# +
ft_resnet_50.trainable = True

# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(ft_resnet_50.layers))

# +
# Set the input layer
ft_inputs = tf.keras.Input(shape=(img_height, img_width, 3))
x = data_augmentation(ft_inputs)

# Set the feature extractor layer
# NOTE(review): the base model is called without training=False; with
# BatchNormalization layers unfrozen this lets their statistics update during
# fine-tuning — confirm this is intended
x = ft_resnet_50(x)

# Set the pooling layer and Dropout layer
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)

# Set the final layer with sigmoid activation function
ft_outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)

# Create the new model object
ft_model = tf.keras.Model(inputs=ft_inputs, outputs=ft_outputs, name="tf_ft_uos_model")

# +
# Fine-tune from this layer onwards
fine_tune_at = 100

# Freeze all the layers before the `fine_tune_at` layer
for layer in ft_resnet_50.layers[:fine_tune_at]:
    layer.trainable = False
# -

# ### Compile the model
# * When readapting pretrained weights it is important to use a lower learning
#   rate at this stage; otherwise the model could overfit very quickly

base_learning_rate = 0.0001
ft_model.compile(
    # BUGFIX: was from_logits=True; the sigmoid head outputs probabilities,
    # not logits, so the loss must use from_logits=False
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
    optimizer=tf.keras.optimizers.RMSprop(learning_rate=base_learning_rate/10),
    metrics=['accuracy'])
print(ft_model.summary())

# #### Trainable variables
print(len(ft_model.trainable_variables))

# ### Continue training or fine-tuning the model
#
# * If you trained to convergence earlier, this step will improve your accuracy by a few percentage points.
# +
import os

checkpoint_path = "../uos_imgclsfr_training/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Callback that periodically saves the model's weights during fine-tuning
# (save_freq counts in batches here, following the TF checkpointing tutorial)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1,
                                                 save_freq=125*batch_size)

# Snapshot the initial (epoch 0) weights before training starts
ft_model.save_weights(checkpoint_path.format(epoch=0))
# +
initial_epochs = 10
fine_tune_epochs = 10
total_epochs = initial_epochs + fine_tune_epochs

history_fine = ft_model.fit(train_dataset,
                            epochs=total_epochs,
                            initial_epoch=0,  #initial_epochs or history.epoch[-1],
                            validation_data=val_dataset,
                            callbacks=[cp_callback])
# -

# ### Fine-tune Learning curves
#
# * Training/validation accuracy and loss while fine-tuning the last few layers
#   of the ResNet50 base together with the classifier on top
# * The new training set is relatively small
# * After fine tuning the model nearly reaches 98% accuracy on the validation set

# +
acc = history_fine.history['accuracy']
val_acc = history_fine.history['val_accuracy']
loss = history_fine.history['loss']
val_loss = history_fine.history['val_loss']

print("acc: ", acc)
print("\n val_acc: ", val_acc)
print("\n loss: ", loss)
print("\n val_loss: ", val_loss)
# +
plt.figure(figsize=(8, 8))

plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1, initial_epochs-1], plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1, initial_epochs-1], plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# -

# ### Evaluation and prediction
#
# * Verify the performance of the model on new data using test set
# *
# Use fine-tuned model to predict if image is a bargraph or doctable

# BUGFIX: this cell previously evaluated the frozen feature-extraction `model`;
# the fine-tuned `ft_model` is what this section is meant to verify.
loss, accuracy = ft_model.evaluate(test_dataset)
print('Test accuracy :', accuracy)

# +
# Retrieve a batch of images from the test set
image_batch, label_batch = test_dataset.as_numpy_iterator().next()
predictions = ft_model.predict_on_batch(image_batch).flatten()

# BUGFIX: the model already outputs sigmoid probabilities; applying
# tf.nn.sigmoid again pushed every value above 0.5, so the threshold always
# predicted class 1. Threshold the probabilities directly.
predictions = tf.where(predictions < 0.5, 0, 1)

print('Predictions:\n', predictions.numpy())
print('Labels:\n', label_batch)
# -

plt.figure(figsize=(10, 10))
for i in range(8):
    ax = plt.subplot(2, 4, i + 1)
    plt.imshow(image_batch[i].astype("uint8"))
    plt.title(class_names[predictions[i]])
    plt.axis("off")

# ## Summary
#
# * **Using a pre-trained model for feature extraction:** When working with a small dataset, it is a common practice to take advantage of features learned by a model trained on a larger dataset in the same domain. This is done by instantiating the pre-trained model and adding a fully-connected classifier on top. The pre-trained model is "frozen" and only the weights of the classifier get updated during training. In this case, the convolutional base extracted all the features associated with each image and you just trained a classifier that determines the image class given that set of extracted features.
#
# * **Fine-tuning a pre-trained model:** To further improve performance, one might want to repurpose the top-level layers of the pre-trained models to the new dataset via fine-tuning. In this case, you tuned your weights such that your model learned high-level features specific to the dataset. This technique is usually recommended when the training dataset is large and very similar to the original dataset that the pre-trained model was trained on.
# Reference: # # * https://www.tensorflow.org/tutorials/images/classification # * https://www.tensorflow.org/tutorials/images/transfer_learning # * https://www.earthdatascience.org/courses/intro-to-earth-data-science/file-formats/use-text-files/format-text-with-markdown-jupyter-notebook/ # * https://github.com/miladfa7/Image-Classification-Transfer-Learning/blob/master/ResNet_image_classification.ipynb # * https://towardsdatascience.com/transfer-learning-for-image-classification-using-tensorflow-71c359b56673
TfKeras/src/TF_ImgClassifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Titanic survival prediction notebook # A notebook to create predictions for the Titanic Kaggle challange: https://www.kaggle.com/c/titanic/overview # # ## Content # This notebook contains: # 1. Basic data visualization # 2. basic data cleaning # 3. model training and selection # # ## Next steps # 1. More feature engineering of for example the "Cabin-variable and the "Name"-variable to see if those variables contain extra information # 2. Investigate the missclassified passangers to see if they contain any pattern (with a data set this small it's easy to go through all of the missclassified passangers in the training set) # + #================================================== # Import libraries & set seed #================================================== import os import pandas as pd import numpy as np import matplotlib # %matplotlib inline import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter import seaborn as sns import time # machine learning from sklearn.linear_model import LogisticRegression, LogisticRegressionCV from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier # Other sklearn funcitonality from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve from sklearn.metrics import confusion_matrix from sklearn.metrics import f1_score from sklearn.decomposition import PCA from sklearn.model_selection import cross_val_score, 
GridSearchCV, learning_curve

np.random.seed(342)
# -

# ## Set metrics
# * SCORING_METRIC: which metric the models are fitted to optimize
# * N_JOBS: number of CPU cores used when fitting the models (-1 = all cores)

SCORING_METRIC = 'accuracy'
N_JOBS = -1

# ## Load and investigate data

# +
#==============================================================================
# Load Training and test data
#==============================================================================

# Load training dataset
base_path = os.path.dirname(os.getcwd())
file_path = os.path.join(base_path, "data")
file_name = "train.csv"
data_destination_train = os.path.join(file_path, file_name)  # Full file path + name
raw_data_train = pd.read_csv(data_destination_train, keep_default_na=False, na_values='')

# Load test dataset - the same transformations made to the training dataset are
# applied in parallel, in order to be able to run the model on it at the end
file_name = "test.csv"
data_destination_test = os.path.join(file_path, file_name)  # Full file path + name
raw_data_test = pd.read_csv(data_destination_test, keep_default_na=False, na_values='')

# Check if it worked
raw_data_train.head()
# -

raw_data_train.info()

# +
# Investigate data
# We see that Age has missing values
print(raw_data_train.shape)
raw_data_train.describe(include="all")
# -

cont_variables = ["Age", "Fare"]
categorical_variables = ["Pclass", "Sex", "SibSp", "Parch", "Embarked"]
dependent_variable = "Survived"

# ## Plotting data
# * Basic visualisation of the independent and dependent data

# +
# Scatter plot
#plt.scatter(raw_data_train["Age"],raw_data_train["Survived"])

def plot_dependent_variable_split_continous(data_set_in, dependent_variable, independent_variable):
    """Scatter plot plus survivor/non-survivor density plots for a continuous variable.

    BUGFIX: previously ignored data_set_in and always plotted the global
    raw_data_train, so calls on other frames (e.g. the misclassified subset)
    silently plotted the wrong data.
    """
    # create figure
    fig = plt.figure(figsize=(16, 6))
    ax = fig.add_subplot(121)
    ax.scatter(data_set_in[independent_variable], data_set_in[dependent_variable])

    ax = fig.add_subplot(122)
    # Survived
    sns.distplot(data_set_in.loc[data_set_in[dependent_variable] == 1][independent_variable],
                 hist=False, kde=True,
                 kde_kws={'shade': True, 'linewidth': 3},
                 label="Survived")
    # Not Survived
    sns.distplot(data_set_in.loc[data_set_in[dependent_variable] == 0][independent_variable],
                 hist=False, kde=True,
                 kde_kws={'shade': True, 'linewidth': 3},
                 label="Died")

    # Set the title
    plt.title('Density Plot with dependent variable split with independent variable ' + independent_variable)
    # show the plot
    plt.show()
# -

def plot_dependent_variable_split_categorical(data_set_in, dependent_variable, independent_variable):
    """Relative-frequency and frequency bar plots of a categorical variable,
    split by the dependent variable (green = 1, red = 0)."""
    # create figure
    fig = plt.figure(figsize=(16, 6))
    ax = fig.add_subplot(122)

    true_dependent = data_set_in.loc[data_set_in[dependent_variable] == 1]
    false_dependent = data_set_in.loc[data_set_in[dependent_variable] == 0]

    true_relative_frequency = true_dependent[independent_variable].astype("category").value_counts(sort=False) / len(true_dependent)
    false_relative_frequency = false_dependent[independent_variable].astype("category").value_counts(sort=False) / len(false_dependent)

    x = np.asarray(list(range(1, len(true_relative_frequency) * 1 + 1, 1)))
    try:
        ax.bar(x - 0.1, true_relative_frequency, width=0.2, color="g")
        ax.bar(x + 0.1, false_relative_frequency, width=0.2, color="r")
        x_labels = true_dependent[independent_variable].value_counts(sort=False).index
        plt.xticks(x, x_labels)
        # Set the title
        plt.title('Relative frequency Plot with dependent variable split with independent variable ' + independent_variable)

        # add the frequency plot
        ax = fig.add_subplot(121)
        ax.bar(x - 0.1, true_dependent[independent_variable].astype("category").value_counts(sort=False), width=0.2, color="g")
        ax.bar(x + 0.1, false_dependent[independent_variable].astype("category").value_counts(sort=False), width=0.2, color="r")
        plt.xticks(x, x_labels)
        plt.title('frequency Plot with dependent variable split with independent variable ' + independent_variable)
    except Exception:
        # BUGFIX: previously printed the global loop variable `indep_variable`
        # instead of this function's own parameter (worked only by accident);
        # also narrowed the bare `except:` to `except Exception:`
        print("Error plotting variable: " + independent_variable)
    plt.show()

for indep_variable in categorical_variables:
    plot_dependent_variable_split_categorical(raw_data_train, dependent_variable, indep_variable)
    plt.show()

# +
# The variables Age and Fare have some outlier values and a non-linear
# relationship with survival, so they should be bucketed
bin_array_age = [0.0, 5, 10, 15, 20, 50, 60, 1000]
bin_array_fare = [0.0, 10, 20, 40, 100, 10000]

for indep_variable in cont_variables:
    plot_dependent_variable_split_continous(raw_data_train, dependent_variable, indep_variable)
    plt.show()
# -

# ## Clean data by dropping/replacing missing data
# * Cleaning functions take both dataframes as input so the train and test sets
#   are transformed identically

def create_family_dummy_variable(df_in_train, df_in_test):
    # Placeholder for a variable counting the number of family members
    # (not implemented yet - returns the inputs unchanged)
    return df_in_train, df_in_test

# Fix missing values in continuous categories
def fix_missing_age(df_in_train, df_in_test):
    # Replace missing Age values with the mean over train + test
    # (pd.concat replaces the deprecated DataFrame.append, removed in pandas 2.0)
    mean_age = pd.concat([df_in_train, df_in_test])["Age"].mean()
    df_in_train.loc[df_in_train["Age"].isnull(), "Age"] = mean_age
    df_in_test.loc[df_in_test["Age"].isnull(), "Age"] = mean_age
    return df_in_train, df_in_test

# Fix missing values in continuous categories
def fix_missing_fare(df_in_train, df_in_test):
    # Replace missing Fare values with the mean over train + test
    mean_fare = pd.concat([df_in_train, df_in_test])["Fare"].mean()
    df_in_train.loc[df_in_train["Fare"].isnull(), "Fare"] = mean_fare
    df_in_test.loc[df_in_test["Fare"].isnull(), "Fare"] = mean_fare
    return df_in_train, df_in_test

def fix_missing_embarkement(df_in_train, df_in_test):
    # Replace missing Embarked values with the most frequent port over train + test
    most_frequent_embarkement = pd.concat([df_in_train, df_in_test])["Embarked"].value_counts().index[0]
    df_in_train.loc[df_in_train["Embarked"].isnull(), "Embarked"] = most_frequent_embarkement
    df_in_test.loc[df_in_test["Embarked"].isnull(), "Embarked"] = most_frequent_embarkement
    return df_in_train, df_in_test

# Function that drops the unnecessary variables
def drop_variables(df_in_train, df_in_test):
    variables_to_drop = ["PassengerId", "Ticket", "Cabin", "Name"]
    df_in_train = df_in_train.drop(variables_to_drop, axis=1)
    df_in_test = df_in_test.drop(variables_to_drop, axis=1)
    return df_in_train, df_in_test

# Function that bins a continuous variable and drops the original column
def bin_continous_variable(df_in_train, df_in_test, variable_to_bin, bin_array):
    df_in_train = df_in_train.copy()
    df_in_test = df_in_test.copy()

    train_bins = pd.cut(df_in_train[variable_to_bin], bins=bin_array)
    test_bins = pd.cut(df_in_test[variable_to_bin], bins=bin_array)

    # add the binned variable to the dataframe
    df_in_train[variable_to_bin + "_bins"] = train_bins
    df_in_test[variable_to_bin + "_bins"] = test_bins

    # drop the original variable
    df_in_train = df_in_train.drop(variable_to_bin, axis=1)
    df_in_test = df_in_test.drop(variable_to_bin, axis=1)
    return df_in_train, df_in_test

# We limit Sib and Parch to 0, 1, 2+
def limit_categorical_variable(df_in_train, df_in_test, categorical_variable, limit=2):
    df_in_train.loc[df_in_train[categorical_variable] >= limit, categorical_variable] = limit
    df_in_test.loc[df_in_test[categorical_variable] >= limit, categorical_variable] = limit
    return df_in_train, df_in_test

# Function that returns dummy variables
def get_dummies(df_in_train, df_in_test, categorical_variables):
    # NOTE(review): dummies are created per-frame; if a category level is
    # missing from one frame the resulting columns can differ - verify on new data
    for variable in categorical_variables:
        df_in_train[variable] = df_in_train[variable].astype('category')
        df_in_test[variable] = df_in_test[variable].astype('category')

    df_dummies_train = df_in_train[categorical_variables]
    df_dummies_test = df_in_test[categorical_variables]

    # create dummies
    df_dummies_train = pd.get_dummies(df_dummies_train, drop_first=True)
    df_dummies_test = pd.get_dummies(df_dummies_test, drop_first=True)

    # add the dummies to the datasets
    df_in_train = df_in_train.merge(right=df_dummies_train, left_index=True, right_index=True, how='inner')
    df_in_test = df_in_test.merge(right=df_dummies_test, left_index=True, right_index=True, how='inner')

    # drop the categorical variables
    df_in_train = df_in_train.drop(categorical_variables, axis=1)
    df_in_test = df_in_test.drop(categorical_variables, axis=1)
    return df_in_train, df_in_test

# +
# Apply all of the cleaning and transformation functions on the train and test data sets
cleaned_train, cleaned_test = fix_missing_age(raw_data_train, raw_data_test)
cleaned_train, cleaned_test = fix_missing_fare(cleaned_train, cleaned_test)
cleaned_train, cleaned_test = fix_missing_embarkement(cleaned_train, cleaned_test)
cleaned_train, cleaned_test = drop_variables(cleaned_train, cleaned_test)

# limit the categorical variables
cleaned_train, cleaned_test = limit_categorical_variable(cleaned_train, cleaned_test, "SibSp")
cleaned_train, cleaned_test = limit_categorical_variable(cleaned_train, cleaned_test, "Parch")
cleaned_train, cleaned_test = get_dummies(cleaned_train, cleaned_test, categorical_variables)

# we now bin the continuous variables
cleaned_train_binned, cleaned_test_binned = bin_continous_variable(cleaned_train, cleaned_test, "Age", bin_array_age)
cleaned_train_binned, cleaned_test_binned = bin_continous_variable(cleaned_train_binned, cleaned_test_binned, "Fare", bin_array_fare)
cleaned_train_binned, cleaned_test_binned = get_dummies(cleaned_train_binned, cleaned_test_binned, ["Age_bins", "Fare_bins"])

cleaned_train.head()
# -

# ## Plot the data again
sns.pairplot(cleaned_train, hue='Survived')

# ## Prepare data for model training
# * Do a train/test split

# +
#==============================================================================
# Data sampling
#==============================================================================

#Create X and y
#X=cleaned_train_binned.drop([dependent_variable],axis=1)
#y=cleaned_train_binned[dependent_variable].astype('int')
X = cleaned_train.drop([dependent_variable], axis=1)
y = cleaned_train[dependent_variable].astype('int')

# Evaluate the model by splitting the training data into train and validation sets, setting seed=93
X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.25, random_state=93)

print("Percentage of survivors in training dataset: %0.6s" % (y_train.mean()))
print("Percentage of survivors in validation dataset: %0.6s" % (y_validation.mean()))
# -

# create the x for submitting the test
x_test = cleaned_test

# ## Train models
# * Create functionality

# +
# Functions to plot the trained results

def calculate_AUC(model, test_actuals, test_features, ax=None):
    """Plot the ROC curve of `model` on the given data, with the AUC in the legend."""
    logit_roc_auc = roc_auc_score(test_actuals, model.predict_proba(test_features)[:, 1])
    fpr, tpr, thresholds = roc_curve(test_actuals, model.predict_proba(test_features)[:, 1])
    if ax is None:
        fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True)
    ax.plot(fpr, tpr, label='Model (AUC = %0.2f)' % logit_roc_auc)
    ax.plot([0, 1], [0, 1], 'r--')
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.set_title('Receiver operating characteristic')
    ax.legend(loc="lower right")

def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=-1,
                        train_sizes=np.linspace(.1, 1.0, 5), ax=None):
    """Generate a simple plot of the test and training learning curve"""
    if ax is None:
        fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True)
    ax.set_title(title)
    if ylim is not None:
        ax.set_ylim(*ylim)
    ax.set_xlabel("Training examples")
    ax.set_ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    ax.grid()
    ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
                    train_scores_mean + train_scores_std, alpha=0.1, color="r")
    ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
                    test_scores_mean + test_scores_std, alpha=0.1, color="g")
    ax.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
    ax.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")
    ax.legend(loc="best")

def plot_model_fit_summary(model, test_actuals, test_features, title):
    """Side-by-side ROC curve and learning curve for a fitted model."""
    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 6))
    # Create the subplots
    calculate_AUC(model, test_actuals, test_features, ax=axs[0])
    plot_learning_curve(model, title, test_features, test_actuals, ax=axs[1])
    # BUGFIX: the figure title was hard-coded to 'Test_title', ignoring the
    # `title` parameter; use the caller-supplied title instead
    fig.suptitle(title)
    plt.show()
# -

test_model = KNeighborsClassifier()
test_model.fit(X_train, y_train)
plot_model_fit_summary(test_model, y_train, X_train, "Test")

# ## Model pipe testing
# * Do a wide testing of multiple models using SKLearn pipeline functionality

# +
# Test multiple model families with GridSearchCV
models_to_train = [RandomForestClassifier(),
                   LogisticRegression(penalty='l2'),
                   SVC(probability=True),
                   KNeighborsClassifier(),
                   GaussianNB(),
                   GradientBoostingClassifier()]
model_names = ['RandomForest', 'LogisticRegression', 'SVC', 'KNN', "GaussianNB", "GradientBoost"]

best_models = []
best_model_AUC = []

parameters = {
    'RandomForest': {
        'max_depth': np.arange(5, 7),
        'max_features': np.arange(7, 9)
    },
    'LogisticRegression': {
        'C': [10e-2, 10e-1, 1, 10, 10e2]
    },
    'SVC': {
        'kernel': ['rbf'],
        'gamma': [0.1, 1, 10],
        'C': [0.1, 1, 10]
    },
    'KNN': {
        'n_neighbors': [1, 3, 5, 10, 20]
    },
    'GaussianNB': {},
    'GradientBoost': {
        'min_samples_leaf': [1, 3, 5],
        'max_depth': [1, 3, 5]
    }
}

# Find out the best model
for i in range(0, len(model_names)):
    act_model = models_to_train[i]
    act_model_name = model_names[i]
    print("Start training " + act_model_name)
    # Get the start time
    start_time = time.time()

    act_params = parameters[act_model_name]
    act_model = GridSearchCV(act_model, act_params, cv=5, scoring=SCORING_METRIC, n_jobs=N_JOBS)
    act_model.fit(X_train, y_train)

    act_best_model = act_model.best_estimator_
    best_models.append(act_best_model)
    # NOTE(review): AUC here is measured on the training data itself
    best_model_AUC.append(roc_auc_score(y_train, act_best_model.predict_proba(X_train)[:, 1]))  #predict_proba [:,1]

    # get the end time
    time_taken = time.time() - start_time
    print("Stop training " + act_model_name + ". Time taken {} s".format(time_taken))

# displaying the results
result_df = pd.DataFrame(best_model_AUC).T
result_df.columns = model_names
result_df = result_df[result_df.iloc[-1].sort_values(ascending=True).index]
result_df.T.plot(kind='bar', title="Different models " + SCORING_METRIC, legend=False)
result_df.head()
# -

plot_learning_curve(best_models[0], "RandomForest", X_train, y_train)

# ## Investigate the best models from above
# * Based on pipeline model testing we choose the best models for more investigation

# ### Logistic regression
# * Currently no need to dig deeper in the Logistic regression

# ### Random forest

# +
#==============================================================================
# Random Forest
#==============================================================================

# Create the model object
random_forest_v1 = RandomForestClassifier()
parameters = {
    'max_depth': [1, 3, 5, 7, 9, 11],
    'max_features': [1, 5, 9],
    'n_estimators': [1, 10, 100, 500]
}
random_forest_v1 = GridSearchCV(random_forest_v1, parameters, cv=10, scoring=SCORING_METRIC, n_jobs=N_JOBS)
random_forest_v1.fit(X_train, y_train)
print(random_forest_v1.best_params_)

# Call calculate_AUC
calculate_AUC(random_forest_v1.best_estimator_, y_validation, X_validation)

# +
# Zoom in on the relevant parameters and refit the model with a smaller mesh

# Create the model object
random_forest_v2 = RandomForestClassifier()
parameters = {
    'max_depth': [4, 5, 6],
    'max_features': [4, 5, 6],
    'n_estimators': [75, 100, 125]
}
random_forest_v2 = GridSearchCV(random_forest_v2, parameters, cv=10, scoring=SCORING_METRIC, n_jobs=N_JOBS)
random_forest_v2.fit(X_train, y_train)
print(random_forest_v2.best_params_)

# Call calculate_AUC
calculate_AUC(random_forest_v2.best_estimator_, y_validation, X_validation)
# -

# ### GradientBoost

# +
# Create the model object
gb_v1 = GradientBoostingClassifier()
parameters = {
    'min_samples_leaf': [1, 3, 5],
    'max_depth': [2, 3, 4],
    'n_estimators': [10, 100, 1000]
}
gb_model_v1 = GridSearchCV(gb_v1, parameters, cv=10, scoring=SCORING_METRIC, n_jobs=N_JOBS)
gb_model_v1.fit(X_train, y_train)
print(gb_model_v1.best_params_)

# Call calculate_AUC
calculate_AUC(gb_model_v1.best_estimator_, y_validation, X_validation)

# +
# Drill down in the fitting
gb_v2 = GradientBoostingClassifier()
parameters = {
    'min_samples_leaf': [1, 2],
    'max_depth': [1, 2, 3],
    'n_estimators': [50, 100, 200]
}
gb_model_v2 = GridSearchCV(gb_v2, parameters, cv=10, scoring=SCORING_METRIC, n_jobs=N_JOBS)
gb_model_v2.fit(X_train, y_train)
print(gb_model_v2.best_params_)

# Call calculate_AUC
calculate_AUC(gb_model_v2.best_estimator_, y_validation, X_validation)
# -

# ### Investigate wrong answers
# * Next steps would be to investigate the misclassified data further
# * This section should be developed further

# +
model_to_test = gb_model_v2
y_comp = model_to_test.predict(X_validation)

comparison = X_validation.merge(right=pd.DataFrame(y_validation), left_index=True, right_index=True)
print(comparison.shape)
comparison = comparison.merge(right=pd.DataFrame(y_comp, index=comparison.index, columns=["predicted_survive"]),
                              left_index=True, right_index=True)

comparison["right_prediction"] = 0
comparison.loc[comparison["Survived"] == comparison["predicted_survive"], "right_prediction"] = 1
comparison.head()
# -

miss_classified = comparison.loc[comparison["right_prediction"] == 0]
print(miss_classified.shape)
miss_classified.head()

# Map the misclassified rows back onto the raw (pre-feature-engineering) data
# so we can inspect the original attributes of the hard cases.
raw_data_train_wrong_prediction = raw_data_train.loc[miss_classified.index]
raw_data_train_wrong_prediction.shape

# Re-use the exploratory plots on the misclassified subset only.
for indep_variable in categorical_variables:
    plot_dependent_variable_split_categorical(raw_data_train_wrong_prediction, dependent_variable, indep_variable)
    plt.show()

for indep_variable in cont_variables:
    plot_dependent_variable_split_continous(raw_data_train_wrong_prediction, dependent_variable, indep_variable)
    plt.show()

# ## Fit the final model with all of the available data
# * Final model is chosen based on analysis of the results from above

# +
# Hyperparameters taken from the best GradientBoost grid search above (gb_model_v2).
final_model = GradientBoostingClassifier(max_depth=2, min_samples_leaf=1, n_estimators=100)

# Train on train + validation combined now that model selection is done.
X_final = pd.concat([X_train, X_validation])
y_final = pd.concat([y_train, y_validation])

final_model.fit(X_final, y_final)
# -

# ## Create submission file
# * Create a submission file to submit to Kaggle

x_test.info()

# +
# create the submit data file
# Select the name of the file and the model that are going to be used
base_path = os.path.dirname(os.getcwd())
file_path = os.path.join(base_path, "output_data")
if not os.path.exists(file_path):
    os.makedirs(file_path)
file_name = "gb_submission_v5.csv"

# Create the output data: one "Survived" prediction per test passenger,
# indexed by PassengerId as Kaggle expects.
y_submit_rf = final_model.predict(x_test)
submit_df = pd.DataFrame(y_submit_rf, columns=["Survived"])
submit_df.index = raw_data_test["PassengerId"]
# NOTE(review): a previous `submit_df.rename(index=str, columns=...)` call here
# was removed — its return value was discarded (rename is not in-place) and the
# mapping was a no-op, since the column is already named "Survived" and the
# index already carries the "PassengerId" name.
submit_df
submit_df.to_csv(os.path.join(file_path, file_name), sep=",")
jupyter_notebooks/Titanic_prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Using your our own glacier inventory with OGGM # [The Randolph Glacier Inventory](https://www.glims.org/RGI/) is a key dataset to model glaciers at any scale: it includes outlines of the extent of each glacier in the world, an information that is critical in figuring out how much a particular glacier might contribute to rising sea level. These glacier outlines are the starting point of any simulation in OGGM. The RGI's latest version (v6), as well as v5, are provided and supported by OGGM (see the [documentation](https://oggm.readthedocs.io/en/latest/input-data.html#glacier-outlines-and-intersects)). However, there are [several issues](https://rgitools.readthedocs.io/en/latest/known-issues.html) in the RGI that might make you want to use your own corrected glacier outlines. # # This notebook describes how to feed OGGM with them. We will show you three case studies about how to give any geometry to OGGM and avoid errors of incompatibility between your shapefile and the model framework. # # We have three case studies which should cover a number of applications: # 1. [Dividing a glacier into smaller entities](#case1) (common case, useful for poorly outlined glaciers, which are in reality separate dynamical entities) # 2. [Merging two glaciers together](#case2) (useful for tidewater glaciers in particular, not much elsewhere) # 3. [Start from a completely independent inventory](#case3) # ## TLDR; # If you want to use custom data to feed OGGM with, you should: # - **make a shapefile that resembles the RGI one: same attributes, and the glacier geometries should be in lon/lat projection**. 
The most important attribute is `geometry`, of course, but others are used by OGGM as well: refer to [the OGGM documentation](https://docs.oggm.org/en/latest/input-data.html#glacier-outlines-and-intersects) to decide which ones. The RGI documentation (found in the RGI directory after download) is useful as well! # - **compute and use a new [glacier interesects](https://rgitools.readthedocs.io/en/latest/tools.html#glacier-intersects) file**, or make sure you don't need one and disable this option in OGGM. # ## Structure of an RGI file import numpy as np import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt import oggm import os from oggm import cfg, utils, workflow, tasks, graphics from oggm.core import inversion cfg.initialize(logging_level='WARNING') # Let's read a file from the standard RGI: utils.get_rgi_dir(version='61') sh = utils.get_rgi_region_file('11', version='61') sh # Shapefiles are best read an manipulated with [geopandas](http://geopandas.org/) in python (see also our [working_with_rgi.ipynb](working_with_rgi.ipynb) tutorial): gdf = gpd.read_file(sh) gdf.head() # An RGI file contains the actual glacier geometries, but also a number of attribute which are used by OGGM afterwards. Let's learn how to make our own file now. # <a id='case1'></a> # ## Case 1: dividing a glacier into smaller entities # A typical example of wrongly divided glacier is Hintereisferner, in Austria: # + # OGGM set-up cfg.PATHS['working_dir'] = utils.gettempdir(dirname='rgi-case-1', reset=True) cfg.PARAMS['border'] = 10 # Get the HEF geometry and plot it gl = utils.get_rgi_glacier_entities(['RGI60-11.00897']) gl.plot(edgecolor='k'); # - # Obviously, the two smaller tongues used to flow in the main one but this is not the case anymore today. We need updated geometries. 
# ### Make a new "RGI file" # There is no simple way to automate the process of finding these bad geometries, but we are [working on this](https://github.com/OGGM/partitioning) (don't hold your breath, this has been in development since a long time). Here we use a geometry that we prepared in QGis: # We simulate the case where we only have the geometry, nothing else divides = gpd.read_file(utils.get_demo_file('divides_alps.shp')) divides = divides.loc[divides.RGIId == 'RGI50-11.00897'][['geometry']] divides divides.plot(edgecolor='k'); # Now we use the RGI entity as template - it's good to use the same attributes as the original RGI glacier, because most of them are already correct: template = pd.concat([gl]*3, ignore_index=True) template # We change the important ones: # Attributes template['RGIId'] = ['RGI60-11.00897_d01', 'RGI60-11.00897_d02', 'RGI60-11.00897_d03'] template['Name'] = ['Hintereisferner d01', 'Hintereisferner d02', 'Hintereisferner d03'] # Geometries template['geometry'] = divides['geometry'].values # Center point for i, geom in template[['geometry']].iterrows(): cenlon, cenlat = geom.geometry.centroid.xy template.loc[i, 'CenLon'] = cenlon template.loc[i, 'CenLat'] = cenlat # This is important to properly georeference the file import salem template.crs = salem.wgs84.srs template.plot(edgecolor='k'); # Save it: hef_new_shape_path = os.path.join(cfg.PATHS['working_dir'], 'hef_divided.shp') template.to_file(hef_new_shape_path) # ### Compute the intersects # Hintereisferner has a divide with another glacier as well! 
Let's find out which: intersects_alps = gpd.read_file(utils.get_rgi_intersects_region_file('11')) intersects_hef = intersects_alps.loc[(intersects_alps.RGIId_1 == 'RGI60-11.00897') | (intersects_alps.RGIId_2 == 'RGI60-11.00897')] intersects_hef # Ok, we can now create a file which has all the glaciers we need to compute the relevant intersects (note that we could also use the full standard RGI with just HEF replaced): for_intersects = pd.concat([template, utils.get_rgi_glacier_entities(['RGI60-11.00846'])], ignore_index=True) for_intersects.crs = salem.wgs84.srs for_intersects.plot(edgecolor='k'); # Good! Let's use [rgitools](https://rgitools.readthedocs.io) to compute the intersects for this new situation: from rgitools.funcs import compute_intersects new_intersects = compute_intersects(for_intersects) f, ax = plt.subplots() for_intersects.plot(ax=ax, edgecolor='k'); new_intersects.plot(ax=ax, edgecolor='r'); # Good! We can store our intersects to use them with OGGM afterwards: hef_intersects_path = os.path.join(cfg.PATHS['working_dir'], 'hef_divided_intersects.shp') new_intersects.to_file(hef_intersects_path) # ### Finally: the OGGM run # + # This is important! We tell OGGM to recompute the glacier area for us cfg.PARAMS['use_rgi_area'] = False # This is the default anyway, but we set it here to be sure cfg.PARAMS['use_intersects'] = True # This is important! 
cfg.set_intersects_db(hef_intersects_path) # This is to avoid a download in the tutorial, you dont' need do this at home cfg.PATHS['dem_file'] = utils.get_demo_file('hef_srtm.tif') # This is important again - standard OGGM rgidf = gpd.read_file(hef_new_shape_path) gdirs = workflow.init_glacier_directories(rgidf, reset=True, force=True) # - workflow.execute_entity_task(tasks.define_glacier_region, gdirs); workflow.execute_entity_task(tasks.glacier_masks, gdirs); workflow.execute_entity_task(tasks.compute_centerlines, gdirs); workflow.execute_entity_task(tasks.initialize_flowlines, gdirs); workflow.execute_entity_task(tasks.catchment_area, gdirs); workflow.execute_entity_task(tasks.catchment_width_geom, gdirs); workflow.execute_entity_task(tasks.catchment_width_correction, gdirs); graphics.plot_catchment_width(gdirs, add_intersects=True, corrected=True) # It works! # # **The intersects in OGGM are used for two main things:** # - when a grid-point glacier section touches an intersect, it will be attributed a rectangular bed (instead of a parabolic one) # - when interpolating the ice thickness to a 2D grid, the boundary condition thickness=0 at the glacier outline is removed where there are intersects # # **We recommend to use intersects for your runs as well.** # <a id='case2'></a> # ## Case 2: merging glaciers # Sometimes, you may want to *merge* glaciers together. This case is less frequent than Case 1, but might be useful for calving glaciers, which are sometimes divided in the RGI. # ### Original RGI outlines # We use a case study for two marine-terminating glaciers in Alaska that have to be merged into a single outline in order to model a correct calving flux for these glaciers (following the methods described in # [Recinos, et al. (in review)](https://www.the-cryosphere-discuss.net/tc-2018-254/)). The resulting shapefile is a new one that needs to be adapted in order for OGGM to run. 
# We will study the Sawyer Glacier (`RGI60-01.03890`) that is actually connected via the calving front with this other entity (`RGI60-01.23664`). Visit this [link](https://glacierchange.wordpress.com/2011/03/16/saywer-glacier-alaska-retreat/) to learn more about the retreat of the Sawyer Glacier and see images illustrating this connection. gl = utils.get_rgi_glacier_entities(['RGI60-01.03890', 'RGI60-01.23664']) # Here OGGM downloaded the outlines for both glaciers. If we plot them together, we can see that both glaciers drain into the same fjord. See the google map below: cfg.initialize(logging_level='WARNING') cfg.PATHS['working_dir'] = utils.gettempdir(dirname='rgi-case-2-example', reset=True) cfg.PARAMS['border'] = 10 gdirs = workflow.init_glacier_directories(['RGI60-01.03890', 'RGI60-01.23664'], from_prepro_level=3, reset=True, force=True) graphics.plot_googlemap(gdirs); # The upper glacier map is a zoom version of the plot below. # They share the same glaciers terminus. Therefore, to estimate a calving flux for these glaciers we need them connected. # ### Let's merge these two outlines using geopandas merged = gl.dissolve(by='O2Region', as_index=False) merged = merged[gl.columns] merged.plot(edgecolor='k'); # ### RGI attributes # We now have a new shapefile, which resembles an RGI one but has wrong attributes. Some aren't relevant, but some are. See the [documentation](https://oggm.readthedocs.io/en/latest/input-data.html#glacier-outlines-and-intersects) for a list. # # The important ones are: RGIId, CenLon, CenLat, TermType, Area. Area and CenLon, Cenlat can be calculated by OGGM later, as we have seen earlier. Here, we prefer to keep the Area computed by the RGI for consistency. 
# We use the RGI as template (this avoids strange IO issues) template = gl.iloc[[0]].copy() template['geometry'] = merged['geometry'].values # + # Change CenLon, CenLat cenlon, cenlat = merged.iloc[0].geometry.centroid.xy template['CenLon'] = cenlon template['CenLat'] = cenlat # We sum up the areas template['Area'] = gl.Area.sum() template['Area'] # - # In [Recinos, et al. (in review)](https://www.the-cryosphere-discuss.net/tc-2018-254/) we wanted to estimate a frontal ablation flux for this new outline and compare it with previous estimates found in the literature for the Sawyer Glacier ([McNabb et al., 2015](https://agupubs.onlinelibrary.wiley.com/doi/full/10.1002/2014JF003276)). # # For this reason we kept the Sawyer glacier attributes to the following variables: # `RGIId, GLIMSId, Name` # # The `TermType` should be equal to 1, for Marine-terminating. template['TermType'] = 1 template['Name'] = 'Sawyer Glacier merged with RGI60-01.23664' # Now we can write a new shapefile for this glacier. # We recommend doing this if you have to make several model runs. You can also integrate this outline to your main RGI shapefile: cfg.initialize() cfg.PATHS['working_dir'] = utils.gettempdir(dirname='rgi-case-2', reset=True) template.crs = salem.wgs84.srs template.to_file(os.path.join(cfg.PATHS['working_dir'], 'merged.shp')) # ### Run OGGM with this new glacier # For simplicity, we do not compute the intersects in this case: **however, we recommend you do do so (see above). 
In all cases, do not use the intersects provided automatically with OGGM when using custom inventories, as they are likely to be wrong.** # + # Set-up the run cfg.PARAMS['border'] = 10 # We don't use intersects here cfg.PARAMS['use_intersects'] = False # We prefer OGGM to use the area we computed ourselves cfg.PARAMS['use_rgi_area'] = True # Use our merged file rgidf = gpd.read_file(os.path.join(cfg.PATHS['working_dir'], 'merged.shp')) gdirs = workflow.init_glacier_directories(rgidf, reset=True, force=True) gdirs # - # Here we are not able to use the [Pre-processed directories](https://docs.oggm.org/en/latest/input-data.html#pre-processed-directories) and the respective [Processing levels](https://docs.oggm.org/en/latest/input-data.html#preprodir) that OGGM provides for a easy run set up. We can't use this workflow simply because we have a different beguinning than OGGM, we have a different RGI! We just need to type more and run all the model task one by one: # + from oggm.workflow import execute_entity_task # Calculate the DEMs and the masks execute_entity_task(tasks.define_glacier_region, gdirs) execute_entity_task(tasks.glacier_masks, gdirs) # Calculate the Pre-processing tasks task_list = [ tasks.compute_centerlines, tasks.initialize_flowlines, tasks.catchment_area, tasks.catchment_width_geom, tasks.catchment_width_correction, ] for task in task_list: execute_entity_task(task, gdirs) # - graphics.plot_googlemap(gdirs); graphics.plot_catchment_width(gdirs, corrected=True); # <a id='case3'></a> # ## Case 3: start from a completely independent inventory # Refer to [Case 1](#case1) above, where we show how to start from an RGI template. The main message is: get the attributes right and let OGGM compute the area! # ## What's next? # # - return to the [OGGM documentation](https://docs.oggm.org) # - back to the [table of contents](welcome.ipynb)
notebooks/use_your_own_inventory.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _cell_guid="99bd6354-9099-4f22-9b56-5dfb60e455d6" _uuid="a8c4f03760fefe2435e5147bb2a33bcc6be6ce7f"
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.linear_model import LogisticRegression
import time

# Fix the RNG so CV folds and model results are reproducible across runs.
random_state = 6
np.random.seed(random_state)
warnings.filterwarnings('ignore')

# + _cell_guid="fdc2fbef-4259-40ad-be9e-b210a6b43f61" _uuid="6991dcc1b3b9561e4449860dc1852086773145b0"
# %matplotlib inline
# NOTE(review): the call below repeats the magic above; it is kept because it
# also works when the commented jupytext magic is not executed.
get_ipython().run_line_magic('matplotlib', 'inline')

# latex parameter
font = {
    'family': 'serif',
    'serif': ['Computer Modern Roman'],
    'weight': 'regular',
    'size': 18
}
plt.rc('font', **font)
plt.rc('text', usetex=False)
# plt.style.use('classic')
color_map = 'viridis'

# + _cell_guid="c5b8281f-7144-4199-9665-14e0a5e6506f" _uuid="62e91275b3c6111713f6cb86b6e4b5766cbc13cc"
# -1 encodes missing values in this dataset; map it to NaN on load.
df_train = pd.read_csv('../input/train.csv', na_values=-1)
df_test = pd.read_csv('../input/test.csv', na_values=-1)

# + [markdown] _cell_guid="0ee9bf20-1534-402c-9f12-08b90f50ceab" _uuid="530029ba341a8c81c0379fcf606b9d4c796aa4ad"
# ### Check if both test and train have the same shape

# + _cell_guid="82d36446-bf46-4aa3-a4ac-d3494b7b78f7" _uuid="57bf7ac786264b43005dad611bb8679f221ccc38"
print('Training data shape: {}'.format(df_train.shape))
# BUGFIX: this line printed "Training data shape" for the *test* frame
# (copy-paste error); the label now matches the data being reported.
print('Test data shape: {}'.format(df_test.shape))

# + [markdown] _cell_guid="9d79d7b2-6dc9-403f-bc9b-2df6e09557dc"
_uuid="131d56963fdc7119fc9f07170c187a589dc90221" # ### Check if there are any missing values # + _cell_guid="4cdb4ab6-8ac7-4e0c-bb51-6ec07173bc86" _uuid="17328e6d5cebf478911eea6587f40a89dfdcd965" print('Is null on train: {}'.format(df_train.isnull().any().any())) print('Is null on test: {}'.format(df_test.isnull().any().any())) # + [markdown] _cell_guid="bc75a561-296c-46e9-8515-900b5fc5f339" _uuid="5f353c6c63cbf425b3775ce764b203d30e7b79a7" # ### Descriptive statistics # + _cell_guid="23e2e7a4-c281-4f62-8b50-4b342e12b079" _uuid="190a0b8e5eb4c86c55283311b5dfce5debd60a36" df_train.describe() # + [markdown] _cell_guid="ea70a75e-3a34-4895-b5c6-8b396ff3dc1e" _uuid="cb1ee68c1572b05a96e0e9c477fd2cf62e26b45d" # ### Heatmap # + _cell_guid="4c13c499-f556-4bb4-9109-333b274fa168" _uuid="8835d2faa57710aeaa0bc691293fa5ba1b4484e3" import seaborn as sns cor = df_train.corr() plt.figure(figsize=(16,10)) sns.heatmap(cor) # + [markdown] _cell_guid="6b4a05f2-fe71-4c5c-89bc-cb6127e80151" _uuid="86a3956cc8a6de899e3a508367bd1579ee335750" # #### Since 'ps_calc' features do not show any have zero relationship with other features # #### We can delete them. 
# + _cell_guid="8947fd3b-bdd7-4a60-85d5-6d81df11cc21" _uuid="29602e250c2e1531f61da8e8ec287dfaf0fef225" col_to_drop = list(df_train.columns[df_train.columns.str.startswith('ps_calc_')]) df_train = df_train.drop(col_to_drop, axis=1) df_test = df_test.drop(col_to_drop, axis=1) # + [markdown] _cell_guid="a2dfcdab-6091-47e7-a59f-be4852aaa7da" _uuid="9fe437d51e3aa4b3085dc59c3c7ae44d51f1d250" # ## Work with missing values # + _cell_guid="d4e041cc-7eea-4e55-8fc0-ffe67c73dca5" _uuid="094deead4bd48cd2339143e3d87626bfe30d5c84" def get_missing_features(df): missings = pd.DataFrame([], columns=['feature', 'no_recoreds', 'percentage']) total_rows = df.shape[0] index = 0 for feature in list(df): total_nulls = df[feature].isnull().sum() if total_nulls > 0: missings_perc = total_nulls / total_rows missings.loc[index] = [feature, total_nulls, missings_perc] index += 1 missings = missings.sort_values('no_recoreds', ascending=False) return missings # + _cell_guid="776ce99d-24f4-484f-8ab8-35556539ef0e" _uuid="6a52318bdf9574d953fa6d5af82a3201d14e497b" df_missings = get_missing_features(df_train) print(df_missings) # + [markdown] _cell_guid="c2f02b69-0199-45ef-a919-d8bf3a3a1e85" _uuid="1e7414c459c61b915bacabe3b5c08c7cb3af2012" # ### Bar plot of missing features # + _cell_guid="d12db854-2abc-4241-bd04-cdf348c0c91d" _uuid="71540aeeb24f3a7bd481ee016e28615abe8cc54f" df_missings.plot(x='feature', y='no_recoreds', kind='bar', ) # + [markdown] _cell_guid="82a7544b-db17-438b-801b-ba8a8d39d35d" _uuid="3cf5baac613e67df53a7f1a163175f9d5c135b0d" # **Treat missing values by mode of the column** # + _cell_guid="6ce1eb73-8567-41fe-bc2f-f1454107ec70" _uuid="1f408d8ee2bc72a4ebf722796f13a87d3f4b2134" for i, feature in enumerate(list(df_train.drop(['id'], axis=1))): if df_train[feature].isnull().sum() > 0: df_train[feature].fillna(df_train[feature].mode()[0],inplace=True) for i, feature in enumerate(list(df_test.drop(['id'], axis=1))): if df_test[feature].isnull().sum() > 0: 
df_test[feature].fillna(df_test[feature].mode()[0],inplace=True) # + [markdown] _cell_guid="c7555d19-9e28-4399-b5f0-25aa1e603487" _uuid="f5be45a8c7dff83cd2b7e7865b9ba42391235f8d" # ### Check if there are any missing values # + _cell_guid="3a62ee1f-8b69-417d-8e77-ac5b3712f838" _uuid="50e32800d2e0f47d12afdc58d2c13e2f911d3e98" get_missing_features(df_train) get_missing_features(df_test) # + [markdown] _cell_guid="1c84a627-3c91-453b-b420-17205e2c2496" _uuid="9c0cec94c33b28ac47b77e6ea0043b31e5b08e7e" # ## Check category features of the dataset # + _cell_guid="692e072d-43e4-4dc8-a4e7-4bdc564cef83" _uuid="120695abdabec37df1272e3b88d5bee11bcfb746" cat_cols = [col for col in df_train.columns if '_cat' in col] dummed_cols = [] for cat_col in cat_cols: unique_values = len(np.unique(df_train[cat_col])) if unique_values < 50: dummed_cols.append(cat_col) print('{} has {} unique values'.format(cat_col, unique_values)) # + [markdown] _cell_guid="e3d3386c-a2fa-44ca-b10a-c82942241043" _uuid="8e38bfce5705d0d93ea3649ec33859d5f412f7c7" # ## Transform category features to dummies # + _cell_guid="a4017be6-2f19-4630-849a-26c49e923813" _uuid="5a5f46bf7e611d7e048f69e75058b2af8cdeb5de" id_test = df_test['id'].values y = df_train['target'].values df_train = df_train.drop(['target','id'], axis = 1) df_test = df_test.drop(['id'], axis = 1) cat_features = [a for a in df_train.columns if a.endswith('cat')] for column in cat_features: temp = pd.get_dummies(pd.Series(df_train[column])) df_train = pd.concat([df_train,temp],axis=1) df_train = df_train.drop([column],axis=1) for column in cat_features: temp = pd.get_dummies(pd.Series(df_test[column])) df_test = pd.concat([df_test,temp],axis=1) df_test = df_test.drop([column],axis=1) print(df_train.values.shape, df_test.values.shape) # + [markdown] _cell_guid="07764364-1994-441c-afb7-12e6e3309a04" _uuid="7db7081eed1b349322adebd8cb75cedcafaa4b79" # ### Gini coeficient # + _cell_guid="c6ce3a25-c86f-4f96-bbd7-603da3316ba2" 
_uuid="9e6d376ab77f19c1e126b50b63eaaabfbb352997"
# from https://www.kaggle.com/mashavasilenko/
# porto-seguro-xgb-modeling-and-parameters-tuning


def eval_gini(y_true, y_prob):
    """Return the (non-normalized) Gini coefficient of ranked predictions.

    Sorts the true binary labels by predicted probability, then accumulates
    the Gini sum in a single reverse pass over the sorted labels.
    """
    y_true = np.asarray(y_true)
    y_true = y_true[np.argsort(y_prob)]
    ntrue = 0
    gini = 0
    delta = 0
    n = len(y_true)
    for i in range(n - 1, -1, -1):
        y_i = y_true[i]
        ntrue += y_i
        gini += y_i * delta
        delta += 1 - y_i
    gini = 1 - 2 * gini / (ntrue * (n - ntrue))
    return gini


def gini_xgb(preds, dtrain):
    """XGBoost-style eval function: negated Gini so lower is better."""
    labels = dtrain.get_label()
    gini_score = -eval_gini(labels, preds)
    return [('gini', gini_score)]


def gini_normalized(a, p):
    """Gini of predictions *p* normalized by the Gini of a perfect model.

    BUGFIX: this originally called the undefined name ``gini`` and raised
    ``NameError`` on first use; it now calls ``eval_gini`` defined above.
    """
    return eval_gini(a, p) / eval_gini(a, a)


# + [markdown] _cell_guid="ecfd8b20-01a3-4eb6-915a-a191244d867e" _uuid="471abc86cd2834c3201e5a2d9145cfda5e7a93b6"
# ### Ensembling

# + _cell_guid="e054fad5-59dc-4349-8acf-260b8d65e462" _uuid="c4d36c458997b60937f005b48b634e439436648c"
from sklearn.model_selection import StratifiedKFold


class Create_ensemble(object):
    """Out-of-fold stacking helper.

    Fits each base model with stratified K-fold CV, collecting out-of-fold
    predictions for the training set and the fold-averaged predictions for
    the test set.
    """

    def __init__(self, n_splits, base_models):
        self.n_splits = n_splits
        self.base_models = base_models

    def predict(self, X, y, T):
        """Return (S_train, S_test): one column of probabilities per base model."""
        X = np.array(X)
        y = np.array(y)
        T = np.array(T)

        # `random_state` is the module-level seed set at the top of the notebook.
        folds = list(StratifiedKFold(n_splits=self.n_splits, shuffle=True,
                                     random_state=random_state).split(X, y))

        S_train = np.zeros((X.shape[0], len(self.base_models)))
        S_test = np.zeros((T.shape[0], len(self.base_models)))
        for i, clf in enumerate(self.base_models):
            S_test_i = np.zeros((T.shape[0], self.n_splits))
            for j, (train_idx, valid_idx) in enumerate(folds):
                X_train = X[train_idx]
                y_train = y[train_idx]
                X_valid = X[valid_idx]
                y_valid = y[valid_idx]

                clf.fit(X_train, y_train)
                valid_pred = clf.predict_proba(X_valid)[:, 1]
                # Out-of-fold prediction for the held-out rows of this fold.
                S_train[valid_idx, i] = valid_pred
                S_test_i[:, j] = clf.predict_proba(T)[:, 1]
            print("\nTraining Gini for model {} : {}".format(i, eval_gini(y, S_train[:, i])))
            # Average the per-fold test predictions for model i.
            S_test[:, i] = S_test_i.mean(axis=1)
        return S_train, S_test


# + [markdown] _cell_guid="16b81345-575d-4688-bc79-33c695a3829c" _uuid="003b63179183a3fb927bee07debfd07e790a2921"
# ## LightGBM model

# +
_cell_guid="10d3e204-dce9-4392-bef5-85ecc0a91438" _uuid="7373ed69879d400bcdc4205eb40bf997b26ae222" # LightGBM params lgb_params = {} lgb_params['learning_rate'] = 0.02 lgb_params['n_estimators'] = 700 lgb_params['max_bin'] = 15 lgb_params['subsample'] = 0.8 lgb_params['subsample_freq'] = 10 lgb_params['colsample_bytree'] = 0.8 lgb_params['min_child_samples'] = 800 lgb_params['random_state'] = random_state lgb_params['scale_pos_weight'] = 3 lgb_params2 = {} lgb_params2['learning_rate'] = 0.02 lgb_params2['n_estimators'] = 900 lgb_params2['max_bin'] = 20 lgb_params2['subsample'] = 0.8 lgb_params2['subsample_freq'] = 10 lgb_params2['colsample_bytree'] = 0.8 lgb_params2['min_child_samples'] = 600 lgb_params2['random_state'] = random_state lgb_params2['scale_pos_weight'] = 3 lgb_model = LGBMClassifier(**lgb_params) lgb_model2 = LGBMClassifier(**lgb_params2) # + _cell_guid="c91ed1eb-8a81-43b6-941e-966943ed315c" _uuid="46875d57a9413c0513be8b757058976c817d4d96" lgb_stack = Create_ensemble(n_splits = 5, base_models = [lgb_model, lgb_model2]) X = df_train Y = y T = df_test lgb_train_pred, lgb_test_pred = lgb_stack.predict(X, Y, T) # + _cell_guid="573482df-d0b7-4dc2-8358-0b8a21aad287" _uuid="e74a6e4a002fa7c17b0bbe9cdba9aa122684074b" # Create submission file sub = pd.DataFrame() sub['id'] = id_test sub['target'] = lgb_test_pred.mean(axis=1) sub.to_csv('lightgbm_submit_ensemble_features.csv', float_format='%.6f', index=False) # + [markdown] _cell_guid="e2548117-1bba-455b-9293-7b66235d59b5" _uuid="0dc4d299ef57a598f2cdd1185570cc91de9bd2f2" # ## correlation among the model results # + _cell_guid="3890e769-db50-47e1-a07d-93be57008ae9" _uuid="eecb3e49473ea73767c26779c0ee02577a56295e" import seaborn as sns test_pred_df = pd.DataFrame(data = lgb_test_pred) cor = test_pred_df.corr() plt.figure(figsize=(16,10)) sns.heatmap(cor)
10 poer sugero safe driver prediction/safe-driver-prediction-top-1-lightgbm-0-29132.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python3 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # Tutorial-IllinoisGRMHD: symmetry__set_gzs_staggered_gfs.C # # ## Authors: <NAME> & <NAME> # # <font color='red'>**This module is currently under development**</font> # # ## In this tutorial module we explain how `IllinoisGRMHD` handles symmetry options and the ghostzones of staggered gridfunctions. This module will likely be absorbed by another one once we finish documenting the code. # # ### Required and recommended citations: # # * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. IllinoisGRMHD: an open-source, user-friendly GRMHD code for dynamical spacetimes. Class. Quantum Grav. 32 (2015) 175009. ([arxiv:1501.07276](http://arxiv.org/abs/1501.07276)). # * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>. Primitive Variable Solvers for Conservative General Relativistic Magnetohydrodynamics. Astrophysical Journal, 641, 626 (2006) ([astro-ph/0512420](https://arxiv.org/abs/astro-ph/0512420)). # * **(Recommended)** <NAME>., <NAME>., <NAME>. An efficient shock-capturing central-type scheme for multidimensional relativistic flows - II. Magnetohydrodynamics. A&A 400 (2) 397-413 (2003). DOI: 10.1051/0004-6361:20021641 ([astro-ph/0210618](https://arxiv.org/abs/astro-ph/0210618)). # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This module is organized as follows # # 0. [Step 0](#src_dir): **Source directory creation** # 1. [Step 1](#introduction): **Introduction** # 1. 
[Step 2](#symmetry__set_gzs_staggered_gfs__c): **`symmetry__set_gzs_staggered_gfs.C`** # 1. [Step n-1](#code_validation): **Code validation** # 1. [Step n](#latex_pdf_output): **Output this notebook to $\LaTeX$-formatted PDF file** # <a id='src_dir'></a> # # # Step 0: Source directory creation \[Back to [top](#toc)\] # $$\label{src_dir}$$ # # We will now use the [cmdline_helper.py NRPy+ module](Tutorial-Tutorial-cmdline_helper.ipynb) to create the source directory within the `IllinoisGRMHD` NRPy+ directory, if it does not exist yet. # + # Step 0: Creation of the IllinoisGRMHD source directory # Step 0a: Add NRPy's directory to the path # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory import os,sys nrpy_dir_path = os.path.join("..","..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step 0b: Load up cmdline_helper and create the directory import cmdline_helper as cmd IGM_src_dir_path = os.path.join("..","src") cmd.mkdir(IGM_src_dir_path) # Step 0c: Create the output file path outfile_path__symmetry__set_gzs_staggered_gfs__C = os.path.join(IGM_src_dir_path,"symmetry__set_gzs_staggered_gfs.C") # - # <a id='introduction'></a> # # # Step 1: Introduction \[Back to [top](#toc)\] # $$\label{introduction}$$ # <a id='symmetry__set_gzs_staggered_gfs__c'></a> # # # Step 2: `symmetry__set_gzs_staggered_gfs.C` \[Back to [top](#toc)\] # $$\label{symmetry__set_gzs_staggered_gfs__c}$$ # + # %%writefile $outfile_path__symmetry__set_gzs_staggered_gfs__C #include "cctk.h" #include "cctk_Parameters.h" #include <cstdio> #include <cstdlib> #include "IllinoisGRMHD_headers.h" void IllinoisGRMHD_set_symmetry_gzs_staggered(const cGH *cctkGH, const int *cctk_lsh,CCTK_REAL *X,CCTK_REAL *Y,CCTK_REAL *Z, CCTK_REAL *gridfunc, CCTK_REAL *gridfunc_syms,int stagger_x,int stagger_y,int stagger_z) { DECLARE_CCTK_PARAMETERS; if(CCTK_EQUALS(Symmetry, "equatorial")) CCTK_VError(VERR_DEF_PARAMS,"Warning: Symmetry==equatorial not supported! 
USE AT YOUR OWN RISK. You will need to comment this error message out."); // No symmetries -> return. if(CCTK_EQUALS(Symmetry, "none")) return; CCTK_REAL dz = Z[CCTK_GFINDEX3D(cctkGH,0,0,1)] - Z[CCTK_GFINDEX3D(cctkGH,0,0,0)]; CCTK_REAL z_offset = dz*0.5*stagger_z; int num_gzs=0; //FIXME: Might want to use cctk_nghostzones instead... while( (Z[CCTK_GFINDEX3D(cctkGH,0,0,num_gzs)]+z_offset) < -dz*0.1 && num_gzs<cctk_lsh[2]) num_gzs++; if(num_gzs*2>=cctk_lsh[2]) CCTK_VError(VERR_DEF_PARAMS,"ERROR in symmetry__set_gzs_staggered_gfs.C"); #pragma omp parallel for for(int k=0;k<num_gzs;k++) for(int j=0;j<cctk_lsh[1];j++) for(int i=0;i<cctk_lsh[0];i++) { int index_inside__sym_gz = CCTK_GFINDEX3D(cctkGH,i,j,k); /* This loop sets symmetry ghostzones, regardless of how the gridfunction is staggered. * * STAGGERED PATTERN: * if num_gzs==1 && stagger_z==1: * z[] = {-dz/2,dz/2,3dz/2, etc} -> gridfunc[index 0] = gridfunc_syms[2]*gridfunc[index 1] * * if num_gzs==2 && stagger_z==1: * z[] = {-3dz/2,-dz/2,dz/2,3dz/2 etc} * -> gridfunc[index 0] = gridfunc_syms[2]*gridfunc[index 3] * -> gridfunc[index 1] = gridfunc_syms[2]*gridfunc[index 2] * . * . * . * -> gridfunc[i] = gridfunc_syms[2]*gridfunc[(num_gz*2-1)-i] * * UNSTAGGERED PATTERN: * if num_gzs==1 && stagger_z==0: * z[] = {-dz,0,dz, etc} -> gridfunc[index 0] = gridfunc_syms[2]*gridfunc[index 2] * * if num_gzs==2 && stagger_z==0: * z[] = {-2dz,-dz,0,dz,2dz, etc} -> gridfunc[index 0] = gridfunc_syms[2]*gridfunc[index 4] * z[] = {-2dz,-dz,0,dz,2dz, etc} -> gridfunc[index 1] = gridfunc_syms[2]*gridfunc[index 3] * . * . * . 
* -> gridfunc[i] = gridfunc_syms[2]*gridfunc[(num_gz*2)-i] * * OVERALL PATTERN: gridfunc[i] = gridfunc_syms[2]*gridfunc[(num_gz*2-stagger_z)-i] */ int matching_index_outside_sym_gz = CCTK_GFINDEX3D(cctkGH,i,j,(num_gzs*2-stagger_z)-k); gridfunc[index_inside__sym_gz] = gridfunc_syms[2]*gridfunc[matching_index_outside_sym_gz]; } } # - # <a id='code_validation'></a> # # # Step n-1: Code validation \[Back to [top](#toc)\] # $$\label{code_validation}$$ # # First we download the original `IllinoisGRMHD` source code and then compare it to the source code generated by this tutorial notebook. # + # Verify if the code generated by this tutorial module # matches the original IllinoisGRMHD source code # First download the original IllinoisGRMHD source code import urllib from os import path original_IGM_file_url = "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/IllinoisGRMHD/src/symmetry__set_gzs_staggered_gfs.C" original_IGM_file_name = "symmetry__set_gzs_staggered_gfs-original.C" original_IGM_file_path = os.path.join(IGM_src_dir_path,original_IGM_file_name) # Then download the original IllinoisGRMHD source code # We try it here in a couple of ways in an attempt to keep # the code more portable try: original_IGM_file_code = urllib.request.urlopen(original_IGM_file_url).read().decode("utf-8") # Write down the file the original IllinoisGRMHD source code with open(original_IGM_file_path,"w") as file: file.write(original_IGM_file_code) except: try: original_IGM_file_code = urllib.urlopen(original_IGM_file_url).read().decode("utf-8") # Write down the file the original IllinoisGRMHD source code with open(original_IGM_file_path,"w") as file: file.write(original_IGM_file_code) except: # If all else fails, hope wget does the job # !wget -O $original_IGM_file_path $original_IGM_file_url # Perform validation # Validation__symmetry__set_gzs_staggered_gfs__C = !diff $original_IGM_file_path $outfile_path__symmetry__set_gzs_staggered_gfs__C if 
Validation__symmetry__set_gzs_staggered_gfs__C == []: # If the validation passes, we do not need to store the original IGM source code file # !rm $original_IGM_file_path print("Validation test for symmetry__set_gzs_staggered_gfs.C: PASSED!") else: # If the validation fails, we keep the original IGM source code file print("Validation test for symmetry__set_gzs_staggered_gfs.C: FAILED!") # We also print out the difference between the code generated # in this tutorial module and the original IGM source code print("Diff:") for diff_line in Validation__symmetry__set_gzs_staggered_gfs__C: print(diff_line) # - # <a id='latex_pdf_output'></a> # # # Step n: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-IllinoisGRMHD__symmetry__set_gzs_staggered_gfs.pdf](Tutorial-IllinoisGRMHD__symmetry__set_gzs_staggered_gfs.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means). latex_nrpy_style_path = os.path.join(nrpy_dir_path,"latex_nrpy_style.tplx") # #!jupyter nbconvert --to latex --template $latex_nrpy_style_path --log-level='WARN' Tutorial-IllinoisGRMHD__symmetry__set_gzs_staggered_gfs.ipynb # #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__symmetry__set_gzs_staggered_gfs.tex # #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__symmetry__set_gzs_staggered_gfs.tex # #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__symmetry__set_gzs_staggered_gfs.tex # !rm -f Tut*.out Tut*.aux Tut*.log
IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__symmetry__set_gzs_staggered_gfs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# +
library(tesseract)
library(magick)

# Render every page of the source PDF to PNG images (600 dpi) so tesseract
# has high-resolution input to OCR.
pngfile <- pdftools::pdf_convert('D:\\TEXTproject\\datafiles\\eftos-ent-kingdom-of-a-thousand.pdf', dpi = 600)
pngfile
#str(pngfile)

# OCR four separate 11-page ranges, one per part of the book.
text1 <- tesseract::ocr(pngfile[24:34])
text2 <- tesseract::ocr(pngfile[54:64])
text3 <- tesseract::ocr(pngfile[84:94])
text4 <- tesseract::ocr(pngfile[114:124])
#cat(text)
# -

head(text1)
head(text2)
head(text3)
head(text4)

# Write each OCR'd part to its own file (part1.csv .. part4.csv).
texts <- list(text1, text2, text3, text4)
for (i in seq_along(texts)) {
    fileConn <- file(paste0("D:\\TEXTproject\\datafiles\\part", i, ".csv"))
    writeLines(texts[[i]], fileConn)
    close(fileConn)
}
parts_divide.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Import Cell import os import pandas as pd import numpy as np import xlrd import csv import sys import win32com.client import statsmodels.api as sm import statsmodels.formula.api as smf import statistics import math import matplotlib as mp from matplotlib import pylab from scipy import stats from scipy.stats.stats import pearsonr from itertools import groupby from more_itertools import unique_everseen import seaborn # %matplotlib inline # # Generate Subject and File Lists # + root = "S:\\Killgore_SCAN\\UA_SCAN_Shared\\PREEMPT\\" dumplist = [] for p,s,f in os.walk(root): for n in f: dumplist.append(os.path.join(p,n)) xlApp = win32com.client.Dispatch('Excel.Application') masterobject = xlApp.Workbooks.Open(root+"Tracking_Storage_Scheduling_Logs\\PREEMPT_Subject_Masterlist.xlsx", False, True, None, Password='').Worksheets(1) masterlist = pd.DataFrame(list(masterobject.Range(masterobject.Cells(1,1),masterobject.Cells(masterobject.UsedRange.Rows.Count,19)).Value)) masterlist.columns = masterlist.iloc[0] masterlist.reindex(masterlist.index.drop(0)) v2subslist = list(masterlist[masterlist.Status == 'V2 Complete']['Record ID'].astype(int)) v2subslist.sort() cortdf = pd.read_excel(open(root+'Analyses\\Cortisol\\Cortisol_Analysis.xlsx',"rb"), index_col=False) cortdf['Log Mean (µg/dL)'] = [math.log(y,10) for y in cortdf['Mean (µg/dL)']] trierdf = cortdf.loc[(cortdf['General Time'] == 8) | (cortdf['General Time'] == 9)] triersubloopdf = cortdf.loc[(cortdf['General Time'] == 8)] cortsubslist = list(triersubloopdf['Record ID'].astype(int)) cortsubslist.sort() fileslist = [x for x in dumplist if "UA_SCAN_Shared\\PREEMPT\\Data\\PREEMPT1_" in x] fileslist[:] = [x for x in fileslist if any("PREEMPT1_%04d" % y in x for y in cortsubslist)] markovlist = [x for x in 
fileslist if "aliens_task.csv" in x] ospanalllist = [x for x in fileslist if "ospan" in x] ospanlist = [x for x in ospanalllist if not "math" in x] msceitlist = [x for x in fileslist if (("msceit" in x) or ("MSCEIT" in x))] eqilist = [x for x in fileslist if (("eqi2" in x) or ("EQi" in x) or ("EQI" in x) or ("eqi" in x))] neolist = [x for x in fileslist if (("neo3" in x) and ("copy1" in x))] # - # # Build Data Vectors (Dependent on File Lists) # + msceittotallist = [] msceitexparealist = [] msceitstratarealist = [] msceitpercbranchlist = [] msceitusingbranchlist = [] msceitunderbranchlist = [] msceitmanagebranchlist = [] eqitotallist = []; eqisplist = []; eqisrlist = []; eqisalist = []; eqieslist = []; eqiselist = []; eqieelist = []; eqiaslist = []; eqiinlist = []; eqiislist = []; eqiirlist = []; eqiemlist = []; eqirelist = []; eqidmlist = []; eqipslist = []; eqirtlist = []; eqiiclist = []; eqiiclist = []; eqismlist = []; eqifllist = []; eqistlist = []; eqioplist = []; eqihalist = [] pretrierlist8 = [] posttrierlist9 = [] diftrierlist = [] moodlist = []; moodcortlist = []; agelist = []; agecortlist = []; genderlist = []; gendercortlist = []; testorderlist = []; testordercortlist = [] scorelist = [] cortsampleslist = [] for x in v2subslist: moodlist.append(masterlist[masterlist['Record ID']==x].reset_index()['Mood Condition'][0]) agelist.append(masterlist[masterlist['Record ID']==x].reset_index()['Age'][0]) genderlist.append(masterlist[masterlist['Record ID']==x].reset_index()['Gender'][0]) testorderlist.append(masterlist[masterlist['Record ID']==x].reset_index()['Test Order'][0]) for x in cortsubslist: moodcortlist.append(masterlist[masterlist['Record ID']==x].reset_index()['Mood Condition'][0]) agecortlist.append(masterlist[masterlist['Record ID']==x].reset_index()['Age'][0]) gendercortlist.append(masterlist[masterlist['Record ID']==x].reset_index()['Gender'][0]) testordercortlist.append(masterlist[masterlist['Record ID']==x].reset_index()['Test Order'][0]) for x 
in ospanlist: scorelist.append(pd.read_csv(x)['runningcorrect'].iloc[-1]) for x in msceitlist: msceittotallist.append(pd.read_excel(open(x,"rb"),sheet_name='MSCEIT_Demographic_and_Scores')['SS_TOT'][0]) msceitexparealist.append(pd.read_excel(open(x,"rb"),sheet_name='MSCEIT_Demographic_and_Scores')['SS_EXP'][0]) msceitstratarealist.append(pd.read_excel(open(x,"rb"),sheet_name='MSCEIT_Demographic_and_Scores')['SS_REA'][0]) msceitpercbranchlist.append(pd.read_excel(open(x,"rb"),sheet_name='MSCEIT_Demographic_and_Scores')['SS_B1'][0]) msceitusingbranchlist.append(pd.read_excel(open(x,"rb"),sheet_name='MSCEIT_Demographic_and_Scores')['SS_B2'][0]) msceitunderbranchlist.append(pd.read_excel(open(x,"rb"),sheet_name='MSCEIT_Demographic_and_Scores')['SS_B3'][0]) msceitmanagebranchlist.append(pd.read_excel(open(x,"rb"),sheet_name='MSCEIT_Demographic_and_Scores')['SS_B4'][0]) for x in eqilist: eqitotallist.append(pd.read_excel(x,sheet_name='EQ_i_2.0_Scores')['TOT_T'][0]) eqisplist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['SP_T'][0]) eqisrlist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['SR_T'][0]) eqisalist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['SA_T'][0]) eqieslist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['ES_T'][0]) eqiselist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['SE_T'][0]) eqieelist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['EE_T'][0]) eqiaslist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['AS_T'][0]) eqiinlist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['IN_T'][0]) eqiislist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['IS_T'][0]) eqiirlist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['IR_T'][0]) eqiemlist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['EM_T'][0]) 
eqirelist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['RE_T'][0]) eqidmlist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['DM_T'][0]) eqipslist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['PS_T'][0]) eqirtlist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['RT_T'][0]) eqiiclist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['IC_T'][0]) eqismlist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['SM_T'][0]) eqifllist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['FL_T'][0]) eqistlist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['ST_T'][0]) eqioplist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['OP_T'][0]) eqihalist.append(pd.read_excel(open(x,"rb"),sheet_name='EQ_i_2.0_Scores')['HA_T'][0]) for x in cortsubslist: try: pretrierlist8.append(trierdf.loc[(trierdf['Record ID'] == int(x)) & (trierdf['General Time'] == 8), 'Mean (µg/dL)'].iloc[0]) posttrierlist9.append(trierdf.loc[(trierdf['Record ID'] == int(x)) & (trierdf['General Time'] == 9), 'Mean (µg/dL)'].iloc[0]) except: pass pretrierlist8 = [math.log(y,10) for y in pretrierlist8] posttrierlist9 = [math.log(y,10) for y in posttrierlist9] diftrierlist = [i-j for i,j in zip(posttrierlist9,pretrierlist8)] diftrierlistz = stats.mstats.zscore(diftrierlist) cortsampleslist = [] for x in list(cortsubslist): subrow = cortdf[cortdf['Record ID'] == x].reset_index() if len(subrow.index) > 0: temp = [] for x in range(1,12): try: temp.append(subrow[subrow['General Time'] == x].reset_index()['Mean (µg/dL)'][0]) except: temp.append(None) cortsampleslist.append(temp) # - # # Build Markov Dataframe (Dependent on File Lists) # + for i, x in enumerate(markovlist): name = os.path.basename(x)[9:13] if i == 0: df = pd.read_csv(x, delimiter=',', index_col=False) df = df.iloc[9:] df.insert(0, 'SubjectID', name) elif i > 0: tempdf = pd.read_csv(x, delimiter=',', index_col=False) 
tempdf = tempdf.iloc[9:] tempdf.insert(0, 'SubjectID', name) df = df.append(tempdf, ignore_index=True) level1list = [] level2list = [] df = df.drop(df[df.choice1 == 0].index) df = df.drop(df[df.choice2 == 0].index) table = pd.DataFrame( {'subjID': df.SubjectID, 'reward': df.money }) for x in list(df.choice1): level1list.append('stimulus %s' % x) table['level1_choice'] = level1list this = 10*df.choice1 + df.choice2 for x in this: if x == 11: level2list.append('stimulus 3') if x == 12: level2list.append('stimulus 4') if x == 21: level2list.append('stimulus 5') if x == 22: level2list.append('stimulus 6') table['level2_choice'] = level2list df.to_csv("markov_dataset.csv", sep=',', index=False) table.to_csv("markov_dataset.txt", sep='\t', index=False) # - # # Gather VAS Responses (Dependent on File Lists) df = pd.read_csv('C:\\Users\\localadmin\\Python Scripts\\vasday2_dataset.csv', delimiter=',', index_col=False) df['record_id'] = df['record_id'].str[9:].astype(int) vasalertlist = []; vassadlist = []; vastenselist = []; vaseffortlist = []; vashappylist = []; vaswearylist = []; vascalmlist = []; vassleepylist = []; vasglobalvigorlist = []; vasglobalaffectlist = [] vasalertcortlist = []; vassadcortlist = []; vastensecortlist = []; vaseffortcortlist = []; vashappycortlist = []; vaswearycortlist = []; vascalmcortlist = []; vassleepycortlist = []; vasglobalvigorcortlist = []; vasglobalaffectcortlist = [] vasalertmeanlist = []; vassadmeanlist = []; vastensemeanlist = []; vaseffortmeanlist = []; vaseffortmeanlist = []; vashappymeanlist = []; vaswearymeanlist = []; vascalmmeanlist = []; vassleepymeanlist = []; vasglobalvigormeanlist = []; vasglobalaffectmeanlist = [] vasalertcortmeanlist = []; vassadcortmeanlist = []; vastensecortmeanlist = []; vaseffortcortmeanlist = []; vaseffortcortmeanlist = []; vashappycortmeanlist = []; vaswearycortmeanlist = []; vascalmcortmeanlist = []; vassleepycortmeanlist = []; vasglobalvigorcortmeanlist = []; vasglobalaffectcortmeanlist = [] for x 
in v2subslist: subrow = df[df['record_id'] == x].reset_index() if len(subrow.index) > 0: vasalertlist.append([subrow['vas_alert_1'][0],subrow['vas_alert_2'][0],subrow['vas_mood_boost_alert'][0],subrow['vas_mood_boost_alert_2'][0],subrow['vas_mood_boost_alert_3'][0],subrow['vas_mood_boost_alert_4'][0],subrow['vas_mood_boost_alert_5'][0],subrow['vas_mood_boost_alert_6'][0],subrow['vas_mood_boost_alert_7'][0],subrow['vas_alert_3'][0]]) vassadlist.append([subrow['vas_sad_1'][0],subrow['vas_sad_2'][0],subrow['vas_mood_boost_sad'][0],subrow['vas_mood_boost_sad_2'][0],subrow['vas_mood_boost_sad_3'][0],subrow['vas_mood_boost_sad_4'][0],subrow['vas_mood_boost_sad_5'][0],subrow['vas_mood_boost_sad_6'][0],subrow['vas_mood_boost_sad_7'][0],subrow['vas_sad_3'][0]]) vastenselist.append([subrow['vas_tense_1'][0],subrow['vas_tense_2'][0],subrow['vas_mood_boost_tense'][0],subrow['vas_mood_boost_tense_2'][0],subrow['vas_mood_boost_tense_3'][0],subrow['vas_mood_boost_tense_4'][0],subrow['vas_mood_boost_tense_5'][0],subrow['vas_mood_boost_tense_6'][0],subrow['vas_mood_boost_tense_7'][0],subrow['vas_tense_3'][0]]) vaseffortlist.append([subrow['vas_effort_1'][0],subrow['vas_effort_2'][0],subrow['vas_mood_boost_effort'][0],subrow['vas_mood_boost_effort_2'][0],subrow['vas_mood_boost_effort_3'][0],subrow['vas_mood_boost_effort_4'][0],subrow['vas_mood_boost_effort_5'][0],subrow['vas_mood_boost_effort_6'][0],subrow['vas_mood_boost_effort_7'][0],subrow['vas_effort_3'][0]]) vashappylist.append([subrow['vas_happy_1'][0],subrow['vas_happy_2'][0],subrow['vas_mood_boost_happy'][0],subrow['vas_mood_boost_happy_2'][0],subrow['vas_mood_boost_happy_3'][0],subrow['vas_mood_boost_happy_4'][0],subrow['vas_mood_boost_happy_5'][0],subrow['vas_mood_boost_happy_6'][0],subrow['vas_mood_boost_happy_7'][0],subrow['vas_happy_3'][0]]) 
vaswearylist.append([subrow['vas_weary_1'][0],subrow['vas_weary_2'][0],subrow['vas_mood_boost_weary'][0],subrow['vas_mood_boost_weary_2'][0],subrow['vas_mood_boost_weary_3'][0],subrow['vas_mood_boost_weary_4'][0],subrow['vas_mood_boost_weary_5'][0],subrow['vas_mood_boost_weary_6'][0],subrow['vas_mood_boost_weary_7'][0],subrow['vas_weary_3'][0]]) vascalmlist.append([subrow['vas_calm_1'][0],subrow['vas_calm_2'][0],subrow['vas_mood_boost_calm'][0],subrow['vas_mood_boost_calm_2'][0],subrow['vas_mood_boost_calm_3'][0],subrow['vas_mood_boost_calm_4'][0],subrow['vas_mood_boost_calm_5'][0],subrow['vas_mood_boost_calm_6'][0],subrow['vas_mood_boost_calm_7'][0],subrow['vas_calm_3'][0]]) vassleepylist.append([subrow['vas_sleepy_1'][0],subrow['vas_sleepy_2'][0],subrow['vas_mood_boost_sleepy'][0],subrow['vas_mood_boost_sleepy_2'][0],subrow['vas_mood_boost_sleepy_3'][0],subrow['vas_mood_boost_sleepy_4'][0],subrow['vas_mood_boost_sleepy_5'][0],subrow['vas_mood_boost_sleepy_6'][0],subrow['vas_mood_boost_sleepy_7'][0],subrow['vas_sleepy_3'][0]]) vasglobalvigorlist.append([subrow['vas_global_vigor_1'][0],subrow['vas_global_vigor_2'][0],subrow['vas_mood_boost_global_vigor'][0],subrow['vas_mood_boost_global_vigor_2'][0],subrow['vas_mood_boost_global_vigor_3'][0],subrow['vas_mood_boost_global_vigor_4'][0],subrow['vas_mood_boost_global_vigor_5'][0],subrow['vas_mood_boost_global_vigor_6'][0],subrow['vas_mood_boost_global_vigor_7'][0],subrow['vas_global_vigor_3'][0]]) vasglobalaffectlist.append([subrow['vas_global_affect_1'][0],subrow['vas_global_affect_2'][0],subrow['vas_mood_boost_global_affect'][0],subrow['vas_mood_boost_global_affect_2'][0],subrow['vas_mood_boost_global_affect_3'][0],subrow['vas_mood_boost_global_affect_4'][0],subrow['vas_mood_boost_global_affect_5'][0],subrow['vas_mood_boost_global_affect_6'][0],subrow['vas_mood_boost_global_affect_7'][0],subrow['vas_global_affect_3'][0]]) for x in vasalertlist: vasalertmeanlist.append(statistics.mean(x)) for x in vassadlist: 
vassadmeanlist.append(statistics.mean(x)) for x in vastenselist: vastensemeanlist.append(statistics.mean(x)) for x in vaseffortlist: vaseffortmeanlist.append(statistics.mean(x)) for x in vashappylist: vashappymeanlist.append(statistics.mean(x)) for x in vaswearylist: vaswearymeanlist.append(statistics.mean(x)) for x in vascalmlist: vascalmmeanlist.append(statistics.mean(x)) for x in vassleepylist: vassleepymeanlist.append(statistics.mean(x)) for x in vasglobalvigorlist: vasglobalvigormeanlist.append(statistics.mean(x)) for x in vasglobalaffectlist: vasglobalaffectmeanlist.append(statistics.mean(x)) for x in cortsubslist: subrow = df[df['record_id'] == x].reset_index() if len(subrow.index) > 0: vasalertcortlist.append([subrow['vas_alert_1'][0],subrow['vas_alert_2'][0],subrow['vas_mood_boost_alert'][0],subrow['vas_mood_boost_alert_2'][0],subrow['vas_mood_boost_alert_3'][0],subrow['vas_mood_boost_alert_4'][0],subrow['vas_mood_boost_alert_5'][0],subrow['vas_mood_boost_alert_6'][0],subrow['vas_mood_boost_alert_7'][0],subrow['vas_alert_3'][0]]) vassadcortlist.append([subrow['vas_sad_1'][0],subrow['vas_sad_2'][0],subrow['vas_mood_boost_sad'][0],subrow['vas_mood_boost_sad_2'][0],subrow['vas_mood_boost_sad_3'][0],subrow['vas_mood_boost_sad_4'][0],subrow['vas_mood_boost_sad_5'][0],subrow['vas_mood_boost_sad_6'][0],subrow['vas_mood_boost_sad_7'][0],subrow['vas_sad_3'][0]]) vastensecortlist.append([subrow['vas_tense_1'][0],subrow['vas_tense_2'][0],subrow['vas_mood_boost_tense'][0],subrow['vas_mood_boost_tense_2'][0],subrow['vas_mood_boost_tense_3'][0],subrow['vas_mood_boost_tense_4'][0],subrow['vas_mood_boost_tense_5'][0],subrow['vas_mood_boost_tense_6'][0],subrow['vas_mood_boost_tense_7'][0],subrow['vas_tense_3'][0]]) 
vaseffortcortlist.append([subrow['vas_effort_1'][0],subrow['vas_effort_2'][0],subrow['vas_mood_boost_effort'][0],subrow['vas_mood_boost_effort_2'][0],subrow['vas_mood_boost_effort_3'][0],subrow['vas_mood_boost_effort_4'][0],subrow['vas_mood_boost_effort_5'][0],subrow['vas_mood_boost_effort_6'][0],subrow['vas_mood_boost_effort_7'][0],subrow['vas_effort_3'][0]]) vashappycortlist.append([subrow['vas_happy_1'][0],subrow['vas_happy_2'][0],subrow['vas_mood_boost_happy'][0],subrow['vas_mood_boost_happy_2'][0],subrow['vas_mood_boost_happy_3'][0],subrow['vas_mood_boost_happy_4'][0],subrow['vas_mood_boost_happy_5'][0],subrow['vas_mood_boost_happy_6'][0],subrow['vas_mood_boost_happy_7'][0],subrow['vas_happy_3'][0]]) vaswearycortlist.append([subrow['vas_weary_1'][0],subrow['vas_weary_2'][0],subrow['vas_mood_boost_weary'][0],subrow['vas_mood_boost_weary_2'][0],subrow['vas_mood_boost_weary_3'][0],subrow['vas_mood_boost_weary_4'][0],subrow['vas_mood_boost_weary_5'][0],subrow['vas_mood_boost_weary_6'][0],subrow['vas_mood_boost_weary_7'][0],subrow['vas_weary_3'][0]]) vascalmcortlist.append([subrow['vas_calm_1'][0],subrow['vas_calm_2'][0],subrow['vas_mood_boost_calm'][0],subrow['vas_mood_boost_calm_2'][0],subrow['vas_mood_boost_calm_3'][0],subrow['vas_mood_boost_calm_4'][0],subrow['vas_mood_boost_calm_5'][0],subrow['vas_mood_boost_calm_6'][0],subrow['vas_mood_boost_calm_7'][0],subrow['vas_calm_3'][0]]) vassleepycortlist.append([subrow['vas_sleepy_1'][0],subrow['vas_sleepy_2'][0],subrow['vas_mood_boost_sleepy'][0],subrow['vas_mood_boost_sleepy_2'][0],subrow['vas_mood_boost_sleepy_3'][0],subrow['vas_mood_boost_sleepy_4'][0],subrow['vas_mood_boost_sleepy_5'][0],subrow['vas_mood_boost_sleepy_6'][0],subrow['vas_mood_boost_sleepy_7'][0],subrow['vas_sleepy_3'][0]]) 
vasglobalvigorcortlist.append([subrow['vas_global_vigor_1'][0],subrow['vas_global_vigor_2'][0],subrow['vas_mood_boost_global_vigor'][0],subrow['vas_mood_boost_global_vigor_2'][0],subrow['vas_mood_boost_global_vigor_3'][0],subrow['vas_mood_boost_global_vigor_4'][0],subrow['vas_mood_boost_global_vigor_5'][0],subrow['vas_mood_boost_global_vigor_6'][0],subrow['vas_mood_boost_global_vigor_7'][0],subrow['vas_global_vigor_3'][0]]) vasglobalaffectcortlist.append([subrow['vas_global_affect_1'][0],subrow['vas_global_affect_2'][0],subrow['vas_mood_boost_global_affect'][0],subrow['vas_mood_boost_global_affect_2'][0],subrow['vas_mood_boost_global_affect_3'][0],subrow['vas_mood_boost_global_affect_4'][0],subrow['vas_mood_boost_global_affect_5'][0],subrow['vas_mood_boost_global_affect_6'][0],subrow['vas_mood_boost_global_affect_7'][0],subrow['vas_global_affect_3'][0]]) for x in vasalertcortlist: vasalertcortmeanlist.append(statistics.mean(x)) for x in vassadcortlist: vassadcortmeanlist.append(statistics.mean(x)) for x in vastensecortlist: vastensecortmeanlist.append(statistics.mean(x)) for x in vaseffortcortlist: vaseffortcortmeanlist.append(statistics.mean(x)) for x in vashappycortlist: vashappycortmeanlist.append(statistics.mean(x)) for x in vaswearycortlist: vaswearycortmeanlist.append(statistics.mean(x)) for x in vascalmcortlist: vascalmcortmeanlist.append(statistics.mean(x)) for x in vassleepycortlist: vassleepycortmeanlist.append(statistics.mean(x)) for x in vasglobalvigorcortlist: vasglobalvigorcortmeanlist.append(statistics.mean(x)) for x in vasglobalaffectcortlist: vasglobalaffectcortmeanlist.append(statistics.mean(x)) # # Build Vectors for Markov Computational Model Parameters df = pd.read_csv('C:\\Users\\localadmin\\Python Scripts\\compmodeloutput.csv', delimiter=',', index_col=False) modela1cortlist = []; modela2cortlist = []; modelb1cortlist = []; modelb2cortlist = []; modelpicortlist = []; modelwcortlist = []; modellambdacortlist = []; modelloglikcortlist = [] for 
i, x in enumerate(cortsubslist): modela1cortlist.append(df.loc[df['Unnamed: 0'] == 'a1[%s]' % str(i+1), 'mean'].iloc[0]) modela2cortlist.append(df.loc[df['Unnamed: 0'] == 'a2[%s]' % str(i+1), 'mean'].iloc[0]) modelb1cortlist.append(df.loc[df['Unnamed: 0'] == 'beta1[%s]' % str(i+1), 'mean'].iloc[0]) modelb2cortlist.append(df.loc[df['Unnamed: 0'] == 'beta2[%s]' % str(i+1), 'mean'].iloc[0]) modelpicortlist.append(df.loc[df['Unnamed: 0'] == 'pi[%s]' % str(i+1), 'mean'].iloc[0]) modelwcortlist.append(df.loc[df['Unnamed: 0'] == 'w[%s]' % str(i+1), 'mean'].iloc[0]) modellambdacortlist.append(df.loc[df['Unnamed: 0'] == 'lambda[%s]' % str(i+1), 'mean'].iloc[0]) modelloglikcortlist.append(df.loc[df['Unnamed: 0'] == 'log_lik[%s]' % str(i+1), 'mean'].iloc[0]) modelb1basedcortlist = [i*j for i,j in zip(modelb1cortlist,modelwcortlist)] modelb2basedcortlist = [i*j for i,j in zip(modelb2cortlist,modelwcortlist)] modelb1freecortlist = [i*(1-j) for i,j in zip(modelb1cortlist,modelwcortlist)] modelb2freecortlist = [i*(1-j) for i,j in zip(modelb2cortlist,modelwcortlist)] # # Build Vectors for Markov Regression Model Parameters # + df = pd.read_csv('C:\\Users\\localadmin\\Python Scripts\\markov_dataset.csv', delimiter=',', index_col=False) modelfreebetaslist = []; modelbasedbetaslist = [] modelfreebetascortlist = []; modelbasedbetascortlist = [] for x in cortsubslist: model = smf.mixedlm('stay ~ prevmoney + common + prevmoney * common', df[pd.to_numeric(df.SubjectID) == x], groups=df[pd.to_numeric(df.SubjectID) == x]['SubjectID']).fit() modelfreebetascortlist.append(model.params[1]) modelbasedbetascortlist.append(model.params[3]) # - # # Generate Scatter Plots (No Grouping) # + #Edit These SAVE = False var1 = diftrierlistz var2 = modelb1cortlist var1label = "Log Cortisol Delta (z-score)" var2label = "Reward x Transition Interaction Effect (Model-Based)" title = "MSCEIT Total Score vs.\nReward x Transition Interaction Effect (N=%s)" % len(var1) savename = 
"Analyses\\Markov\\MSCEITTotal-ModelBased.pdf" #-------------------------------------------- table = pd.DataFrame( {var1label: var1, var2label: var2 }) for x in table.columns: table = table[np.isfinite(table[x])] lm = seaborn.lmplot(x=var1label, y=var2label, palette=('r'), data=table, legend_out=False) ax = mp.pyplot.gca() ax.set_title("%s\nr=%.4f, p=%.4f" % (title,pearsonr(table[var1label],table[var2label])[0],pearsonr(table[var1label],table[var2label])[1])) print("r = %s, p = %s" % pearsonr(table[var1label],table[var2label])) if SAVE == True: lm.savefig(root+savename, bbox_inches='tight') # - # # Generate Bar Graphs (Group Averages) # + #Edit These SAVE = False var = diftrierlistz groupvar = moodcortlist varlabel = "EQI Decision Making Composite Score" grouplabel = "Pre-Post TSST Difference in Cortisol (µg/dL)" title = "EQI Decision Making Composite Score vs.\nPre-Post TSST Difference in Cortisol (N=%s)" % len(var) savename = "Analyses\\EQI\\EQIDecisionMaking-PrePostDifTSSTCortisol.pdf" table = pd.DataFrame( {varlabel: var, grouplabel: groupvar }) table = table[np.isfinite(table[varlabel])] bp = seaborn.barplot(x=grouplabel, y=varlabel, data=table) ax = mp.pyplot.gca() ax.set_title(title) if SAVE == True: lm.savefig(root+savename, bbox_inches='tight') # - # # Generate Scatter Plots (Group or Mean Split) # + #Edit These SAVE = False var1 = diftrierlistz var2 = modelbasedbetascortlist groupvar = scorelist MEANSPLIT = True var1label = "Pre-Post TSST Difference in Cortisol (µg/dL)" var2label = "Reward x Transition Interaction Effect" highgrouplabel = "High EQI Total Score" lowgrouplabel = "Low EQI Total Score" title = "Reward x Transition Interaction Effect vs\nPre-Post TSST Difference in Cortisol" savename = "Analyses\\Markov\\ModelBased-StressReactivity-MoodSplit.pdf" #-------------------------------------------- table = pd.DataFrame( {var1label: var1, var2label: var2, 'z_raw': groupvar, 'z_group': groupvar }) grouplist = [] for i, x in enumerate(table.z_raw): if 
MEANSPLIT == True: if x > statistics.mean(groupvar): grouplist.append(highgrouplabel) else: grouplist.append(lowgrouplabel) else: grouplist.append(groupvar[i]) for x in list(unique_everseen(grouplist)): grouplist = [w.replace(x, x + ' (N=%s)' % grouplist.count(x)) for w in grouplist] table['z_group'] = grouplist seaborn.set(rc={'figure.figsize':(300,300)}) #seaborn.reset_orig() lm = seaborn.lmplot(x=var1label, y=var2label, hue = 'z_group', data=table, legend=False) ax = mp.pyplot.gca() ax.set_title(title) mp.pyplot.legend(bbox_to_anchor=(1, 1), loc=2) for x in list(unique_everseen(grouplist)): print("%s" % x + " Group: r = %s, p = %s" % (pearsonr(table[var1label][table['z_group'] == x],table[var2label][table['z_group'] == x]))) if MEANSPLIT == True: print("Mean of Grouping Variable: %.4f" % statistics.mean(groupvar)) if SAVE == True: lm.savefig(root+savename, bbox_inches='tight') # - # # Generate Repeated Measures Plots (Individual) for i,x in enumerate(cortsubslist): #Edit These VASPLOT = True CORTPLOT = False SAVE = False subcharted = cortsubslist[i] mood = moodlist[i] var = vasglobalaffectcortlist[i] varlabel = "Self Reported Global Affect" title = "Changes in Global Affect over Time" savename = "Analyses\\VAS\\GlobalAffect-Individual\\VASGlobalAffect-%04d-%sC.pdf" % (subcharted,mood) #-------------------------------------------- mp.pyplot.clf() pointlabel = "y" if VASPLOT == True: pointlabel = "VAS Assessment Point" table = pd.DataFrame( {varlabel: var, pointlabel: var }) table[pointlabel] = table.index + 1 if VASPLOT == True: table[pointlabel] = ['ES1','ES2','B1','B2','B3','B4','B5','B6','B7','ES3'] if CORTPLOT == True: table[pointlabel] = ['S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'S10', 'S11',] lm = seaborn.pointplot(x=pointlabel, y=varlabel, data=table, ci=80) if VASPLOT == True: lm.set_title("%s: Subject %04d\nMood - %s" % (title,subcharted,mood)) else: lm.set_title("%s: Subject %04d" % (title,subcharted)) if SAVE == True: 
mp.pyplot.savefig(root+savename) # # Generate Repeated Measures Plots (All + Grouped) # + #Edit These VASPLOT = True CORTPLOT = False SAVE = False var = vasglobalaffectlist varlabel = "Global Affect Score" pointlabel = "VAS Assessment Point" title = "" titlefont = {'weight':'bold','size':18,} xlabelfont = {'weight':'bold','size':18,} ylabelfont = {'weight':'bold','size':18,} figureparams = {'size':(15,10),'labelpad':25,'scale':1.5,'capsize':.1,'legendloc':(.145, -.15),} savename = "Analyses\\VAS\\VASGlobalAffect-All-NEW-SE.png" #-------------------------------------------- mp.pyplot.clf() table = pd.DataFrame( {varlabel: [item for sublist in var for item in sublist], pointlabel: var[0]*len(var) }) fig, lm = mp.pyplot.subplots(figsize=figureparams['size']) if VASPLOT == True: table['SubID'] = [item for item, count in zip(v2subslist, [len(var[0])]*len(var)) for i in range(count)] nmoodlist = moodlist for x in list(unique_everseen(nmoodlist)): nmoodlist = [w.replace(x, x + ' (N=%s)' % nmoodlist.count(x)) for w in nmoodlist] table[pointlabel] = ['ES1','ES2','B1','B2','B3','B4','B5','B6','B7','ES3']*len(var) table['Mood'] = [item for item, count in zip(nmoodlist, [len(var[0])]*len(var)) for i in range(count)] lm = seaborn.pointplot(x=pointlabel, y=varlabel, data=table, hue="Mood", palette=('b', 'r', 'g'), ci=80, scale=figureparams['scale'], capsize=figureparams['capsize']) lgd = lm.legend(bbox_to_anchor=figureparams['legendloc'], loc=2, borderaxespad=0., ncol=3, fontsize=16) if CORTPLOT == True: table['SubID'] = [item for item, count in zip(list(cortsubslist), [len(var[0])]*len(var)) for i in range(count)] table[pointlabel] = ['12:45','13:25','13:35','14:10','15:05','15:15','15:50','16:40','17:10','17:35', '18:05']*len(var) lm = seaborn.pointplot(x=pointlabel, y=varlabel, data=table, color='black', ci=80, scale=figureparams['scale'], capsize=figureparams['capsize']) for x in [1.5, 4.5, 7.5, 9.5]: mp.pyplot.plot([x, x], [.35, .15], linewidth=2.5, color='b' if x==9.5 else 
'r', linestyle='dotted') for axis in ['top','bottom','left','right']: lm.spines[axis].set_linewidth(2) lm.set_title(title, titlefont, loc='left') lm.set_ylabel(varlabel, ylabelfont, labelpad=figureparams['labelpad']) lm.set_xlabel(pointlabel, xlabelfont, labelpad=figureparams['labelpad']) lm.tick_params(labelsize=16, size=8, direction='out', width=2) if SAVE == True: fig.savefig(root+savename, bbox_extra_artists=(lgd,), bbox_inches='tight', transparent=False) if VASPLOT == True: table[pointlabel] = [1,2,3,4,5,6,7,8,9,10]*len(var) if CORTPLOT == True: table[pointlabel] = [1,2,3,4,5,6,7,8,9,10,11]*len(var) table = table.rename(index=str, columns={varlabel: "VASChange", pointlabel: "Time"}) table = table[np.isfinite(table['VASChange'])] table.to_csv("C:\\Users\\localadmin\\R Scripts\\R_repeatedm_linearmixed_dataset.csv", sep=',', index=False) if VASPLOT == True: for x in list(unique_everseen(nmoodlist)): print("\n\nMixed Linear Model Output for %s\n" % x) md = smf.mixedlm("VASChange ~ Time + Mood", data=table[table['Mood']==x], groups=table[table['Mood']==x]["SubID"]) mdf = md.fit() print(mdf.summary()) # + predictor = diftrierlist mediator = eqirtlist outcome = msceitmanagebranchlist var1label = 'x' var2label = 'y' var3label = 'z' table = pd.DataFrame( {var1label: predictor, var2label: mediator, var3label: outcome }) table.to_csv("C:\\Users\\localadmin\\R Scripts\\R_causalmediation_dataset.csv", sep=',', index=False) # + #Edit These SAVE = False var1 = diftrierlistz var2 = modelbasedbetascortlist var3 = modelfreebetascortlist var1label = "Log Cortisol Delta (z-score)" var2label = "Reward x Transition Interaction Effect (Model-Based)" var3label = "Reward Main Effect (Model-Free)" title = "Decision Making Strategies Recruited vs.\nPre-Post TSST Cortisol Deltas (N=%s)" % len(var1) savename = "Analyses\\Markov\\DecisionStrat-StressReactivityLog.pdf" #-------------------------------------------- table = pd.DataFrame( {var1label: var1, var2label: var2, var3label: var3 }) 
# Drop any row that has a non-finite value in any column, so both
# regressions are fit on the same complete-case sample.
for column in table.columns:
    table = table[np.isfinite(table[column])]

fig, ax = mp.pyplot.subplots()
ax2 = ax.twinx()

# Tint each y-axis (label + ticks) to match the series plotted on it:
# red for the model-based effect, blue for the model-free effect.
for axis_obj, tint in ((ax, 'r'), (ax2, 'b')):
    axis_obj.yaxis.label.set_color(tint)
    axis_obj.tick_params(axis='y', colors=tint)

lm = seaborn.regplot(x=var1label, y=var2label, color='r', data=table, ax=ax)
lm = seaborn.regplot(x=var1label, y=var3label, color='b', data=table, ax=ax2)

# NOTE(review): gca() returns the most recently active axes (the twin),
# which is where the title lands — preserved as-is from the original.
ax = mp.pyplot.gca()
ax.set_title(title)
#ax.set_title("%s\nr=%.4f, p=%.4f" % (title,pearsonr(table[var1label],table[var2label])[0],pearsonr(table[var1label],table[var2label])[1]))

print("r = %s, p = %s" % pearsonr(table[var1label],table[var3label]))

if SAVE:
    fig.savefig(root+savename, bbox_inches='tight')
# -
Analyses Functions WIP/Analysis Tools.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt from gensim.models import Word2Vec import numpy as np from itertools import product from tqdm import tqdm import sys sys.path.append('../src') from models import open_pickle, filter_terms_not_in_wemodel # + we_model_name = "sg_dim300_min100_win5" we_vector_size = 300 we_model_dir = '../data/external/wiki-english/wiki-english-20171001/%s' % we_model_name we_model = Word2Vec.load(we_model_dir+'/model.gensim') print ('loading done!') # + RESULTS_FILEPATH = '../data/interim/association_metric_exps.pickle' EXPERIMENT_DEFINITION_FILEPATH = '../data/interim/experiment_definitions.pickle' IMAGE_SAVE_FILEPATH = '../reports/figures/exp_results.png' NONRELATIVE_IMAGE_SAVE_FILEPATH = '../reports/figures/nonrelative_exp_results.png' exp_def_dict = open_pickle(EXPERIMENT_DEFINITION_FILEPATH) results_dict = open_pickle(RESULTS_FILEPATH) # - results_dict[2]['second'] def add_axes_obj_labels(ax, exp_num, target_label, A_label, B_label): TITLE_FONT_SIZE = 12 [target_label, A_label, B_label] = [s.upper() for s in [target_label, A_label, B_label]] ax.set_title(f'#{exp_num}: {target_label} terms: {B_label} (left) vs. 
{A_label} (right)', fontsize=TITLE_FONT_SIZE) ax.set_xlabel(f'Bias Value') ax.set_ylabel(f'Word') ax.yaxis.set_ticklabels([]) def annotate_points(ax, terms, x_array, y): POINT_FONT_SIZE = 9 for i, txt in enumerate(terms): ax.annotate(txt, (x_array[i], y[i]), fontsize=POINT_FONT_SIZE) def add_scatters_and_lines(ax, arr_first, arr_second, threshold_first, threshold_second, mean_first, mean_second, pct_5_second, pct_95_second, lower_bound, upper_bound, y): S = 20 # Marker size ZERO_LINE_COLOR = 'lime' FIRST_ORDER_COLOR = 'black' SECOND_ORDER_COLOR = 'red' SECOND_ORDER_PERCENTILES_COLOR = 'blue' SHADE_DARKNESS = 0.2 XAXIS_LIMIT = 0.6 y = [i for i in range(1,len(arr_first)+1)] #ax.scatter(arr_first, y, c=FIRST_ORDER_COLOR, s=S) ax.scatter(arr_second, y, c=SECOND_ORDER_COLOR, s=S) ax.xaxis.grid() #ax.axvline(threshold_first, c=FIRST_ORDER_COLOR, linestyle='-.',label='first-order threshold') ax.axvline(threshold_second, color=SECOND_ORDER_COLOR, linestyle='-.', label='second-order threshold') #ax.axvline(-threshold_first, c=FIRST_ORDER_COLOR, linestyle='-.') ax.axvline(-threshold_second, color=SECOND_ORDER_COLOR, linestyle='-.') #ax.axvline(mean_first, c=FIRST_ORDER_COLOR, label='first-order mean') #ax.axvline(mean_second, c=SECOND_ORDER_COLOR, label='second-order mean') ax.axvspan(lower_bound, upper_bound, alpha=SHADE_DARKNESS, color=SECOND_ORDER_PERCENTILES_COLOR) #ax.axvspan(pct_5_second, pct_95_second, alpha=SHADE_DARKNESS, color=SECOND_ORDER_PERCENTILES_COLOR) #ax.axvspan(-threshold_first, threshold_first, alpha=SHADE_DARKNESS, color='black') ax.set_xlim(-XAXIS_LIMIT, XAXIS_LIMIT) # + fig, axs = plt.subplots(10,2, figsize=(15,50)) LEGEND_SIZE = 10 exps = range(1,11) target_letters = ['X','Y'] for exp_num, target_letter in tqdm(product(exps, target_letters), total=20): col = 0 if target_letter =='X' else 1 ax = axs[exp_num-1, col] arr_first = results_dict[exp_num]['first'][f'{target_letter}_array'] arr_second = results_dict[exp_num]['second'][f'{target_letter}_array'] 
threshold_first = results_dict[exp_num]['first']['threshold'] threshold_second = results_dict[exp_num]['second']['threshold'] mean_first = results_dict[exp_num]['first'][f'{target_letter}_mean'] mean_second = results_dict[exp_num]['second'][f'{target_letter}_mean'] pct_5_second = results_dict[exp_num]['second']['pct_5'] pct_95_second = results_dict[exp_num]['second']['pct_95'] lower_bound = results_dict[exp_num]['second']['lower_bound'] upper_bound = results_dict[exp_num]['second']['upper_bound'] y = [i for i in range(1,len(arr_first)+1)] print(arr_first) terms = exp_def_dict[exp_num][f'{target_letter}_terms'] target_label = exp_def_dict[exp_num][f'{target_letter}_label'] A_label = exp_def_dict[exp_num]['A_label'] B_label = exp_def_dict[exp_num]['B_label'] add_scatters_and_lines(ax, arr_first, arr_second, threshold_first, threshold_second, mean_first, mean_second, pct_5_second, pct_95_second, lower_bound, upper_bound, y) annotate_points(ax, terms, arr_first, y) add_axes_obj_labels(ax, exp_num, target_label, A_label, B_label) axs[0,0].legend(loc=2, prop={'size': LEGEND_SIZE}) fig.tight_layout(pad=2) print('Rendering...') plt.savefig(IMAGE_SAVE_FILEPATH) plt.show() # - # 1. (Target groups) are/are not biased overall compared to randomly selected words (shown today) # 2. # + jupyter={"outputs_hidden": true} from prettytable import PrettyTable t = PrettyTable() t.field_names = ['Exp Num', 'Target Labels', 'Attribute Labels'] print(len(exp_def_dict)) for i in range(1, len(exp_def_dict)+1): print(i) t.add_row([i, exp_def_dict[i]['X_label'] + ' vs. ' + exp_def_dict[i]['Y_label'], exp_def_dict[i]['A_label'] + ' vs. 
' + exp_def_dict[i]['B_label']]) print(t) # - # # Non-relative Biases # + fig, axs = plt.subplots(10,2, figsize=(15,50)) LEGEND_SIZE = 10 exps = range(1,11) target_letters = ['X','Y'] for exp_num, target_letter in tqdm(product(exps, target_letters), total=20): col = 0 if target_letter =='X' else 1 ax = axs[exp_num-1, col] #arr_first = results_dict[exp_num]['first'][f'{target_letter}_array'] #arr_second = results_dict[exp_num]['second'][f'{target_letter}_array'] arr_first = results_dict[exp_num]['second']['A_biases'] arr_second = arr_first threshold_first = results_dict[exp_num]['first']['threshold'] threshold_second = results_dict[exp_num]['second']['threshold'] mean_first = results_dict[exp_num]['first'][f'{target_letter}_mean'] mean_second = results_dict[exp_num]['second'][f'{target_letter}_mean'] pct_5_second = results_dict[exp_num]['second']['pct_5'] pct_95_second = results_dict[exp_num]['second']['pct_95'] y = [i for i in range(1,len(arr_first)+1)] print(arr_first) terms = exp_def_dict[exp_num][f'{target_letter}_terms'] target_label = exp_def_dict[exp_num][f'{target_letter}_label'] A_label = exp_def_dict[exp_num]['A_label'] B_label = exp_def_dict[exp_num]['B_label'] add_scatters_and_lines(ax, arr_first, arr_second, threshold_first, threshold_second, mean_first, mean_second, pct_5_second, pct_95_second, y) annotate_points(ax, terms, arr_first, y) add_axes_obj_labels(ax, exp_num, target_label, A_label, B_label) axs[0,0].legend(loc=2, prop={'size': LEGEND_SIZE}) fig.tight_layout(pad=2) print('Rendering...') plt.savefig(NONRELATIVE_IMAGE_SAVE_FILEPATH) plt.show() # -
notebooks/Legacy/Viz/NonrelativeBiasesViz-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:root] *
#     language: python
#     name: conda-root-py
# ---

# +
from os import path

import astropy.coordinates as coord
from astropy.table import Table, join
import astropy.units as u
from astropy.io import fits
import numpy as np
from matplotlib.gridspec import GridSpec
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
import h5py
import emcee

from pyia import GaiaData

import gala.dynamics as gd
import gala.coordinates as gc
import gala.integrate as gi
import gala.potential as gp
from gala.units import galactic
from gala.mpl_style import center_emph, center_deemph
# -

cluster_c = coord.SkyCoord(ra=179.5*u.deg, dec=-28.8*u.deg)

# ### PW1-00

# +
from astropy.nddata import StdDevUncertainty
from specutils import Spectrum1D

from spectres import spectres


def get_spec(filename, rebin=None):
    """Load a 1D spectrum from a FITS file, optionally rebinning it.

    Parameters
    ----------
    filename : str
        Path to the FITS file. Flux is read from row 0 of the primary
        HDU data array and its uncertainty from row 1.
    rebin : int, optional
        If given, resample onto every ``rebin``-th wavelength pixel
        (with 10 pixels trimmed from each edge) using ``spectres``.

    Returns
    -------
    specutils.Spectrum1D
        Spectrum with a wavelength axis in angstroms and a standard-
        deviation uncertainty.
    """
    # Open inside a context manager so the FITS file handle is closed
    # even if parsing fails (the original left the file open).
    with fits.open(filename) as hdul:
        hdu = hdul[0]
        hdr = hdu.header
        data = hdu.data

        # flux = data[2]
        # err = data[3]
        flux = data[0]
        err = data[1]

        # Wavelength grid reconstructed from log-linear WCS keywords.
        wvln = 10 ** (hdr['CRVAL1'] + hdr['CDELT1']*np.arange(hdr['NAXIS1']))

    kw = dict()
    if rebin:
        # Resample flux and uncertainty onto the coarser grid.
        flux, err = spectres(wvln[10:-10:rebin], wvln, flux, err)
        wvln = wvln[10:-10:rebin]
    kw['spectral_axis'] = wvln * u.angstrom

    return Spectrum1D(flux=flux * u.one,
                      uncertainty=StdDevUncertainty(err*u.one),
                      **kw)
# -

spec = get_spec('../data/pw1-00_comb_multi_stitchbr.fits', rebin=4)

# +
# wvln_lim = (4000, 8000)
pad = 20.  # angstroms
vshift = 5.
wvln_lims = [6563 + np.array([-pad, pad]) + vshift, 4862.7 + np.array([-pad, pad]) + vshift, 4340 + np.array([-pad, pad]) + vshift, 4102 + np.array([-pad, pad]) + vshift] fig, axes = plt.subplots(len(wvln_lims), 1, figsize=(12, 3 * len(wvln_lims))) chars = [r'H$\alpha$', r'H$\beta$', r'H$\gamma$', r'H$\delta$'] for i, wvln_lim in enumerate(wvln_lims): ax = axes[i] mask = (spec.wavelength > wvln_lim[0]*u.angstrom) & (spec.wavelength < wvln_lim[1]*u.angstrom) x = spec.wavelength[mask] y = spec.flux[mask] ax.plot(x, y, marker='', drawstyle='steps-mid', lw=2, color='k') ax.set_xlim(wvln_lim) ylim = ax.get_ylim() ax.text(wvln_lim[0] + 1., ylim[1] - 0.12*(ylim[1]-ylim[0]), chars[i], va='top', fontsize=22) # for l in balmer: # ax.axvline(l) ax.set_xlabel(r'wavelength [$\AA$]') fig.tight_layout() # - # ### CMD / sky / proper motion: # + pt_thresh = 8 lite_ptstyle = dict(marker='.', ls='none', alpha=0.2, color='k') dark_ptstyle = dict(marker='.', ls='none', ms=6, alpha=0.6, color='k') # + all_g = GaiaData('../data/data-joined.fits') all_c = all_g.get_skycoord(distance=False) # near_cluster = (all_c.separation(cluster_c) < 3*u.deg) & (all_g.G0 < 20) near_cluster = (all_g.G0 < 20) g = all_g[near_cluster] c = all_c[near_cluster] mag_c = c.transform_to(gc.MagellanicStream) # - def make_panel(ax, col, mag, col_lim, mag_lim, col_label, mag_label, poly_mask, flipy=True): col_binsize = 0.04 mag_binsize = 0.10 # norm = mpl.colors.LogNorm(vmin=pt_thresh+1, vmax=5e1) norm = mpl.colors.LogNorm(vmin=1e-1, vmax=1e4) H, xe, ye = np.histogram2d(col, mag, bins=(np.arange(col_lim[0], col_lim[1]+1e-3, col_binsize), np.arange(mag_lim[0], mag_lim[1]+1e-3, mag_binsize))) # H[H < pt_thresh] = np.nan m = ax.pcolormesh(xe, ye, H.T, norm=norm, label='', cmap='Greys', zorder=-1000, rasterized=True, linewidth=0) if poly_mask is not None: ax.plot(col[poly_mask], mag[poly_mask], zorder=100, label='', **dark_ptstyle) ax.set_xlim(col_lim) if flipy: ax.set_ylim(mag_lim[::-1]) else: ax.set_ylim(mag_lim) 
ax.set_xlabel(col_label) ax.set_ylabel(mag_label) cluster_pm = [-0.56, 0.47] * u.mas/u.yr # + nodes = np.array([[-0.25, 15.8], [-0.1, 18.], [0.2, 19.8], [0.0, 20.4], [-0.3, 19.], [-0.5, 16.1], [-0.25, 15.8]]) polygon = mpl.path.Path(nodes) mask = polygon.contains_points(np.vstack((g.BP0-g.RP0, g.G0)).T) fig, axes = plt.subplots(1, 3, figsize=(11, 3.5), constrained_layout=True) make_panel(axes[0], col=g.BP0-g.RP0, mag=g.G0, col_lim=(-1, 1), mag_lim=(12, 20.5), col_label=r'$({\rm BP}-{\rm RP})_0$', mag_label=r'$G_0$', poly_mask=mask) axes[0].plot(nodes[:, 0], nodes[:, 1], marker='', color='tab:blue', lw=2, alpha=0.4) axes[0].set_ylim(20, 12) style = dict(marker='', linewidth=3, ls='-', alpha=0.5, color='tab:red', zorder=-100) # ------ # Sky plot ax = axes[1] ax.set_xlim(cluster_c.ra.degree + 2.5, cluster_c.ra.degree - 2.5) ax.set_ylim(cluster_c.dec.degree - 2.5, cluster_c.dec.degree + 2.5) H, xe, ye = np.histogram2d(all_g.ra, all_g.dec, bins=(np.linspace(*ax.get_xlim()[::-1], num=64), np.linspace(*ax.get_ylim(), num=64))) ax.pcolormesh(xe, ye, H.T, cmap='Greys', vmax=2 * H.max()) # pm_mask = np.sqrt((g.pmra.value - -0.5)**2 + (g.pmdec.value - 0.5)**2) < 1.5 pm_mask = (g.pm_prob > 0.2) ax.plot(g.ra[mask & pm_mask], g.dec[mask & pm_mask], **dark_ptstyle) ax.set_xlabel('RA [deg]') ax.set_ylabel('Dec [deg]') dx, dy = cluster_pm * 2 ax.arrow(cluster_c.ra.degree, cluster_c.dec.degree, dx.value, dy.value, head_width=0.15, head_length=0.2, linewidth=2, color='tab:green', zorder=1000) ax.text(178.5, -27.8, r'$\bar{\mu}$', ha='right', fontsize=22, color='tab:green') # ------ # PM plot ax = axes[2] ax.set_xlim(-10, 10) ax.set_ylim(-10, 10) H, xe, ye = np.histogram2d(all_g.pmra, all_g.pmdec, bins=(np.linspace(*ax.get_xlim(), num=64), np.linspace(*ax.get_ylim(), num=64))) ax.pcolormesh(xe, ye, H.T, cmap='Greys', vmax=2 * H.max()) ax.plot(g.pmra[mask], g.pmdec[mask], **dark_ptstyle) ax.set_xlabel(r'$\mu_\alpha$ [{:latex_inline}]'.format(u.mas/u.yr)) ax.set_ylabel(r'$\mu_\delta$ 
[{:latex_inline}]'.format(u.mas/u.yr)) axes[0].xaxis.set_ticks(np.arange(-1, 1+1e-3, 0.5)) axes[1].xaxis.set_ticks(np.arange(177, 182+1e-3, 1)) axes[2].xaxis.set_ticks(np.arange(-10, 10+1e-3, 5)) fig.savefig('../plots/talks/threepanel_5.png', dpi=250) # - # ### Three-panel with MIKE targets from astropy.table import hstack mike = Table.read('../data/mike_final.fits')[6:] mike_c = coord.SkyCoord(mike['RA'], mike['DEC'], unit=u.deg) idx, sep, _ = coord.match_coordinates_sky(mike_c, all_c) joined = hstack((mike[sep < 12*u.arcsec], all_g.data[idx[sep < 12*u.arcsec]])) len(joined) # + fig, axes = plt.subplots(1, 2, figsize=(7.5, 3.5), constrained_layout=True) make_panel(axes[0], col=g.BP0-g.RP0, mag=g.G0, col_lim=(-1, 1), mag_lim=(12, 20.5), col_label=r'$({\rm BP}-{\rm RP})_0$', mag_label=r'$G_0$', poly_mask=g.pm_prob > 0.2) style = dict(marker='', linewidth=3, ls='-', alpha=0.5, color='tab:red', zorder=-100) # ------ # Sky plot ax = axes[1] ax.set_xlim(cluster_c.ra.degree + 2.5, cluster_c.ra.degree - 2.5) ax.set_ylim(cluster_c.dec.degree - 2.5, cluster_c.dec.degree + 2.5) H, xe, ye = np.histogram2d(all_g.ra, all_g.dec, bins=(np.linspace(*ax.get_xlim()[::-1], num=64), np.linspace(*ax.get_ylim(), num=64))) ax.pcolormesh(xe, ye, H.T, cmap='Greys', vmax=2 * H.max()) ax.plot(all_g.ra[all_g.pm_prob > 0.2], all_g.dec[all_g.pm_prob > 0.2], **dark_ptstyle) ax.set_xlabel('RA [deg]') ax.set_ylabel('Dec [deg]') axes[0].xaxis.set_ticks(np.arange(-1, 1+1e-3, 0.5)) axes[1].xaxis.set_ticks(np.arange(178, 181+1e-3, 1)) axes[1].scatter(joined['ra'], joined['dec'], marker='o', facecolor='none', s=60, color='tab:red') axes[0].scatter(joined['BP0']-joined['RP0'], joined['G0'], marker='o', facecolor='none', s=60, color='tab:red') axes[0].set_xlim(-0.75, 0.35) axes[0].set_ylim(20, 12) axes[1].set_xlim(181, 177.5) axes[1].set_ylim(-30.5, -27) fig.savefig('../plots/talks/twopanel_mike_2.png', dpi=250) # - (1*u.deg * 29*u.kpc).to(u.pc, u.dimensionless_angles()) # ### Transparent thing derp = 
Table() derp['ra'] = g.ra[mask & pm_mask].value derp['dec'] = g.dec[mask & pm_mask].value derp.write('../plots/talks/pw1.fits') fig, ax = plt.subplots(1, 1, figsize=(6, 6)) ax.plot(g.ra[mask & pm_mask], g.dec[mask & pm_mask], marker='o', ls='none', ms=5, alpha=0.75, mew=0, color='tab:blue') ax.set_xlim(182, 177) ax.set_ylim(-30, -27.5) ax.set_aspect('equal') ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) for _, x in ax.spines.items(): x.set_visible(False) fig.savefig('../plots/talks/title.png', dpi=250) # ### Tidal radius Menc = gp.MilkyWayPotential().mass_enclosed([23, 0, 15.]*u.kpc)[0] m = 1200 * u.Msun (m / Menc)**(1/3) * np.linalg.norm([23, 0, 15.]) * 1000 # ### MW disk in MC coordinates disk_c = coord.SkyCoord(l=np.random.uniform(0, 360, 1000000)*u.deg, b=np.random.uniform(-10, 10, 1000000)*u.deg, frame='galactic').transform_to(gc.MagellanicStreamNidever08) plt.plot(disk_c.L.degree, disk_c.B.degree, marker='o', ls='none', color='tab:orange') plt.xlim(80, -80) plt.ylim(-50, 50) plt.gca().xaxis.set_visible(False) plt.gca().yaxis.set_visible(False) for _,x in plt.gca().spines.items(): x.set_visible(False) # --- # # ### RV / Fe/H distribution: # + mike_data = Table.read('../data/mike_final.fits') mike_data['pw1_id'] = [int(x.strip()[-2:]) if 'pw' in x else 9999 for x in mike_data['NAME']] data = Table.read('../output/pw1-region-data-joined.fits') data['pw1_id'] = [int(x.strip()[-2:]) if 'PW' in x else 9999 for x in data['pw1_name']] # - all_tbl = join(data, mike_data, keys='pw1_id') mike_tbl = all_tbl[all_tbl['pw1_name'] != ''] tbl = mike_tbl[(mike_tbl['SNR_RES'] > 10) & (mike_tbl['FEH'] < 0.)] len(tbl) # + def ln_normal(x, mu, std): return -0.5 * (x-mu)**2 / std**2 - 0.5*np.log(2*np.pi) - np.log(std) def robust_ln_prob(p, tbl): lnf, mu, lns = p s = np.exp(lns) f = np.exp(lnf) lp = lnf if not -10 < lnf < 0: return -np.inf lp += ln_normal(mu, -1, 4) l1 = ln_normal(tbl['FEH'], mu, s) + np.log(f) l2 = ln_normal(tbl['FEH'], 0, 5) + np.log(1-f) ll = 
np.logaddexp(l1, l2).sum() return ll + lp # - np.sum(tbl['FEH']/tbl['FEHERR']**2) / np.sum(1/tbl['FEHERR']**2) np.sum(tbl['VHELIO']/tbl['VERR']**2) / np.sum(1/tbl['VERR']**2) # + plt.figure(figsize=(6, 6)) plt.errorbar(tbl['VHELIO'], tbl['FEH'], xerr=tbl['VERR'], yerr=tbl['FEHERR'], ls='none', marker='o', ecolor='#888888', alpha=0.75, color='k') plt.xlim(225, 275) plt.ylim(-2, 0) ax = plt.gca() ax.yaxis.set_ticks(np.arange(-2, 0+1e-3, 0.5)) ax.xaxis.set_ticks(np.arange(230, 270+1e-3, 10)) ax.text(230, -0.15, (r'$[{\rm Fe}/{\rm H}] \approx -1.16$' + '\n' + r'$v_{\rm helio} \approx 250$ ' + '[{:latex_inline}]'.format(u.km/u.s)), ha='left', va='top', fontsize=22) ax.set_ylabel(r'[Fe/H]') ax.set_xlabel(r'$v_{\rm helio}$ ' + '[{:latex_inline}]'.format(u.km/u.s)) ax.set_title('all S/N > 10') fig.tight_layout() # - coord.SkyCoord(l=287.4560, b=22.9476, unit=u.deg, frame='galactic').transform_to(gc.MagellanicStream)
notebooks/Talk-figures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# %matplotlib inline

import pandas

# Load the house-price spreadsheet directly from S3.
df = pandas.read_excel('s3://nassiri-ia241/house_price.xls')
df

# Price per unit of floor area.
df['unit_price'] = df['price']/df['area']
df

df['year'] = pandas.to_datetime(df['built_in'], format='%Y')  # convert built_in to year in date
df

# +
# Select the column *before* aggregating: `groupby(...).mean()['col']`
# averages every column first (a TypeError on non-numeric columns in
# pandas >= 2.0) and wastes work; `groupby(...)['col'].mean()` is the
# supported, efficient form.
avg_price_per_year = df.groupby('year')['price'].mean()
avg_price_per_year
# -

avg_price_per_year.plot()

# +
avg_unit_price_per_year = df.groupby('year')['unit_price'].mean()
avg_unit_price_per_year.plot()
# -

# Highest avg unit price in 1930

df['price'].hist()

df['unit_price'].hist()

avg_price_per_type = df.groupby('house_type')['price'].mean()
avg_price_per_type.plot.bar()

avg_unit_price_per_type = df.groupby('house_type')['unit_price'].mean()
avg_unit_price_per_type.plot.bar()

house_type_count = df.groupby('house_type')['year'].count()
house_type_count.plot.pie()

df.plot.scatter(x='area', y='price')
lec10-lab9.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Milestone Report # ### For Capstone Project 1 - Credit Card Client Default # In this Milestone Report, we will do the following: # # 1. Define the problem; # 2. Identify our client; # 3. Describe our data set, and how we cleaned/wrangled it; # 4. List other potential data sets we could use; and # 5. Explain our initial findings. # ### 1. Define the problem # # The goal of this capstone project is to build a machine learning model to predict the probability of default of credit card clients. Provided with a robust and reliable model to predict the probability of client default, credit card companies would be better equipped manage the risk of their portfolios. # ### 2. Identify your client # # Our hypothetical client could be any credit card company. # # Credit card companies could use this model for the following: # 1. To create more reliable stochastic cash flow forecasts for the company; and # 2. To make better-informed decisions about allowing clients to increase their credit limits. # ### 3. Describe our data set, and how we cleaned/wrangled it; # #### Our Data Source # # The data set that we use for this project is from a sample of Taiwanese credit card clients. This anonymized data set was used by Dr. <NAME> in his 2009 paper "The comparisons of data mining techniques for the predictive accuracy of probability of default of credit card clients." [1] The dataset has been made available to the public and posted on the UC Irvine Machine Learning Repository website (See: http://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients). # # The dataset contains 30,000 observations and 24 attributes. # # The response variable is a binary variable for default payment in the following month. 
# # There are 23 explanatory variables included in the dataset. These include: # # ● History of past payment; # ● Amount of bill statement; # ● Amount of previous payment; # ● Marital status; # ● Education; # ● Age; and # ● Gender. # # The dataset posted on the UC Irvine Machine Learning Repository website is in .xls format. # #### How we cleaned/wrangled the data # 1. We corrected some erroneous values for the `EDUCATION` feature. # 2. For each of the six `bill amount` features (one for each of the six prior months), we engineered a new feature: the ratio of $\left(\frac{\text{bill amount}}{\text{credit limit}}\right)$. # 3. For each of the six pairs of `bill amount` and `pay amount` (one pair for each of the six prior months), we engineered a new feature: the ratio of $\left(\frac{\text{bill amount} - \text{pay amount}}{\text{credit limit}}\right)$. # 4. We renamed columns such that all column names: (1) contained no capital letters, (2) contained no spaces, and (3) were shorter. # 5. We transformed certain features by taking the natural logarithm. # 6. We transformed all of the categorical variables using one-hot-encoding. # # ### 4. List other potential data sets we could use # # We did not deem it appropriate or necessary to use any additional sources of data. # ### 5. Explain our initial findings # We used logistic regression to test whether or not certain features were statistically significant predictors of default. We used a significance level of $\alpha = 0.05$. We tested the following features: # 1. age; # 2. credit limit; # 3. the ratio of $\left(\frac{\text{bill amount}}{\text{credit limit}}\right)$; and # 4. the ratio of $\left(\frac{\text{bill amount} - \text{pay amount}}{\text{credit limit}}\right)$ # # We found that each of these features were statistically significant predictors of default. # #### Citations: # # [1] <NAME>., & <NAME>. (2009). 
The comparisons of data mining techniques for the predictive accuracy of probability of default of credit card clients. Expert Systems with Applications, 36(2), 2473-2480.
reports/Milestone-Report.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Arduino Joystick Shield Example # # This example shows how to use the [Sparkfun Joystick](https://www.sparkfun.com/products/9760) # on the board. The Joystick shield contains an analog joystick which is # connected to A0 and A1 analog channels of the Arduino connector. It also # contains four push buttons connected at D3-D6 pins of the Arduino connector. # # For this notebook, an Arduino joystick shield is required. # + from pynq.overlays.base import BaseOverlay base = BaseOverlay("base.bit") # - # ### 1. Use Microblaze to control the joystick # Make sure the joystick shield is plugged in. For the Microblaze to transfer # direction or button values back, we need to define a few additional constants. # + DIRECTION_VALUE_MAP = { 0: 'up', 1: 'up_right', 2: 'right', 3: 'down_right', 4: 'down', 5: 'down_left', 6: 'left', 7: 'up_left', 8: 'center' } BUTTON_INDEX_MAP = { 'D3': 0, 'D4': 1, 'D5': 2, 'D6': 3 } # - # The joystick can measure horizontal direction `x` # and vertical direction `y`. 
# # The thresholds for raw values are: # # Horizontal: # # | Threshold | Direction | # | ------------------ |:------------:| # | x < 25000 | left | # | 25000 < x < 39000 | center | # | x > 39000 | right | # # Vertical: # # | Threshold | Direction | # | ------------------ |:------------:| # | y < 25000 | down | # | 25000 < y < 39000 | center | # | y > 39000 | up | # + # %%microblaze base.ARDUINO #include "xparameters.h" #include "circular_buffer.h" #include "gpio.h" #include "xsysmon.h" #include <pyprintf.h> #define X_THRESHOLD_LOW 25000 #define X_THRESHOLD_HIGH 39000 #define Y_THRESHOLD_LOW 25000 #define Y_THRESHOLD_HIGH 39000 typedef enum directions { up = 0, right_up, right, right_down, down, left_down, left, left_up, centered }direction_e; static gpio gpio_buttons[4]; static XSysMon SysMonInst; XSysMon_Config *SysMonConfigPtr; XSysMon *SysMonInstPtr = &SysMonInst; int init_joystick(){ unsigned int i, status; SysMonConfigPtr = XSysMon_LookupConfig(XPAR_SYSMON_0_DEVICE_ID); if(SysMonConfigPtr == NULL) return -1; status = XSysMon_CfgInitialize( SysMonInstPtr, SysMonConfigPtr, SysMonConfigPtr->BaseAddress); if(XST_SUCCESS != status) return -1; for (i=0; i<4; i++){ gpio_buttons[i] = gpio_open(i+3); gpio_set_direction(gpio_buttons[i], GPIO_IN); } return 0; } unsigned int get_direction_value(){ direction_e direction; unsigned int x_position, y_position; while ((XSysMon_GetStatus(SysMonInstPtr) & XSM_SR_EOS_MASK) != XSM_SR_EOS_MASK); x_position = XSysMon_GetAdcData(SysMonInstPtr, XSM_CH_AUX_MIN+1); y_position = XSysMon_GetAdcData(SysMonInstPtr, XSM_CH_AUX_MIN+9); if (x_position > X_THRESHOLD_HIGH) { if (y_position > Y_THRESHOLD_HIGH) { direction = right_up; } else if (y_position < Y_THRESHOLD_LOW) { direction = right_down; } else { direction = right; } } else if (x_position < X_THRESHOLD_LOW) { if (y_position > Y_THRESHOLD_HIGH) { direction = left_up; } else if (y_position < Y_THRESHOLD_LOW) { direction = left_down; } else { direction = left; } } else { if (y_position > 
Y_THRESHOLD_HIGH) { direction = up; } else if (y_position < Y_THRESHOLD_LOW) { direction = down; } else { direction = centered; } } return direction; } unsigned int get_button_value(unsigned int btn_i){ unsigned int value; value = gpio_read(gpio_buttons[btn_i]); return value; } # - # ### 2. Define Python wrapper for Microblaze functions # We will also need to initialize the joystick before we can read any value. # The following function returns `0` if the initialization is successful. init_joystick() # The following Python wrappers will call the Microblaze functions internally. # + def read_direction(): direction_value = get_direction_value() return DIRECTION_VALUE_MAP[direction_value] def read_button(button): return get_button_value(BUTTON_INDEX_MAP[button]) # - # ### 3. Find direction # We can measure the direction by calling `read_direction()`. # # For the next cell, leave the joystick in its natural position. read_direction() # Let's pull the joystick towards the bottom right corner. read_direction() # ### 4. Read button values # # Based on the [schematic](https://cdn.sparkfun.com/datasheets/Dev/Arduino/Shields/Joystick_Shield-v14.pdf) # of the shield, we can see the read value will go low if the corresponding # button has been pressed. # # Run the next cell while pushing both button `D4` and `D6`. for button in BUTTON_INDEX_MAP: if read_button(button): print('Button {} is not pressed.'.format(button)) else: print('Button {} is pressed.'.format(button))
boards/Pynq-Z2/base/notebooks/arduino/arduino_joystick.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### 다항분포 나이브베이즈 모형 # 연습문제 3 # + from sklearn.datasets import load_digits digits = load_digits() X = digits.data y = digits.target from sklearn.naive_bayes import MultinomialNB model_mult = MultinomialNB().fit(X, y) from sklearn.metrics import classification_report print(classification_report(y, model_mult.predict(X))) # - # ### 감성분석 # 네이버 영화 평은 utf-8로 제작 되어있음. 그래서 codecs로 부름 import codecs with codecs.open("ratings_train.txt", encoding='utf-8') as f: data = [line.split('\t') for line in f.read().splitlines()] data = data[1:] #헤더 제외 # pprint는 pretty print. 어떤 포맷에 맞추어 이쁘게 프린트. # 맨 마지막은 점수. 안좋으면 0, 좋으면 1 from pprint import pprint pprint(data[0]) X = list(zip(*data))[1] y = np.array(list(zip(*data))[2], dtype=int) # + from sklearn.feature_extraction.text import CountVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline from sklearn.metrics import classification_report model1 = Pipeline([ ('vect', CountVectorizer()), ('mb', MultinomialNB()), ]) # - model1.fit(X, y) import codecs with codecs.open("ratings_test.txt", encoding='utf-8') as f: data_test = [line.split('\t') for line in f.read().splitlines()] data_test = data_test[1:] # + X_test = list(zip(*data_test))[1] y_test = np.array(list(zip(*data_test))[2], dtype=int) print(classification_report(y_test, model1.predict(X_test))) # + from sklearn.feature_extraction.text import TfidfVectorizer model2 = Pipeline([ ('vect', TfidfVectorizer()), ('mb', MultinomialNB()), ]) # - model2.fit(X, y) # + from konlpy.tag import Okt pos_tagger = Okt() def tokenize_pos(doc): return ['/'.join(t) for t in pos_tagger.pos(doc)] # - model3 = Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize_pos)), ('mb', MultinomialNB()), ]) model3.fit(X, y) print(classification_report(y_test, 
model3.predict(X_test))) model4 = Pipeline([ ('vect', TfidfVectorizer(tokenizer=tokenize_pos, ngram_range=(1, 2))), ('mb', MultinomialNB()), ]) # %%time model4.fit(X, y) print(classification_report(y_test, model4.predict(X_test))) # 연습문제 1 model1.predict(["캡이다"]) model1.predict(["감동이다"]) model1.predict(["돈 아깝다"]) model1.predict(["돈이 안 아깝다"]) model1.predict(["짜증난다"]) model1.predict(["환상이다"]) model1.predict(["오진다"]) model1.predict(["지린다"]) model1.predict(["굿"]) model1.predict(["썩"]) # ### 의사결정나무 # + from sklearn.datasets import load_iris data = load_iris() y = data.target X = data.data[:, 0:2] feature_names = data.feature_names[0:2] from sklearn.tree import DecisionTreeClassifier tree1 = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0).fit(X, y) # + import io from IPython.core.display import Image from sklearn.tree import export_graphviz def plot_decision_regions(X, y, model, title): resolution = 0.01 markers = ('s', '^', 'o') colors = ('red', 'blue', 'lightgreen') cmap = mpl.colors.ListedColormap(colors) x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution)) Z = model.predict( np.array([xx1.ravel(), xx2.ravel()]).T).reshape(xx1.shape) plt.contour(xx1, xx2, Z, cmap=mpl.colors.ListedColormap(['k'])) plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) for idx, cl in enumerate(np.unique(y)): plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=[cmap(idx)], marker=markers[idx], s=80, label=cl) plt.xlabel(data.feature_names[2]) plt.ylabel(data.feature_names[3]) plt.legend(loc='upper left') plt.title(title) return Z # - plot_decision_regions(X, y, tree1, "Depth 3") plt.show() # + from sklearn.model_selection import KFold, cross_val_score cv = KFold(5, shuffle=True, random_state=0) model = DecisionTreeClassifier(criterion='entropy', 
max_depth=1, random_state=0) cross_val_score(model, X, y, scoring="accuracy", cv=cv).mean() # + from sklearn.model_selection import KFold, cross_val_score ls = [] for depth in range(1, 10): cv = KFold(5, shuffle=True, random_state=0) model = DecisionTreeClassifier(criterion='entropy', max_depth=depth, random_state=0) ls.append(cross_val_score(model, X, y, scoring="accuracy", cv=cv).mean()) plt.figure(figsize = (12, 4)) plt.title("cross validation curve") plt.plot(np.arange(1, 10), ls) # x를 1부터 10까지로 설정 plt.ylabel("accuracy") plt.xlabel("depth") plt.show() # -
statistics/200527-stat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np df_powerplant = pd.read_csv("powerplants2.csv") #.column("Population") df_powerplant.head(3) # + df_powerplant = df_powerplant.fillna(value=0) df_powerplant.head(3) # - df_powerplant.columns # ### 1. Clean Energy # Making a dataframe of clean energy: # biopower, hydro power, hydro power(PS), solar power, wind power, geo power # df_CE_powerplant = df_powerplant.drop(['Total_MW','Coal_MW','NG_MW','Crude_MW','Nuclear_MW','Other_MW'], axis=1) df_CE_powerplant = df_CE_powerplant.drop(['Plant_Code','Utility_ID','sector_nam','PrimSource','source_des','tech_desc','Source','Period','Latitude','Longitude'], axis=1) df_CE_powerplant.head(5) df_CE_powerplant['total clean energy MW'] = df_CE_powerplant['Bio_MW']+df_CE_powerplant['Hydro_MW']+df_CE_powerplant['HydroPS_MW']+df_CE_powerplant['Solar_MW']+df_CE_powerplant['Wind_MW']+df_CE_powerplant['Geo_MW'] df_CE_powerplant.head(5) # df_CE_powerplant = df_CE_powerplant[df_CE_powerplant['total clean energy MW'] != 0] # df_CE_powerplant.head(5) df_CE_powerplant.tail(5) # + def PrimCESource(plantName): powerplant = df_CE_powerplant[df_CE_powerplant['Plant_Name'] == plantName] powerplant = powerplant.reset_index(drop=True) Bio = powerplant['Bio_MW'][0] Hydro = powerplant['Hydro_MW'][0] HydroPS = powerplant['HydroPS_MW'][0] Solar = powerplant['Solar_MW'][0] Wind = powerplant['Wind_MW'][0] Geo = powerplant['Geo_MW'][0] dictionary = {} number_and_power_source = [ [Bio, 'Biopower'], [Hydro, 'Hydroelectric power'], [HydroPS, 'Pumped-storage Hydroelectric power'], [Solar, 'Solar power'], [Wind, 'Wind power'], [Geo, 'Geothermal power'], ] for number, power_source in number_and_power_source: if number in dictionary.keys(): dictionary[number] += [power_source] else: dictionary[number] = 
[power_source] dictionary[0] = ['No Clean Energy'] max_key = max(dictionary.keys()) return (" & ".join(dictionary[max_key])) print(PrimCESource('Bankhead Dam')) print(PrimCESource('Barry')) print(PrimCESource('Hartz Way')) # - [ PrimCESource(plantName) for plantName in ['Bankhead Dam', 'Barry'] ] df_CE_powerplant['Plant_Name'].apply(PrimCESource) df_CE_powerplant['Prime Source'] = df_CE_powerplant['Plant_Name'].apply(PrimCESource) df_CE_powerplant.head(5) # ### 2. Prime Source Energy # Energy produce from the powerplant from its prime power source # df_primepower = df_powerplant.drop(['Plant_Code','Utility_ID','sector_nam','source_des','tech_desc','Source','Period','Latitude','Longitude'], axis=1) df_primepower = df_primepower.drop(['Utility_Na','Zip','Street_Add'], axis=1) df_primepower.head(5) df_PrimSource = df_primepower.groupby(['PrimSource']).count() df_PrimSource # + def PrimEnergy(plantName): powerplant = df_primepower[df_primepower['Plant_Name'] == plantName] powerplant = powerplant.reset_index(drop=True) Coal = powerplant["Coal_MW"][0] # coal NG = powerplant["NG_MW"][0] # natural gas Crude = powerplant["Crude_MW"][0] # petroleum Bio = powerplant['Bio_MW'][0] # biomass Hydro = powerplant['Hydro_MW'][0] # hydroelectric HydroPS = powerplant['HydroPS_MW'][0] # pumped storage Nuclear = powerplant["Nuclear_MW"][0] # nuclear Solar = powerplant['Solar_MW'][0] # solar Wind = powerplant['Wind_MW'][0] # wind Geo = powerplant['Geo_MW'][0] # geothermal Other = powerplant["Other_MW"][0] # other power_source = [ [Coal, 'coal'], [NG,'natural gas'], [Crude, 'petroleum'], [Bio,'biomass'], [Hydro,'hydroelectric'], [HydroPS,'pumped storage'], [Nuclear,'nuclear'], [Solar,'solar'], [Wind,'wind'], [Geo,'geothermal'], [Other,'other'], ] PSourceE = 0 for energy, PowerSource in power_source: if PowerSource == powerplant["PrimSource"][0]: PSourceE = PSourceE + energy else: PSourceE = PSourceE + 0 return PSourceE print(PrimEnergy('Bankhead Dam')) print(PrimEnergy('Barry')) 
print(PrimEnergy('Hartz Way')) # - [ PrimEnergy(plantName) for plantName in ['Bankhead Dam', 'Barry'] ] df_primepower['Prime Source Energy MW'] = df_primepower['Plant_Name'].apply(PrimEnergy) df_primepower.head(5) # ### 3. Finding the counties with top 10 energy produce # df_countypower = df_powerplant.drop(['Plant_Code','Utility_ID','sector_nam','source_des','tech_desc','Source','Period','Latitude','Longitude'], axis=1) df_countypower = df_countypower.drop(['Utility_Na','Zip','Street_Add'], axis=1) df_countypower.head(5) df_countypower_group = df_countypower.groupby(['County']) # df_countypower_group countypower_total_energy = df_countypower_group['Total_MW'].sum() countypower_total_energy.head(10) # type(df_countypower_total_energy) # Series df_countypower_total = countypower_total_energy.to_frame() df_countypower_total.head(10) df_countypower_total_1 = df_countypower_total.drop([0]) df_countypower_sort = df_countypower_total_1.sort_values(by=['Total_MW'], ascending=False) df_countypower_sort.head(10) # ### 4. Top 10 Utility Energy Produce # df_Utilities = df_powerplant.drop(['Plant_Code','Utility_ID','sector_nam','source_des','tech_desc','Source','Period','Latitude','Longitude'], axis=1) df_Utilities = df_Utilities.drop(['Zip','Street_Add','City'], axis=1) df_Utilities.head(5) df_Utilities_group = df_Utilities.groupby(['Utility_Na']) Utilitypower_total_energy = df_Utilities_group['Total_MW'].sum() Utilitypower_total_energy.head(10) df_Utilitypower_total_energy = Utilitypower_total_energy.to_frame() df_Utilitypower_total_energy.head(10) df_Utilitypower_total_energy_1 = df_Utilitypower_total_energy.drop([0]) df_Utilitypower_sort = df_Utilitypower_total_energy_1.sort_values(by=['Total_MW'], ascending=False) df_Utilitypower_sort.head(10) # ### 5. 
The Prime Power Source of Utilities # df_Utilities_sum = df_Utilities.groupby(['Utility_Na']).sum() df_Utilities_sum = df_Utilities_sum.drop([0]) df_Utilities_sum.head(10) # df_Utilities_sum.shape # 3696, 12 df_Utilities_sum_index = df_Utilities_sum.reset_index() df_Utilities_sum_index # + def PrimSource(UtilityName): Utility = df_Utilities_sum_index[df_Utilities_sum_index['Utility_Na'] == UtilityName] Utility = Utility.reset_index(drop=True) Coal = Utility["Coal_MW"][0] NG = Utility["NG_MW"][0] Crude = Utility["Crude_MW"][0] Bio = Utility['Bio_MW'][0] Hydro = Utility['Hydro_MW'][0] HydroPS = Utility['HydroPS_MW'][0] Nuclear = Utility["Nuclear_MW"][0] Solar = Utility['Solar_MW'][0] Wind = Utility['Wind_MW'][0] Geo = Utility['Geo_MW'][0] Other = Utility["Other_MW"][0] # return Utility dictionary = {} number_and_power_source = [ [Coal, 'Coal energy'], [NG, 'Natural gas'], [Crude, 'Petroleum energy'], [Bio, 'Biopower'], [Hydro, 'Hydroelectric power'], [HydroPS, 'Pumped-storage Hydroelectric power'], [Nuclear, 'Nuclear power'], [Solar, 'Solar power'], [Wind, 'Wind power'], [Geo, 'Geothermal power'], [Other, 'Other'] ] for number, power_source in number_and_power_source: if number in dictionary.keys(): dictionary[number] += [power_source] else: dictionary[number] = [power_source] dictionary[0] = ['No Energy Produce'] max_key = max(dictionary.keys()) return (" & ".join(dictionary[max_key])) print(PrimSource('180 Raritan Energy Solutions, LLC')) # - # df_Utilities_sum_index['Utility_Na'].apply(PrimSource) df_Utilities_sum_index['Utility Prime Source'] = df_Utilities_sum_index['Utility_Na'].apply(PrimSource) df_Utilities_sum_index.head(10)
Lec_6_PowerPlants2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import pprint import random import numpy as np from scipy.stats import norm from scipy.stats import beta import matplotlib.pyplot as plt # Simple intros [here](https://www.chrisstucchio.com/blog/2013/bayesian_bandit.html) and [here](https://lazyprogrammer.me/bayesian-bandit-tutorial/) and [here](https://stats.stackexchange.com/questions/237037/bayesian-updating-with-new-data) and [here](https://medium.com/@siddharth.1729_65206/of-bandits-and-bidding-26b6109d5d9d) # # A simple bayesian updating on a normal distribution # + trials = 10 nb_draws = 10000 nb_bins = 200 random.seed(42) mu = [0] * trials sigma = [0] * trials results = [] lower = 0.0 upper = 20.0 mu[0] = 5 sigma[0] = 2.7 def plot(y, nb_bins, lower, upper, title): x = np.linspace(lower, upper, nb_bins) plt.plot(x, y) plt.title(title) plt.legend() plt.show() def sort_into_bins(draws, nb_bins, lower, upper): a, b = np.histogram(draws, bins=nb_bins, range=(lower, upper)) return a # draw some realized prices (from true distribution) realized_prices = np.random.normal(mu[0], sigma[0], trials) print(realized_prices) for i in range(1, trials): mu[i] = (sigma[i-1] * realized_prices[i] + (sigma_0 ** 2) * mu[i-1]) / (sigma[i-1] + sigma_0 ** 2) sigma[i] = (sigma[i-1] * sigma_0 ** 2) / (sigma[i-1] + sigma_0 ** 2) draws = np.random.normal(mu[i], sigma[i], nb_draws) y = sort_into_bins(draws, nb_bins, lower, upper) results.append(y) title = "Distribution after %s trials" % i plot(y, nb_bins, lower, upper, title) # + trials = 4 nb_draws = 10000 nb_bins = 200 random.seed(42) mu = [0] * trials sigma = [0] * trials results = [] lower = 0.0 upper = 20.0 mu[0] = 10 sigma[0] = 20 def plot(y, nb_bins, lower, upper, title): x = np.linspace(lower, upper, nb_bins) plt.plot(x, y) plt.title(title) 
plt.legend() plt.show() def sort_into_bins(draws, nb_bins, lower, upper): a, b = np.histogram(draws, bins=nb_bins, range=(lower, upper)) return a # draw some realized prices (from true distribution) realized_prices = np.random.normal(mu[0], sigma[0], trials) print(realized_prices) for i in range(1, trials): mu[i] = (sigma[i-1] * realized_prices[i] + (sigma_0 ** 2) * mu[i-1]) / (sigma[i-1] + sigma_0 ** 2) sigma[i] = (sigma[i-1] * sigma_0 ** 2) / (sigma[i-1] + sigma_0 ** 2) draws = np.random.normal(mu[i], sigma[i], nb_draws) y = sort_into_bins(draws, nb_bins, lower, upper) results.append(y) title = "Distribution after %s trials" % i if i % 100: plot(y, nb_bins, lower, upper, title) # - np.random.random() < 1 # # Multi-armed Bayesian Bandit # + NUM_TRIALS = 2000 PRICE_BINS = [i for i in range(11)] INITIAL_BANDIT_PROBABILITIES = [0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.5, 0.4, 0.3, 0.2, 0.1] class Bandit(object): def __init__(self, price, initial_probability): self.price = price self.probability = initial_probability self.a = 1 self.b = 1 def pull(self): return np.random.random() < self.probability def sample(self): return np.random.beta(self.a, self.b) def update(self, x): self.a += x self.b += 1 - x # - def plot(bandits, trial): x = np.linspace(0, 1, 200) for bandit in bandits: y = beta.pdf(x, bandit.a, bandit.b) plt.plot(x, y, label="real p: %.4f" % bandit.price) plt.title("Bandit distributions after %s trials" % trial) plt.legend() plt.show() # + def experiment(): bandits = [Bandit(price, initial_probability) for price, initial_probability in zip(PRICE_BINS, INITIAL_BANDIT_PROBABILITIES)] sample_points = [5,10,20,50,100,200,500,1000,1500,1999] for i in range(NUM_TRIALS): # take a sample from each bandit bestbandit = None maxsample = -1 allsamples = [] # let's collect these just to print for debugging for bandit in bandits: sample = bandit.sample() allsamples.append("%.4f" % sample) if sample > maxsample: maxsample = sample bestbandit = bandit if i in sample_points: 
print("current samples: %s" % allsamples) plot(bandits, i) # pull the arm for the bandit with the largest sample x = bestbandit.pull() # update the distribution for the bandit whose arm we just pulled bestbandit.update(x) experiment() # - # ## Correlated bandits # + NUM_TRIALS = 2000 PRICE_BINS = [i for i in range(11)] INITIAL_BANDIT_PROBABILITIES = [0.1, 0.2, 0.3, 0.4, 0.5, 0.9, 0.5, 0.4, 0.3, 0.2, 0.1] #[0.9, 0.8, 0.8, 0.7, 0.6, 0.5, 0.5, 0.4, 0.3, 0.2, 0.1] class Bandit2(object): def __init__(self, price, initial_probability): self.price = price self.probability = initial_probability self.a = 1 self.b = 1 def pull(self): return np.random.random() < self.probability def sample(self): return np.random.beta(self.a, self.b) def update(self, x): self.a += x self.b += 1 - x def experiment2(): bandits = [Bandit2(price, initial_probability) for price, initial_probability in zip(PRICE_BINS, INITIAL_BANDIT_PROBABILITIES)] sample_points = [5,10,20,50,100,200,500,1000,1500,1999] for i in range(NUM_TRIALS): # take a sample from each bandit bestbandit = None maxsample = -1 allsamples_f = [] allsamples = [] # let's collect these just to print for debugging for bandit in bandits: sample = bandit.sample() allsamples.append("%.4f" % sample) allsamples_f.append(sample) if sample > maxsample: maxsample = sample bestbandit = bandit if i in sample_points: avg_price = 0.0 prob = 0.0 for sample, bandit in zip(allsamples_f, bandits): prob += sample avg_price += sample * bandit.price avg_price = avg_price / prob print("current samples: %s" % allsamples) print("average price: %s" % avg_price) plot(bandits, i) # pull the arm for the bandit with the largest sample x = bestbandit.pull() if x: for bandit in bandits: if bandit.price < bestbandit.price: bandit.update(x) else: for bandit in bandits: if bandit.price > bestbandit.price: bandit.update(x) # update the distribution for the bandit whose arm we just pulled bestbandit.update(x) experiment2() # - [i/10 for i in range(201)]
notebooks/BayesianBandit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import pandas as pd # + sugars = ['cellobiose', 'fructose', 'galactose', 'glucose', 'maltose', 'mannose', 'sucrose', 'xylose'] with open("C:/Users/heineib/Google Drive/Crick_LMS/Ralser_lab/projects/9_strains/wu_data/sugar_metabolites.tsv", 'w') as f: #Write first two lines sugar = 'cellobiose' metabolite_data = pd.read_table(os.path.normpath("C:/Users/heineib/Google Drive/Crick_LMS/Ralser_lab/projects/9_strains/wu_data/" + sugar + ".txt"), sep=' ', skiprows=0, index_col=0) f.write('sugar\tmetabolite\t'+'\t'.join(metabolite_data.columns)+'\n') f.write('sugar\tmetabolite\t'+'\t'.join(metabolite_data.loc['species'].values)+'\n') for sugar in sugars: print(sugar) metabolite_data = pd.read_table(os.path.normpath("C:/Users/heineib/Google Drive/Crick_LMS/Ralser_lab/projects/9_strains/wu_data/" + sugar + ".txt"), sep=' ', skiprows=1, index_col=0) for row in metabolite_data.iterrows(): row_str = ['{:0.2f}'.format(item) for item in row[1]] f.write(sugar + '\t' + row[0] + '\t' + '\t'.join(row_str) + '\n') # - metabolite_data = pd.read_table(os.path.normpath("C:/Users/heineib/Google Drive/Crick_LMS/Ralser_lab/projects/9_strains/wu_data/" + sugar + ".txt"), sep=' ', skiprows=0, index_col=0)
wu_data_import.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: matan_env # language: python # name: matan_env # --- # + """ Here, we create a custom dataset """ import torch import pickle import argparse import os import sys import json import numpy as np import re import pickle import utils import tqdm from utils.types import PathT import torch.utils.data as data from torch.utils.data import DataLoader from typing import Any, Tuple, Dict, List import torchvision.transforms as transforms from PIL import Image from models.base_model import MyModel from torch.nn.utils.rnn import pack_padded_sequence import h5py # from __future__ import print_function # %matplotlib inline # %load_ext autoreload # %autoreload 2 # - class MyDataset(data.Dataset): """ Custom dataset template. Implement the empty functions. """ def __init__(self, image_path, questions_path, answers_path, train=True, answerable_only=False):#, answerable_only=False): # Set variables self.image_features_path = image_path self.questions_path = questions_path self.answers_path = answers_path #load the dataset of I, Q, A including the vocab of Q and A with open(questions_path, 'r') as fd: self.questions_json = json.load(fd) if (train): dataset_type = "train" else: dataset_type = "val" self.dataset_type = dataset_type #load question vocab with open("../data/cache/question_vocab_"+dataset_type, 'r') as fd: vocab_json = json.load(fd) #Vocab self.vocab = vocab_json self.token_to_index = self.vocab#['question'] with open("../data/cache/trainval_ans2label.pkl", "rb") as f: unpickler = pickle.Unpickler(f) # if file is not empty scores will be equal # to the value unpickled dict_answers = unpickler.load() self.number_of_answers_per_question = len(dict_answers) print("files upload was done") #load Q if os.path.isfile("../data/questions_"+dataset_type): self.questions = torch.load("../data/questions_"+dataset_type) 
else: self.questions = list(self.prepare_questions()) self.questions = [self._encode_question(q, self.token_to_index) for q in self.questions] torch.save(self.questions, "../data/questions_"+dataset_type) print("questions done") #change Q to Q dict if os.path.isfile("../data/questions_dict_"+dataset_type): with open("../data/questions_dict_"+dataset_type, 'rb') as handle: self.questions_dict = pickle.load(handle) else: self.questions_dict = self.questions_to_dict() with open("../data/questions_dict_"+dataset_type, 'wb') as handle: pickle.dump(self.questions_dict, handle) print("questions dict done") #Load question_id_to_image_id if os.path.isfile("../data/question_id_to_image_id_"+dataset_type): with open("../data/question_id_to_image_id_"+dataset_type, 'r') as fd: self.question_id_to_image_id = json.load(fd) else: self.question_id_to_image_id = self.question_id_to_image_id() with open("../data/question_id_to_image_id_"+dataset_type, 'w') as fd: json.dump(self.question_id_to_image_id, fd) print("question_id_to_image_id done") #load A self.answerable_only = answerable_only if os.path.isfile("../data/answerable_with_labels_only_"+dataset_type+"_"+str(answerable_only)): with open("../data/answerable_with_labels_only_"+dataset_type+"_"+str(answerable_only), 'rb') as handle: self.answerable = pickle.load(handle) else: #preprocess A self.answerable = self.preprocess_answers(train) if self.answerable_only: self.answerable = self._find_answerable() with open("../data/answerable_with_labels_only_"+dataset_type+"_"+str(answerable_only), 'wb') as handle: pickle.dump(self.answerable, handle) print("answers done") #load I # if os.path.isfile("../data/images_"+dataset_type): # with open("../data/images_"+dataset_type, 'rb') as handle: # self.images = pickle.load(handle) # else: # #preprocess A # self.images = self.load_images() # with open("../data/images_"+dataset_type, 'wb') as handle: # pickle.dump(self.images, handle) if os.path.isfile("../data/cache/"+dataset_type+".h5"): 
#self.images = h5py.File("../data/cache/"+dataset_type+".h5", 'r') with open("../data/cache/img2idx_"+dataset_type+".pkl", 'rb') as handle: self.img2idx = pickle.load(handle) else: #preprocess A # self.images = self.load_images() # with open("../data/images_"+dataset_type, 'wb') as handle: # pickle.dump(self.images, handle) print("need to implement") raise print("images done") #load coco_images_to_dict if os.path.isfile("../data/coco_images_to_dict"+dataset_type): with open("../data/coco_images_to_dict"+dataset_type, 'rb') as handle: self.images_dict = pickle.load(handle) else: self.coco_images_to_dict() with open("../data/coco_images_to_dict"+dataset_type, 'wb') as handle: pickle.dump(self.images_dict, handle) print("coco_images_to_dict done") if train==True: self.delete_q_without_answer() self.index_to_question_number_dict = self.index_to_question_number_func() def __getitem__(self, item): item = self.index_to_question_number_dict[item] q, q_length = self.questions_dict[item] a = self.answerable[item] temp = torch.zeros(self.number_of_answers_per_question) for answer_index in range(len(a[0])): temp[a[0][answer_index]] = a[1][answer_index] image_id = self.question_id_to_image_id[str(item)] images = h5py.File("../data/cache/"+self.dataset_type+".h5", 'r') image_index = self.img2idx[image_id] v = images['images'][image_index].astype('float32') v = torch.from_numpy(v) return v, temp, q, item, q_length def __len__(self) -> int: """ :return: the length of the dataset (number of sample). 
""" return len(self.questions_dict) def get_transform(self, target_size, central_fraction=1.0): return transforms.Compose([ transforms.Scale(int(target_size / central_fraction)), transforms.CenterCrop(target_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) def load_images(self): transform = self.get_transform(target_size=224, central_fraction=0.875) dataset = [CocoImages(self.image_features_path, transform=transform)] # dataset = Composite(dataset) return dataset @property def max_question_length(self): if not hasattr(self, '_max_length'): self._max_length = max(map(len, self.questions)) return self._max_length def preprocess_answers(self, train=True): if train: with open("../data/cache/train_target.pkl", "rb") as f: unpickler = pickle.Unpickler(f) scores = unpickler.load() else: with open("../data/cache/val_target.pkl", "rb") as f: unpickler = pickle.Unpickler(f) scores = unpickler.load() with open("../data/cache/trainval_ans2label.pkl", "rb") as f: unpickler = pickle.Unpickler(f) # if file is not empty scores will be equal # to the value unpickled dict_answers = unpickler.load() self.number_of_answers_per_question = len(dict_answers) answers_dict = {} for item in scores: answers_dict[item['question_id']] = ((item['labels'], item['scores'])) return answers_dict def questions_to_dict(self): question_dict = {} for i in range(len(self.questions_json['questions'])): question_dict[self.questions_json['questions'][i]['question_id']] = self.questions[i] return (question_dict) def question_id_to_image_id(self): question_id_dict = {} for i in range(len(self.questions_json['questions'])): question_id_dict[str(self.questions_json['questions'][i]['question_id'])] = self.questions_json['questions'][i]['image_id'] return (question_id_dict) def _find_answerable(self): update_answers = self.answerable.copy() for answer in tqdm.tqdm(self.answerable): if (sum(self.answerable[answer])==0): del update_answers[answer] 
return(update_answers) def prepare_questions(self): """ Tokenize and normalize questions from a given question json in the usual VQA format. """ questions = [q['question'] for q in self.questions_json['questions']] for question in questions: question = question.lower()[:-1] yield question.split(' ') def _encode_question(self, question, token_to_index): """ Turn a question into a vector of indices and a question length """ vec = torch.zeros(self.max_question_length).long() for i, token in enumerate(question): index = token_to_index.get(token, 0) vec[i] = index return vec, len(question) def index_to_question_number_func(self): index_to_question_number_dict = {} cnt = 0 for question in self.answerable: index_to_question_number_dict[cnt] = question cnt += 1 return index_to_question_number_dict def coco_images_to_dict(self): images_dict= {} images = self.images[0] cnt = 0 for image in tqdm.tqdm(images): images_dict[image[0]] = cnt cnt +=1 self.images_dict = images_dict def num_tokens(self): return len(self.vocab) + 1 def delete_q_without_answer(self): temp = [] for i in self.answerable: if len(train_dataset.answerable[i][0])<1: temp.append(i) for i in range(len(temp)): del self.answerable[temp[i]] def delete_q_without_answer(self): temp = [] for i in self.answerable: if len(train_dataset.answerable[i][0])<1: temp.append(i) for i in range(len(temp)): del self.answerable[temp[i]] train_dataset = MyDataset(image_path='../../../datashare/train2014', questions_path='../../../datashare/v2_OpenEnded_mscoco_train2014_questions.json', answers_path='../../../datashare/v2_mscoco_train2014_annotations.json', train=True, answerable_only = False ) train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=6) print(train_loader) for i in train_loader: print (i) import tqdm as tqdm for img, ans, ques, _, q_len in tqdm(train_loader): print(ques) temp = [] for i in train_dataset.answerable: train_dataset.answerable[i][0] if len(train_dataset.answerable[i][0])<1: 
temp.append(i) # print(i) # if i[0] == None: # print(i) len(train_dataset.answerable) # 443757 len(train_dataset.answerable)
.ipynb_checkpoints/playground-Copy1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Primer modelo automático usando TPOT # ## Importamos las librerías # + import numpy as np import pandas as pd from tpot import TPOTClassifier from sklearn.model_selection import train_test_split # - # ## Importamos los datos con pandas # Cargando los datos datos_titanic = pd.read_csv('./titanic_train.csv') entrenamiento, pruebas = train_test_split(datos_titanic,test_size=0.3) entrenamiento.describe() entrenamiento.head() # ## Hacemos una "limpieza" de nuestro datos antes de hacer el modelo combine = [entrenamiento, pruebas] # Convert string values 'male' and 'female' to int values sex_mapping = {'male': 0, 'female': 1} entrenamiento['Sex'] = entrenamiento['Sex'].map(sex_mapping) pruebas['Sex'] = pruebas['Sex'].map(sex_mapping) calculo_edades = np.zeros((2,3)) for dataset in combine: for sex in range(0, 2): for pclass in range(0, 3): guess_df = dataset[(dataset['Sex'] == sex) & (dataset['Pclass'] == pclass+1)]['Age'].dropna() age_guess = guess_df.median() calculo_edades[sex, pclass] = int(age_guess/0.5 + 0.5) * 0.5 for sex in range(0, 2): for pclass in range(0, 3): dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == sex) &(dataset.Pclass == pclass+1),'Age'] = calculo_edades[sex, pclass] # + entrenamiento = entrenamiento.drop(['Ticket', 'Cabin', 'Name', 'PassengerId', 'SibSp', 'Parch', 'Embarked'], axis=1) pruebas = pruebas.drop(['Ticket', 'Cabin', 'Name', 'SibSp', 'Parch', 'Embarked'], axis=1) X_train = entrenamiento.drop('Survived', axis=1) Y_train = entrenamiento['Survived'] X_test = pruebas.drop(["PassengerId","Survived"], axis=1) # - # ## Creamos y entrenamos nuestro modelo tpot = TPOTClassifier(verbosity=2, max_time_mins=2) tpot.fit(X_train, Y_train) from sklearn.ensemble import GradientBoostingClassifier from sklearn.decomposition import 
PCA nuevo_modelo = GradientBoostingClassifier(PCA(PCA(iterated_power=10, svd_solver='randomized'), iterated_power=6, svd_solver='randomized'), learning_rate=0.1, max_depth=5, max_features=0.8500000000000001, min_samples_leaf=16, min_samples_split=9, n_estimators=100, subsample=0.5) nuevo_modelo.fit(X_train, Y_train) # ## Predecimos con nuestro árbol y la tasa de exactitud Y_pred = nuevo_modelo.predict(X_test) Y_pred decision_tree.score(X_train, Y_train) tpot.export("algo.py")
AI/Talleres/AutoML/solucion/AutoML 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Renaming Columns in Pandas #load Pandas import pandas as pd # + #Create Sample DataFrame with Columnns we want to Rename df1 = pd.DataFrame({'$Customer': ['Raj', 'Rahul', 'Ankit', 'Priyanka'] , '$Sales': [3453,5674, 9374,4282] , 'Qutity' : [3, 5, 9, 4]}) #Create DataFrame for example 2 df2 = pd.DataFrame({'Customer': ['Raj', 'Rahul', 'Ankit', 'Priyanka'] , 'Sales': [3453,5674, 9374,4282] , '##Qutity' : [3, 5, 9, 4]}) # - # ## Trick 1: Changing all Columns df1 #Use ".columns" property of DataFrame df1.columns = ['Customer', 'Sales', 'Quantity'] df1 # ## Trick 2: Changing Specific Columns df2 df2.rename(columns={'##Qutity' : 'Quantity'}) df2 # ## Trick 2.1: Changing Specific Columns using "inplace" argument df2.rename(columns={'##Qutity' : 'Quantity'}, inplace = True) df2
Python Pandas Smart Tricks/1_Renaming_Columns_in_Pandas_28Jul2019.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + id="w4vVcYbTI2ti" colab_type="code" colab={} ##################################################### ########## Welcome to TensorFlow World ############## ##################################################### # The tutorials in this section is just a start for math operations. # The TensorFlow flags are used for having a more user friendly environment. from __future__ import print_function import tensorflow as tf import os # + id="BQcDy29QJhDL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="37e471da-43d3-4766-e5a0-798b1d1bfb22" print(tf.__version__) # + id="URDgG-93I2tu" colab_type="code" colab={} # Defining some constant values a = tf.constant(20.0, name="a") b = tf.constant(10.0, name="b") # + id="mHyuDEsMI2t4" colab_type="code" colab={} # Some basic operations x = tf.add(a, b, name="add") y = tf.div(a, b, name="divide") # + id="7D4uIbLlI2uA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="0f8ed10c-60f7-4375-d6e9-a7a469af2e8f" # Run the session with tf.Session() as sess: print("a =", sess.run(a)) print("b =", sess.run(b)) print("a + b =", sess.run(x)) print("a/b =", sess.run(y)) # + id="UCyKhcDlI2uN" colab_type="code" colab={} # Closing the session. sess.close() # + id="qnV94elBI2uU" colab_type="code" colab={}
codes/1-basics/basic_math_operations/code/basic_math_operation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from keras.models import Model
from keras.layers import Dense, Activation
from keras.datasets import mnist
from keras.optimizers import Adam
from keras.models import Sequential
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt


# +
def autoencode(model):
    """Return the bottleneck (layer-1) activations of `model` for the global `x_test`.

    NOTE(review): the original also ran `model.predict(x_test)` here and
    discarded the result; that dead (and expensive) call has been removed.
    """
    get_3rd_layer_output = K.function([model.layers[0].input],
                                      [model.layers[1].output])
    compressed = get_3rd_layer_output([x_test])
    return compressed


def test_restoration(model):
    """Show original, reconstructed and compressed views of the first two
    samples of the global `x_test`."""
    decoded_imgs = model.predict(x_test)
    get_3rd_layer_output = K.function([model.layers[0].input],
                                      [model.layers[1].output])
    for i in range(2):
        print("original: ")
        plt.imshow(x_test[i].reshape(28, 28))
        plt.gray()
        plt.show()
        # -------------------
        print("reconstructed: ")
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        plt.show()
        # -------------------
        print("compressed: ")
        # bottleneck is 32-dimensional -> displayed as an 8x4 image
        current_compressed = get_3rd_layer_output([x_test[i:i + 1]])[0][0]
        plt.imshow(current_compressed.reshape(8, 4))
        plt.gray()
        plt.show()
        # print(current_compressed)


# -

(x_train, y_train), (x_test, y_test) = mnist.load_data()

# +
# scale pixels to [0, 1] and flatten 28x28 images to 784-vectors
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

instances, input_features = x_train.shape
# -

# 784 -> 128 -> 32 (bottleneck) -> 128 -> 784 autoencoder
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(input_features,)))
model.add(Dense(32, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(input_features, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer='adam')

model.fit(x_train, x_train, epochs=3, validation_data=(x_test, x_test))

# +
# model.summary()

# +
layers = len(model.layers)
for i in range(layers):
    inp, out = model.layers[i].output.get_shape()
    print(i, ". ", model.layers[i].output, " (", out, ")")

# +
weights = model.get_weights()
for i in range(len(weights)):
    print(i, ". ", len(weights[i]))

# +
com = autoencode(model)
test_restoration(model)
# -

# stand-alone decoder: 32 -> 128 -> 784, mirroring the second half of `model`
decoder = Sequential()
decoder.add(Dense(128, activation='relu', input_shape=(32,)))
decoder.add(Dense(input_features, activation='sigmoid'))
# decoder.summary()

# +
decoder_weights = decoder.get_weights()
for i in range(len(decoder_weights)):
    print(len(decoder_weights[i]))

# +
# copy the decoder half (weight arrays 4..7) of the autoencoder into `decoder`
weights = model.get_weights()
temp_weights = list(weights[4:8])
for w in temp_weights:
    print(len(w))
decoder.set_weights(temp_weights)
# -

# #copy these matrix from the compressed representation value
sample = np.array([2.59521651, 5.63147545, 2.05743384, 4.32667685, 6.58895254,
                   0.0, 3.75777841, 15.93504238, 8.22563648, 10.76218987,
                   9.22686291, 7.61534977, 8.59453583, 7.27693939, 0.0, 0.0,
                   6.01428413, 5.45331526, 4.60282326, 5.23728228, 6.86480665,
                   13.43490028, 5.81025648, 10.99352455, 10.77216816,
                   2.27521825, 5.83731318, 7.09324121, 3.53731441, 6.28301096,
                   4.87098885, 3.07779741])
print(sample.shape)

"""restored = decoder.predict(sample.reshape(1,32))
plt.imshow(restored.reshape(28, 28))
plt.show()"""

print(len(com[0]), "x", len(com[0][0]))

from tensorflow.contrib.factorization.python.ops import clustering_ops
import tensorflow as tf

# NOTE(review): tf.contrib was removed in TensorFlow 2.x; this cell requires TF 1.x.
unsupervised_model = tf.contrib.learn.KMeansClustering(
    10,  # number of clusters
    distance_metric=clustering_ops.SQUARED_EUCLIDEAN_DISTANCE,  # SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE
    initial_clusters=tf.contrib.learn.KMeansClustering.RANDOM_INIT,
)


def train_input_fn():
    """Feed the bottleneck codes of the test set to the clusterer."""
    data = tf.constant(com[0], tf.float32)
    return (data, None)


unsupervised_model.fit(input_fn=train_input_fn, steps=5000)

# +
clusters = unsupervised_model.predict(input_fn=train_input_fn)
print("following samples are clustered in same cluster:")
# show the early test images (index < 200) that fall into cluster 4
# (unused local `features = x_test[index]` from the original removed)
for index, assignment in enumerate(clusters):
    if index < 200 and assignment['cluster_idx'] == 4:
        plt.imshow(x_test[index].reshape(28, 28))
        plt.gray()
        plt.show()
# -
python/Autoencoder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # imports import numpy as np from scipy.stats import norm from filterpy.stats import gaussian, plot_gaussian_pdf import matplotlib.pyplot as plt # # SOT algorithms # ## Why not use original SOT recursion equations? # It is intractable to compute the posterior density $p(x_k|Z_{1:k})$ exactly, since the complexity of the original SOT recursions grow exponentially due to the number of hypothesis sequences at $k$ being $\prod_{i=1}^{k}(m_i + 1)$ where $m_i$ is the number of hypotheses at time $k=i$. It means that the amount of components grow with $k$ and each time step will require more processing time and memory. The components are, namely, weights (hypothesis probabilites) and pdfs (hypothesis priors/posteriors calculated by Kalman filter predict/update steps). In the following we are going to discuss few common strategies to approximate the posterior and obtain a feasible SOT algorithm. The main focus of the following are the posteriors that can be approximated by the gaussian mixture, although the principles will apply more generally. # ## Types of algorithms # The approximation boils down to the reduction of the amount of components in $p(x_k|Z_{1:k})$ to obtain a Gaussina mixture with fewer componens $\hat{p}(x_k|Z_{1:k})$ so that $p(x_k|Z_{1:k}) \approx \hat{p}(x_k|Z_{1:k})$. there are two main techniques to reduce a mixture - **pruning** and **merging**. 
# # - pruning - remove unlikely hypothesis with small weights (and renormalize) # - merging - approximate all components by a single density # ### Pruning and merging example # $p(x)$ is given by $p(x) = w_1p_1(x) + w_2p_2(x)$ where: # # $$ # \begin{cases} # w_1 = 0.07 & p_1(x) = \mathcal{N}(x:-2,1) \\ # w_2 = 0.93 & p_2(x) = \mathcal{N}(x:2,1.5) \\ # \end{cases} # $$ # # Pruning first hypothesis gives $\hat{p} = p_2(x)$ # # Merging gives # + xs = np.linspace(-5, 5, 50) w = [0.07, 0.93] get_pdf = lambda x, m, v: norm.pdf(x, m, np.sqrt(v)) p_1 = get_pdf(xs, -2, 1) p_2 = get_pdf(xs, 2, 1.5) p_exact = w[0] * p_1 + w[1] * p_2 p_pruning = p_2 mm = w[0] * -2 + w[1] * 2 vm = (w[0] * (1 + (-2)**2) + w[1] * (1.5 + (2)**2)) - mm ** 2 p_merging = get_pdf(xs, mm, vm) ## plotting fig=plt.figure(figsize=(12, 3), dpi= 200, facecolor='w', edgecolor='k') ax = plt.subplot(121, xlim=(-5, 5)) plt.plot(xs, w[0] * p_1, "--.", label=r"$w_1p_1(x)$") plt.plot(xs, w[1] * p_2, "--.", label=r"$w_1p_1(x)$") plt.plot(xs, p_exact, "k", label=r"$p(x)$") plt.plot(xs, p_pruning, "r", label=r"$\bar{p}_{pruning}(x)$") ax.legend() ax = plt.subplot(122, xlim=(-5, 5)) plt.plot(xs, w[0] * p_1, "--.", label=r"$w_1p_1(x)$") plt.plot(xs, w[1] * p_2, "--.", label=r"$w_1p_1(x)$") plt.plot(xs, p_exact, "k", label=r"$p(x)$") plt.plot(xs, p_merging, "r", label=r"$\bar{p}_{merging}(x)$") ax.legend() plt.show() # - # In the following we are presenting three SOT in clutter algorithms based on above approximation techniques: # # - Nearest neighbour (NN) filter uses **pruning** # - Probablistic data association (PDA) filter uses **merging** # - Gaussian sum filter (GSF) uses **pruning/merging** # # All of those algorithms are the assumed density filters, meaning that each recursion computation starts and ends with a density of assumed family. For NN and PDA it is Gaussian density and for GSF its a Gaussian mixture density. 
# ## Nearest neigbour (NN) filter # Being assumed density filter the NN filter assumes the Gausian posterior at time $k-1$ which becomes a prior at time $k$. # # $$ # p(x_{k-1}|Z_{1:k-1}) = \mathcal{N}(x_{k-1}: \bar{x}_{k-1|k-1}, P_{k-1|k-1}) # $$ # # Given the motion model (either linear or non-linear) we can propagate the prior in time and calcluate Kalman filter prediction $p(x_k|Z_{1:k-1})$. Linear case given below: # # $$ # x_k = F_{k-1}x_{k-1} + q_{k-1} \\ q_{k-1} \backsim \mathcal{N}(0, Q) \\ # p(x_k|Z_{1:k-1}) = \mathcal{N}(x_k:\bar{x}_{k|k-1},P_{k|k-1}) \\ # \text{where} # \\ # \bar{x}_{k|k-1} = F_{k-1}\bar{x}_{k-1|k-1} \\ # P_{k|k-1} = F_{k-1}P_{k-1|k-1}F_{k-1}^T + Q # $$ # # When its time to perform Kalman filter update, we now have $m_k$ hypotheses, therefore the posterior is the gausian mixture. Furthermore, we denote the nearest neigbour approximation of the predicted density $p^{NN}(x_k|Z_{1:k-1})$, but continue as if it was a true predicted density. We also assume constant probability of detection $P^D(x) = P^D$, linear object measurement model $g_k(o|x) = \mathcal{N}(o: H_kx, R_k)$ and any clutter intensity function $\lambda_c(c)$. The approximation of the Gaussian mixture by one Gaussian density in case of NN is to prune all hypotheses except the most probable one, or in other words, the one with the highest weight. # # Algorithm # 1. Compute $\tilde{w}_k^{\theta_k}$ for $\theta_k \in \{0,1,...,m_k\}$ # 2. Find $\theta_k^* = \arg\max_{\theta} \tilde{w}_k^{\theta_k}$ # 3. Compute by $\bar{x}^{NN}_{k|k} = \hat{x}_k^{\theta_k^*}$ and $P^{NN}_{k|k} = P_k^{\theta_k^*}$ by Kalman filter update # 4. 
Assume posterior $p^{NN}(x_k|Z_{1:k}) = \mathcal{N}(x_k:\bar{x}^{NN}_{k|k},P^{NN}_{k|k})$ # ### Example # Prior $p(x_1) = \mathcal{N}(x_1:0.5,0.2)$ # # Object likelihood $g_k(o_k|x_k)=\mathcal{N}(o_k:x_k,0.2)$ # # Motion model (random walk) $\pi_k(x_k|x_{k-1})=\mathcal{N}(x_k:x_{k-1},0.35)$ # # Probability of detection $P^D(x) = 0.9$ # # Clutter intensity # # $$\lambda(c) = \begin{cases} 0.4 &\text{if }|x| \le 4 \\ 0 &\text{otherwise} \end{cases}$$ # # Sequence of detections $Z_{1:k} = (Z_1, Z_2, ..., Z_k) = \{[-1.3, 1.7],\ [1.3],\ [-0.3, 2.3],\ [-2, 3],\ [2.6],\ [-3.5, 2.8]\}$ # + from sot.sot import GaussianMixture, update_mixture, predict_mixture from ipywidgets import interact, IntSlider Z = [ [-1.3, 1.7], [1.3], [-0.3, 2.3], [-2, 3], [2.8], [-3.5, 2.8] ] lamc = lambda c: 0.4 if np.abs(c) < 4 else 0 R = np.array([0.2]) H = np.array([1]) Q = np.array([0.35]) F = np.array([1]) PD = 0.9 priors = [GaussianMixture( [np.array(0.5)], [np.array(0.5)], [np.array(1.0)])] posteriors = [] priors_nn = [GaussianMixture( [np.array([0.5])], [np.array([0.5])], [np.array(1.0)])] posteriors_nn = [] # CALCULATE ORIGINAL RECURSION for k, z in enumerate(Z): # update xs_u, Ps_u, ws_u = update_mixture(priors[k].xs, priors[k].Ps, priors[k].ws, z, R, H, PD, lamc) posteriors.append(GaussianMixture(xs_u, Ps_u, ws_u)) # prediction xs_p, Ps_p = predict_mixture(xs_u, Ps_u, F, Q) priors.append(GaussianMixture(xs_p, Ps_p, ws_u)) # CALCULATE NN RECURSION for k, z in enumerate(Z): # update xs_u, Ps_u, ws_u = update_mixture(priors_nn[k].xs, priors_nn[k].Ps, priors_nn[k].ws, z, R, H, PD, lamc) posterior_mixture = GaussianMixture(xs_u, Ps_u, ws_u) # calculate NN approximation xs_unn, Ps_unn = posterior_mixture.get_pruned() posteriors_nn.append(GaussianMixture(xs_unn, Ps_unn, [1.0])) # prediction xs_p, Ps_p = predict_mixture(xs_unn, Ps_unn, F, Q) priors_nn.append(GaussianMixture(xs_p, Ps_p,[1.0])) xval = np.linspace(-4,4,150) def plot_nn_filter(k): pxz = posteriors[k-1].get_mixture(xval) pxz_nn = 
posteriors_nn[k-1].get_components(xval) px_nn = priors_nn[k-1].get_components(xval) no_detect = np.dot(priors[k-1].xs, priors[k-1].ws) plt.figure(figsize=(12, 6), dpi= 200, facecolor='w', edgecolor='k') plt.subplot(xlim=(-4,4)) plt.plot(Z[k-1], np.zeros(len(Z[k-1])), "rs", markersize=10, label=f"$Z_{{{k}}}$") plt.plot(no_detect, np.zeros(1), "bs", markersize=10, label=f"no detect: $E(p(x_{{{k}}}|Z_{{1:{k-1}}}))$") plt.plot(xval, pxz, "k", label=f"$p(x_{{{k}}}|Z_{{1:{k}}})$") plt.plot(xval, pxz_nn, "m", label=f"$p^{{NN}}(x_{{{k}}}|Z_{{1:{k}}})$") plt.plot(xval, px_nn, "r", label=f"$p^{{NN}}(x_{{{k}}}|Z_{{1:{k-1}}})$") plt.legend() plt.show() interact(plot_nn_filter, k=IntSlider(value=1, min=1., max=6, description="timestep k")); # - # ### Pros and cons # + (+) A fast algorithm which is simple to implement # + (+) Works well in simple scenarios, very high probability of detection $P^D$ and low clutter intensity $\lambda_c(c)$ # + (-) Ignores some hypotheses and thus underestimates the uncertainties which increases the risk that we will loose track of the object # + (-) Performs poorly in complicated scenarios # ## Probabilistic data association (PDA) # In contranst to NN filter the PDA filter merges all hypotheses into one Gaussian posterior un the update step, and thus takes into account the uncertainties from all hypotheses. the prediction step and the assumptions for the update step are exactly the same as for NN filter. The approximation of the posterior mixture in PDA is performed by finding a Gaussian with the same mean and covariance as the mixture. Such approximation minmizes the Kullback-Leibler divergence and is the best amoing all Gaussian approximations. 
# $$ # p^{PDA}(x_k|Z_{1:k}) = \mathcal{N}(x_k:\bar{x}^{PDA}_{k|k}, P^{PDA}_{k|k}) \\ # $$ # where the new expected value is the expected value of expected values of Gaussian components: # $$ # \bar{x}^{PDA}_{k|k} = \mathbb{E}_{p(x_k|Z_{1:k})}[x_k] = \sum_{\theta_k=0}^{m_k}w_k^{\theta_k}\hat{x}_k^{\theta_k} # $$ # amd the new covariance is the sum of covariances of the Gaussian componens and covariance between expected values of the Gaussian components: # $$ # P^{PDA}_{k|k} = Cov_{p(x_k|Z_{1:k})}[x_k] = \sum_{\theta_k=0}^{m_k}w_k^{\theta_k}P_k^{\theta_k} + w_k^{\theta_k}(\bar{x}^{PDA}_k - \hat{x}_k^{\theta_k})(\bar{x}^{PDA} - \hat{x}_k^{\theta_k})^T # $$ # # Algorithm: # 1. Compute $\tilde{w}_k^{\theta_k}$, $\hat{x}_k^{\theta_k}$ and $P_k^{\theta_k}$ for $\theta_k \in \{0,1,...,m_k\}$ # 2. Compute approximations expected value $\bar{x}^{PDA}_{k|k} = \sum_{\theta_k=0}^{m_k}w_k^{\theta_k}\hat{x}_k^{\theta_k}$ # 3. Compute approximations covariance $P^{PDA}_{k|k} = \sum_{\theta_k=0}^{m_k}w_k^{\theta_k}P_k^{\theta_k} + w_k^{\theta_k}(\bar{x}^{PDA}_k - \hat{x}_k^{\theta_k})(\bar{x}^{PDA} - \hat{x}_k^{\theta_k})^T$ # ### Examples # Same as example above, but with PDA # + priors = [GaussianMixture( [np.array(0.5)], [np.array(0.5)], [np.array(1.0)])] posteriors = [] priors_pda = [GaussianMixture( [np.array([0.5])], [np.array([0.5])], [np.array(1.0)])] posteriors_pda = [] # CALCULATE ORIGINAL RECURSION for k, z in enumerate(Z): # update xs_u, Ps_u, ws_u = update_mixture(priors[k].xs, priors[k].Ps, priors[k].ws, z, R, H, PD, lamc) posteriors.append(GaussianMixture(xs_u, Ps_u, ws_u)) # prediction xs_p, Ps_p = predict_mixture(xs_u, Ps_u, F, Q) priors.append(GaussianMixture(xs_p, Ps_p, ws_u)) # CALCULATE NN RECURSION for k, z in enumerate(Z): # update xs_u, Ps_u, ws_u = update_mixture(priors_pda[k].xs, priors_pda[k].Ps, priors_pda[k].ws, z, R, H, PD, lamc) posterior_mixture = GaussianMixture(xs_u, Ps_u, ws_u) # calculate PDA approximation xs_upda, Ps_upda = 
posterior_mixture.get_merged() posteriors_pda.append(GaussianMixture(xs_upda, Ps_upda, [1.0])) # prediction xs_p, Ps_p = predict_mixture(xs_upda, Ps_upda, F, Q) priors_pda.append(GaussianMixture(xs_p, Ps_p,[1.0])) xval = np.linspace(-4,4,150) def plot_nn_filter(k): pxz = posteriors[k-1].get_mixture(xval) pxz_pda = posteriors_pda[k-1].get_components(xval) px_pda = priors_pda[k-1].get_components(xval) no_detect = np.dot(priors[k-1].xs, priors[k-1].ws) plt.figure(figsize=(12, 6), dpi= 200, facecolor='w', edgecolor='k') plt.subplot(xlim=(-4,4)) plt.plot(Z[k-1], np.zeros(len(Z[k-1])), "rs", markersize=10, label=f"$Z_{{{k}}}$") plt.plot(no_detect, np.zeros(1), "bs", markersize=10, label=f"no detect: $E(p(x_{{{k}}}|Z_{{1:{k-1}}}))$") plt.plot(xval, pxz, "k", label=f"$p(x_{{{k}}}|Z_{{1:{k}}})$") plt.plot(xval, pxz_pda, "m", label=f"$p^{{PDA}}(x_{{{k}}}|Z_{{1:{k}}})$") plt.plot(xval, px_pda, "r", label=f"$p^{{PDA}}(x_{{{k}}}|Z_{{1:{k-1}}})$") plt.legend() plt.show() interact(plot_nn_filter, k=IntSlider(value=1, min=1., max=6, description="timestep k")); # - # ### Pros and cons # + (+) A fast algorithm which is simple to implement # + (+) Works well in simple scenarios, very high probability of detection $P^D$ and low clutter intensity $\lambda_c(c)$ # + (+) Acknowledges uncertainty slightly better than NN # + (-) Performs poorly in complicated scenarios when the posterior is far from Gaussian # ## Gaussian mixture filter (GMF) # The main idea of Gaussian mixture filtering or Gausian sum filtering is to recognize that the approximation of the posterior with the single Gaussian (be it by pruning or merging) is often very crude. Instead we will seek approximating the posteriar with the gaussian mixture with N components. We can often do that because even though the posterior contains many hypothesis, it is usually dominated by only a fraction of those. 
Lets assume that we end every recursion with a Gaussian mixture with a few components: # # $$ # p^{GSF}(x_{k-1}|Z_{1:k-1}) = \sum_{h_{k-1}=1}^{\mathcal{H}_{k-1}}w_{k-1}^{h_{k-1}}p_{k-1|k-1}^{h_{k-1}}(x_{k-1}) # $$ # # Assuming linear and Gaussian models, posterior at time $k$ is a Gaussian mixture that considers all the transition hyposesis sequences from previous $\mathcal{H}_{k-1}$ hypotheses to $m_k + 1$ new hypotheses obtained with measurements. The _breve_ $\breve{p}$ notation corresponds to the density before any approximations are applied. # # $$ # \breve{p}^{GSF}(x_k|Z_{1:k}) = \sum_{h_{k-1}=1}^{\mathcal{H}_{k-1} \cdot (m_k + 1)}\breve{w}_{k}^{h_{k}}\breve{p}_{k|k}^{h_{k}}(x_{k}) # $$ # # How can we limit the amount of hypotheses in {\breve{p}} to get a sufficient approximation. There are 3 strategies: # # Algorithm 1: Prune all hypotheses whose weight are smaller than a threshold $\gamma$, re-normalize w}eights and re-assign $\theta$ to be $\{1,2,...,\mathcal{H}_k\}$ # # 1. Input $\gamma$, $w^i$, $\hat{x}^i$, $P^i$ where $i \in \{1,2,...,\mathcal{H}\}$ # 2. Find indices that we intend to keep $\{ind \ | \ w^i > \gamma \ \forall i \ \in \{1,2,...,\mathcal{H}\}\}$ # 3. Set $\grave{\mathcal{H}} = |ind|$ and $c = \sum_{i=1}^{\grave{\mathcal{H}}}w^{ind(i)}$ # 4. For each $i \in \{1,2,...,\grave{\mathcal{H}}\}$ get $\grave{w}^i = \frac{w^{ind(i)}}{c}$, $\grave{x}^i = \hat{x}^{ind(i)}$ and $\grave{P}^i = P^{ind(i)}$ # # Algorithm 2: Merge similar components: # # For example suppose $p^1(x)$ and $p^2(x)$ are similar in the following expression. Set $w^{12} = w^1 + w^2$ and we get. # $$ # p(x) = w^1p^1(x) + w^2p^2(x) + w^3p^3(x) = w^{12}\left(\frac{w^1p^1(x)}{w^{12}} + \frac{w^2p^2(x)}{w^{12}}\right) + w^3p^3(x) # $$ # The expression in the parentheses can be viewed as $p^{12}(x)$ and obtained using the techniques fro PDA filtering. # # Algorithm 3: Prune hypotheses until we are left with at most $N_{max}$ hypotheses # # 1. 
Input $N_{max}$, $w^i$, $\hat{x}^i$, $P^i$ where $i \in \{1,2,...,\mathcal{H}\}$ # 2. If $\mathcal{H} > N_{max}$, find indices that we intend to keep by sorting $ind = sort(\{w^1,w^2,...,w^{\mathcal{H}}\})$ # 3. Compute normalization factor $c = \sum_{i=1}^{N_{max}}w^{ind(i)}$ # 4. For each $i \in \{1,2,...,N_{max}\}$ get $\grave{w}^i = \frac{w^{ind(i)}}{c}$, $\grave{x}^i = \hat{x}^{ind(i)}$ and $\grave{P}^i = P^{ind(i)}$ # ### Estimator for the Gaussian mixture # In case of the posterior approximation that yields a single Gaussian it is clear that the state estimate $x_k$ is just the expected value of the Gaussian. However, if the posterior is the Gaussian mixture, how can we estimate $x_k$? # # #### Minimum mean square error (MMSE) estimator # One possibility is to use the minimum mean square error (MMSE) estimate $\bar{x}_{k|k}$ that minimizes $\mathbb{E}[(x_k - \bar{x}_{k|k})^T(x_k - \bar{x}_{k|k})|Z_{1:k}]$. The MMSE estimator is calculated in the same way as the posterior mean in the PDA filtering approximation. # # $$ # \bar{x}_{k|k} = \mathbb{E}[x_k|Z_{1:k}] = \sum_{h_{k}=1}^{\mathcal{H}_{k}}w_{k}^{h_{k}}\hat{x}_{k|k}^{h_{k}} # $$ # # #### Most probable hypothesis estimation # Another alternative is to use the estimate of the most probable hypothesis, namely, the hypothesis with the highest weight. More preferable for multimodal densities to ensure that our posterior state estimate doesn't end up somewhere in between the modes. Both estimators are identical in case of the posterior being a single Gaussian. # ### Pros and cons # + (+) Significantly more accurate than NN and PDA, but that difference is only noticed in medium-difficult settings, namely when the probability of detection is low and clutter is high. # + (+) Complexity can be adjusted to computational resources by choosing the pruning parameters. 
# + (-) More computationally demanding than NN and PDA and somewhat more complicated to implement # ### Examples # Same as example previous, but with GSF # + N_max = 5 priors = [GaussianMixture( [np.array(0.5)], [np.array(0.5)], [np.array(1.0)])] posteriors = [] priors_gsf = [GaussianMixture( [np.array([0.5])], [np.array([0.5])], [np.array(1.0)])] posteriors_gsf = [] # CALCULATE ORIGINAL RECURSION for k, z in enumerate(Z): # update xs_u, Ps_u, ws_u = update_mixture(priors[k].xs, priors[k].Ps, priors[k].ws, z, R, H, PD, lamc) posteriors.append(GaussianMixture(xs_u, Ps_u, ws_u)) # prediction xs_p, Ps_p = predict_mixture(xs_u, Ps_u, F, Q) priors.append(GaussianMixture(xs_p, Ps_p, ws_u)) # CALCULATE NN RECURSION for k, z in enumerate(Z): # update xs_u, Ps_u, ws_u = update_mixture(priors_gsf[k].xs, priors_gsf[k].Ps, priors_gsf[k].ws, z, R, H, PD, lamc) posterior_mixture = GaussianMixture(xs_u, Ps_u, ws_u) # Choose N best xs_ugsf, Ps_ugsf, ws_ugsf = posterior_mixture.get_n_best(N_max) posteriors_gsf.append(GaussianMixture(xs_ugsf, Ps_ugsf, ws_ugsf)) # prediction xs_p, Ps_p = predict_mixture(xs_ugsf, Ps_ugsf, F, Q) priors_gsf.append(GaussianMixture(xs_p, Ps_p,ws_ugsf)) xval = np.linspace(-4,4,150) def plot_nn_filter(k): pxz = posteriors[k-1].get_mixture(xval) pxz_gsf = posteriors_gsf[k-1].get_mixture(xval) px_gsf = priors_gsf[k-1].get_mixture(xval) no_detect = np.dot(priors[k-1].xs, priors[k-1].ws) plt.figure(figsize=(12, 6), dpi= 200, facecolor='w', edgecolor='k') plt.subplot(xlim=(-4,4)) plt.plot(Z[k-1], np.zeros(len(Z[k-1])), "rs", markersize=10, label=f"$Z_{{{k}}}$") plt.plot(no_detect, np.zeros(1), "bs", markersize=10, label=f"no detect: $E(p(x_{{{k}}}|Z_{{1:{k-1}}}))$") plt.plot(xval, pxz, "k", label=f"$p(x_{{{k}}}|Z_{{1:{k}}})$") plt.plot(xval, pxz_gsf, "m-.", label=f"$p^{{PDA}}(x_{{{k}}}|Z_{{1:{k}}})$") plt.plot(xval, px_gsf, "r", label=f"$p^{{PDA}}(x_{{{k}}}|Z_{{1:{k-1}}})$") plt.legend() plt.show() interact(plot_nn_filter, k=IntSlider(value=1, min=1., max=6, 
description="timestep k")); # - # As we see, at $k = 1$ the posterior is calculated exactly, since there are only 3 hypotheses and we are not pruning anything. At step $k = 2$ there are 6 hypothesis sequences, we will only allow $N_{max} = 5$ so a small approximation error can be seen. In the following steps, we see more error between the true and GSF approximated posterior, however the results suggest that GSF performs this task very well. # ## Gating # Suppose we have a very good sensor and we would like to apply PDA single object tracking. # - large $P^D$ # - small $\lambda_c$ # - huge field of view # # Under these conditions we can expect our SOT filters to perform well, but the huge field of view comes with the downside of expecting a high number of clutter detections $m_k$. # # The essence of PDA is performing a summation over all hypotheses to approximate all of them as a single Gaussian. # # $$ # \bar{x}^{PDA}_{k|k} = \sum_{\theta_k=0}^{m_k}w_k^{\theta_k}\hat{x}_k^{\theta_k} # \\ # P^{PDA}_{k|k} = \sum_{\theta_k=0}^{m_k}w_k^{\theta_k}P_k^{\theta_k} + w_k^{\theta_k}(\bar{x}^{PDA}_k - \hat{x}_k^{\theta_k})(\bar{x}^{PDA} - \hat{x}_k^{\theta_k})^T # $$ # # That summation could be computationally demanding if $m_k$ is sufficiently large. However, for the measurements that are far from the predicted measurement the weights are practically zero and don't contribute to the posterior Gaussian. To save the computation during the update step we would like to avoid computing $w_k^{\theta_k}$, $\hat{x}_k^{\theta_k}$ and $P_k^{\theta_k}$ for hypotheses with $w_k^{\theta_k}\approx 0$. It also means that we want to ignore the measurements that are far from the predicted measurements, which is what gating is all about. Gating can be thought of as a pruning method that does not compute the weights. The technique is not specific to PDA, but can be applied generally. # # There are a few ways to form gates. 
Simpler types could be an axis alligned box, but we will apply a more popular and sophisticated gate based on a covariance ellipsoid of the predicted measurement distribution. Consider the unnormalized weight. # # $$ # \tilde{w}_k^{\theta_k} = \frac{P^D(x_k)\mathcal{N}(z_k^{\theta_k}:\bar{z}_{k|k}^{h_{k-1}},S_{k,h_{k-1}})}{\lambda_c(z_k^{\theta_k})} # $$ # # We note that the weight $\tilde{w}_k^{\theta_k}$ is small when the following Mahalanobis distance between the measurement $z_k^{\theta_k}$ and distribution $\mathcal{N}(z_k^{\theta_k}:\bar{z}_{k|k}^{h_{k-1}},S_{k,h_{k-1}})$ is large (given $\lambda_c \approx \ \text{constant}$). # # $$ # d_{h_{k-1},\theta_k}^2 = (z_k^{\theta_k} - \bar{z}_{k|k}^{h_{k-1}})^TS_{k,h_{k-1}}^{-1}(z_k^{\theta_k} - \bar{z}_{k|k}^{h_{k-1}}) # $$ # # This allows as to disregard the measurement $z_k^{\theta_k}$ as clutter under the hypothesis $h_{k-1}$ if $d_{h_{k-1},\theta_k}^2 > G$. The selection of the threshold $G$ should be such that the probability the object being selected outside the gate $P_G$ is very small. # # $$ # P_G = Pr[d_{h_{k-1},\theta_k}^2 > G|h_{k-1},\theta_k] # $$ # # The distance is chi square distributed $\chi^2(n_z)$ with the number of degrees of freedom equal to dimensionality of the measurement vector. # # $$ # d_{h_{k-1},\theta_k}^2 > G|h_{k-1},\theta_k \backsim \chi^2(n_z) # $$ # # A common strategy will be to assume as high $P_G$ value, for example $99.5\%$ and use the chi squared CDF to find the threshold $G$. # ### Examples # ## Summary # # gaussian mixture class?
py/SOT algorithms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:py37] *
#     language: python
#     name: conda-env-py37-py
# ---

# # Instructions
#
# * pip install planetarypy
#
# I wrote highly generalized PDS index readers, that wrap the Python Parameter Value Language module PVL (The syntax for Planetary Data System label files). Relevant functions are in https://github.com/michaelaye/planetarypy/blob/master/planetarypy/pdstools.py
#
# But possibly it's more efficient that you just quickly parse the specific data files yourself:
#
# Download cumulative RDR index and label file here:
#
# * http://hirise-pds.lpl.arizona.edu/PDS/INDEX/RDRCUMINDEX.LBL
# * http://hirise-pds.lpl.arizona.edu/PDS/INDEX/RDRCUMINDEX.TAB
#
# The label file has the column names for the .TAB file, but in PVL format.
# The .TAB file is a fixed format text file.
# You can read out the column names and the column specification (i.e. start and end byte for each column) below, in case you can feed them to a Ruby text parser.

from planetarypy import pdstools as pds

# ### replace local file names here:

# + jupyter={"outputs_hidden": true}
# local copies of the cumulative RDR index label and table
labelfname = '/Users/klay6683/Dropbox/data/hirise/index/RDRCUMINDEX.LBL'
tablefname = '/Users/klay6683/Dropbox/data/hirise/index/RDRCUMINDEX.TAB'

# + jupyter={"outputs_hidden": true}
# parse the PVL label; it carries column names and fixed-width column specs
label = pds.IndexLabel(labelfname)

# + jupyter={"outputs_hidden": false}
label.colnames

# + jupyter={"outputs_hidden": false}
label.colspecs

# + jupyter={"outputs_hidden": false}
label.index_path

# + jupyter={"outputs_hidden": true}
# read the fixed-width .TAB into a DataFrame using the label's column specs
df = label.read_index_data()

# + jupyter={"outputs_hidden": false}
df.info()
# -

# > I have never seen the data-item "SOUTH_AZIMUTH". Possibly, this was confused with the item called "SUB-SOLAR AZIMUTH"? I put that column in there for now.

# ### example on how to filter for HiRISE obsids (i.e. Planet4 image_names)

# + jupyter={"outputs_hidden": true}
list_of_image_names = [
    'ESP_040246_0935',
    'ESP_039969_0935',
    'ESP_039824_0935',
    'ESP_039547_0935',
    'ESP_039468_0935',
    'ESP_038822_0935',
    'ESP_038625_0930',
    'ESP_038492_0935',
    'ESP_038215_0935',
    'ESP_038149_0935',
    'ESP_038110_0930',
    'ESP_037964_0935',
    'ESP_040311_0940',
    'ESP_040193_0940',
    'ESP_037977_0940',
    'ESP_037976_0940',
]

# + jupyter={"outputs_hidden": true}
# index by observation id so the obsid list above can be used with .loc
df.set_index('OBSERVATION_ID', inplace=True)

# + jupyter={"outputs_hidden": true}
data = df.loc[list_of_image_names]
# second line for the fact that lots/all metadata exists for _RED and _COLOR channels
data = data[data.PRODUCT_ID.str.endswith('_COLOR')]

# + jupyter={"outputs_hidden": false}
data.info()

# + jupyter={"outputs_hidden": true}
# don't really have a center lat/lon in this index, so calculating a rough avg value
data['MEAN_LATITUDE'] = (data.MAXIMUM_LATITUDE + data.MINIMUM_LATITUDE) / 2
data['MEAN_LONGITUDE'] = (data.MAXIMUM_LONGITUDE + data.MINIMUM_LONGITUDE) / 2

# + jupyter={"outputs_hidden": true}
# Planet4 field name -> index column name
translator = {
    'acquisition_date': 'START_TIME',
    'local_mars_time': 'LOCAL_TIME',
    'latitude': 'MEAN_LATITUDE',
    'longitude': 'MEAN_LONGITUDE',
    'range_to_target': 'TARGET_CENTER_DISTANCE',
    'original_image_scale': 'MAP_SCALE',
    'emission_angle': 'EMISSION_ANGLE',
    'phase_angle': 'PHASE_ANGLE',
    'solar_incidence': 'INCIDENCE_ANGLE',
    'solar_longitude': 'SOLAR_LONGITUDE',
    'north_azimuth': 'NORTH_AZIMUTH',
    'south_azimuth': 'SUB_SOLAR_AZIMUTH',
}

# + jupyter={"outputs_hidden": false}
output = data[list(translator.values())]
output
# -

output.to_csv('planet4_metadata.csv')
notebooks/planet4_hirise_index_parsing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Sustainable energy transitions data model import pandas as pd, numpy as np, json, copy, zipfile, random # ## Country and region name converters # + #country name converters #EIA->pop clist1={'North America':'Northern America', 'United States':'United States of America', 'Central & South America':'Latin America and the Caribbean', 'Bahamas, The':'Bahamas', 'Saint Vincent/Grenadines':'Saint Vincent and the Grenadines', 'Venezuela':'Venezuela (Bolivarian Republic of)', 'Macedonia':'The former Yugoslav Republic of Macedonia', 'Moldova':'Republic of Moldova', 'Russia':'Russian Federation', 'Iran':'Iran (Islamic Republic of)', 'Palestinian Territories':'State of Palestine', 'Syria':'Syrian Arab Republic', 'Yemen':'Yemen ', 'Congo (Brazzaville)':'Congo', 'Congo (Kinshasa)':'Democratic Republic of the Congo', 'Cote dIvoire (IvoryCoast)':"C\xc3\xb4te d'Ivoire", 'Gambia, The':'Gambia', 'Libya':'Libyan Arab Jamahiriya', 'Reunion':'R\xc3\xa9union', 'Somalia':'Somalia ', 'Sudan and South Sudan':'Sudan', 'Tanzania':'United Republic of Tanzania', 'Brunei':'Brunei Darussalam', 'Burma (Myanmar)':'Myanmar', 'Hong Kong':'China, Hong Kong Special Administrative Region', 'Korea, North':"Democratic People's Republic of Korea", 'Korea, South':'Republic of Korea', 'Laos':"Lao People's Democratic Republic", 'Macau':'China, Macao Special Administrative Region', 'Timor-Leste (East Timor)':'Timor-Leste', 'Virgin Islands, U.S.':'United States Virgin Islands', 'Vietnam':'Viet Nam'} #BP->pop clist2={u' European Union #':u'Europe', u'Rep. of Congo (Brazzaville)':u'Congo (Brazzaville)', 'Republic of Ireland':'Ireland', 'China Hong Kong SAR':'China, Hong Kong Special Administrative Region', u'Total Africa':u'Africa', u'Total North America':u'Northern America', u'Total S. 
& Cent. America':'Latin America and the Caribbean', u'Total World':u'World', u'Total World ':u'World', 'South Korea':'Republic of Korea', u'Trinidad & Tobago':u'Trinidad and Tobago', u'US':u'United States of America'} #WD->pop clist3={u"Cote d'Ivoire":"C\xc3\xb4te d'Ivoire", u'Congo, Rep.':u'Congo (Brazzaville)', u'Caribbean small states':'Carribean', u'East Asia & Pacific (all income levels)':'Eastern Asia', u'Egypt, Arab Rep.':'Egypt', u'European Union':u'Europe', u'Hong Kong SAR, China':u'China, Hong Kong Special Administrative Region', u'Iran, Islamic Rep.':u'Iran (Islamic Republic of)', u'Kyrgyz Republic':u'Kyrgyzstan', u'Korea, Rep.':u'Republic of Korea', u'Latin America & Caribbean (all income levels)':'Latin America and the Caribbean', u'Macedonia, FYR':u'The former Yugoslav Republic of Macedonia', u'Korea, Dem. Rep.':u"Democratic People's Republic of Korea", u'South Asia':u'Southern Asia', u'Sub-Saharan Africa (all income levels)':u'Sub-Saharan Africa', u'Slovak Republic':u'Slovakia', u'Venezuela, RB':u'Venezuela (Bolivarian Republic of)', u'Yemen, Rep.':u'Yemen ', u'Congo, Dem. Rep.':u'Democratic Republic of the Congo'} #COMTRADE->pop clist4={u"Bosnia Herzegovina":"Bosnia and Herzegovina", u'Central African Rep.':u'Central African Republic', u'China, Hong Kong SAR':u'China, Hong Kong Special Administrative Region', u'China, Macao SAR':u'China, Macao Special Administrative Region', u'Czech Rep.':u'Czech Republic', u"Dem. People's Rep. of Korea":"Democratic People's Republic of Korea", u'Dem. Rep. of the Congo':"Democratic Republic of the Congo", u'Dominican Rep.':u'Dominican Republic', u'Fmr Arab Rep. of Yemen':u'Yemen ', u'Fmr Ethiopia':u'Ethiopia', u'Fmr Fed. Rep. of Germany':u'Germany', u'Fmr Panama, excl.Canal Zone':u'Panama', u'Fmr Rep. of Vietnam':u'Viet Nam', u"Lao People's Dem. Rep.":u"Lao People's Democratic Republic", u'Occ. Palestinian Terr.':u'State of Palestine', u'Rep. of Korea':u'Republic of Korea', u'Rep. 
of Moldova':u'Republic of Moldova', u'Serbia and Montenegro':u'Serbia', u'US Virgin Isds':u'United States Virgin Islands', u'Solomon Isds':u'Solomon Islands', u'United Rep. of Tanzania':u'United Republic of Tanzania', u'TFYR of Macedonia':u'The former Yugoslav Republic of Macedonia', u'USA':u'United States of America', u'USA (before 1981)':u'United States of America', } #Jacobson->pop clist5={u"Korea, Democratic People's Republic of":"Democratic People's Republic of Korea", u'All countries':u'World', u"Cote d'Ivoire":"C\xc3\xb4te d'Ivoire", u'Iran, Islamic Republic of':u'Iran (Islamic Republic of)', u'Macedonia, Former Yugoslav Republic of':u'The former Yugoslav Republic of Macedonia', u'Congo, Democratic Republic of':u"Democratic Republic of the Congo", u'Korea, Republic of':u'Republic of Korea', u'Tanzania, United Republic of':u'United Republic of Tanzania', u'Moldova, Republic of':u'Republic of Moldova', u'Hong Kong, China':u'China, Hong Kong Special Administrative Region' } #NREL solar->pop clist6={u"Antigua & Barbuda":u'Antigua and Barbuda', u"Bosnia & Herzegovina":u"Bosnia and Herzegovina", u"Brunei":u'Brunei Darussalam', u"Cote d'Ivoire":"C\xc3\xb4te d'Ivoire", u"Iran":u'Iran (Islamic Republic of)', u"Laos":u"Lao People's Democratic Republic", u"Libya":'Libyan Arab Jamahiriya', u"Moldova":u'Republic of Moldova', u"North Korea":"Democratic People's Republic of Korea", u"Reunion":'R\xc3\xa9union', u'Sao Tome & Principe':u'Sao Tome and Principe', u'Solomon Is.':u'Solomon Islands', u'St. Lucia':u'Saint Lucia', u'St. 
Vincent & the Grenadines':u'Saint Vincent and the Grenadines', u'The Bahamas':u'Bahamas', u'The Gambia':u'Gambia', u'Virgin Is.':u'United States Virgin Islands', u'West Bank':u'State of Palestine' } #NREL wind->pop clist7={u"Antigua & Barbuda":u'Antigua and Barbuda', u"Bosnia & Herzegovina":u"Bosnia and Herzegovina", u'Occupied Palestinian Territory':u'State of Palestine', u'China Macao SAR':u'China, Macao Special Administrative Region', #"C\xc3\xb4te d'Ivoire":"C\xc3\xb4te d'Ivoire", u'East Timor':u'Timor-Leste', u'TFYR Macedonia':u'The former Yugoslav Republic of Macedonia', u'IAM-country Total':u'World' } #country entroids->pop clist8={u'Burma':'Myanmar', u"Cote d'Ivoire":"C\xc3\xb4te d'Ivoire", u'Republic of the Congo':u'Congo (Brazzaville)', u'Reunion':'R\xc3\xa9union' } def cnc(country): if country in clist1: return clist1[country] elif country in clist2: return clist2[country] elif country in clist3: return clist3[country] elif country in clist4: return clist4[country] elif country in clist5: return clist5[country] elif country in clist6: return clist6[country] elif country in clist7: return clist7[country] elif country in clist8: return clist8[country] else: return country # - # # Population # Consult the notebook entitled *pop.ipynb* for the details of mining the data from the UN statistics division online database. # Due to being the reference database for country names cell, the cell below needs to be run first, before any other databases. 
#pop_path='https://dl.dropboxusercontent.com/u/531697/datarepo/Set/db/ #pop_path='E:/Dropbox/Public/datarepo/Set/db/' pop_path='C:/Users/dcsala/Dropbox/Public/datarepo/Set/db/' #suppres warnings import warnings warnings.simplefilter(action = "ignore") cc=pd.read_excel(pop_path+'Country Code and Name ISO2 ISO3.xls') #http://unstats.un.org/unsd/tradekb/Attachment321.aspx?AttachmentType=1 ccs=cc['Country Code'].values # ## Country neighbor list neighbors=pd.read_csv(pop_path+'contry-geotime.csv') #https://raw.githubusercontent.com/ppKrauss/country-geotime/master/data/contry-geotime.csv #country name converter from iso to comtrade and back iso2c={} isoc2={} for i in cc.T.iteritems(): iso2c[i[1][0]]=i[1][1] isoc2[i[1][1]]=i[1][0] #country name converter from pop to iso pop2iso={} for i in cc.T.iteritems(): pop2iso[cnc(i[1][1])]=int(i[1][0]) #country name converter from alpha 2 to iso c2iso={} for i in neighbors.T.iteritems(): c2iso[str(i[1][0])]=i[1][1] c2iso['NA']=c2iso['nan'] #adjust for namibia c2iso.pop('nan'); #create country neighbor adjacency list based on iso country number codes c2neighbors={} for i in neighbors.T.iteritems(): z=str(i[1][4]).split(' ') if (str(i[1][1])!='nan'): c2neighbors[int(i[1][1])]=[c2iso[k] for k in z if k!='nan'] #extend iso codes not yet encountered iso2c[729]="Sudan" iso2c[531]="Curacao" iso2c[535]="Bonaire, Sint Eustatius and Saba" iso2c[728]="South Sudan" iso2c[534]="Sint Maarten (Dutch part)" iso2c[652]="Saint Barthélemy" def save3(sd,countrylist=[]): #if True: try: import zlib compression = zipfile.ZIP_DEFLATED except: compression = zipfile.ZIP_STORED popsave={} countries=[] isocountries={} if countrylist==[]: c=sorted(data.keys()) else: c=countrylist for country in c: popdummy={} tosave=[] for year in data[country]: popdummy[year]=data[country][year]['population'] for fuel in data[country][year]['energy']: #for fuel in allfuels: if fuel not in {'nrg','nrg_sum'}: tosave.append({"t":year,"u":fuel,"g":"f","q1":"pp","q2":999, 
"s":round(0 if (('navg3' in data[country][year]['energy'][fuel]['prod']) \ and (np.isnan(data[country][year]['energy'][fuel]['prod']['navg3']))) else \ data[country][year]['energy'][fuel]['prod']['navg3'] if \ 'navg3' in data[country][year]['energy'][fuel]['prod'] else 0,3) }) tosave.append({"t":year,"u":fuel,"g":"m","q1":"cc","q2":999, "s":round(0 if (('navg3' in data[country][year]['energy'][fuel]['cons']) \ and (np.isnan(data[country][year]['energy'][fuel]['cons']['navg3']))) else \ data[country][year]['energy'][fuel]['cons']['navg3'] if \ 'navg3' in data[country][year]['energy'][fuel]['cons'] else 0,3) }) #save balances - only for dev #if (year > min(balance.keys())): # if year in balance: # if country in balance[year]: # tosave.append({"t":year,"u":"balance","g":"m","q1":"cc","q2":999, # "s":balance[year][country]}) #no import export flows on global if country not in {"World"}: flowg={"Import":"f","Export":"m","Re-Export":"m","Re-Import":"f"} if country in tradealpha: for year in tradealpha[country]: for fuel in tradealpha[country][year]: for flow in tradealpha[country][year][fuel]: for partner in tradealpha[country][year][fuel][flow]: if partner not in isocountries: isocountries[partner]=cnc(iso2c[int(float(partner))]) tosave.append({"t":int(float(year)),"u":fuel,"g":flowg[flow],"q1":flow,"q2":partner, "s":round(tradealpha[country][year][fuel][flow][partner],3) }) popsave[country]=popdummy countries.append(country) file('C:/Users/dcsala/Dropbox/Public/datarepo/Set/json/'+str(sd)+'/data.json','w').write(json.dumps(tosave)) zf = zipfile.ZipFile('C:/Users/dcsala/Dropbox/Public/datarepo/Set/json/'+str(sd)+'/'+str(country.encode('utf-8').replace('/','&&'))+'.zip', mode='w') zf.write('C:/Users/dcsala/Dropbox/Public/datarepo/Set/json/'+str(sd)+'/data.json','data.json',compress_type=compression) zf.close() #save all countries list file('C:/Users/dcsala/Dropbox/Public/datarepo/Set/json/'+str(sd)+'/countries.json','w').write(json.dumps(countries)) #save countries 
populations #file('E:/Dropbox/Public/datarepo/Set/json/pop.json','w').write(json.dumps(popsave)) #save all trade countries dictionary file('C:/Users/dcsala/Dropbox/Public/datarepo/Set/json/'+str(sd)+'/isocountries.json','w').write(json.dumps(isocountries)) #https://www.researchgate.net/publication/299824220_First_Insights_on_the_Role_of_solar_PV_in_a_100_Renewable_Energy_Environment_based_on_hourly_Modeling_for_all_Regions_globally cost=pd.read_excel(pop_path+'/maps/storage.xlsx') #load tradealpha d predata=json.loads(file(pop_path+'/trade/traded.json','r').read()) tradealpha={} for c in predata: tradealpha[c]={} for year in predata[c]: tradealpha[c][int(year)]=predata[c][year] predata={} #existing electricity trade grid (and other fuels) grid={} allgrid={} gridz={} allgridsize={} zgrid={} zzsize={} for fuel in {"oil","coal","gas","electricity"}: if fuel not in grid:grid[fuel]={} if fuel not in zgrid:zgrid[fuel]={} if fuel not in zzsize:zzsize[fuel]={} gridpartners={} gridsize={} for c in tradealpha: if c not in gridpartners:gridpartners[c]=[] if c not in allgrid:allgrid[c]=[] if c not in gridsize:gridsize[c]=0 if c not in allgridsize:allgridsize[c]=0 for y in tradealpha[c]: if y not in zgrid[fuel]:zgrid[fuel][y]={} if y not in zzsize[fuel]:zzsize[fuel][y]={} if fuel in tradealpha[c][y]: for f in tradealpha[c][y][fuel]: for i in tradealpha[c][y][fuel][f]: if i not in {"World","Northern America","Africa",'Latin America and the Caribbean','Europe'}: if int(float(i)) in iso2c: if not np.isnan(tradealpha[c][y][fuel][f][i]): if tradealpha[c][y][fuel][f][i]!=0: p=cnc(iso2c[int(float(i))]) gridsize[c]+=tradealpha[c][y][fuel][f][i] if p not in gridpartners[c]: gridpartners[c].append(p) allgridsize[c]+=tradealpha[c][y][fuel][f][i] if p not in allgrid[c]: allgrid[c].append(p) if c not in zzsize[fuel][y]:zzsize[fuel][y][c]=0 zzsize[fuel][y][c]+=tradealpha[c][y][fuel][f][i] if c not in zgrid[fuel][y]:zgrid[fuel][y][c]={} if p not in zgrid[fuel][y][c]: 
zgrid[fuel][y][c][p]=tradealpha[c][y][fuel][f][i] grid[fuel]=gridpartners gridz[fuel]=gridsize #existing electricity trade grid (and other fuels) last 5 years grid5={} gridz5={} allgrid5={} allgridsize5={} for fuel in {"oil","coal","gas","electricity"}: if fuel not in grid:grid[fuel]={} gridpartners={} gridsize={} for c in tradealpha: if c not in gridpartners:gridpartners[c]=[] if c not in allgrid5:allgrid5[c]=[] if c not in gridsize:gridsize[c]=0 if c not in allgridsize5:allgridsize5[c]=0 for y in tradealpha[c]: if y>2010: if fuel in tradealpha[c][y]: for f in tradealpha[c][y][fuel]: for i in tradealpha[c][y][fuel][f]: if i not in {"World","Northern America","Africa",'Latin America and the Caribbean','Europe'}: if int(float(i)) in iso2c: if not np.isnan(tradealpha[c][y][fuel][f][i]): if tradealpha[c][y][fuel][f][i]!=0: p=cnc(iso2c[int(float(i))]) if p not in gridpartners[c]: gridpartners[c].append(p) allgridsize[c]+=tradealpha[c][y][fuel][f][i] if p not in allgrid[c]: allgrid[c].append(p) grid5[fuel]=gridpartners gridz5[fuel]=gridsize # world country centroids for network visualizations import requests, StringIO #r = requests.get('http://gothos.info/resource_files/country_centroids.zip') #define URL path of zip file to read #z = zipfile.ZipFile(StringIO.StringIO(r.content)) z = zipfile.ZipFile('country_centroids.zip') coord=pd.read_csv(z.open('country_centroids_all.csv'),sep='\t').drop(['DMS_LAT','DMS_LONG','MGRS','JOG','DSG','FULL_NAME','ISO3136','AFFIL','FIPS10','MOD_DATE'],axis=1) coord.columns=['LAT','LONG','Country'] coord=coord.set_index('Country',drop=True) coord.head(2) #create normalized distance matrix of countries names=[] for i in coord.index: names.append(cnc(i)) coord['NAME']=names coord=coord.set_index('NAME',drop=True) # + from math import radians, cos, sin, asin, sqrt def haversine(lon1, lat1, lon2, lat2): """ Calculate the great circle distance between two points on the earth (specified in decimal degrees) """ # convert decimal degrees to radians 
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) # haversine formula dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 c = 2 * asin(sqrt(a)) r = 6371 # Radius of earth in kilometers. Use 3956 for miles return c * r def distance(i,j): if i in coord.index and j in coord.index: return haversine(coord.loc[i]['LONG'],coord.loc[i]['LAT'], coord.loc[j]['LONG'],coord.loc[j]['LAT']) else: return 5000 # - predata=json.loads(file(pop_path+'savedata5.json','r').read()) data={} for c in predata: data[c]={} for year in predata[c]: data[c][int(year)]=predata[c][year] predata={} goodcountries=list(set(data.keys()).intersection(set(tradealpha.keys()))) limit=20 #dev #goodcountries=goodcountries[:limit] rgc={} #reverse goodcountries coder for i in range(len(goodcountries)): rgc[goodcountries[i]]=i matrix=[[0 for i in goodcountries] for j in goodcountries] #long, run once dists=[] for i in range(len(goodcountries)): for j in range(i): dists.append(distance(goodcountries[i],goodcountries[j])) distancenorm=np.mean(dists) def normdistance(i,j): if i in coord.index and j in coord.index: return haversine(coord.loc[i]['LONG'],coord.loc[i]['LAT'], coord.loc[j]['LONG'],coord.loc[j]['LAT'])/distancenorm else: return 5000.0/distancenorm # ## ALLOCATION # Create Hungarian cost matrix. this will be a normalized, per unit EROEI * willingness to do trade. this is the cumulative, directed trade history. # Calculate trade influence matrix. Trade influence is proportional to the total histrocial value of energy trade. If there is no energy trade, some trade is still better than no trade. We will two pairs of import-export flows for each pair, once in each entry's data. For deciding the impor cost influence, we will use 2/3 historical imports and 1/3 historcial exports, caluclated separately for sender and receiver flows, then averged. E.g. 
a good import partner of country A, let's call it country B, exports a large percentage of its exports to country A and country A receives a large share of its exports from B. The reverse flows are also factored in with a weight of 1/3. The partner-parnter weights are factored in with weights 2/3 (meaning that it is a directed flow but any country who depends on imports or exports signifcantly on another, will push hard politically to keep this trade flowing). We have no weighting to differentiate between energy products, all treated the same. We consider the direct flows more reliable information, hence we give them a weight of 2/3, while the reverse flows get 1/3. # ## Impex updating tradecutoff=0.01 # + #direct flow matrices importmatrix=[[0 for i in goodcountries] for j in goodcountries] exportmatrix=[[0 for i in goodcountries] for j in goodcountries] #reverse flow matrices rimportmatrix=[[0 for i in goodcountries] for j in goodcountries] rexportmatrix=[[0 for i in goodcountries] for j in goodcountries] cid={} for i in range(len(goodcountries)): cid[goodcountries[i]]=i #fill import-export matrix for year x with flow f of value v def impex(reporter,partner,flow,value): global importmatrix global exportmatrix global rimportmatrix global rexportmatrix i=cid[reporter] j=cid[partner] if flow in {"Export","Re-Export"}: exportmatrix[i][j]+=value rimportmatrix[j][i]+=value if flow in {"Import","Re-Import"}: importmatrix[i][j]+=value rexportmatrix[j][i]+=value return #fill up existing values def reloadimpex(): #runright after after resetting tradealpha for i in range(len(goodcountries)): reporter=goodcountries[i] for year in tradealpha[reporter]: for fuel in tradealpha[reporter][year]: for flow in tradealpha[reporter][year][fuel]: for p in tradealpha[reporter][year][fuel][flow]: pp=int(float(str(p))) if pp in iso2c: if cnc(iso2c[pp]) in goodcountries: #self trade allowed partner=cnc(iso2c[pp]) value=tradealpha[reporter][year][fuel][flow][p] if value>tradecutoff: 
impex(reporter,partner,flow,value) # + #create influence matrix (from normilzed trade matrices) #norm direct flow matrices nimportmatrix=[[0 for i in goodcountries] for j in goodcountries] nexportmatrix=[[0 for i in goodcountries] for j in goodcountries] #norm reverse flow matrices nrimportmatrix=[[0 for i in goodcountries] for j in goodcountries] nrexportmatrix=[[0 for i in goodcountries] for j in goodcountries] def normalizeimpex(): global nimportmatrix global nexportmatrix global nrimportmatrix global nrexportmatrix #initialize normalized matrices for i in range(len(goodcountries)): for j in range(len(goodcountries)): if np.nanmean(importmatrix[i])>0:nimportmatrix[i][j]=importmatrix[i][j]/np.nanmean(importmatrix[i]) if np.nanmean(exportmatrix[i])>0:nexportmatrix[i][j]=exportmatrix[i][j]/np.nanmean(exportmatrix[i]) if np.nanmean(rimportmatrix[i])>0:nrimportmatrix[i][j]=rimportmatrix[i][j]/np.nanmean(rimportmatrix[i]) if np.nanmean(rexportmatrix[i])>0:nrexportmatrix[i][j]=rexportmatrix[i][j]/np.nanmean(rexportmatrix[i]) # + def impex(reporter,partner,flow,value): global importmatrix global exportmatrix global rimportmatrix global rexportmatrix i=cid[reporter] j=cid[partner] if flow in {"Export","Re-Export"}: exportmatrix[i][j]+=value rimportmatrix[j][i]+=value if flow in {"Import","Re-Import"}: importmatrix[i][j]+=value rexportmatrix[j][i]+=value return def updatenormimpex(reporter,partner,flow,value,weight=0.1): global mimportmatrix global mexportmatrix global mrimportmatrix global mrexportmatrix i=cid[reporter] j=cid[partner] if flow in {"Export","Re-Export"}: nexportmatrix[i][j]=(nexportmatrix[i][j]*(1-weight))+(value*weight) nrimportmatrix[j][i]=(nrimportmatrix[j][i]*(1-weight))+(value*weight) if flow in {"Import","Re-Import"}: nimportmatrix[i][j]=(nrimportmatrix[i][j]*(1-weight))+(value*weight) nrexportmatrix[j][i]=(nrexportmatrix[j][i]*(1-weight))+(value*weight) return def influence(reporter,partner,selfinfluence=2,nonlinearitypower=0.6): i=cid[reporter] 
j=cid[partner] #max influence z=(12.0/36*np.max(nimportmatrix[i][j])\ +6.0/36*np.max([nexportmatrix[k][i] for k in range(len(nimportmatrix))])\ +4.0/36*np.max(nrimportmatrix[i][j])\ +2.0/36*np.max([nrexportmatrix[k][i] for k in range(len(nimportmatrix))])\ +6.0/36*np.max(nexportmatrix[i][j])\ +3.0/36*np.max([nimportmatrix[j][i] for k in range(len(nimportmatrix))])\ +2.0/36*np.max(nrexportmatrix[i][j])\ +1.0/36*np.max([nrimportmatrix[k][i] for k in range(len(nimportmatrix))])) if i==j: v=z*selfinfluence else: v=12.0/36*nimportmatrix[i][j]\ +6.0/36*nexportmatrix[j][i]\ +4.0/36*nrimportmatrix[i][j]\ +2.0/36*nrexportmatrix[j][i]\ +6.0/36*nexportmatrix[i][j]\ +3.0/36*nimportmatrix[j][i]\ +2.0/36*nrexportmatrix[i][j]\ +1.0/36*nrimportmatrix[j][i] #trade is naturally exponentially distributed, so we need to account for this return v**nonlinearitypower # - # Create energy cost by filling the matrix with the cost of row importing 1TWh from column. neglecting transport energy costs for now, this will be the extraction energy cost. Let us consider only solar for now. Try optimization with all three source, choose one with best objective value. 1TWh tier changes based on granurality. #weighted resource class calculator def re(dic,total): if dic!={}: i=max(dic.keys()) mi=min(dic.keys()) run=True keys=[] weights=[] counter=0 while run: counter+=1 #safety break if counter>1000: run=False if i in dic: if total<dic[i]: keys.append(i) weights.append(total) run=False else: total-=dic[i] keys.append(i) weights.append(dic[i]) i-=1 if i<mi: run=False if sum(weights)==0: return 0 else: return np.average(keys,weights=weights) else: return 0 # Allocation algorithm: # # 1. Step year # 1. Create list of countries with negative balances # 1. Pick a country at random from the list who have negative balances # 1. Tier the country's needs into k(=5) tiers # 1. Query cost matrix for all countries for the amount on the tier k for all energy sources # 1. 
Choose partner and energy source based on an indicator composed of:
# 1. Lowest cost
# 1. Country influence - partners are ranked from 0 to 1. Self has a weight of 2.
# 1. Does not exceed diversification limits within the same year (providing max z%(=20) of the year's demand)
# 1. Choose form of trade:
# 1. Calculate costs for trade for:
# 1. On existing grid or grid expansion
# 1. PTL
# 1. Calculate costs of storage based on:
# 1. Global storage need in grid share and its distributions by type (from Breyer)
# 1. Implement trade:
# 1. Update receiver balance
# 1. Reduce provider reserves
# 1. Add provider investment costs: building the capacity
# 1. Distribute equally on provider energy mix
# 1. Add receiver investment costs: storage, transport, grid expansion, grid rent
# 1. Distribute equally on receiver energy mix
# - Record total cost of the trade
# 1. Pick another country at random and repeat until all countries are finished
# 1. Calculate total cost of year
# 1. Try to minimize total cost through simulated annealing repeated within year:
# 1. Start with random processing order for countries
# 1. Complete the year with that order
# 1. Swap two countries in the order at random
# 1. Repeat the year and see if the total costs decreased
# 1. If yes, keep the swap, then swap another one
# 1. If no, revert the swap and swap another one
# 1. Calculate total cost of transition for year, then step year
# 1. After all years finished, record total cost of transition

#average resource quality calculator for the globe
def update_aroei():
    # Recompute the global average resource class aroei[r] for every
    # renewable resource r: each class value is weighted by the total
    # capacity available in that class, summed over all countries in `res`.
    global aroei
    aroei={}
    groei={}
    for c in res:
        for r in res[c]:
            if r not in groei: groei[r]={}
            for cl in res[c][r]['res']:
                if cl not in groei[r]: groei[r][cl]=0
                groei[r][cl]+=res[c][r]['res'][cl]
    for r in groei:
        x=[]
        y=[]
        for i in range(len(sorted(groei[r].keys()))):
            x.append(float(sorted(groei[r].keys())[i]))
            y.append(float(groei[r][sorted(groei[r].keys())[i]]))
        #capacity-weighted mean resource class for this resource
        aroei[r]=np.average(x,weights=y)

#1Bdi - grid
def gridtestimator(country,partner):
    # Estimate the trade-cost multiplier ("scaler") for moving energy from
    # `partner` to `country`, comparing grid electricity trade against
    # power-to-liquid (PTL) shipping, and report which way is cheaper.
    def electricitytrade(country,partner):
        # Grid cost multiplier: 1 plus a build/usage surcharge that grows
        # with how much new infrastructure the pair would need.
        scaler=1
        gridpartners=grid5['electricity']
        #existing trade partners
        if ((partner in gridpartners[country]) or (country in gridpartners[partner])):
            scaler+=cost.loc[region.loc[country]]['egrid'].values[0]/2.0
        #neighbors, but need to build
        elif pop2iso[country] in c2neighbors:
            # NOTE(review): the region/supergrid branches below are attached to
            # the inner neighbor test as reconstructed here; countries absent
            # from c2neighbors fall through with scaler==1 — confirm intended.
            if (pop2iso[partner] in c2neighbors[pop2iso[country]]):
                scaler+=cost.loc[region.loc[country]]['grid'].values[0]/2.0*normdistance(country,partner)
            #not neighbors or partners but in the same region, need to build
            elif (region.loc[country][0]==region.loc[partner][0]):
                scaler+=cost.loc[region.loc[country]]['grid'].values[0]*3.0/2.0*normdistance(country,partner)
            #need to build supergrid, superlative costs
            else:
                scaler+=cost.loc[region.loc[country]]['grid'].values[0]*10.0/2.0*normdistance(country,partner)
        return scaler
    def ptltrade(country,partner):
        #ptg costs scale with distance
        scaler=1+cost.loc[11]['ptg']*normdistance(country,partner)
        return scaler
    if ptltrade(country,partner)<electricitytrade(country,partner):
        return {"scaler":ptltrade(country,partner),"tradeway":"ptl"}
    else:
        return {"scaler":electricitytrade(country,partner),"tradeway":"grid"}

#1Bdii - storage &curtailment
def storagestimator(country):
    # Regional minimum storage requirement share for `country`, read from
    # the `cost` table via the country's region.
    return cost.loc[region.loc[country]]['min'].values[0]

#curtoversizer
def curtestimator(country):
    # Regional curtailment oversizing factor for `country`, read from the
    # `cost` table via the country's region.
    return cost.loc[region.loc[country]]['curt'].values[0]

#global eroei, due to state of technology
#http://www.sciencedirect.com/science/article/pii/S0301421513003856 eroei0={ 'oil':13, 'coal':27, 'gas':14, 'nuclear':10, 'biofuels':1.5, 'hydro':84, 'geo_other':22, 'pv':16, 'csp':8, 'wind':16 #was 24 } #esoei #http://pubs.rsc.org/en/content/articlepdf/2013/ee/c3ee41973h #various, but especially CSP from https://en.wikipedia.org/wiki/EROEI #http://link.springer.com/chapter/10.1007/978-3-319-02940-5_5#Sec18 #charlie hall says number are 5-7 for csp, but without additional costs of the supporting infrastructure # # ALLINONE sd=3 region=pd.read_excel(pop_path+'regions.xlsx').set_index('Country') gi={"open":{},"notrade":{}} #initialize renewable totals for learning total2015={'csp':0,'solar':0,'wind':0} learning={'csp':0.04,'solar':0.08,'wind':0.02} year=2015 for fuel in total2015: total2015[fuel]=np.nansum([np.nansum(data[partner][year]['energy'][fuel]['prod']['navg3'])\ for partner in goodcountries if fuel in data[partner][year]['energy']]) x=[] y=[] for i in range(1,32): x.append(i) y.append(i**0.02) import matplotlib.pyplot as plt # %matplotlib inline plt.plot(x,y) # + #import resources ################################### ################################### #load resources predata=json.loads(file(pop_path+'maps/res.json','r').read()) res={} for c in predata: res[c]={} for f in predata[c]: res[c][f]={} for r in predata[c][f]: res[c][f][r]={} for year in predata[c][f][r]: res[c][f][r][int(year)]=predata[c][f][r][year] predata={} print 'scenario',sd,'loaded resources', ################################### ################################### #load demand2 predata=json.loads(file(pop_path+'demand2.json','r').read()) demand2={} for c in predata: demand2[c]={} for year in predata[c]: demand2[c][int(year)]=predata[c][year] predata={} print 'demand2', ################################### ################################### #load tradealpha d predata=json.loads(file(pop_path+'/trade/traded.json','r').read()) tradealpha={} for c in predata: tradealpha[c]={} for year in 
predata[c]: tradealpha[c][int(year)]=predata[c][year] predata={} print 'tradedata', ################################### ################################### #reload impex and normalize predata=json.loads(file(pop_path+'trade/nimpex.json','r').read()) nexportmatrix=predata["nexport"] nimportmatrix=predata["nimport"] nrexportmatrix=predata["nrexport"] nrimportmatrix=predata["nrimport"] predata={} print 'impex', ################################### ################################### #load latest savedata #we dont change the data for now, everything is handled through trade predata=json.loads(file(pop_path+'savedata5.json','r').read()) data={} for c in predata: data[c]={} for year in predata[c]: data[c][int(year)]=predata[c][year] predata={} print 'data' ################################### ################################### ################################### ################################### ################################### ################################### gi={"open":{},"notrade":{}} eroei={} once=True rampuplimit=0.15 #overall generation ramp up limit fuelrampuplimit=0.5 #inditvidual fuel ramp up limit for selfinfluence in {1.2}:#10 globalinvestment={} release={} #release reserves for year in range(2015,2100): print year #SET PARAMETERS #------------------------------------------------ #0-release reserves if year in release: for c in release[year]: for fuel in release[year][c]: for level in release[year][c][fuel]: if level in res[c][fuel]['res']: res[c][fuel]['res'][level]+=release[year][c][fuel][level] else: res[c][fuel]['res'][level]=release[year][c][fuel][level] #reset balance balance={} #recalculate balances for c in goodcountries: balance[c]=0 if c in tradealpha: f1=0 for fuel in tradealpha[c][year]: if 'Import' in tradealpha[c][year][fuel]: f1=np.nansum([f1,sum(tradealpha[c][year][fuel]['Import'].values())]) if 'Re-Import' in tradealpha[c][year][fuel]: f1=np.nansum([f1,sum(tradealpha[c][year][fuel]['Re-Import'].values())]) if 'Export' in 
tradealpha[c][year][fuel]: f1=np.nansum([f1,-sum(tradealpha[c][year][fuel]['Export'].values())]) if 'Re-Export' in tradealpha[c][year][fuel]: f1=np.nansum([f1,-sum(tradealpha[c][year][fuel]['Re-Export'].values())]) if fuel in data[c][year]['energy']: f1=np.nansum([f1,data[c][year]['energy'][fuel]['prod']['navg3']]) balance[c]=-(demand2[c][year]*8760*1e-12-f1) #1A avgbalance=np.mean(balance.values()) needers=sorted([c for c in balance if balance[c]<0])[:] givers=sorted([c for c in balance if balance[c]>avgbalance]) #update global eroei fuel2={'csp':'csp','pv':'solar','wind':'wind'} for t in fuel2: fuel=fuel2[t] eroei[t]=eroei0[t]*(np.nansum([np.nansum(data[partner][year]['energy'][fuel]['prod']['navg3'])\ for partner in goodcountries if fuel in data[partner][year]['energy']])*1.0/total2015[fuel])**learning[fuel] ################################################# #1B #import random #random.seed(sd*year) #shuffle order of parsing countries #random.shuffle(needers) #------------------------------------------------ #1Ba #country for parsing the needers list for counter in range(len(needers)): country=needers[counter] #print country, need=-balance[country] #as a convention switch to positive, defined as 'need' mintier=1 #in TWh midtier=10 #mid tier TWh hitier=100 #mid tier TWh if need>hitier: tiernumber=10 elif need>midtier: tiernumber=5 elif need>mintier: tiernumber=3 else: tiernumber=1 for tier in range(tiernumber): tierneed=need*1.0/tiernumber #------------------------------------------------ #1Bb costvector={} update_aroei() #update sate of the resources globally to be able to rank between technologies for partner in givers+[country]: if partner in res: for fuel in {'csp','pv','wind'}: #at each time step you much import each fuel typeat least once if res[partner][fuel]['res']!={}: #query if giver can ramp up production this fast #max investment cannot exceed rampuplimit (=15%) ok=False su=np.sum([data[partner][year]['energy'][ii]['prod']['navg3'] \ for ii in 
data[partner][year]['energy'] if ii not in {"nrg","nrg_sum"}]) if su*rampuplimit>tierneed: #not tierneed if fuel2[fuel] in data[partner][year]['energy']: if np.isnan(data[partner][year]['energy'][fuel2[fuel]]['prod']['navg3']): ok=True elif data[partner][year]['energy'][fuel2[fuel]]['prod']['navg3']==0: ok=True elif (tierneed<data[partner][year]['energy'][fuel2[fuel]]['prod']['navg3']*fuelrampuplimit):ok=True #again not tierneed else: ok=False else: ok=True #new resource, build it if ok: #rq (resource query) returns the average resource class at which this tierneed can be provided #we multiply by the storage/curtailment needs storagescaler=(1+storagestimator(partner)+curtestimator(partner)) rq=re(res[partner][fuel]['res'],tierneed)/storagescaler #the costvector takes the resource class and converts it to eroei by comparing it #the average resource class at a known point with a know eroei (at start in 2015) #we are looking figh highvalues, as a marginal quality of resource costvector[fuel+'_'+partner]=(rq/aroei[fuel]*eroei[fuel]) #normalized resource quality over eroei if costvector=={}: print 'impossible to fullfill demand', country #1Bbi - norlmalize costvector to be able to compare with trade influence else: normcostvector=copy.deepcopy(costvector) for i in normcostvector: costvector[i]/=np.nanmean(costvector.values()) #1Bbii - create costfactor, weights are tweakable costfactor={} for key in costvector: partner=key[key.find('_')+1:] costfactor[key]=((costvector[key]**2)*(influence(country,partner,selfinfluence)**2))**(1/4.0) #costfactor[key]=costvector[key] #The geometric mean is more appropriate than the arithmetic mean for describing proportional growth, #both exponential growth (constant proportional growth) and varying growth; i #n business the geometric mean of growth rates is known as the compound annual growth rate (CAGR). #The geometric mean of growth over periods yields the equivalent constant growth rate that would #yield the same final amount. 
#influence(country,partner,2) - third parameter : relative importance of #self comparted to most influential country #1Bc - choose partner best=max(costfactor, key=costfactor.get) tradepartner=best[best.find('_')+1:] tradefuel=best[:best.find('_')] #------------------------------------------------ #1Be - IMPLEMENT TRADE lt=int(20+random.random()*15) #lifetime #otherwise we have to implement resource updating #1Beii - Reduce provider reserves within year levels=res[tradepartner][tradefuel]['res'].keys() level=max(levels) tomeet=tierneed*1.0 #record release lt years in the future if year+lt not in release:release[year+lt]={} if tradepartner not in release[year+lt]:release[year+lt][tradepartner]={} if tradefuel not in release[year+lt][tradepartner]:release[year+lt][tradepartner][tradefuel]={} #hold resources for lt while level>min(levels): if level not in res[tradepartner][tradefuel]['res']: level-=1 elif res[tradepartner][tradefuel]['res'][level]<tomeet: tomeet-=res[tradepartner][tradefuel]['res'][level] if level not in release[year+lt][tradepartner][tradefuel]: release[year+lt][tradepartner][tradefuel][level]=0 release[year+lt][tradepartner][tradefuel][level]+=res[tradepartner][tradefuel]['res'][level] res[tradepartner][tradefuel]['res'].pop(level) level-=1 else: res[tradepartner][tradefuel]['res'][level]-=tomeet if level not in release[year+lt][tradepartner][tradefuel]: release[year+lt][tradepartner][tradefuel][level]=0 release[year+lt][tradepartner][tradefuel][level]+=tomeet level=0 #------------------------------------------------ #1Be-implement country trade #only production capacity stays, trade does not have to gyear=int(1.0*year) for year in range(gyear,min(2100,gyear+lt)): #update globalinvestment if year not in globalinvestment:globalinvestment[year]={"net":0,"inv":0} globalinvestment[year]["net"]+=tierneed globalinvestment[year]["inv"]+=tierneed/normcostvector[best] #add production if tradefuel not in data[tradepartner][year]['energy']: 
data[tradepartner][year]['energy'][tradefuel]={'prod':{'navg3':0},'cons':{'navg3':0}} data[tradepartner][year]['energy'][tradefuel]['prod']['navg3']+=tierneed #add storage if tradefuel not in {'csp'}: if 'storage' not in data[tradepartner][year]['energy']: data[tradepartner][year]['energy']['storage']={'prod':{'navg3':0},'cons':{'navg3':0}} data[tradepartner][year]['energy']['storage']['prod']['navg3']+=tierneed*storagestimator(tradepartner) data[tradepartner][year]['energy']['storage']['cons']['navg3']+=tierneed*storagestimator(tradepartner) year=gyear #add consumption if tradefuel not in data[country][year]['energy']: data[country][year]['energy'][tradefuel]={'prod':{'navg3':0},'cons':{'navg3':0}} data[country][year]['energy'][tradefuel]['cons']['navg3']+=tierneed #add trade flows if not self key=gridtestimator(country,partner)['tradeway']+'_'+tradefuel if country!=tradepartner: #add import flow if key not in tradealpha[country][year]:tradealpha[country][year][key]={} if 'Import' not in tradealpha[country][year][key]:tradealpha[country][year][key]["Import"]={} if str(pop2iso[tradepartner]) not in tradealpha[country][year][key]["Import"]: tradealpha[country][year][key]["Import"][str(pop2iso[tradepartner])]=0 tradealpha[country][year][key]["Import"][str(pop2iso[tradepartner])]+=tierneed #add export flow if key not in tradealpha[tradepartner][year]:tradealpha[tradepartner][year][key]={} if 'Export' not in tradealpha[tradepartner][year][key]:tradealpha[tradepartner][year][key]["Export"]={} if str(pop2iso[country]) not in tradealpha[tradepartner][year][key]["Export"]: tradealpha[tradepartner][year][key]["Export"][str(pop2iso[country])]=0 tradealpha[tradepartner][year][key]["Export"][str(pop2iso[country])]+=tierneed #record trade to influence - counld be weighted, deaful is 10% updatenormimpex(country,tradepartner,'Import',tierneed/need) updatenormimpex(tradepartner,country,'Export',tierneed/need) #save data for processed countries print 'saving...' 
if selfinfluence==10: sde=13 sdk="open" else: sde=14 sdk="notrade" gi[sdk]=globalinvestment save3(sde) file('C:/Users/dcsala/Dropbox/Public/datarepo/Set/json/'+str(sde)+'/gi.json','w').write(json.dumps(gi)) print 'done',sde # - file('C:/Users/dcsala/Dropbox/Public/datarepo/Set/json/'+str(sde)+'/gi.json','w').write(json.dumps(gi)) print 'done',sde if selfinfluence==10: sde=13 sdk="open" else: sde=14 sdk="notrade" gi[sdk]=globalinvestment save3(sde) file('C:/Users/dcsala/Dropbox/Public/datarepo/Set/json/'+str(sde)+'/gi.json','w').write(json.dumps(gi)) print 'done',sde
arh/netset6-1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TABELAS VERDADE # TABELA DO AND (com 2 booleanos) print(" x | y | x and y ") print("-------------------------") print(" True | True |", True and True ) print(" True | False |", True and False ) print(" False | True |", False and True) print(" False | False |", False and False) # TABELA DO AND (com 3 booleanos) print(" x | y | z | x and y and z ") print("----------------------------------------") print(" True | True | True |",True and True and True) print(" True | True | False |",True and True and False) print(" True | False | True |",True and False and True) print(" True | False | False |",True and False and False) print(" False | True | True |",False and True and True) print(" False | True | False |",False and True and False) print(" False | False | True |",False and False and True) print(" False | False | False |",False and False and False) # TABELA DO OR print(" x | y | x or y ") print("-------------------------") print(" True | True |",True or True ) print(" True | False |",True or False ) print(" False | True |",False or True) print(" False | False |",False or False) # TABELA DO XOR (EXCLUSIVE OR) print(" x | y | x ^ y ") print("-------------------------") print(" True | True |",True ^ True ) print(" True | False |",True ^ False ) print(" False | True |",False ^ True) print(" False | False |",False ^ False) # TABELA DO NOT print(" x | not(x) ") print("----------------") print(" True |",not(True)) print(" False |",not(False))
Tabelas verdade.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Builds an Adeft disambiguation model for the shortform "PC": mines candidate
# longforms from the literature, grounds them (interactively via a GUI, with the
# session's result pasted back below as a literal), trains a logistic-regression
# classifier over the labeled corpus, and uploads the model to S3.
# `# +` / `# -` are jupytext cell markers.

# +
import os
import json
import pickle
import random

from collections import defaultdict, Counter

# NOTE(review): several of these imports (pickle, universal_extract_text,
# get_hgnc_name, get_hgnc_id) are not used in the visible cells — presumably
# kept for interactive exploration; confirm before pruning.
from indra.literature.adeft_tools import universal_extract_text
from indra.databases.hgnc_client import get_hgnc_name, get_hgnc_id

from adeft.discover import AdeftMiner
from adeft.gui import ground_with_gui
from adeft.modeling.label import AdeftLabeler
from adeft.modeling.classify import AdeftClassifier
from adeft.disambiguate import AdeftDisambiguator, load_disambiguator

from adeft_indra.ground.ground import AdeftGrounder
from adeft_indra.model_building.s3 import model_to_s3
from adeft_indra.model_building.escape import escape_filename
from adeft_indra.db.content import get_pmids_for_agent_text, get_pmids_for_entity, \
    get_plaintexts_for_pmids
# -

adeft_grounder = AdeftGrounder()

# The shortform(s) this model disambiguates; the model name is derived from them.
shortforms = ['PC']
model_name = ':'.join(sorted(escape_filename(shortform) for shortform in shortforms))
results_path = os.path.abspath(os.path.join('../..', 'results', model_name))

# +
# Mine candidate longforms: fetch texts mentioning each shortform and run
# AdeftMiner over them. Texts of length <= 5 are discarded as junk.
miners = dict()
all_texts = {}
for shortform in shortforms:
    pmids = get_pmids_for_agent_text(shortform)
    text_dict = get_plaintexts_for_pmids(pmids, contains=shortforms)
    text_dict = {pmid: text for pmid, text in text_dict.items() if len(text) > 5}
    miners[shortform] = AdeftMiner(shortform)
    miners[shortform].process_texts(text_dict.values())
    all_texts.update(text_dict)

# Keep only longforms whose count*score exceeds 2 (filters low-confidence hits).
longform_dict = {}
for shortform in shortforms:
    longforms = miners[shortform].get_longforms()
    longforms = [(longform, count, score) for longform, count, score in longforms
                 if count*score > 2]
    longform_dict[shortform] = longforms

# Merge per-shortform longform counts and take a first grounding pass with the
# automatic grounder; the GUI session below refines these.
combined_longforms = Counter()
for longform_rows in longform_dict.values():
    combined_longforms.update({longform: count
                               for longform, count, score in longform_rows})
grounding_map = {}
names = {}
for longform in combined_longforms:
    groundings = adeft_grounder.ground(longform)
    if groundings:
        grounding = groundings[0]['grounding']
        grounding_map[longform] = grounding
        names[grounding] = groundings[0]['name']
longforms, counts = zip(*combined_longforms.most_common())
pos_labels = []
# -

list(zip(longforms, counts))

# Seed the maps from a previously published disambiguator for this shortform,
# if one exists; silently start fresh otherwise (best-effort by design).
try:
    disamb = load_disambiguator(shortforms[0])
    for shortform, gm in disamb.grounding_dict.items():
        for longform, grounding in gm.items():
            grounding_map[longform] = grounding
    for grounding, name in disamb.names.items():
        names[grounding] = name
    pos_labels = disamb.pos_labels
except Exception:
    pass

names

# Interactive grounding session (blocks until the GUI at port 8891 is closed).
grounding_map, names, pos_labels = ground_with_gui(longforms, counts,
                                                   grounding_map=grounding_map,
                                                   names=names,
                                                   pos_labels=pos_labels,
                                                   no_browser=True, port=8891)

result = [grounding_map, names, pos_labels]

result

# Frozen output of the GUI grounding session, pasted back as a literal so the
# notebook can be re-run non-interactively with the same groundings.
# NOTE(review): 'pancreatic carcinoma' maps to 'DOID:1793' while its siblings
# use the doubled 'DOID:DOID:1793' prefix — looks like a typo; confirm.
grounding_map, names, pos_labels = [
    {'ipc': 'ungrounded', 'p cresol': 'ungrounded',
     'pachyonychia congenita': 'ungrounded', 'pacinian': 'ungrounded',
     'paclitaxel carboplatin': 'ungrounded', 'pain catastrophizing': 'ungrounded',
     'pair center': 'ungrounded', 'pair complex': 'ungrounded',
     'palliative care': 'MESH:D010166', 'palmitoyl carnitine': 'ungrounded',
     'palmitoyl l carnitine': 'CHEBI:CHEBI:17490',
     'pancreatic adenocarcinoma': 'DOID:DOID:1793',
     'pancreatic cancer': 'DOID:DOID:1793',
     'pancreatic carcinoma': 'DOID:1793',
     'pancreatic ductal adenocarcinoma': 'DOID:DOID:1793',
     'paneth cells': 'MESH:D019879', 'paracetamol': 'CHEBI:CHEBI:46195',
     'parallel coordinates': 'ungrounded',
     'paramyotonia congenita': 'MESH:D020967',
     'parathyroid carcinoma': 'DOID:DOID:1540',
     'parenchymal cells': 'ungrounded', 'paricalcitol': 'CHEBI:CHEBI:7931',
     'parietal cells': 'ungrounded', 'parietal cortex': 'MESH:D010296',
     'partial colectomy': 'MESH:D003082', 'partial cystectomy': 'MESH:D015653',
     'participation coefficient': 'ungrounded', 'partition complex': 'ungrounded',
     'partner choice': 'ungrounded', 'parvocellular': 'ungrounded',
     'parylene c': 'ungrounded', 'pavement cells': 'ungrounded',
     'pc': 'ungrounded', 'peer counseling': 'ungrounded',
     'pelvic control': 'ungrounded', 'penile circumference': 'ungrounded',
     'pentamer complex': 'ungrounded', 'pentameric complex': 'ungrounded',
     'pentose cycle': 'ungrounded', 'pepc': 'ungrounded',
     'perceived competence': 'ungrounded', 'perceived control': 'ungrounded',
     'percutaneous cholecystostomy': 'MESH:D002767', 'pericytes': 'ungrounded',
     'perioperative chemotherapy': 'ungrounded',
     'periosteal circumference': 'ungrounded',
     'perirhinal cortex': 'MESH:D000071039',
     'peritoneal carcinomatosis': 'ungrounded',
     'peritoneal carcinosis': 'ungrounded',
     'peritoneal cavity': 'MESH:D010529', 'peritoneal cells': 'ungrounded',
     'peritubular cells': 'ungrounded', 'perseverative cognition': 'ungrounded',
     'personal computer': 'MESH:D003201', 'petroleum coke': 'MESH:D003077',
     'pharmaceutical care': 'ungrounded',
     'pharmacological chaperone': 'ungrounded', 'phase contrast': 'ungrounded',
     'phase cycle': 'ungrounded', 'phatidylcholine': 'CHEBI:CHEBI:64482',
     'phellodendri cortex': 'ungrounded', 'phenolic compounds': 'ungrounded',
     'pheochromocytoma': 'MESH:D010673',
     'pheochromocytoma crisis': 'MESH:D010673',
     'phospatidylcholine': 'CHEBI:CHEBI:64482',
     'phosphatidyicholine': 'CHEBI:CHEBI:64482',
     'phosphatidyl choline': 'CHEBI:CHEBI:64482',
     'phosphatidylcholine': 'CHEBI:CHEBI:64482',
     'phosphocellulose': 'ungrounded', 'phosphocholine': 'CHEBI:CHEBI:18132',
     'phosphocitrate': 'MESH:C014148', 'phosphocreatine': 'CHEBI:CHEBI:17287',
     'phospholipids complex': 'ungrounded',
     'phosphoryl choline': 'CHEBI:CHEBI:18132',
     'phosphorylcholine': 'CHEBI:CHEBI:18132', 'photocatalytic': 'ungrounded',
     'photocoagulation': 'MESH:D008028', 'photonic crystal': 'ungrounded',
     'phthalocyanine': 'CHEBI:CHEBI:34921', 'phycocyanin': 'MESH:D010798',
     'physical capacity': 'ungrounded', 'physical castration': 'MESH:D002369',
     'physical concerns': 'ungrounded', 'physical cooling': 'ungrounded',
     'phytochelatin': 'MESH:D054811', 'picryl chloride': 'CHEBI:CHEBI:53053',
     'pilocarpine': 'CHEBI:CHEBI:39462', 'piriform cortex': 'MESH:D066195',
     'pituitary carcinoma': 'ungrounded', 'place conditioning': 'ungrounded',
     'plasma cells': 'MESH:D010950', 'plasma coloration': 'ungrounded',
     'plasma concentrations': 'ungrounded', 'plasma cupping': 'ungrounded',
     'plasmacytoma': 'MESH:D010954', 'plasticity compression': 'ungrounded',
     'plastocyanin': 'IP:IPR002387', 'platelet component': 'ungrounded',
     'platelet concentrations': 'ungrounded', 'platelet count': 'ungrounded',
     'pleomorphic carcinoma': 'DOID:DOID:5662', 'pneumocephalus': 'MESH:D011007',
     'pneumocystis carinii': 'MESH:D045363', 'podocalyxin': 'MESH:C001643',
     'policosanols': 'ungrounded', 'polycarbonate': 'ungrounded',
     'polycomb': 'ungrounded', 'polycomb protein': 'ungrounded',
     'polygonum cuspidatum': 'MESH:D045468', 'polymerase complex': 'ungrounded',
     'polymeric conjugate': 'ungrounded', 'polysaccharide': 'CHEBI:CHEBI:18154',
     'polysome content': 'ungrounded', 'pontine cistern': 'ungrounded',
     'portland cement': 'ungrounded', 'positive control': 'ungrounded',
     'positive control group': 'ungrounded', 'post challenge': 'ungrounded',
     'post conditioning': 'ungrounded', 'postchallenge': 'ungrounded',
     'postconditioning': 'ungrounded', 'posterior capsulotomy': 'MESH:D064727',
     'posterior chamber': 'ungrounded', 'posterior circulating': 'ungrounded',
     'posterior commissure': 'ungrounded', 'posterior crossbite': 'ungrounded',
     'potassium citrate': 'MESH:D019357', 'power chain': 'ungrounded',
     'praeruptorin c': 'MESH:C058808', 'pre conditioning': 'ungrounded',
     'pre core': 'ungrounded', 'precardiac cells': 'ungrounded',
     'preconditioning': 'ungrounded', 'precore': 'ungrounded',
     'predicted component measured': 'ungrounded',
     'prednicarbate': 'CHEBI:CHEBI:135791',
     'prefrontal cortex': 'MESH:D017397',
     'preventive chemotherapy': 'ungrounded', 'primary care': 'MESH:D011320',
     'primary cilia': 'ungrounded', 'primary cilium': 'ungrounded',
     'primary closure': 'ungrounded', 'principal': 'ungrounded',
     'principal cells': 'ungrounded', 'principal component': 'ungrounded',
     'principle component': 'ungrounded',
     'proanthocyanidins': 'CHEBI:CHEBI:26267',
     'probiotic combination': 'ungrounded', 'procapsid': 'ungrounded',
     'procerebral': 'ungrounded', 'process cheeses': 'ungrounded',
     'prochymosin': 'MESH:C032443', 'procollagen': 'MESH:D011347',
     'procyanidins': 'ungrounded', 'procyclic': 'ungrounded',
     'progenitor cells': 'MESH:D013234', 'prohormone convertase': 'ungrounded',
     'proliferative cholangitis': 'MESH:D002761',
     'propargyl carbonate': 'ungrounded',
     'proprotein convertase': 'MESH:D043484',
     'propylene carbonate': 'ungrounded', 'prosencephalon': 'MESH:D016548',
     'prostacyclin': 'CHEBI:CHEBI:15552', 'prostate cancer': 'MESH:D011471',
     'prostate carcinoma': 'MESH:D011471', 'protein c': 'HGNC:9451',
     'protein carbonyl': 'MESH:D050050', 'protein coding': 'ungrounded',
     'protein content': 'ungrounded', 'protein convertase': 'ungrounded',
     'protein corona': 'MESH:D000066970',
     'provocation concentrations': 'ungrounded', 'proximal colon': 'ungrounded',
     'pseudotumor cerebri': 'MESH:D011559',
     'psoralea corylifolia l': 'ungrounded',
     'psychological contract': 'ungrounded', 'pulmonary contusion': 'ungrounded',
     'pulp colour': 'ungrounded', 'pulsed current': 'ungrounded',
     'punicalagin': 'ungrounded', 'purkinje cells': 'MESH:D011689',
     'pyloric caeca': 'ungrounded', 'pyramidal cells': 'MESH:D017966',
     'pyridyl cholesterol': 'ungrounded',
     'pyrroline 5 carboxylation': 'ungrounded',
     'pyruvate carboxylase': 'HGNC:8636',
     'pyruvate carboxylation': 'HGNC:8636'},
    # Human-readable names for each grounding used above.
    {'MESH:D010166': 'Palliative Care',
     'CHEBI:CHEBI:17490': 'O-palmitoyl-L-carnitine',
     'DOID:DOID:1793': 'pancreatic cancer', 'DOID:1793': 'pancreatic cancer',
     'MESH:D019879': 'Paneth Cells', 'CHEBI:CHEBI:46195': 'paracetamol',
     'MESH:D020967': 'Myotonic Disorders',
     'DOID:DOID:1540': 'parathyroid carcinoma',
     'CHEBI:CHEBI:7931': 'paricalcitol', 'MESH:D010296': 'Parietal Lobe',
     'MESH:D003082': 'Colectomy', 'MESH:D015653': 'Cystectomy',
     'MESH:D002767': 'Cholecystostomy', 'MESH:D000071039': 'Perirhinal Cortex',
     'MESH:D010529': 'Peritoneal Cavity', 'MESH:D003201': 'Computers',
     'MESH:D003077': 'Coke', 'MESH:D010673': 'Pheochromocytoma',
     'CHEBI:CHEBI:64482': 'phosphatidylcholine',
     'CHEBI:CHEBI:18132': 'phosphocholine', 'MESH:C014148': 'phosphocitrate',
     'CHEBI:CHEBI:17287': 'N-phosphocreatine',
     'MESH:D008028': 'Light Coagulation',
     'CHEBI:CHEBI:34921': 'phthalocyanine', 'MESH:D010798': 'Phycocyanin',
     'MESH:D002369': 'Castration', 'MESH:D054811': 'Phytochelatins',
     'CHEBI:CHEBI:53053': '1-chloro-2,4,6-trinitrobenzene',
     'CHEBI:CHEBI:39462': 'pilocarpine', 'MESH:D066195': 'Piriform Cortex',
     'MESH:D010950': 'Plasma Cells', 'MESH:D010954': 'Plasmacytoma',
     'IP:IPR002387': 'Plastocyanin',
     'DOID:DOID:5662': 'pleomorphic carcinoma',
     'MESH:D011007': 'Pneumocephalus',
     'MESH:D045363': 'Pneumocystis carinii', 'MESH:C001643': 'podocalyxin',
     'MESH:D045468': 'Fallopia japonica',
     'CHEBI:CHEBI:18154': 'polysaccharide',
     'MESH:D064727': 'Posterior Capsulotomy',
     'MESH:D019357': 'Potassium Citrate', 'MESH:C058808': 'praeruptorin C',
     'CHEBI:CHEBI:135791': 'prednicarbate',
     'MESH:D017397': 'Prefrontal Cortex',
     'MESH:D011320': 'Primary Health Care',
     'CHEBI:CHEBI:26267': 'proanthocyanidin', 'MESH:C032443': 'prorennin',
     'MESH:D011347': 'Procollagen', 'MESH:D013234': 'Stem Cells',
     'MESH:D002761': 'Cholangitis',
     'MESH:D043484': 'Proprotein Convertases',
     'MESH:D016548': 'Prosencephalon',
     'CHEBI:CHEBI:15552': 'prostaglandin I2',
     'MESH:D011471': 'Prostatic Neoplasms', 'HGNC:9451': 'PROC',
     'MESH:D050050': 'Protein Carbonylation',
     'MESH:D000066970': 'Protein Corona',
     'MESH:D011559': 'Pseudotumor Cerebri',
     'MESH:D011689': 'Purkinje Cells', 'MESH:D017966': 'Pyramidal Cells',
     'HGNC:8636': 'PC'},
    # Positive labels: groundings the classifier should treat as true hits.
    ['CHEBI:CHEBI:64482', 'HGNC:8636', 'HGNC:9451']]

# Longforms deliberately excluded from training (uninformative self-references).
excluded_longforms = ['pc', 'ipc']

# +
# Final per-shortform grounding dict, minus excluded longforms; persisted to
# disk so the grounding decisions are reproducible.
grounding_dict = {shortform: {longform: grounding_map[longform]
                              for longform, _, _ in longforms
                              if longform in grounding_map
                              and longform not in excluded_longforms}
                  for shortform, longforms in longform_dict.items()}
result = [grounding_dict, names, pos_labels]

if not os.path.exists(results_path):
    os.mkdir(results_path)
with open(os.path.join(results_path,
                       f'{model_name}_preliminary_grounding_info.json'), 'w') as f:
    json.dump(result, f)
# -

# Extra entities / unambiguous agent texts to augment the corpus with.
# Both are empty for this model; the cells below then contribute nothing.
additional_entities = {}

unambiguous_agent_texts = {}

# +
# Label the mined texts with their groundings to form the training corpus.
labeler = AdeftLabeler(grounding_dict)
corpus = labeler.build_from_texts((text, pmid) for pmid, text in all_texts.items())
agent_text_pmid_map = defaultdict(list)
for text, label, id_ in corpus:
    agent_text_pmid_map[label].append(id_)

entity_pmid_map = {entity: set(get_pmids_for_entity(*entity.split(':', maxsplit=1),
                                                    major_topic=True))
                   for entity in additional_entities}
# -

# Pairwise PMID overlaps — sanity checks that additional entities do not
# share too much literature with each other or with existing labels.
intersection1 = []
for entity1, pmids1 in entity_pmid_map.items():
    for entity2, pmids2 in entity_pmid_map.items():
        intersection1.append((entity1, entity2, len(pmids1 & pmids2)))

intersection2 = []
for entity1, pmids1 in agent_text_pmid_map.items():
    for entity2, pmids2 in entity_pmid_map.items():
        intersection2.append((entity1, entity2, len(set(pmids1) & pmids2)))

intersection1

intersection2

# +
# Extend the corpus with texts for unambiguous agent texts and additional
# entities (no-ops here since both dicts are empty). Entity text pulls are
# capped at 10000 randomly chosen new PMIDs.
all_used_pmids = set()
for entity, agent_texts in unambiguous_agent_texts.items():
    used_pmids = set()
    for agent_text in agent_texts[1]:
        pmids = set(get_pmids_for_agent_text(agent_text))
        new_pmids = list(pmids - all_texts.keys() - used_pmids)
        text_dict = get_plaintexts_for_pmids(new_pmids, contains=agent_texts)
        corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()
                       if len(text) >= 5])
        used_pmids.update(new_pmids)
    all_used_pmids.update(used_pmids)

for entity, pmids in entity_pmid_map.items():
    new_pmids = list(set(pmids) - all_texts.keys() - all_used_pmids)
    if len(new_pmids) > 10000:
        new_pmids = random.choices(new_pmids, k=10000)
    _, contains = additional_entities[entity]
    text_dict = get_plaintexts_for_pmids(new_pmids, contains=contains)
    corpus.extend([(text, entity, pmid) for pmid, text in text_dict.items()
                   if len(text) >= 5])
# -

names.update({key: value[0] for key, value in additional_entities.items()})
names.update({key: value[0] for key, value in unambiguous_agent_texts.items()})
pos_labels = list(set(pos_labels) | additional_entities.keys()
                  | unambiguous_agent_texts.keys())

# +
# Cross-validated training; %%capture suppresses the verbose CV output.
# %%capture
classifier = AdeftClassifier(shortforms, pos_labels=pos_labels, random_state=1729)
param_grid = {'C': [100.0], 'max_features': [10000]}
texts, labels, pmids = zip(*corpus)
classifier.cv(texts, labels, param_grid, cv=5, n_jobs=5)
# -

classifier.stats

disamb = AdeftDisambiguator(classifier, grounding_dict, names)

disamb.dump(model_name, results_path)

print(disamb.info())

# Publish the trained model to S3.
model_to_s3(disamb)

preds = [disamb.disambiguate(text) for text in all_texts.values()]

# NOTE(review): 'HGNC:10967' is not among this model's groundings
# (HGNC:8636 / HGNC:9451 here) — this spot-check filter looks copied from
# another model's notebook; confirm the intended grounding.
texts = [text for pred, text in zip(preds, all_texts.values())
         if pred[0] == 'HGNC:10967']

texts[3]
model_notebooks/PC/model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # In Depth: Naive Bayes Classification

# Demo notebook: Gaussian naive Bayes on synthetic 2-D blobs, then multinomial
# naive Bayes for text classification on the 20 Newsgroups corpus.
# Trailing semicolons suppress the repr output of plotting calls in a notebook.

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()

# Two well-separated Gaussian blobs of 100 points in 2-D.
from sklearn.datasets import make_blobs
X, y = make_blobs(100, 2, centers=2, random_state=2, cluster_std=1.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu');

from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X, y);

# 2000 random points spread over the rectangle [-6, 8] x [-14, 4] via
# broadcasting: offset + scale * uniform(0, 1).
rng = np.random.RandomState(0)
Xnew = [-6, -14] + [14, 18] * rng.rand(2000, 2)
ynew = model.predict(Xnew)

# Overlay predicted labels for the random points (faint) on the training data
# to visualize the decision boundary.
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu')
lim = plt.axis()
plt.scatter(Xnew[:, 0], Xnew[:, 1], c=ynew, s=20, cmap='RdBu', alpha=0.1)
plt.axis(lim);

# Posterior class probabilities for the last few random points.
yprob = model.predict_proba(Xnew)
yprob[-8:].round(2)

# +
# 20 Newsgroups text-classification example (downloads data on first run).
from sklearn.datasets import fetch_20newsgroups

data = fetch_20newsgroups()
data.target_names
# -

# Restrict to four categories for a compact demo.
categories = ['talk.religion.misc', 'soc.religion.christian',
              'sci.space', 'comp.graphics']
train = fetch_20newsgroups(subset='train', categories=categories)
test = fetch_20newsgroups(subset='test', categories=categories)

print(train.data[5])

# +
# Pipeline: TF-IDF features -> multinomial naive Bayes.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

model = make_pipeline(TfidfVectorizer(), MultinomialNB())
# -

model.fit(train.data, train.target)
labels = model.predict(test.data)

# Confusion matrix of true vs. predicted newsgroup labels.
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(test.target, labels)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
            xticklabels=train.target_names, yticklabels=train.target_names)
plt.xlabel('true label')
plt.ylabel('predicted label');

def predict_category(s, train=train, model=model):
    """Return the predicted newsgroup name for a single string ``s``.

    ``train`` and ``model`` are bound as defaults at definition time, so the
    function keeps working even if those globals are later reassigned.
    """
    pred = model.predict([s])
    return train.target_names[pred[0]]

predict_category('sending a payload to the ISS')

predict_category('discussing islam vs atheism')

predict_category('determining the screen resolution')
code_listings/05.05-Naive-Bayes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chapter 2 - housing project front to back # # Here is my own version of the code as I walk through the chapter. # # first step is to load in the data from # + #imports for preprocess import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import Imputer from sklearn.base import BaseEstimator, TransformerMixin from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import LabelBinarizer from sklearn.pipeline import FeatureUnion from sklearn.model_selection import StratifiedShuffleSplit import gc #imports for ml training and predicting from sklearn.model_selection import cross_val_score from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.metrics import mean_squared_error from sklearn.ensemble import RandomForestRegressor #for gridsearch from sklearn.model_selection import GridSearchCV #for random hypterparam search from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint #for inline graphing in a jupyter notebook # %matplotlib inline import matplotlib.pyplot as plt # - housing = pd.read_csv('./supplemental/datasets/housing/housing.csv') housing.head() # After loading the data, always have a look at it! 
housing.info() #note the missing data in total_bedrooms #all numerical except for ocean proximity #describe only returns the numerical arrtibutes housing.describe() #use value counts to look at an individual columns with categoricals housing['ocean_proximity'].value_counts() # ### Visually inspect the data # Note the use of the magic command %matplotlib inline below. This reders the histograms inside the jupyter notebook. The code line is useless in a regular script. plt.show() is optional for inline plotting as well! # %matplotlib inline import matplotlib.pyplot as plt housing.hist(bins=50,figsize=(20,15)) plt.show() # Note the cap on house value in the above graph. This poses a problem as it means the median values aren't being accurately recorded beyond the limit of $500,000. This limits the range over which our model can make accurate predictions. # # Create a test set of data # We pull this subsection from the main dataframe and put it to the side to not be looked at prior to testing out models. Don't look at it, as snooping the test data introduces a bias to your models and work! # # Things to consider: # 1. We want the same data to always be in the test set, not different data each time we run the script np.random.seed(12) # 2. As we add new data to the dataset, we want the same values to stay in the test set as on previous runs # # Fancy way on page 50 is to go off the last byte in the has of the ID column(seems a little overkill). In the past I've used a random number generator to add a column of the proper length. This doesn't deal with consideration #2 though. # # # ### Use Scikit-Learn for train-test split # - has a random state paramater that allows you to set the random generator seed and keep the split consistent across multiple runs. Can pass in multiple datasets with the same numbers of rows and it will split them at the same time! 
# # Scikit method: # -train_test_split import at start of notebook train_set, test_set = train_test_split(housing, test_size = 0.2, random_state=42) train_set.head() test_set.head() #sanity check to make sure all rows accounted for. len(test_set['longitude']) + len(train_set['longitude']) == len(housing['longitude']) # Split worked, note the jumbled indexes on the dataframes above # ## Ensuring a representitive test set # # If you have a particular variable that is important and you do not wish to produce a skewed test set. You can use the stratified sampling method below. First make a categorical variable that can be sampled on, and the the function will ensure an 80-20 split of data is taken from each category. This is not random, but instead makes sure you're training and test are both representitve of the population as a whole. # + # Divide by 1.5 to limit the number of income categories housing["income_cat"] = np.ceil(housing["median_income"] / 1.5) # Label those above 5 as 5 housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True) #look a the categories housing["income_cat"].hist() #import the StratifiedShuffleSplit from sklearn.model_selection import StratifiedShuffleSplit #make a stratified split of the data split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing["income_cat"]): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] # - # below we can see that the purely random method produces a test set with skew to it, so we would not be accurately assessing the models. 
# + def income_cat_proportions(data): return data["income_cat"].value_counts() / len(data) train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42) compare_props = pd.DataFrame({ "Overall": income_cat_proportions(housing), "Stratified": income_cat_proportions(strat_test_set), "Random": income_cat_proportions(test_set), }).sort_index() compare_props["Rand. %error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100 compare_props["Strat. %error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100 # - compare_props for set_ in (strat_train_set, strat_test_set): set_.drop("income_cat", axis=1, inplace=True) len(strat_test_set['longitude']) + len(strat_train_set['longitude']) == len(housing['longitude']) # + #change the names so you can work with just train and test set test_set = strat_test_set train_set = strat_train_set gc.collect() # - # # Explore and visualize data for initial insights # # ### scatterplot of the latitude and longitude #result resembles an outline of california! train_set.plot(kind = 'scatter', x='longitude', y='latitude') #the aplha adds transparency so we can see the high density areas better. train_set.plot(kind = 'scatter', x='longitude', y='latitude', alpha=0.1) #below extends the scatterplot, it changes the circle size based on the population of the district #and it changes the colour based on the median housing cost(low = blue, high = red) train_set.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4, s=train_set['population']/100, label='population', figsize=(10,7), c='median_house_value', cmap=plt.get_cmap('jet'), colorbar=True) plt.legend() # ## Finding correlations in the data # # With a small number of predictors like we have here (10) we can compute the piarwise pearson's correlation coefficients using the corr() method. # # Note that the corr() method only detects linear relationships, and there may in fact be more complex relationships between the variables. 
# # Scatter matrix function of pandas gives all the pairwise data comparisons (same as the pairs() function in R). # # # corr_matrix = train_set.corr() corr_matrix['median_house_value'].sort_values(ascending = False) #this finds the correlations with housing value # # ?pd.DataFrame.corr() #value close to one is a strong positive correlation #value close to -1 is strong negative correlation # + from pandas.tools.plotting import scatter_matrix attributes = ['median_house_value', 'median_income', 'total_rooms', 'housing_median_age'] scatter_matrix(housing[attributes], figsize=(12,8)) #note the diagonal boxes are the histograms of each of the attributes, as plotting the data to look for a correlation #with itself would be pretty useless. # - #from the above, the median income/ median_house_value is fairly strong #this looks just at that plot #from the below we can see that the $500,000 celing on the price is as issue, #seriously messes with the distribution cloud. train_set.plot(kind = 'scatter', x= 'median_income', y='median_house_value', alpha=0.1) # ## Munging the data # # There are certain aspects of our dataset that arent especially informative, but we can make them so through basic transformation. # + train_set.head() # total rooms --> rooms_per_household # total bedrooms --> bedrooms per household def housing_data_clean(input_df): input_df['rooms_per_household'] = input_df['total_rooms']/input_df['households'] input_df['bedrooms_per_household'] = input_df['total_bedrooms']/input_df['households'] input_df['bedrooms_per_room'] = input_df['total_bedrooms']/input_df['total_rooms'] input_df['population_per_household'] = input_df['population']/input_df['households'] #input_df = input_df.drop(['total_bedrooms','total_rooms'], axis=1) return input_df train_set = housing_data_clean(train_set) train_set.head() #do the same to the test set at the same time so they remain consistent with one another! 
test_set = housing_data_clean(test_set)
# -

corr_matrix = train_set.corr()
corr_matrix['median_house_value'].sort_values(ascending = False)

# ## Preparing data for machine learning algorithms
#
# Best to use functions for this, it makes the process easily repeatable and you
# can use the same function on the train and test data without needing to look
# at the test data.
#
# Gradually you can build a library of repurposable data munging functions.

# +
# Separate predictors (X) from the target (y) for train and test splits.
X_train = train_set.drop('median_house_value', axis=1)
y_train = train_set['median_house_value'].values.astype(float)

X_test = test_set.drop('median_house_value', axis=1)
y_test = test_set['median_house_value'].values.astype(float)


# +
def fill_median(dataframe, cols):
    """Impute each column's own median (not the mean) for NaNs in ``cols``.

    Mutates ``dataframe`` and returns it so assignment-style use also works.
    """
    for col in cols:
        # Assign back rather than Series.fillna(inplace=True): an in-place
        # fill on a selected column can silently act on a copy in newer pandas.
        dataframe[col] = dataframe[col].fillna(dataframe[col].median(skipna=True))
    return dataframe

def cols_with_missing_values(dataframe):
    """Return the list of column labels that contain at least one NaN."""
    return list(dataframe.columns[dataframe.isnull().any()])

def fill_value(dataframe, col, val):
    """Impute the single value ``val`` for NaNs in column ``col``.

    Use this to impute the *train* set's median into the *test* set, so no
    test-set statistics leak into preprocessing.
    """
    # BUG FIX: the original filled ``dataframe[i]`` using the leaked global
    # loop variable ``i`` instead of the ``col`` parameter; it only worked
    # because the caller's loop variable happened to be named ``i``.
    dataframe[col] = dataframe[col].fillna(val)
    return dataframe


missing_vals = cols_with_missing_values(X_train)
X_train = fill_median(X_train, missing_vals)
# -

print(missing_vals)  # see which columns were missing values in the train set

# Fill the test set with the train set's medians for the same columns.
for i in missing_vals:
    X_test = fill_value(X_test, i, X_train[i].median(skipna=True))

# ## Working with categorical values
# Most machine learning algorithms will need labels converted to numbers
#
# Scikit-Learn has a transformer that turns categoricals into a ML friendly numerical fmt.
It is LabelEncoder #here we turn the ocean_proximity categorical into ml labels encoder = LabelEncoder() #below I went slightly off script from the example housing_cat = X_train['ocean_proximity'] housing_cat_encoded = encoder.fit(housing_cat) encoded_ocean_train = housing_cat_encoded.transform(list(housing_cat.values)) encoded_ocean_test = housing_cat_encoded.transform(list(X_test['ocean_proximity'].values)) print(encoder.classes_) # the above is not perfect as it will assume a relationship between the values (i.e. 1 is more similar to 2 than to 4, when in fact this is not the case). To solve this we need to make some boolean dummies. # # Boolean dummies also called 'one-hot' encoding. as in 1 is a hot(yes) and 0 is a cold (no) encoder = OneHotEncoder() encoded_ocean_train_1hot = encoder.fit_transform(encoded_ocean_train.reshape(-1,1)) encoded_ocean_train_1hot encoded_ocean_test_1hot = encoder.transform(encoded_ocean_test.reshape(-1,1)) encoded_ocean_train_1hot encoded_ocean_test_1hot # The above outputs are SciPy sparse matricies, not numpy arrays. This is more memory efficient than a full numpy matrix for large numbers of categoricals, as it stores only presence (1s) not the thousands of zeros. # # ### combine the above into one step # # the LabelBinarizer method lets us combine the to numbers and the one hot encoding into a single step. # + from sklearn.preprocessing import LabelBinarizer encoder = LabelBinarizer() encoded_ocean_train_1hot = encoder.fit_transform(X_train['ocean_proximity']) #I'm using just transform below to ensure that the categories are sorted and used the same as in the train fit. encoded_ocean_test_1hot = encoder.transform(X_test['ocean_proximity']) # - encoded_ocean_train_1hot #this returns a dense numpy array, pass in sparse_output=True to get the SciPy variant # The out of box methods from scikit learn are great, but you will need to customize the data munging at times. # Using class inheritence, these are easy to build on your own! 
see page 65 if you need to do this. example below. # # The more of these munging steps you can automate, the easier it is to try different variants and experiment with tweaks to workflows. # # + from sklearn.base import BaseEstimator, TransformerMixin # column index rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6 class CombinedAttributesAdder(BaseEstimator, TransformerMixin): def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs self.add_bedrooms_per_room = add_bedrooms_per_room def fit(self, X, y=None): return self # nothing else to do def transform(self, X, y=None): rooms_per_household = X[:, rooms_ix] / X[:, household_ix] population_per_household = X[:, population_ix] / X[:, household_ix] if self.add_bedrooms_per_room: bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix] return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room] else: return np.c_[X, rooms_per_household, population_per_household] attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) housing_extra_attribs = attr_adder.transform(housing.values) # - # ## Feature Scaling # # If the input numerical attributes have very different scales, then the machine learning algorithms don't tend to perform well. i.e. if one has a range of 0-15 and another of 0-10000 then # ## Transformation piplines # # The large number of scikit learn methods for transforming, imputing etc can be combined using the pipeline function. # The only consideration is that all used frunctions must have a fit_transform() method. 
# # Below we impute and scale the data using a custom pipeline: # + from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler num_pipeline = Pipeline([ ('imputer', Imputer(strategy="median")), ('attribs_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler()), ]) housing_num_tr = num_pipeline.fit_transform(housing_num) # - # custom functions can also be used to make scikit learn act on a pandas dataframe directly without the conversion to a numpy array. example below: # + from sklearn.base import BaseEstimator, TransformerMixin # Create a class to select numerical or categorical columns # since Scikit-Learn doesn't handle DataFrames yet class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names].values # - # # Textbook example of a built pipeline for cleaning the data # # This is a consilidated set of functions that perform the tasks from above in a condensed manner, do it the way you're comfortable though, a lot of this is pretty abstracted so best to go through it yourself. # # There is a large amount of imports used here. it is essentially just splitting the categoricals and numericals, imputing the median for the numericals, creating the new columns based on the bedrooms per house etc and then doing one hot encoding for the categoricals and adding them to the main df. # # ### benefits # - code the munging once and apply it for both the train and test dataframes using the same functions. 
# - deal with the categorical and numerical values separately before combining them back into a single output array # - pass in a dataframe and recieve a numpy array for model input without me having to worry about the in between steps each time # # Note that the book saved the transformation of the test set to the very end, but I have here moved to to directly follow the test set transformation. This is so I can check that they are transformed at the same time. Both were built using the stratified train test split and then renamed to train_set and test_set prior to being split into the X and Y values # + from sklearn.base import BaseEstimator, TransformerMixin # column index rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6 class CombinedAttributesAdder(BaseEstimator, TransformerMixin): def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs self.add_bedrooms_per_room = add_bedrooms_per_room def fit(self, X, y=None): return self # nothing else to do def transform(self, X, y=None): rooms_per_household = X[:, rooms_ix] / X[:, household_ix] population_per_household = X[:, population_ix] / X[:, household_ix] if self.add_bedrooms_per_room: bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix] return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room] else: return np.c_[X, rooms_per_household, population_per_household] attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) housing_extra_attribs = attr_adder.transform(housing.values) # + from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import Imputer #list the numeric and then list the categoricals num_attribs = list(X_train.drop("ocean_proximity",axis=1).columns) cat_attribs = ["ocean_proximity"] num_pipeline = Pipeline([ ('selector', DataFrameSelector(num_attribs)), ('imputer', Imputer(strategy="median")), ('attribs_adder', 
CombinedAttributesAdder()), ('std_scaler', StandardScaler()), ]) cat_pipeline = Pipeline([ ('selector', DataFrameSelector(cat_attribs)), ('label_binarizer', LabelBinarizer()), ]) # + from sklearn.pipeline import FeatureUnion full_pipeline = FeatureUnion(transformer_list=[ ("num_pipeline", num_pipeline), ("cat_pipeline", cat_pipeline), ]) # - housing_prepared = full_pipeline.fit_transform(X_train) housing_prepared housing_prepared.shape # + #clean the test data in the same way X_test_prepared = full_pipeline.transform(X_test) X_test_prepared.shape # - # ## Variable review X_test_prepared #the test x values y_test #the actual y values housing_prepared # the x train values y_train #the y_train values # ### with our data now manipulated into a format that is usable by the Scikit learn ml functions, we move on to training models # # Select and train a model # # With our data now in the correct format, we just need to import the proper model with sklearn and then .fit() and .predict() for an initial run prior to optimization # ## Linear regression # + from sklearn.linear_model import LinearRegression #initiate the instance lin_reg = LinearRegression() #fit the model with the train_x and train_y lin_reg.fit(housing_prepared , y_train) # - # to make an initial assessment of efficacy, have a look at the root mean squared error on the training data itself # + from sklearn.metrics import mean_squared_error #predict on the training data housing_predictions = lin_reg.predict(housing_prepared) #pass y first, then yhat linear_reg_mse = mean_squared_error(y_train , housing_predictions) linear_reg_rmse = np.sqrt(linear_reg_mse) linear_reg_rmse # - # The above value indicates that the predictions are still off by an average of $68,000 which isn't very close since most data fall between $120,000 and $265,000. The linear regression model is therefore a definite underfit. # # In this case we likely need a more powerful model to explain the data. 
# Next up, a decision tree regression — a more expressive model that can
# capture non-linear structure in the data.

# +
from sklearn.tree import DecisionTreeRegressor

# Fit a decision tree on the prepared training features.
tree_reg = DecisionTreeRegressor(random_state=42)
tree_reg.fit(housing_prepared, y_train)
# -

# Evaluate the tree on the data it was trained on (training error).
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(y_train, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse

# This gives a RMSE of 0.0, which is a good red flag of overfit.
# Rather than immediately testing on the test data, we should first do
# internal cross-validation on the training data, to see whether the model
# is overfit or in fact a good predictor.

# ## Evaluation of models using cross validation

# +
from sklearn.model_selection import cross_val_score

# Pass in the model, the train X and the train y, along with the k-fold
# count and the scoring parameter to be used.
scores = cross_val_score(tree_reg, housing_prepared, y_train,
                         scoring='neg_mean_squared_error', cv = 10 )
tree_rmse_scores = np.sqrt(-scores)
tree_rmse_scores  # the output gives all 10 cross-validation scores
# -

# Below is a helper function to interpret the k-fold output.

def display_scores(scores):
    """Print a cross_val_score result in a readable format.

    Shows the individual fold scores followed by their mean and standard
    deviation. Returns nothing; output goes to stdout.
    """
    print('Scores:', scores)
    print('Mean:', scores.mean())
    print('std_dev:', scores.std())

display_scores(tree_rmse_scores)

# +
# Cross-validate the linear regression model the same way, for comparison.
scores = cross_val_score(lin_reg, housing_prepared, y_train,
                         scoring='neg_mean_squared_error', cv = 10 )
lin_reg_rmse_scores = np.sqrt(-scores)
display_scores(lin_reg_rmse_scores)
# -

# Above we see that the linear regression actually outperformed the decision
# tree, which was overfitting the model. This would not have been evident
# without the cross-validation step.

# ## Compare random forest to the decision tree and linear regression

# +
from sklearn.ensemble import RandomForestRegressor

forest_reg = RandomForestRegressor()
forest_reg.fit(housing_prepared, y_train)

# test on itself — is it overfitting the training set?
forest_self = forest_reg.predict(housing_prepared) forest_mse = mean_squared_error(y_train,forest_self) scores = cross_val_score(forest_reg, housing_prepared, y_train, scoring='neg_mean_squared_error', cv = 10 ) forest_rmse = np.sqrt(-scores) # - forest_mse display_scores(forest_rmse) # Note how much lower the mean rmse is for the random forest, predictions are about $10,000 closer than with the linear regression. # ## Tuning the model # ### grid search cross validation # Go through numerous pairwise comparisons of hyperparamaters and find the optimal set to use in the model. # # + from sklearn.model_selection import GridSearchCV param_grid = [ # try 12 (3×4) combinations of hyperparameters {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]}, # then try 6 (2×3) combinations with bootstrap set as False {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}, ] #set the random state to ensure results are consistent. forest_reg = RandomForestRegressor(random_state=42) # train across 5 folds, that's a total of (12+6)*5=90 rounds of training #if below passed refit = True, it would train the model with all the data once the optimal #paramater set was found. grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error') #fit the grid search with the training data grid_search.fit(housing_prepared, y_train) # - # with the grid search cv run, the following let you observe the optimal paramater combinations #get the top tuned hyperparamaters grid_search.best_params_ #which model was the best estimator? grid_search.best_estimator_ #what were the scores for the different paramater combinations. cvres = grid_search.cv_results_ for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]): print(np.sqrt(-mean_score), params) #long form version to see the cv results all laid out. pd.DataFrame(grid_search.cv_results_) # If your number of hyperparamaters is large, then using gridsearchCV will take too long. 
# RandomizedSearchCV will sample a subsample of the hyperparam combos to keep things efficient. # + from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint param_distribs = { 'n_estimators': randint(low=1, high=200), 'max_features': randint(low=1, high=8), } forest_reg = RandomForestRegressor(random_state=42) rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs, n_iter=10, cv=5, scoring='neg_mean_squared_error', random_state=42) rnd_search.fit(housing_prepared, y_train) # - # looking at feature importance can help you refine the model by trimming the predictors that introduce error feature_importances = grid_search.best_estimator_.feature_importances_ feature_importances extra_attribs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"] cat_one_hot_attribs = list(encoder.classes_) attributes = num_attribs + extra_attribs + cat_one_hot_attribs sorted(zip(feature_importances, attributes), reverse=True) grid_search.best_estimator_ # with the final model picked (through rf gridCV) we can evalute on the test set. Recall our X_test is already prepared above. # + final_model = grid_search.best_estimator_ final_predictions = final_model.predict(X_test_prepared) final_mse = mean_squared_error(y_test, final_predictions) final_rmse = np.sqrt(final_mse) final_rmse # - # The final model scores off by an average of $49,000 on the test data, a great improvement from our initial models!
california_housing/housing_front_to_back.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Reload edited modules automatically while developing.
# %load_ext autoreload
# %autoreload 2

import molsysmt as msm

# # Atomic radius

# System A: the demo 1TCD structure, converted from its PDB file, with
# solvent stripped and the hydrogens the file lacks added back.
file_path = msm.demo_systems.files['1tcd.pdb']
molecular_system_A = msm.add_missing_hydrogens(
    msm.remove_solvent(msm.convert(file_path))
)

msm.info(molecular_system_A)

# System B: a terminally-capped met-enkephalin peptide (built here but not
# queried below).
molecular_system_B = msm.add_terminal_capping(msm.demo_systems.metenkephalin())

# Van der Waals radii of the atoms in group index 1 of system A.
radii = msm.physchem.atomic_radius(molecular_system_A,
                                   selection = 'group_index==1', type = 'vdw')

radii
docs/contents/physchem/atomic_radius.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # <h1>CS4618: Artificial Intelligence I</h1> # <h1>Error Estimation</h1> # <h2> # <NAME><br> # School of Computer Science and Information Technology<br> # University College Cork # </h2> # + [markdown] slideshow={"slide_type": "skip"} # <h1>Initialization</h1> # $\newcommand{\Set}[1]{\{#1\}}$ # $\newcommand{\Tuple}[1]{\langle#1\rangle}$ # $\newcommand{\v}[1]{\pmb{#1}}$ # $\newcommand{\cv}[1]{\begin{bmatrix}#1\end{bmatrix}}$ # $\newcommand{\rv}[1]{[#1]}$ # $\DeclareMathOperator{\argmax}{arg\,max}$ # $\DeclareMathOperator{\argmin}{arg\,min}$ # $\DeclareMathOperator{\dist}{dist}$ # $\DeclareMathOperator{\abs}{abs}$ # + slideshow={"slide_type": "skip"} # %reload_ext autoreload # %autoreload 2 # %matplotlib inline # + slideshow={"slide_type": "skip"} import pandas as pd import numpy as np from sklearn.linear_model import LinearRegression from sklearn.neighbors import KNeighborsRegressor from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_absolute_error from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import KFold from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from joblib import dump, load # - # <h1>Our Linear Model</h1> # <ul> # <li>We'll repeat the code from the end of a previous lecture.</li> # </ul> # Use pandas to read the CSV file into a DataFrame df = pd.read_csv("../datasets/dataset_corkA.csv") # + # The features we want to select features = ["flarea", "bdrms", "bthrms"] # Extract those features and convert to a numpy array X = 
df[features].values # Extract the target values and convert to a numpy array y = df["price"].values # + # Fit the model linear_model = LinearRegression() linear_model.fit(X, y) # + [markdown] slideshow={"slide_type": "slide"} # <h1>How Good Is This Model?</h1> # <ul> # <li>We've built an estimator by learning a model from a dataset.</li> # <li>We want to know how well it will do in practice, once we start to use it to make predictions. # <ul> # <li>This is called <b>error estimation</b>.</li> # </ul> # </li> # <li>Easy right? # <ul> # <li>The dataset comes with <em>actual</em> target values.</li> # <li>We can ask the estimator to <em>predict</em> target values for each example in the dataset.</li> # <li>So now we have actual and predicted values, we can compute the mean squared error.</li> # </ul> # </li> # </ul> # - y_predictions = linear_model.predict(X) mean_squared_error(y, y_predictions) # <ul> # <li>But, for at least two reasons, we don't do this! # <ul> # <li>We might want to use a different performance measure than what we used as the loss function.</li> # <li>We want to know how well the model <b>generalizes</b> to <b>unseen data</b>.</li> # </ul> # </li> # </ul> # + [markdown] slideshow={"slide_type": "slide"} # <h1>Choosing a Different Performance Measure</h1> # <ul> # <li>Often in machine learning, we use one measure during learning and another for evaluation.</li> # <li>Our loss function (mean squared error or half of it!) was ideal for learning (why?) # but may not be so good as a performance measure. # <ul> # <li>We could use <b>root mean squared error</b> (RMSE): # $$\sqrt{\frac{1}{m}\sum_{i=1}^m(h_{\v{\beta}}(\v{x}^{(i)}) - \v{y}^{(i)})^2}$$ # i.e don't halve the MSE, and take its square root. RMSE is the standard deviation # of the errors in the predictions. 
# </li> # <li>Or we could use <b>mean absolute error</b> (MAE): # $$\frac{1}{m}\sum_{i=1}^m\abs(h_{\v{\beta}}(\v{x}^{i)}) - \v{y}^{(i)})$$ # </li> # </ul> # </li> # </ul> # - mean_absolute_error(y, y_predictions) # + [markdown] slideshow={"slide_type": "slide"} # <h1>Generalizing to Unseen Data</h1> # <ul> # <li>The error on the <b>training set</b> is called the <b>training error</b> # (also 'resubstitution error' and 'in-sample error'). # </li> # <li>But we want to know how well we will perform in the future, on <em>unseen data</em>. # <ul> # <li>The training error is not, in general a good indicator of performance on unseen data.</li> # <li>It's often too optimistic. Why?</li> # </ul> # </li> # <li>To predict future performance, we need to measure error on an <em>independent</em> dataset: # <ul> # <li>We want a dataset that has played no part in creating the model.</li> # <li>This second dataset is called the <b>test set</b>.</li> # <li>The error on the test set is called the <b>test error</b> (also 'out-of-sample error' and # 'extra-sample error'). # </li> # </ul> # </li> # </ul> # + [markdown] slideshow={"slide_type": "slide"} # <h1>Holdout</h1> # <ul> # <li>So we use the following method: # <ul> # <li><em>Shuffle</em> the dataset and <em>partition</em> it into two: # <ul> # <li>training set (e.g. 80% of the full dataset); and</li> # <li>test set (the rest of the full dataset).</li> # </ul> # </li> # <li>Train the estimator on the training set.</li> # <li>Test the model (evaluate the predictions) on the test set.</li> # </ul> # </li> # <li> # This method is called the <b>holdout</b> method, because the test set # is withheld (held-out) during training. # <ul> # <li>It is essential that the test set is not used in any way to create # the model. 
# </li> # <li><em>Don't even look at it!</em> # </li> # <li>'Cheating' is called <b>leakage</b>.</li> # <li>('Cheating' is one cause of <b>overfitting</b>; see later.)</li> <!-- (see <i>CS4619</i>)</li>--> # </ul> # </li> # <li>Class exercise: Standardization, as we know, is about scaling the data. It requires calculation # of the mean and standard deviation. When should the mean and standard deviation be calculated: # (a) before splitting, on the entire dataset, or (b) after splitting, on just the training set? # Why? # </li> # </ul> # + [markdown] slideshow={"slide_type": "slide"} # <h2>Holdout in scikit-learn: one method</h2> # - # Shufle and split the data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2) # Fit the model on the training data linear_model.fit(X_train, y_train) # + # Predict on the test data y_test_predictions = linear_model.predict(X_test) # Calculate MAE on the test data mean_absolute_error(y_test, y_test_predictions) # - # <h2>Holdout in scikit-learn: another way</h2> # <ul> # <li>This alternative involves writing less code.</li> # </ul> # Create the object that shuffles and splits the data ss = ShuffleSplit(n_splits=1, train_size=0.8, random_state=2) # Shuffle & split the data, fit the model on the training data, predict on the test data, calculate MAE on the test data cross_val_score(linear_model, X, y, scoring="neg_mean_absolute_error", cv=ss) # <ul> # <li>This is the negative of the MAE &mdash; so that higher values (closer to zero ) are better.</li> # <li>Often this number (the test error) will be higher than the training error.</li> # <li>Run it again with a different random state.</li> # </ul> # + [markdown] slideshow={"slide_type": "slide"} # <h1>Pros and Cons of Holdout</h1> # <ul> # <li>The advantage of holdout is: # <ul> # <li>The test error is independent of the training set.</li> # </ul> # </li> # <li>The disadvantages of this method are: # <ul> # <li>Results can vary quite a lot across 
different runs. # <ul> # <li>Informally, you might get lucky &mdash; or unlucky</li> # </ul> # I.e. in any one split, the data used for training or testing might not be representative. # </li> # <li>We are training on only a subset of the available dataset, perhaps as little as 50% of it. # <ul> # <li>From so little data, we may learn a worse model and so our error measurement may # be pessimistic. # </li> # </ul> # </li> # </ul> # </li> # <li>In practice, we only use the holdout method when we have a very large dataset. # <ul> # <li>The size of the dataset mitigates the above problems.</li> # </ul> # </li> # <li> # When we have a smaller dataset, we use a <b>resampling</b> method: # <ul> # <li>The examples get re-used for training and testing.</li> # </ul> # </li> # </ul> # + [markdown] slideshow={"slide_type": "slide"} # <h1>$k$-Fold Cross-Validation</h1> # <ul> # <li>The most-used resampling method is $k$-fold cross-validation: # <ul> # <li>Shuffle the dataset and partition it into $k$ disjoint subsets of equal size. # <ul> # <li>Each of the partitions is called a <b>fold</b>.</li> # <li>Typically, $k = 10$, so you have 10 folds.</li> # <!-- # <li>But, for conventional statistical significance testing to be applicable, you should probably ensure # that the number of examples in each fold does not fall below 30. (If this isn't possible, then either # use a smaller value for $k$, or do not use $k$-fold cross validation!) # </li> # --> # </ul> # </li> # <li>You take each fold in turn and use it as the test set, training the learner on # the remaining folds. # </li> # <li>Clearly, you can do this $k$ times, so that each fold gets 'a turn' at being the test set. # <ul> # <li> # By this method, each example is used exactly once for testing, and $k - 1$ times for training. 
# </li> # </ul> # </li> # </ul> # <li>In pseudocode: # <ul style="background: lightgray; list-style: none"> # <li> # shuffle the dataset $D$ and partition it into $k$ disjoint equal-sized subsets, $T_1, T_2,\ldots,T_k$ # <li> # <li> # <b>for</b> $i = 1$ to $k$ # <ul> # <li>train on $D \setminus T_i$</li> # <li>make predictions for $T_i$</li> # <li>measure error (e.g. MAE)</li> # </ul> # report the mean of the errors # </li> # </ul> # </li> # </ul> # + [markdown] slideshow={"slide_type": "slide"} # <h2>$k$-Fold Cross Validation in scikit-learn</h2> # + # Create the object that shuffles & splits the data kf = KFold(n_splits=10, shuffle=True, random_state=2) # Shuffle & split the data # Repeat k times: fit the model on all but one fold, predict on the remaining fold, calculate MAE # This gives k MAEs, so take their mean np.mean(cross_val_score(linear_model, X, y, scoring="neg_mean_absolute_error", cv=kf)) # - # <ul> # <li>But $k$-fold cross-validation is so common, there's a shorthand:</li> # </ul> np.mean(cross_val_score(linear_model, X, y, scoring="neg_mean_absolute_error", cv=10)) # <ul> # <li>Be warned, however, the shorthand does not shuffle the dataset before splitting it into folds. # <ul> # <li>Why might that be a problem?</li> # </ul> # </li> # <li>If you use the shorthand, you should probably shuffle the <code>DataFrame</code> just after reading it in from # the CSV file (see example below). # </li> # </ul> # <h2>Pros and Cons of $k$-Fold Cross-Validation</h2> # <ul> # <li>Pros: # <ul> # <li> # The test errors of the folds are independent &mdash; because examples are included in only one test set. # </li> # <li> # Better use is made of the dataset: for $k = 10$, for example, we train using 9/10 of the dataset. 
# </li> # </ul> # </li> # <li>Cons: # <ul> # <li> # While the test sets are independent of each other, the training sets are not: # <ul> # <li>They will overlap with each other to some degree.</li> # <li>(This effect of this will be less, of course, for larger datasets.)</li> # </ul> # </li> # <li> # The number of folds is constrained by the size of the dataset and the desire sometimes on the part of # statisticians to have folds of at least 30 examples. # </li> # <li> # It can be costly to train the learning algorithm $k$ times. # </li> # <li> # There may still be some variability in the results due to 'lucky'/'unlucky' splits. # </li> # </ul> # </li> # </ul> # <h1>A Little Case Study</h1> # <ul> # <li>Let's learn a linear model and compare it with 3NN.</li> # <li>For 3NN, we will need to standardize the data.</li> # <li>We will also standardize it for the linear model. scikit-learn's <code>LinearRegression</code> class # does not require us to do this but no harm is done by doing it. The advantage is that it makes our # code for the two regressors more consistent. 
# </li> # <li>We'll use 10-fold cross-validation and we'll use the shorthand so we'll shuffle the dataset ourselves.</li> # </ul> # + # Use pandas to read the CSV file into a DataFrame df = pd.read_csv("../datasets/dataset_corkA.csv") # Shuffle the dataset df = df.sample(frac=1, random_state=2) df.reset_index(drop=True, inplace=True) # The features we want to select features = ["flarea", "bdrms", "bthrms"] # Extract the features but leave as a DataFrame X = df[features] # Target values, converted to a 1D numpy array y = df["price"].values # - # Create a preprocessor preprocessor = ColumnTransformer([ ("scaler", StandardScaler(), features)], remainder="passthrough") # Create a pipeline that combines the preprocessor with the linear model linear_model = Pipeline([ ("preprocessor", preprocessor), ("predictor", LinearRegression())]) # Create a pipeline that combines the preprocessor with 3NN knn_model = Pipeline([ ("preprocessor", preprocessor), ("predictor", KNeighborsRegressor(n_neighbors=3))]) # Error estimation for the linear model np.mean(cross_val_score(linear_model, X, y, scoring="neg_mean_absolute_error", cv=10)) # Error estimation for 3NN np.mean(cross_val_score(knn_model, X, y, scoring="neg_mean_absolute_error", cv=10)) # <ul> # <li>Notice how much work <code>cross_val_score</code> is doing for us: # <ul> # <li>It partitions the data.</li> # <li>Then, on the training set, for each transformer in the pipeline, it calls <code>fit</code> # and <code>transform</code> and, for the predictor at the end of the pipeline, it also # calls <code>fit</code>. # <img src="images/pipeline1.png" /> # </li> # <li>Then, on the test set, for each transformer in the pipeline, it calls <code>transform</code> and, # for the predictor at the end of the pipeline, it calls <code>predict</code>. # <img src="images/pipeline2.png" /> # </li> # <li>And, in the case of $k$-fold cross-validation, it repeats thre above $k$ times. 
# </ul> # </li> # </ul> # <h2>Some remarks</h2> # <ul> # <li> # In the past, students have tried holdout and $k$-fold as if they were in # competition with each other. This betrays a misunderstanding. You do not try them # both and see which one gives the lower error. You pick one of them &mdash; the one # that makes most sense for your data &mdash; and use it. # </li> # <li> # There are many resampling methods other than $k$-Fold Cross-Validation: # <ul> # <li>Repeated $k$-Fold Cross-Validation, Leave-One-Out-Cross-Validation, # &hellip; # </li> # <li>See the classes in <code>sklearn.model_selection</code>.</li> # </ul> # </li> # <li> # So you've used one of the above methods and found the test error of your predictor. # <ul> # <li>This is supposed to give you an idea of how your predictor will perform in practice.</li> # <li>What if you are dissatisfied with the test error? It seems too high. # <ul> # <li>It is tempting to tweak your learning algorithm or try different algorithms # to try to bring down the test error. # </li> # <li>This is wrong! It is <b>leakage</b> again: you will be using knowledge of the test # set to develop the predictor and is likely to result in an optimistic view of the # ultimate performance of the predictor on unseen data. # </li> # <li>Ideally, error estimation on the test set is the last thing you do. # </ul> # </li> # </ul> # </li> # <li> # Finally, suppose you have used one of the above methods to estimate the error of your predictor. # You are ready to release your predictor on the world. At this point, you can train it on # <em>all</em> the examples in your dataset, so as to maximize the use of the data. 
# </li> # </ul> # + [markdown] slideshow={"slide_type": "slide"} # <h2>Finishing the case study</h2> # <ul> # <li>Since the linear model is better than 3NN, this is the model we will deploy.</li> # <li>At this point, we can retrain on the entire dataset.</li> # </ul> # - # Train the linear model on the entire dataset linear_model.fit(X, y) # <ul> # <li>We can save this model using <code>dump</code> from <code>pickle</code> or <code>joblib</code>.</li> # </ul> dump(linear_model, "models/my_model.pkl") # For this to work, create a folder called models! # <ul> # <li>We can read it into, e.g., our web app's backend using <code>load</code>.</li> # </ul> model = load("models/my_model.pkl") # <ul> # <li>Then, when we want to make predictions, we can create a <code>DataFrame</code> of objects for which # we want predictions and call <code>model.predict</code>. # </li> # </ul>
ai1/lectures/AI1_08_error_estimation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Demo SingleRun # This notebook shows a single run consisting of the following four phases: # * sail empty # * loading # * sail full # * unloading # + import datetime, time import simpy import shapely.geometry import pandas as pd import openclsim.core as core import openclsim.model as model import openclsim.plot as plot # setup environment simulation_start = 0 my_env = simpy.Environment(initial_time=simulation_start) registry = {} keep_resources = {} # + Site = type( "Site", ( core.Identifiable, core.Log, core.Locatable, core.HasContainer, core.HasResource, ), {}, ) TransportProcessingResource = type( "TransportProcessingResource", ( core.Identifiable, core.Log, core.ContainerDependentMovable, core.Processor, core.HasResource, core.LoadingFunction, core.UnloadingFunction, ), {}, ) location_from_site = shapely.geometry.Point(4.18055556, 52.18664444) location_to_site = shapely.geometry.Point(4.25222222, 52.11428333) # - # ## Definition of Site # + from_site = Site( env=my_env, name="Winlocatie", ID="6dbbbdf4-4589-11e9-a501-b469212bff5d", geometry=location_from_site, capacity=100, level=50, ) to_site = Site( env=my_env, name="Dumplocatie", ID="6dbbbdf5-4589-11e9-82b2-b469212bff5c", geometry=location_to_site, capacity=50, level=0, ) # - # ## Definition of Vessels hopper = TransportProcessingResource( env=my_env, name="Hopper 01", ID="6dbbbdf6-4589-11e9-95a2-b469212bff5b", geometry=location_from_site, loading_rate=0.00001, unloading_rate=0.00001, capacity=4, compute_v=lambda x: 10, ) # ## Defenition of the activities # + single_run = [ model.MoveActivity( env=my_env, name="sailing empty", ID="6dbbbdf7-4589-11e9-bf3b-b469212bff5d", registry=registry, mover=hopper, destination=from_site, ), model.ShiftAmountActivity( env=my_env, name="Transfer MP", 
ID="6dbbbdf7-4589-11e9-bf3b-b469212bff52", registry=registry, processor=hopper, origin=from_site, destination=hopper, amount=4, duration=1000, ), model.MoveActivity( env=my_env, name="sailing filler", ID="6dbbbdf7-4589-11e9-bf3b-b469212bff5b", registry=registry, mover=hopper, destination=to_site, ), model.ShiftAmountActivity( env=my_env, name="Transfer TP", ID="6dbbbdf7-4589-11e9-bf3b-b469212bff54", registry=registry, processor=hopper, origin=hopper, destination=to_site, amount=4, duration=1000, ), model.BasicActivity( env=my_env, name="Basic activity", ID="6dbbbdf7-4589-11e9-bf3b-b469212bff5h", registry=registry, duration=0, additional_logs=[hopper], ), ] activity = model.SequentialActivity( env=my_env, name="Single run process", registry=registry, sub_processes=single_run, ) while_activity = model.WhileActivity( env=my_env, name="while", registry=registry, sub_processes=[activity], condition_event=[{"type": "container", "concept": to_site, "state": "full"}], ) # - model.register_processes([while_activity]) my_env.run() plot.get_log_dataframe(hopper, [while_activity, *single_run, activity]) plot.vessel_planning([hopper, from_site, to_site]) fig = plot.get_step_chart([from_site, hopper, to_site])
notebooks/14_SingleRun_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Titanic KFP Example # # This example demonstrates how to use managed AI Pipelines, CAIP Training, and CAIP Predictions with the Kaggle titanic dataset. We will build a KFP that will: # 1. Split the dataset into train and test # 1. Peform feature engineering on the train dataset and apply that feature engineering to the test dataset. # 1. Train a keras model on the data (maybe on Cloud AI Platform Training) # 1. Hyperparameter search the keras model. # 1. Push the best keras model to Cloud AI Platform Serving # # ## Prereqs: # 1. Install the [KFP SDK](https://www.kubeflow.org/docs/pipelines/sdk/install-sdk/). # 1. Create an AI Pipelines instance. # 1. Copy the [Kaggle titanic train.csv and test.csv](https://www.kaggle.com/c/titanic) to GCS import os import kfp input_data = "gs://xoonij-titanic-mlops/input.csv" train_output = "gs://xoonij-titanic-mlops/train.csv" val_output = "gs://xoonij-titanic-mlops/val.csv" split_dataset_op = kfp.components.load_component_from_file(os.path.join('components/split_dataset', 'component.yaml')) @kfp.dsl.pipeline( name = "Titanic KFP Pipeline", description = "Example pipeline using the Titanic Dataset." ) def titanic_kfp_pipeline(input_data: str, train_output: str, val_output:str): """KubeFlow pipeline example for Titanic Dataset""" split_dataset_op(input_data, train_output, val_output) kfp.compiler.Compiler().compile(titanic_kfp_pipeline, 'titanic-kfp-pipeline.zip') client = kfp.Client(host='bdcf010b13b422b-dot-us-central2.pipelines.googleusercontent.com') my_experiment = client.create_experiment(name='titanic-kfp-pipeline') my_run = client.run_pipeline(my_experiment.id, 'titanic-kfp-pipeline', 'titanic-kfp-pipeline.zip')
titanic-kfp-example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true import os import sys import struct import pprint import matplotlib.pyplot as plt import pickle import math import time import statistics import numpy as np # %pylab inline pp = pprint.PrettyPrinter(indent=2) from TraceInc import AutoDict # + deletable=true editable=true def out_degree(tmap): partners = [] recvSet = set() for sk in tmap: for cta in tmap[sk]: for sit in tmap[sk][cta]: for rk in tmap[sk][cta][sit]: partners.append(len(tmap[sk][cta][sit][rk])) return partners # + deletable=true editable=true def in_degree(tmap): partners = AutoDict() degree = [] for sk in tmap: for cta in tmap[sk]: for sit in tmap[sk][cta]: for rk in tmap[sk][cta][sit]: for rcta in tmap[sk][cta][sit][rk]: for rit in tmap[sk][cta][sit][rk][rcta]: if not partners[rcta][sit]: partners[rcta][sit] = set() partners[rcta][sit].add(cta) for cta in partners: for it in partners[cta]: degree.append(len(partners[cta][it])) return degree # + deletable=true editable=true binArray = [1,2,4,8,16,64,128,256,512,768,1024,10000] # + deletable=true editable=true files = ['hs2d', 'hs3d','hist', 'nbody', 'path', 'bfs'] fpath = '../data/' # + deletable=true editable=true volsIn = {} volsOut = {} for n in binArray: volsIn[n] = [] volsOut[n] = [] for f in files: tmap = pickle.load( open(fpath+f+'.transfermap', "rb")) indeg = in_degree(tmap) print(f) (cnt, bins,_) = hist(indeg, alpha=0.75, bins=binArray) for i,n in enumerate(cnt): volsIn[binArray[i]].append((n/sum(cnt))*100) outdeg = out_degree(tmap) (cnt, bins,_) = hist(outdeg, alpha=0.75, bins=binArray) for i,n in enumerate(cnt): volsOut[binArray[i]].append((n/sum(cnt))*100) pp.pprint(volsIn) pp.pprint(volsOut) plt.clf() # + deletable=true editable=true # + deletable=true editable=true # + deletable=true 
editable=true pylab.rcParams['figure.figsize'] = (18, 12) plt.style.use('ggplot') matplotlib.rcParams.update({'font.size': 18}) cmap = matplotlib.cm.get_cmap('tab20b') c = cmap.colors ind = np.arange(len(files)) width = 0.4 plt.subplot(211) bottom = np.zeros(len(files)) for (inx,key) in enumerate(binArray[0:-1]): plt.bar(ind,volsIn[key], label=str(key), width=width, bottom=bottom, color=c[inx]) bottom += np.array(volsIn[key]) #plt.xlabel('Transaction Members') plt.ylabel('%') plt.title('CTA In Degree', x=0.095) plt.xticks(ind, files) plt.legend(bbox_to_anchor=(-0.06,1.0)) ax = plt.gca() handles, labels = ax.get_legend_handles_labels() plt.legend(handles[::-1], labels[::-1], bbox_to_anchor=(-0.06,1.0)) plt.subplot(212) bottom=np.zeros(len(files)) for (inx,key) in enumerate(binArray[0:-1]): plt.bar(ind,volsOut[key], label=str(key), width=width, bottom=bottom,color=c[inx]) bottom += np.array(volsOut[key]) plt.xticks(ind, files) ax = plt.gca() handles, labels = ax.get_legend_handles_labels() plt.legend(handles[::-1], labels[::-1], bbox_to_anchor=(-0.06,1.0)) #plt.xlabel('Transaction Members') plt.ylabel('%') plt.title('CTA Out Degree', x=0.105) filename = '../plots/cta-degree.pdf' plt.savefig(filename, papertype='a4', bbox_inches='tight', orientation='landscape') plt.show() # + deletable=true editable=true
memtrace-pass/post-processing/com-degree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Parallel Monto-Carlo options pricing # This notebook shows how to use `IPython.parallel` to do Monte-Carlo options pricing in parallel. We will compute the price of a large number of options for different strike prices and volatilities. # ## Problem setup # %matplotlib inline import matplotlib.pyplot as plt import sys import time from ipyparallel import Client import numpy as np # Here are the basic parameters for our computation. price = 100.0 # Initial price rate = 0.05 # Interest rate days = 260 # Days to expiration paths = 10000 # Number of MC paths n_strikes = 6 # Number of strike values min_strike = 90.0 # Min strike price max_strike = 110.0 # Max strike price n_sigmas = 5 # Number of volatility values min_sigma = 0.1 # Min volatility max_sigma = 0.4 # Max volatility strike_vals = np.linspace(min_strike, max_strike, n_strikes) sigma_vals = np.linspace(min_sigma, max_sigma, n_sigmas) print("Strike prices: ", strike_vals) print( "Volatilities: ", sigma_vals) # ## Monte-Carlo option pricing function # The following function computes the price of a single option. It returns the call and put prices for both European and Asian style options. def price_option(S=100.0, K=100.0, sigma=0.25, r=0.05, days=260, paths=10000): """ Price European and Asian options using a Monte Carlo method. Parameters ---------- S : float The initial price of the stock. K : float The strike price of the option. sigma : float The volatility of the stock. r : float The risk free interest rate. days : int The number of days until the option expires. paths : int The number of Monte Carlo paths used to price the option. Returns ------- A tuple of (E. call, E. put, A. call, A. put) option prices. 
""" import numpy as np from math import exp,sqrt h = 1.0/days const1 = exp((r-0.5*sigma**2)*h) const2 = sigma*sqrt(h) stock_price = S*np.ones(paths, dtype='float64') stock_price_sum = np.zeros(paths, dtype='float64') for j in range(days): growth_factor = const1*np.exp(const2*np.random.standard_normal(paths)) stock_price = stock_price*growth_factor stock_price_sum = stock_price_sum + stock_price stock_price_avg = stock_price_sum/days zeros = np.zeros(paths, dtype='float64') r_factor = exp(-r*h*days) euro_put = r_factor*np.mean(np.maximum(zeros, K-stock_price)) asian_put = r_factor*np.mean(np.maximum(zeros, K-stock_price_avg)) euro_call = r_factor*np.mean(np.maximum(zeros, stock_price-K)) asian_call = r_factor*np.mean(np.maximum(zeros, stock_price_avg-K)) return (euro_call, euro_put, asian_call, asian_put) # We can time a single call of this function using the `%timeit` magic: # %timeit -n1 -r1 print(price_option(S=100.0, K=100.0, sigma=0.25, r=0.05, days=260, paths=10000)) # ## Parallel computation across strike prices and volatilities # The Client is used to setup the calculation and works with all engines. rc = Client() # A `LoadBalancedView` is an interface to the engines that provides dynamic load # balancing at the expense of not knowing which engine will execute the code. view = rc.load_balanced_view() # Submit tasks for each (strike, sigma) pair. Again, we use the `%%timeit` magic to time the entire computation. async_results = [] # + # %%timeit -n1 -r1 for strike in strike_vals: for sigma in sigma_vals: # This line submits the tasks for parallel computation. ar = view.apply_async(price_option, price, strike, sigma, rate, days, paths) async_results.append(ar) rc.wait(async_results) # Wait until all tasks are done. # - len(async_results) # ## Process and visualize results # Retrieve the results using the `get` method: results = [ar.get() for ar in async_results] # Assemble the result into a structured NumPy array. 
# + prices = np.empty(n_strikes*n_sigmas, dtype=[('ecall',float),('eput',float),('acall',float),('aput',float)] ) for i, price in enumerate(results): prices[i] = tuple(price) prices.shape = (n_strikes, n_sigmas) # - # Plot the value of the European call in (volatility, strike) space. plt.figure() plt.contourf(sigma_vals, strike_vals, prices['ecall']) plt.axis('tight') plt.colorbar() plt.title('European Call') plt.xlabel("Volatility") plt.ylabel("Strike Price") # Plot the value of the Asian call in (volatility, strike) space. plt.figure() plt.contourf(sigma_vals, strike_vals, prices['acall']) plt.axis('tight') plt.colorbar() plt.title("Asian Call") plt.xlabel("Volatility") plt.ylabel("Strike Price") # Plot the value of the European put in (volatility, strike) space. plt.figure() plt.contourf(sigma_vals, strike_vals, prices['eput']) plt.axis('tight') plt.colorbar() plt.title("European Put") plt.xlabel("Volatility") plt.ylabel("Strike Price") # Plot the value of the Asian put in (volatility, strike) space. plt.figure() plt.contourf(sigma_vals, strike_vals, prices['aput']) plt.axis('tight') plt.colorbar() plt.title("Asian Put") plt.xlabel("Volatility") plt.ylabel("Strike Price")
Chapter12/c12_16_Monte Carlo Options.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- #导入模块 import pandas as pd import numpy as np #创建特征列表表头 column_names = ['Sample code number','Clump Thickness','Uniformity of Cell Size','Uniformity of Cell Shape','Marginal Adhesion','Single Epithelial Cell Size','Bare Nuclei','Bland Chromatin','Normal Nucleoli','Mitoses','Class'] #使用pandas.read_csv函数从网上读取数据集 data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data',names=column_names) #将?替换为标准缺失值表示 data = data.replace(to_replace='?',value = np.nan) #丢弃带有缺失值的数据(只要有一个维度有缺失便丢弃) data = data.dropna(how='any') #查看data的数据量和维度 data.shape #使用sklearn.cross_validation里的train_test_split模块分割数据集 from sklearn.cross_validation import train_test_split #随机采样25%的数据用于测试,剩下的75%用于构建训练集 X_train,X_test,y_train,y_test = train_test_split(data[column_names[1:10]],data[column_names[10]],test_size = 0.25,random_state = 33) #查看训练样本的数量和类别分布 y_train.value_counts() #查看测试样本的数量和类别分布 y_test.value_counts() # + #从sklearn.preprocessing导入StandardScaler from sklearn.preprocessing import StandardScaler #从sklearn.linear_model导入LogisticRegression(逻辑斯蒂回归) from sklearn.linear_model import LogisticRegression #从sklearn.linear_model导入SGDClassifier(随机梯度参数) from sklearn.linear_model import SGDClassifier #标准化数据,保证每个维度的特征数据方差为1,均值为,使得预测结果不会被某些过大的特征值而主导(在机器学习训练之前, 先对数据预先处理一下, 取值跨度大的特征数据, <br>我们浓缩一下, 跨度小的括展一下, 使得他们的跨度尽量统一.) 
ss = StandardScaler() X_train = ss.fit_transform(X_train) X_test = ss.transform(X_test)#初始化两种模型 lr = LogisticRegression() sgdc = SGDClassifier()#调用逻辑斯蒂回归,使用fit函数训练模型参数 lr.fit(X_train,y_train)#使用训练好的模型lr对x_test进行预测,结果储存在变量lr_y_predict中 lr_y_predict = lr.predict(X_test)#调用随机梯度的fit函数训练模型 sgdc.fit(X_train,y_train)#使用训练好的模型sgdc对X_test进行预测,结果储存在变量sgdc_y_predict中 sgdc_y_predict = sgdc.predict(X_test) # - lr_y_predict sgdc_y_predict # + #从sklearn.metrics导入classification_report from sklearn.metrics import classification_report #使用逻辑斯蒂回归模型自带的评分函数score获得模型在测试集上的准确性结果 print('Accuracy of LR Classifier:',lr.score(X_test,y_test)) #使用classification_report模块获得逻辑斯蒂模型其他三个指标的结果(召回率,精确率,调和平均数) print(classification_report(y_test,lr_y_predict,target_names=['Benign','Malignant'])) # - #使用随机梯度下降模型自带的评分函数score获得模型在测试集上的准确性结果 print('Accuarcy of SGD Classifier:',sgdc.score(X_test,y_test)) ##使用classification_report模块获得随机梯度下降模型其他三个指标的结果 print(classification_report(y_test,sgdc_y_predict,target_names=['Benign','Malignant']))
breast_cancer_logistic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Interacting with histories in Galaxy API # ======================================== # # We are going to use the [requests](http://python-requests.org/) Python library to communicate via HTTP with the Galaxy server. To start, let's define the connection parameters. # # **You need to insert the API key for your Galaxy server in the cell below**: # 1. Open the Galaxy server in another browser tab # 2. Click on "User" on the top menu, then "Preferences" # 3. Click on "Manage API key" # 4. Generate an API key if needed, then copy the alphanumeric string and paste it as the value of the `api_key` variable below. # + from __future__ import print_function import json import requests from six.moves.urllib.parse import urljoin server = 'https://usegalaxy.org/' api_key = '1a6625037e2a986f36dc17e0324743fa' base_url = urljoin(server, 'api') base_url # - # We now make a GET request to retrieve all histories owned by a user: params = {'key': api_key} r = requests.get(base_url + '/histories', params) print(r.text) hists = r.json() hists # As you can see, GET requests in Galaxy API return JSON strings, which need to be **deserialized** into Python data structures. In particular, GETting a resource collection returns a list of dictionaries. # # Each dictionary returned when GETting a resource collection gives basic info about a resource, e.g. for a history you have: # - `id`: the unique **identifier** of the history, needed for all specific requests about this resource # - `name`: the name of this history as given by the user # - `deleted`: whether the history has been deleted # - `url`: the relative URL to get all info about this resource. 
# # There is no readily-available filtering capability, but it's not difficult to filter histories **by name**: [_ for _ in hists if _['name'] == 'Unnamed history'] # If you are interested in more **details** about a given resource, you just need to append its `id` to the previous collection request, e.g. to the get more info for a history: hist0_id = hists[0]['id'] print(hist0_id) params = {'key': api_key} r = requests.get(base_url + '/histories/' + hist0_id, params) r.json() # As you can see, there are much more entries in the returned dictionary, e.g.: # - `create_time` # - `size`: total disk space used by the history # - `state_ids`: ids of history datasets for each possible state. # # To get the list of **datasets contained** in a history, simply append `/contents` to the previous resource request. params = {'key': api_key} r = requests.get(base_url + '/histories/' + hist0_id + '/contents', params) hdas = r.json() hdas # The dictionaries returned when GETting the history content give basic info about each dataset, e.g.: `id`, `name`, `deleted`, `state`, `url`... # # To get the details about a specific dataset, you can use the `datasets` controller: hda0_id = hdas[0]['id'] print(hda0_id) params = {'key': api_key} r = requests.get(base_url + '/datasets/' + hda0_id, params) r.json() # Some of the interesting additional dictionary entries are: # - `create_time` # - `creating job`: id of the job which created this dataset # - `download_url`: URL to download the dataset # - `file_ext`: the Galaxy data type of this dataset # - `file_size` # - `genome_build`: the genome build (dbkey) associated to this dataset. # # **New resources** are created with POST requests. The uploaded **data needs to be serialized** in a JSON string. 
For example, to create a new history: params = {'key': api_key} data = {'name': 'New history'} r = requests.post(base_url + '/histories', data=json.dumps(data), params=params, headers={'Content-Type': 'application/json'}) new_hist = r.json() new_hist # The return value of a POST request is a dictionary with detailed info about the created resource. # # To **update** a resource, make a PUT request, e.g. to change the history name: params = {'key': api_key} data = {'name': 'Updated history'} r = requests.put(base_url + '/histories/' + new_hist['id'], json.dumps(data), params=params, headers={'Content-Type': 'application/json'}) print(r.status_code) r.json() # The return value of a PUT request is usually a dictionary with detailed info about the updated resource. # # Finally to **delete** a resource, make a DELETE request, e.g.: params = {'key': api_key} r = requests.delete(base_url + '/histories/' + new_hist['id'], params=params) print(r.status_code)
galaxy_api_histories.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HW 2 - Multilayer Perceptron # ## <NAME> # The goal of this assignment is to implement a multilayer perceptron (aka feed-forward artificial neural network) with two layers (i.e. one hidden layer) and logistic transfer functions. I will use online back-propagation of error as the learning algorithm. # # I will vary the parameters: learning rate and number of iterations. For each value of the parameters, I report average error of the algorithm on both a training set and a test set, along with statistical error bars on those quantities. # # Lastly, for the best network, I will show a visual representation (similar to a heat map) of the final trained weights. import numpy as np import matplotlib.pyplot as plt # ### Problem 1 the ANN will solve: "exactly-one-on" # Our data input will be a vector $x \in R^{20}$ whose entries are either 0 or 1. If $x$ has exactly one 1, then the target output will be 1. If $x$ does not have exactly one 1, then the target output will be 0. # # First, let's generate some data keeping in mind the following things: # 1. We need a lot of training data, say 500 vectors. # # 1. We must guarantee that we have training data vectors that have exactly-one-on (since they make up just $\frac{20}{2^{20}}$ possible vectors). # # 1. Furthermore, since there are only 20 vectors that have exactly-one-on (out of $2^{20}$ possible vectors), we need to make sure they have a good chance of being picked by our stochastic weight update process, so let's repeat these vectors in our training data to boost their chance of being selected. 
# # Therefore, we'll pick $16*30 = 480$ total training vectors, where a third of the data comes from exactly-one-on data (so we'll pick 16 of the exactly-one-on vectors and include them each 10 times, making $16*10$ vectors of this form). dim_of_data_point = 20 num_train = 16*30 # each row of x will be a data vector, and we will make sure to have 8 vectors with exactly one 1 x = np.vstack((np.random.randint(2, size=(num_train-16*10,dim_of_data_point)), \ [[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]*10, \ [[0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]*10, \ [[0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]*10, \ [[0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]*10, \ [[0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]*10, \ [[0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]*10, \ [[0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0]]*10, \ [[0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0]]*10, \ [[0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0]]*10, \ [[0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]]*10, \ [[0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0]]*10, \ [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0]]*10, \ [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0]]*10, \ [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0]]*10, \ [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0]]*10, \ [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]]*10)) # Now we'll add a column of 1's to the front of $x$ so we can simply dot product a row of $x$ with weights and not have to deal with adding the constant term that allows for linear combinations not through the origin. xhat = np.hstack((np.ones((num_train,1)), x)) # Before we get too far, let's make some training data that is NOT in the test data. Again, add a 1 to the front of the vector. # + #exactly-one-on points NOT in training set: test_eoo = [[1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],[1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],[1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0],[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]] for i in range(num_train): for k in range(4): if np.linalg.norm(test_eoo[k] - xhat[i,:])==0: print "Bad luck! 
One of these test exactly-one-on points happened to be in your training set" num_noo_tests=16*20 #noo stands for not-one-on test_noo = np.hstack((np.ones((num_noo_tests,1)), np.random.randint(2, size=(num_noo_tests,dim_of_data_point)))) for k in range(num_noo_tests): for i in range(num_train): if np.linalg.norm(test_noo[k] - xhat[i,:])==0: print 'bad row is row number', k # - # Great! We didn't happen to get unlucky and randomly select testing points that happened to be in our set of training points. If you run this and find that one of them is bad, then delete it using np.delete(test_noo, k, 0) and remember to then adjust num_noo_tests. # # Similarly, we'll want to store our exactly-one-on training points. #let's store which exactly-one-one training points we used so later we can find accuracy from within the training set train_eoo = [[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], [1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\ [1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], \ [1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0], \ [1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0],[1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0], \ [1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],[1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0], \ [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0],[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0], \ [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0],[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0], \ [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0]] # Now we will choose how many nodes we'll put in the hidden layer, say M=10. And we'll initialize the weights for the first layer to come from a standard Gaussian. Similarly, we'll initialize the weights for the second layer. 
# # Note on dimension of the weight matrices for each layer: # <br> # &nbsp; &nbsp; $\bullet$ The number of rows of the weight matrix = the number of nodes you want in the next layer # <br> # &nbsp; &nbsp; $\bullet$ The number of columns of the weight matrix = 1 + the number of nodes you had in the previous layer # + # I changed my mind and added this to the training funtion since we need to re-initialize the weights for each # time we train the weights, but I left this code block for understanding the setup-purpose. M=10 #w_layer_1 = np.random.randn(M,dim_of_data_point+1) # num rows = how many things you want in the next layer, num columns = 1+num things you have in current layer #w_layer_2 = np.random.randn(1,M+1) # - # We'll need our logistic transfer function, which I'm going to name sigmoid: def sigmoid(x): return 1 / (1 + np.exp(-x)) # We're all set up! Now we will use stochastic gradient descent to train the weights. Specifically, for each iteration, we randomly pick a training point. We send that training point through the network. Then we will find the partial derivative of the error function for each weight ($\frac{\partial E}{\partial w_{j,i}}$) by back-propagation, and update $w_{j,i} = w_{j,i}-\eta \frac{\partial E}{\partial w_{j,i}}$ where $\eta$ is the learning rate (which we will vary). We will also vary the number of iterations we allow. 
def feed_forward(x, w_layer_1, w_layer_2): #put in a vector of length 1+dim_of_data_point whose first entry is a 1 a_layer_1 = np.zeros((1,M)) for i in range(M): a_layer_1[0,i] = np.dot(w_layer_1[i,:],x) z = np.zeros((1,M)) for i in range(M): z[0,i] = sigmoid(a_layer_1[0,i]) zhat = np.hstack((np.ones((1,1)),z)) #add a one in front to allow for linear functions not through the origin #return zhat.shape a_layer_2 = np.dot(w_layer_2,zhat.T) y = sigmoid(a_layer_2) #the output of the feed-forward return y, a_layer_2, zhat, a_layer_1 def mlp_training(num_iter,eta): #we'll need to clear the weights each time we do a run M=10 w_layer_1 = np.random.randn(M,dim_of_data_point+1) # num rows = how many things you want in the next layer, num columns = 1+num things you have in current layer w_layer_2 = np.random.randn(1,M+1) for l in range(num_iter): rand_train = np.random.randint(num_train) #select a random number from 0 to 459 train = xhat[rand_train,:] # pick out that random training point for your stochatic gradient descent ff = feed_forward(train, w_layer_1, w_layer_2) if np.linalg.norm(train) == np.sqrt(2): # if the training point has exactly one on #update layer_1 weights for j in range(M): for i in range(dim_of_data_point+1): w_layer_1[j,i] = w_layer_1[j,i] - eta * (ff[0][0,0]-1)*sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) \ *w_layer_2[0,j+1]*sigmoid(ff[3][0,j])*(1-sigmoid(ff[3][0,j]))*train[i] #update layer_2 weights for j in range(M+1): w_layer_2[0,j] = w_layer_2[0,j] - eta * (ff[0][0,0]-1)*sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) * ff[2][0,j] else: #update layer_1 weights for j in range(M): for i in range(dim_of_data_point+1): w_layer_1[j,i] = w_layer_1[j,i] - eta * ff[0][0,0] *sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) \ *w_layer_2[0,j]*sigmoid(ff[3][0,j])*(1-sigmoid(ff[3][0,j]))*train[i] #update layer_2 weights for j in range(M+1): w_layer_2[0,j] = w_layer_2[0,j] - eta * ff[0][0,0]*sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) * ff[2][0,j] return 
w_layer_1,w_layer_2 # Now we'll make a function that evaluates how accurate our trained weights are. Notice that I defined accuracy in an abnormal way. I didn't want to simply have the accuracy be $\frac{\#correctly\_classified}{total\_number}$ because this wouldn't be a good measure for this problem. For example, I could've forced the output to always be 0, which would be correct $\frac{2^{20}-20}{2^{20}} = 0.99998\%$ of the time, but that doesn't solve our classification problem at all since it classifies everything the same! Therefore, I chose to make the accuracy be the average given by: $\frac{1}{2}\frac{\#\_correctly\_classified\_exactly\_one\_on}{total\_\#\_exactly\_one\_on\_tests} + \frac{1}{2} \frac{\#\_correctly\_classifed\_not\_one\_on}{total\_\#\_not\_one\_on\_tests}$ def accuracy(w_layer_1, w_layer_2): #accuracy of exactly-one-on points NOT in training set: test_eoo_count = 0 for i in range(4): if feed_forward(test_eoo[i], w_layer_1, w_layer_2)[0][0,0] >=0.5: test_eoo_count = test_eoo_count +1 #print 'Percentage of TEST exactly-one-on points that are correctly classified is', float(test_eoo_count)/float(4) #accuracy of exactly-one-on points IN training set: train_eoo_count = 0 for i in range(16): if feed_forward(train_eoo[i], w_layer_1, w_layer_2)[0][0,0] >=0.5: train_eoo_count = train_eoo_count +1 #print 'Percentage of TRAIN exactly-one-on points that are correctly classified is', float(train_eoo_count)/float(16) #accuracy of not-one-on points NOT in training set: test_noo_count = 0 for i in range(num_noo_tests): if feed_forward(test_noo[i], w_layer_1, w_layer_2)[0][0,0] < 0.5 and np.linalg.norm(test_noo[i]) != np.sqrt(2): test_noo_count = test_noo_count+1 elif feed_forward(test_noo[i], w_layer_1, w_layer_2)[0][0,0] >= 0.5 and np.linalg.norm(test_noo[i]) == np.sqrt(2): test_noo_count = test_noo_count+1 else: test_noo_count = test_noo_count #print 'Percentage of TEST random (most likely not-one-on) points that are correctly classified is', 
float(test_noo_count)/float(num_noo_tests) #accuracy of not-one-on points IN training set: train_noo_count = 0 for i in range(16*20): if feed_forward(xhat[i], w_layer_1, w_layer_2)[0][0,0] < 0.5 and np.linalg.norm(xhat[i]) != np.sqrt(2): train_noo_count = train_noo_count+1 elif feed_forward(xhat[i], w_layer_1, w_layer_2)[0][0,0] >= 0.5 and np.linalg.norm(xhat[i]) == np.sqrt(2): train_noo_count = train_noo_count+1 else: train_noo_count = train_noo_count #print 'Percentage of TRAIN random (most likely not-one-on) points that are correctly classified is', float(train_noo_count)/float(16*20) train_accuracy = 0.5*float(train_eoo_count)/float(16)+0.5*float(train_noo_count)/float(16*20) test_accuracy = 0.5*float(test_eoo_count)/float(4)+0.5*float(test_noo_count)/float(num_noo_tests) return train_accuracy, test_accuracy # + w_layer_1,w_layer_2 = mlp_training(500,0.7) train_acc, test_acc = accuracy(w_layer_1,w_layer_2) print train_acc, test_acc # - # Great! We can train our weights and then find the testing/training data accuracy by running the two simple lines above! Now we'll make 3 loops: # 1. for choosing the learning rate (eta), # 1. for choosing how many iterations we update the weights, and # 1. for repeating the trial with the same eta and same number of iterations so that we can get the mean percentage of accuracy and standard deviation. # # *NOTE: the following code block takes about 10 minutes to run. If you'd like to make it faster, just change the "5" in the first line to a "2". This will only loop over two iteration choices (specifically the smaller iteration choices), so it'll be much faster (a few minutes). You'll have to adjust the table created below too then. Or alternatively, you can just change the iter_choices to be smaller numbers, but the smaller the number, the worse the accuracy. Another way to speed it up is to change $repeat\_trial$ to be a smaller number (like 2 or 3) so that we average over fewer repeat trials per $(eta,\#\_iterations)$ pair. 
# + num_iter_choices = 5 iter_choices = [100,250,500,750,1000] num_learning_rates = 6 learning_rates = [1.8,1.5,1.2,0.9,0.6,0.3] train_mean = np.zeros((num_iter_choices,num_learning_rates)) train_std = np.zeros((num_iter_choices,num_learning_rates)) test_mean = np.zeros((num_iter_choices,num_learning_rates)) test_std = np.zeros((num_iter_choices,num_learning_rates)) for i in range(num_iter_choices): #loop over iteration choicies for j in range(num_learning_rates): #loop over learning rates repeat_trial = 5 #we'll repeat the trial 5 different times so we can get a mean and std for these parameters train_acc = np.zeros((1,repeat_trial)) test_acc = np.zeros((1,repeat_trial)) for k in range(repeat_trial): w_layer_1,w_layer_2 = mlp_training(iter_choices[i],learning_rates[j]) train_acc[0,k], test_acc[0,k] = accuracy(w_layer_1,w_layer_2) train_mean[i,j] = np.mean(train_acc) train_std[i,j] = np.std(train_acc) test_mean[i,j] = np.mean(test_acc) test_std[i,j] = np.std(test_acc) # - print train_mean print print test_mean # Lastly, we put all our results into tables. One table for how accurately the algorithm classifies the testing data, and a second table for how accurately the algorithm classifies the training data. 
# + test_result_str = [['','','','','',''],['','','','','',''],['','','','','',''],['','','','','',''],['','','','','','']] for i in range(num_iter_choices): for j in range(num_learning_rates): test_result_str[i][j] = str(round(test_mean[i,j], 3)) + " " + u"\u00B1" + " " + str(round(test_std[i,j], 3)) # + #make a table whose entries are strings of the mean +/- the std for each eta and number of iterations pair fig = plt.figure() ax = fig.add_subplot(111) col_labels = ('eta=1.8', 'eta=1.5', 'eta=1.2', 'eta=0.9', 'eta=0.6', 'eta=0.3') row_labels = ['100 iterations', '250 iterations', '500 iterations', '750 iterations', '1000 iterations'] the_table = plt.table(cellText=test_result_str, rowLabels=row_labels, colLabels=col_labels, loc='center') the_table.set_fontsize(30) the_table.scale(2, 2) ax.axis("off") plt.title("Percent Accuracy for the Testing Data") plt.show() #fig.savefig('Name.pdf') # + train_result_str = [['','','','','',''],['','','','','',''],['','','','','',''],['','','','','',''],['','','','','','']] for i in range(num_iter_choices): for j in range(num_learning_rates): train_result_str[i][j] = str(round(train_mean[i,j], 3)) + " " + u"\u00B1" + " " + str(round(train_std[i,j], 3)) # + #make a table whose entries are strings of the mean +/- the std for each eta and number of iterations pair fig = plt.figure() ax = fig.add_subplot(111) col_labels = ('eta=1.8', 'eta=1.5', 'eta=1.2', 'eta=0.9', 'eta=0.6', 'eta=0.3') row_labels = ['100 iterations', '250 iterations', '500 iterations', '750 iterations', '1000 iterations'] the_table = plt.table(cellText=train_result_str, rowLabels=row_labels, colLabels=col_labels, loc='center') the_table.set_fontsize(30) the_table.scale(2, 2) ax.axis("off") plt.title("Percent Accuracy for the Training Data") plt.show() #fig.savefig('Name.pdf') # - # We see that the accuracy on the testing data and on the training data are comparable. This is very good, as it signifies that we have not overfit our model to the training data. 
# Let's display some of the information from the TESTING data accuracy table into graphs: # + x = [100,250,500,750,1000] #plt.figure() eta18 = plt.errorbar(x, test_mean[:,0], test_std[:,0], fmt='o', label='eta=1.8') eta09 = plt.errorbar(x, test_mean[:,3], test_std[:,3], fmt='o', label='eta=0.9') eta03 = plt.errorbar(x, test_mean[:,5], test_std[:,5], fmt='o', label='eta=0.3') plt.legend(handles=[eta18,eta09,eta03]) plt.xlabel('Number of Training Iterations of Stochastic Gradient Descent') plt.ylabel('Mean Accuracy +/- Standard Deviation') plt.title("Accuracy vs. the number of training iterations for three choices of the learning rate") plt.show() # + x = [0.3,0.6,0.9,1.2,1.5,1.8] #eta values from SMALLEST to largest #plt.figure() iter100 = plt.errorbar(x, test_mean[0,:][::-1], test_std[0,:][::-1], fmt='o', label='100 iterations') #the [::-1] part reads off the list in reverse order, so graphically smaller eta is on the left iter500 = plt.errorbar(x, test_mean[2,:][::-1], test_std[2,:][::-1], fmt='o', label='500 iterations') iter1000 = plt.errorbar(x, test_mean[4,:][::-1], test_std[4,:][::-1], fmt='o', label='1000 iterations') plt.legend(handles=[iter100,iter500,iter1000]) plt.xlabel('Learning Rate') plt.ylabel('Mean Accuracy +/- Standard Deviation') plt.title("Accuracy vs. learning rate for three choices of the number of training iterations") plt.show() # - # The figures clearly show that when we choose a really small learning rate, we need many more iterations to get a good level of accuracy. For a large number of iterations such as 1,000 iterations, all of the learning rates (except the very smallest, eta=0.3) did really well. However, given fewer iterations, only the larger learning rates did well since the smaller learning rates didn't have enough time to reach the minimum in the gradient descent. 
# # Overall, we see that choosing the number of iterations and the learning rate cannot be choosen independently; picking a small number of iterations forces you to need a larger learning rate, and picking a small learning rate forces you to need a larger number of iterations. Of course a small learning rate and huge number of iterations is probably ideal, but that would take a ton of computational power. # Now let's get a good run of the code to collect good weight values so we can plot them like a heat map. # + w_layer_1,w_layer_2 = mlp_training(1000,1.8) train_acc, test_acc = accuracy(w_layer_1,w_layer_2) print train_acc, test_acc # - plt.imshow(w_layer_1, cmap='Blues') plt.show() # Interestingly, it appears the bias weights (column 0) were very important. plt.imshow(w_layer_2, cmap='Blues') plt.show() # *** # ### Problem 2 the ANN will solve: is the first entry a 1? # Our data input will be a vector $x \in R^{15}$ whose entries are either 0 or 1. If $x$ has a 1 as its first entry, then the target output will be 1. If $x$ has a 0 as its first entry, then the target output will be 0. # # First, let's generate some data. Notice that this data set is nicer than in Problem 1 in the sense that we can randomly pick vectors and 50% of them should have the first entry being a 1 and 50% the first entry being 0, so we don't need to "boost" the likelihood of our stocastic gradient descent picking one verses the other. 
# + dim_of_data_point = 15 num_train = 500 # each row of x will be a data vector x = np.random.randint(2, size=(num_train,dim_of_data_point)) #again add a ones column to the front of x to account for the "bias" aka linear combinations not though the origin xhat = np.hstack((np.ones((num_train,1)), x)) #similarly, let's make some test data: #(I'm not going to bother checking if the random test data happened to be in the training data - it's highly unlikely) num_test = 500 test = np.hstack((np.ones((num_test,1)), np.random.randint(2, size=(num_test,dim_of_data_point)))) #we'll choose M=10 nodes in hidden layer 1 again for simplicity M=10 # - # We can reuse the $feed\_forward$ and $sigmoid$ functions above, but we'll need to edit the $mlp\_training$ and $accuracy$ functions since we'll be training on different criteria and evaluating accuracy differently as well. For the training function, we only have to edit the first if statement to be our new criteria for the target being a 1. As for the accuracy, here the accuracy will be easier, we'll simply give it our random test data and get an overall accuracy (not bothering to force a certain number of tests of each parity since it will be roughly half of each naturally). 
def mlp_training_2(num_iter,eta): #we'll need to clear the weights each time we do a run w_layer_1 = np.random.randn(M,dim_of_data_point+1) # num rows = how many things you want in the next layer, num columns = 1+num things you have in current layer w_layer_2 = np.random.randn(1,M+1) for l in range(num_iter): rand_train = np.random.randint(num_train) #select a random number from 0 to 499 train = xhat[rand_train,:] # pick out that random training point for your stochatic gradient descent ff = feed_forward(train, w_layer_1, w_layer_2) if train[1] == 1: for j in range(M): for i in range(dim_of_data_point+1): w_layer_1[j,i] = w_layer_1[j,i] - eta * (ff[0][0,0]-1)*sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) \ *w_layer_2[0,j+1]*sigmoid(ff[3][0,j])*(1-sigmoid(ff[3][0,j]))*train[i] #update layer_2 weights for j in range(M+1): w_layer_2[0,j] = w_layer_2[0,j] - eta * (ff[0][0,0]-1)*sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) * ff[2][0,j] else: #update layer_1 weights for j in range(M): for i in range(dim_of_data_point+1): w_layer_1[j,i] = w_layer_1[j,i] - eta * ff[0][0,0] *sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) \ *w_layer_2[0,j]*sigmoid(ff[3][0,j])*(1-sigmoid(ff[3][0,j]))*train[i] #update layer_2 weights for j in range(M+1): w_layer_2[0,j] = w_layer_2[0,j] - eta * ff[0][0,0]*sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) * ff[2][0,j] return w_layer_1,w_layer_2 def accuracy_2(w_layer_1, w_layer_2): #accuracy of training set: train_count = 0 for i in range(num_train): #if you have 1 as your first entry if xhat[i,1] == 1: #if you were accurately labeled if feed_forward(xhat[i,:], w_layer_1, w_layer_2)[0][0,0] >= 0.5: train_count = train_count + 1 else: # if you have 0 as your first entry #if you were accurately labeled if feed_forward(xhat[i,:], w_layer_1, w_layer_2)[0][0,0] < 0.5: train_count = train_count + 1 train_accuracy = float(train_count)/float(num_train) #print 'Percentage of TRAINING points that are correctly classified is', train_accuracy #accuracy of testing set: 
test_count = 0 for i in range(num_test): #if you have 1 as your first entry if test[i,1] == 1: #if you were accurately labeled if feed_forward(test[i,:], w_layer_1, w_layer_2)[0][0,0] >= 0.5: test_count = test_count + 1 else: # if you have 0 as your first entry #if you were accurately labeled if feed_forward(test[i,:], w_layer_1, w_layer_2)[0][0,0] < 0.5: test_count = test_count + 1 test_accuracy = float(test_count)/float(num_test) #print 'Percentage of TESTING points that are correctly classified is', test_accuracy return train_accuracy, test_accuracy # + w_layer_1,w_layer_2 = mlp_training_2(750,1.5) train_acc, test_acc = accuracy_2(w_layer_1,w_layer_2) print train_acc, test_acc # - # Great! It works! Now we'll again loop over a variety of learning rates and number of iterations as we did before. # + num_iter_choices = 5 iter_choices = [100,250,500,750,1000] num_learning_rates = 6 learning_rates = [1.8,1.5,1.2,0.9,0.6,0.3] train_mean = np.zeros((num_iter_choices,num_learning_rates)) train_std = np.zeros((num_iter_choices,num_learning_rates)) test_mean = np.zeros((num_iter_choices,num_learning_rates)) test_std = np.zeros((num_iter_choices,num_learning_rates)) for i in range(num_iter_choices): #loop over iteration choicies for j in range(num_learning_rates): #loop over learning rates repeat_trial = 4 #we'll repeat the trial 4 different times so we can get a mean and std for these parameters train_acc = np.zeros((1,repeat_trial)) test_acc = np.zeros((1,repeat_trial)) for k in range(repeat_trial): w_layer_1,w_layer_2 = mlp_training_2(iter_choices[i],learning_rates[j]) train_acc[0,k], test_acc[0,k] = accuracy_2(w_layer_1,w_layer_2) train_mean[i,j] = np.mean(train_acc) train_std[i,j] = np.std(train_acc) test_mean[i,j] = np.mean(test_acc) test_std[i,j] = np.std(test_acc) # - print train_mean print print test_mean # + #### TESTING DATA RESULTS test_result_str = [['','','','','',''],['','','','','',''],['','','','','',''],['','','','','',''],['','','','','','']] for i in 
range(num_iter_choices): for j in range(num_learning_rates): test_result_str[i][j] = str(round(test_mean[i,j], 3)) + " " + u"\u00B1" + " " + str(round(test_std[i,j], 3)) # + #table for the TESTING DATA RESULTS whose entries are strings of the mean +/- the std for each (eta, #iterations) pair fig = plt.figure() ax = fig.add_subplot(111) col_labels = ('eta=1.8', 'eta=1.5', 'eta=1.2', 'eta=0.9', 'eta=0.6', 'eta=0.3') row_labels = ['100 iterations', '250 iterations', '500 iterations', '750 iterations', '1000 iterations'] the_table = plt.table(cellText=test_result_str, rowLabels=row_labels, colLabels=col_labels, loc='center') the_table.set_fontsize(30) the_table.scale(2, 2) ax.axis("off") plt.title("Percent Accuracy for the Testing Data") plt.show() #fig.savefig('Name.pdf') # + #### TRAINING DATA RESULTS train_result_str = [['','','','','',''],['','','','','',''],['','','','','',''],['','','','','',''],['','','','','','']] for i in range(num_iter_choices): for j in range(num_learning_rates): train_result_str[i][j] = str(round(train_mean[i,j], 3)) + " " + u"\u00B1" + " " + str(round(train_std[i,j], 3)) # + #table for the TRAINING DATA RESULTS whose entries are strings of the mean +/- the std for each (eta, #iterations) pair fig = plt.figure() ax = fig.add_subplot(111) col_labels = ('eta=1.8', 'eta=1.5', 'eta=1.2', 'eta=0.9', 'eta=0.6', 'eta=0.3') row_labels = ['100 iterations', '250 iterations', '500 iterations', '750 iterations', '1000 iterations'] the_table = plt.table(cellText=train_result_str, rowLabels=row_labels, colLabels=col_labels, loc='center') the_table.set_fontsize(30) the_table.scale(2, 2) ax.axis("off") plt.title("Percent Accuracy for the Training Data") plt.show() #fig.savefig('Name.pdf') # - # As in the first problem, our testing and training accuracy are comparable, which is a good sign! 
# # Let's display some of the information from the TESTING data accuracy table into graphs: # + x = [100,250,500,750,1000] #plt.figure() eta18 = plt.errorbar(x, test_mean[:,0], test_std[:,0], fmt='o', label='eta=1.8') eta09 = plt.errorbar(x, test_mean[:,3], test_std[:,3], fmt='o', label='eta=0.9') eta03 = plt.errorbar(x, test_mean[:,5], test_std[:,5], fmt='o', label='eta=0.3') plt.legend(handles=[eta18,eta09,eta03]) plt.xlabel('Number of Training Iterations of Stochastic Gradient Descent') plt.ylabel('Mean Accuracy +/- Standard Deviation') plt.title("Accuracy vs. the number of training iterations for three choices of the learning rate") plt.show() # + x = [0.3,0.6,0.9,1.2,1.5,1.8] #eta values SMALLEST to largest #plt.figure() iter100 = plt.errorbar(x, test_mean[0,:][::-1], test_std[0,:][::-1], fmt='o', label='100 iterations') #the [::-1] part reads off the list in reverse order, so graphically smaller eta is on the left iter500 = plt.errorbar(x, test_mean[2,:][::-1], test_std[2,:][::-1], fmt='o', label='500 iterations') iter1000 = plt.errorbar(x, test_mean[4,:][::-1], test_std[4,:][::-1], fmt='o', label='1000 iterations') plt.legend(handles=[iter100,iter500,iter1000]) plt.xlabel('Learning Rate') plt.ylabel('Mean Accuracy +/- Standard Deviation') plt.title("Accuracy vs. learning rate for three choices of the number of training iterations") plt.show() # - # This problem again shows the same relationship between choosing a learning rate and the number of iterations. It appears that I should've considered even larger learning rates, which might allow for similar accuracy with fewer iterations. # Now let's get a good run of the code to collect good weight values so we can plot them like a heat map. 
# + w_layer_1,w_layer_2 = mlp_training_2(1000,1.8) train_acc, test_acc = accuracy_2(w_layer_1,w_layer_2) print train_acc, test_acc # - plt.imshow(w_layer_1, cmap='Blues') plt.show() # As expected, column 1 is the most important, because this column is the weights that multiply against the first entry of the input, and for this problem, the first entry tells us everything. plt.imshow(w_layer_2, cmap='Blues') plt.show() # Even more interesting: now the column 7 is the most useful, which we could've guessed from the heat map of the weights from layer 1 because the darkest spot in column 1 is row 6 (which later corresponds to column 7 of weights from layer 2 because we add a bias to the front of the vector). Very cool to see it work! # *** # ### Problem 3 the ANN will solve: Image Classification # Our data input will be two black and white $5 \times 7$ images (one a giraffe head - imagination required - and the other $\pi$) with Gaussian noise added. Specifically, we take a $5 \times 7$ "picture" made from twenty-six 0's and nine 1's. We then flatten it into a vector $x \in R^{35}$ whose entries are either 0 or 1. Then we add Gaussian mean 0, variance 0.3 to independently to each entry. If $x$ came from the first image with noise added, then the target output will be 1. If $x$ came from the second image with noise added, then the target output will be 0. # # First, let's generate some data: # + # make the training data: dim_of_data_point = 35 alpha=0.3 #alpha in (0,1) -- smaller alpha makes each point cloud more tightly together num_train=1000 #so 250 points per cluster...make sure you picked an even number! 
center1 = [[0],[0],[0],[0],[0],[0],[0],[0],[0],[1],[0],[1],[0],[0],[0],[1],[1],[1],[1],[1],[0],[0],[0],[1],[1],[1],[0],[0],[0],[0],[0],[1],[0],[0],[0]] center2 = [[0],[0],[0],[0],[0],[0],[0],[0],[1],[1],[1],[1],[1],[0],[0],[0],[1],[0],[1],[0],[0],[0],[0],[1],[0],[1],[0],[0],[0],[0],[1],[0],[1],[0],[0]] points_c1 = (alpha * np.random.randn(35,num_train/2) + center1).T points_c2 = (alpha * np.random.randn(35,num_train/2) + center2).T #each row of points_c2 is a noisy image of pi points = np.vstack((points_c1, points_c2)) xhat = np.hstack((np.ones((num_train,1)), points)) #add ones to the front for bias purposes #while we're at it, let's make some testing data: num_test = 300 test_pts_c1 = (alpha * np.random.randn(35,num_test/2) + center1).T test_pts_c2 = (alpha * np.random.randn(35,num_test/2) + center2).T #each row of points_c2 is a noisy image of pi test_points = np.vstack((test_pts_c1, test_pts_c2)) test = np.hstack((np.ones((num_test,1)), test_points)) #we'll choose M=15 nodes in hidden layer 1 M=10 # - # First, let's take a look at the images I picked as the "centers": image1 = [[0,0,0,0,0,0,0],[0,0,1,0,1,0,0],[0,1,1,1,1,1,0],[0,0,1,1,1,0,0],[0,0,0,1,0,0,0]] image2 = [[0,0,0,0,0,0,0],[0,1,1,1,1,1,0],[0,0,1,0,1,0,0],[0,0,1,0,1,0,0],[0,0,1,0,1,0,0]] plt.imshow(image1, cmap='Blues') plt.show() plt.imshow(image2, cmap='Blues') plt.show() def mlp_training_3(num_iter,eta): #we'll need to clear the weights each time we do a run w_layer_1 = np.random.randn(M,dim_of_data_point+1) # num rows = how many things you want in the next layer, num columns = 1+num things you have in current layer w_layer_2 = np.random.randn(1,M+1) for l in range(num_iter): rand_train = np.random.randint(num_train) #select a random number from 0 to 499 train = xhat[rand_train,:] # pick out that random training point for your stochatic gradient descent ff = feed_forward(train, w_layer_1, w_layer_2) if rand_train < num_train/2: for j in range(M): for i in range(dim_of_data_point+1): w_layer_1[j,i] = 
w_layer_1[j,i] - eta * (ff[0][0,0]-1)*sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) \ *w_layer_2[0,j+1]*sigmoid(ff[3][0,j])*(1-sigmoid(ff[3][0,j]))*train[i] #update layer_2 weights for j in range(M+1): w_layer_2[0,j] = w_layer_2[0,j] - eta * (ff[0][0,0]-1)*sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) * ff[2][0,j] else: #update layer_1 weights for j in range(M): for i in range(dim_of_data_point+1): w_layer_1[j,i] = w_layer_1[j,i] - eta * ff[0][0,0] *sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) \ *w_layer_2[0,j]*sigmoid(ff[3][0,j])*(1-sigmoid(ff[3][0,j]))*train[i] #update layer_2 weights for j in range(M+1): w_layer_2[0,j] = w_layer_2[0,j] - eta * ff[0][0,0]*sigmoid(ff[1][0,0])*(1-sigmoid(ff[1][0,0])) * ff[2][0,j] return w_layer_1,w_layer_2 def accuracy_3(w_layer_1, w_layer_2): #accuracy of training set: train_count = 0 for i in range(num_train): #if you have 1 as your first entry if i<num_train/2: #if you were accurately labeled if feed_forward(xhat[i,:], w_layer_1, w_layer_2)[0][0,0] >= 0.5: train_count = train_count + 1 else: # if you have 0 as your first entry #if you were accurately labeled if feed_forward(xhat[i,:], w_layer_1, w_layer_2)[0][0,0] < 0.5: train_count = train_count + 1 train_accuracy = float(train_count)/float(num_train) #print 'Percentage of TRAINING points that are correctly classified is', train_accuracy #accuracy of testing set: test_count = 0 for i in range(num_test): #if you have 1 as your first entry if i<num_test/2: #if you were accurately labeled if feed_forward(test[i,:], w_layer_1, w_layer_2)[0][0,0] >= 0.5: test_count = test_count + 1 else: # if you have 0 as your first entry #if you were accurately labeled if feed_forward(test[i,:], w_layer_1, w_layer_2)[0][0,0] < 0.5: test_count = test_count + 1 test_accuracy = float(test_count)/float(num_test) #print 'Percentage of TESTING points that are correctly classified is', test_accuracy return train_accuracy, test_accuracy # + w_layer_1,w_layer_2 = mlp_training_3(150,0.9) train_acc, test_acc = 
accuracy_3(w_layer_1,w_layer_2) print train_acc, test_acc # - # Excellent! It worked! Now to loop over a variety of parameters as before. This time, we find that we don't need as many iterations, even for the same size learning rates. # + num_iter_choices = 4 iter_choices = [10,50,100,150] num_learning_rates = 6 learning_rates = [1.7,1.3,0.9,0.7,0.5,0.3] train_mean = np.zeros((num_iter_choices,num_learning_rates)) train_std = np.zeros((num_iter_choices,num_learning_rates)) test_mean = np.zeros((num_iter_choices,num_learning_rates)) test_std = np.zeros((num_iter_choices,num_learning_rates)) for i in range(num_iter_choices): #loop over iteration choicies for j in range(num_learning_rates): #loop over learning rates repeat_trial = 4 #we'll repeat the trial 4 different times so we can get a mean and std for these parameters train_acc = np.zeros((1,repeat_trial)) test_acc = np.zeros((1,repeat_trial)) for k in range(repeat_trial): w_layer_1,w_layer_2 = mlp_training_3(iter_choices[i],learning_rates[j]) train_acc[0,k], test_acc[0,k] = accuracy_3(w_layer_1,w_layer_2) train_mean[i,j] = np.mean(train_acc) train_std[i,j] = np.std(train_acc) test_mean[i,j] = np.mean(test_acc) test_std[i,j] = np.std(test_acc) # - print train_mean print print test_mean # + #### TESTING DATA RESULTS test_result_str = [['','','','','',''],['','','','','',''],['','','','','',''],['','','','','','']] for i in range(num_iter_choices): for j in range(num_learning_rates): test_result_str[i][j] = str(round(test_mean[i,j], 3)) + " " + u"\u00B1" + " " + str(round(test_std[i,j], 3)) # + #table for the TESTING DATA RESULTS whose entries are strings of the mean +/- the std for each (eta, #iterations) pair fig = plt.figure() ax = fig.add_subplot(111) col_labels = ('eta=1.7', 'eta=1.3', 'eta=0.9', 'eta=0.7', 'eta=0.5', 'eta=0.3') row_labels = ['10 iterations', '50 iterations', '100 iterations', '150 iterations'] the_table = plt.table(cellText=test_result_str, rowLabels=row_labels, colLabels=col_labels, 
loc='center') the_table.set_fontsize(30) the_table.scale(2, 2) ax.axis("off") plt.title("Percent Accuracy for the Testing Data") plt.show() #fig.savefig('Name.pdf') # + #### TRAINING DATA RESULTS train_result_str = [['','','','','',''],['','','','','',''],['','','','','',''],['','','','','','']] for i in range(num_iter_choices): for j in range(num_learning_rates): train_result_str[i][j] = str(round(train_mean[i,j], 3)) + " " + u"\u00B1" + " " + str(round(train_std[i,j], 3)) # + #table for the TRAINING DATA RESULTS whose entries are strings of the mean +/- the std for each (eta, #iterations) pair fig = plt.figure() ax = fig.add_subplot(111) col_labels = ('eta=1.7', 'eta=1.3', 'eta=0.9', 'eta=0.7', 'eta=0.5', 'eta=0.3') row_labels = ['10 iterations', '50 iterations', '100 iterations', '150 iterations'] the_table = plt.table(cellText=train_result_str, rowLabels=row_labels, colLabels=col_labels, loc='center') the_table.set_fontsize(30) the_table.scale(2, 2) ax.axis("off") plt.title("Percent Accuracy for the Training Data") plt.show() #fig.savefig('Name.pdf') # - # Again, our testing and training accuracy are comparable, which is a good sign! # # Let's display some of the information from the TESTING data accuracy table into graphs: # + x = [10,50,100,150] #plt.figure() eta17 = plt.errorbar(x, test_mean[:,0], test_std[:,0], fmt='o', label='eta=1.7') eta09 = plt.errorbar(x, test_mean[:,2], test_std[:,2], fmt='o', label='eta=0.9') eta05 = plt.errorbar(x, test_mean[:,4], test_std[:,4], fmt='o', label='eta=0.5') plt.legend(handles=[eta17,eta09,eta05]) plt.xlabel('Number of Training Iterations of Stochastic Gradient Descent') plt.ylabel('Mean Accuracy +/- Standard Deviation') plt.title("Accuracy vs. 
the number of training iterations for three choices of the learning rate") plt.show() # + x = [0.3,0.5,0.7,0.9,1.3,1.7] #eta values SMALLEST TO LARGEST #plt.figure() iter10 = plt.errorbar(x, test_mean[0,:][::-1], test_std[0,:][::-1], fmt='o', label='10 iterations') #the [::-1] part reads off the list in reverse order, so graphically smaller eta is on the left iter50 = plt.errorbar(x, test_mean[1,:][::-1], test_std[1,:][::-1], fmt='o', label='50 iterations') iter150 = plt.errorbar(x, test_mean[3,:][::-1], test_std[3,:][::-1], fmt='o', label='150 iterations') plt.legend(handles=[iter10,iter50,iter150]) plt.xlabel('Learning Rate') plt.ylabel('Mean Accuracy +/- Standard Deviation') plt.title("Accuracy vs. learning rate for three choices of the number of training iterations") plt.show() # - # This problem again shows the same relationship between choosing a learning rate and the number of iterations. It appears that I really should've considered even larger learning rates, which might allow for similar accuracy with fewer iterations. # Now let's make a heat map of the weights for a good collection of weights. First we make such "good weights". # + w_layer_1,w_layer_2 = mlp_training_3(200,1.7) train_acc, test_acc = accuracy_3(w_layer_1,w_layer_2) print train_acc, test_acc # - # Now if we simply displayed the weights for layer 1, we'd have get a heat map that has 10 $\times$ 36 boxes, the first column is the bias weight (to allow linear combinations not through the origin), and the remaining 35 columns relate to weights applied to a certain "pixel" of the 5 $\times$ 7 image; e.g. columns 2-8 are the weights that go to the top row of the image. Thus we see that instead of displaying the weights in layer 1 as a 10 $\times$ 36 map, it's more informative to make each row into a 5$\times$7 matrix and give all the bias weights separately. We do this now. # this simply shows all of the weights from layer 1 as a 10x36 matrix. 
plt.imshow(w_layer_1, cmap='Blues') plt.show() for i in range(M): plt.imshow(np.reshape(w_layer_1[i,1:36],(5,7)), cmap='Blues') plt.show() # here are the weights for the bias components plt.imshow(np.reshape(w_layer_1[:,0],(10,1)), cmap='Blues') plt.show() #lastly, we show the weights from layer 2 plt.imshow(w_layer_2, cmap='Blues') plt.show() # So what did we learn from this? From the layer 2 weights, we see that the 5th column is the heaviest weighted. So now we'll look back at the 4th image of weights (4th because the first column of weights_layer_2 is for the bias), and we expect to see dark spots in places where the two original images differ. Indeed, this is true. Very cool to visualize!
multilayer_perceptron_complete.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import json import os import pandas as pd from os import listdir from os.path import isfile, join # - path = os.getcwd() path # ### sample_click_stream with open(path+'/sample_click_stream/part-00000-c14add30-acfa-4196-96e6-1435f3d1a18a-c000.json', encoding = 'utf-8') as f: log = pd.DataFrame(json.loads(line) for line in f) click_example = log[log['account_id']==11175352].sort_values(by=['request_date_time']) log click_example # ### sample_accounts with open(path+'/sample_accounts/part-00000-c0992726-860e-4a76-923a-b03ca563a001-c000.json', encoding = 'utf-8') as f: acc = pd.DataFrame(json.loads(line) for line in f) files = [f for f in listdir(path+'/sample_accounts') if isfile(join(path+'/sample_accounts', f))] json_files = pd.DataFrame({"file_id" : files}) json_files for i in range(199): with open(path+'/sample_accounts/'+json_files['file_id'][i+1], encoding = 'utf-8') as f: tmp = pd.DataFrame(json.loads(line) for line in f) acc = pd.concat([acc, tmp]) acc # ### sample_orders with open(path+'/sample_orders/part-00000-aea0677d-bd53-4596-a545-18c40e5840b4-c000.json', encoding = 'utf-8') as f: order = pd.DataFrame(json.loads(line) for line in f) files = [f for f in listdir(path+'/sample_orders') if isfile(join(path+'/sample_orders', f))] json_files = pd.DataFrame({"file_id" : files}) json_files for i in range(199): with open(path+'/sample_orders/'+json_files['file_id'][i+1], encoding = 'utf-8') as f: tmp = pd.DataFrame(json.loads(line) for line in f) order = pd.concat([order, tmp]) order # ### sample_products with open(path+'/sample_products/part-00000-7e5849e0-60c6-4afb-8e03-924fb746da68-c000.json', encoding = 'utf-8') as f: pro = pd.DataFrame(json.loads(line) for line in f) files = [f for f in listdir(path+'/sample_products') if 
isfile(join(path+'/sample_products', f))] json_files = pd.DataFrame({"file_id" : files}) json_files for i in range(199): with open(path+'/sample_products/'+json_files['file_id'][i+1], encoding = 'utf-8') as f: tmp = pd.DataFrame(json.loads(line) for line in f) pro = pd.concat([pro, tmp]) pro pro['shop_price'].astype(float).describe() order['price'].astype(float).describe() click_example.count() pd.merge(click_example, pro, how = 'left', on = 'product_id') pro[pro['product_id'] == 17092788] order from datetime import datetime print(datetime.fromtimestamp(1588731078973/1000.0)) order["created_at_map"] = order.apply(lambda row: datetime.fromtimestamp(row["created_at"]/1000.0), axis = 1) order.drop(['created_at'], axis = 1, inplace = True) tmp = order[order['account_id'] == 11175352] tmp.drop(['price'], axis = 1, inplace = True) tmp tmp = pd.merge(tmp, pro, how = 'left', on = 'product_id') click_tmp = pd.merge(click_example, tmp, how = 'left', on = 'product_id') pro # ### category path with open(path+'/Category_information.json', encoding = 'utf-8') as f: cat = pd.DataFrame(json.loads(line) for line in f) pro_tmp = pro.copy() pro_tmp['category_id'] = pro_tmp.apply(lambda row: row['category_id'][:6], axis = 1) pro_tmp tmp = pd.merge(pro_tmp, cat, how = 'left', on = 'category_id') tmp['category_id'].value_counts().plot(kind = 'barh', figsize = (6, 10)) tmp['category_id'].nunique() cat[cat['category_id']=='001008'] acc[acc['account_id'] == 11175352] click_tmp log['account_id'].nunique() a = acc['account_id']==11175352 b = acc['account_id']==1731150 acc_tmp = acc[a|b] acc_tmp = acc_tmp.drop_duplicates() acc_tmp a = log['account_id']==11175352 b = log['account_id']==1731150 log_tmp = log[a|b] log_tmp = log_tmp.drop_duplicates() log_tmp acc[:60] acc_pre = acc.copy() acc_pre acc_pre.info() acc_pre['account_id'] = acc_pre['account_id'].astype(str) object_col = [col for col in acc_pre.columns if acc_pre[col].dtypes == 'object'] object_col acc_pre['age'] from 
sklearn.preprocessing import MinMaxScaler scaler =MinMaxScaler() acc_pre['age'] = pd.DataFrame(scaler.fit_transform(acc_pre[['age']])) acc_pre['address'].fillna('') import numpy as np acc_pre['address'] = acc_pre['address'].replace(np.nan,'') acc_pre.reset_index().iloc[0]['address'].split(' ')[0] acc_pre = acc_pre.reset_index() for i in range(len(acc_pre['address'])): if '경기' in acc_pre.reset_index().iloc[i]['address'].split(' ')[0]: acc_pre['loc'][i] = 0 elif '서울' in acc_pre.reset_index().iloc[i]['address'].split(' ')[0]: acc_pre['loc'][i] = 0 else: acc_pre['loc'][i] = 1 from sklearn.preprocessing import LabelEncoder encoder = LabelEncoder() acc_pre['gender'] = acc_pre['gender'].fillna('M') acc_pre['gender'] acc_pre['gender'] = pd.DataFrame(encoder.fit_transform(acc_pre['gender'])) acc_pre = acc_pre[['account_id','gender','age','loc']] acc_sort = acc_pre.sort_values(by=['gender','age','loc']).reset_index()[['account_id','gender','age','loc']] acc_sort log log.info() log_pre = log.copy() log_pre['device_type'] = pd.DataFrame(encoder.fit_transform(log_pre['device_type'])) log_pre pro_pre = pro.copy() pro_pre = pro_pre.reset_index().drop(columns='index') pro_pre pro_pre['shop_price'] = scaler.fit_transform(pro_pre[['shop_price']]) pro_pre['published_at']= pro_pre['published_at'].fillna('2099-12-31') pro_pre['published_at'] = pro_pre['published_at'].astype('datetime64') import datetime click = datetime.date.today() -datetime.timedelta(3) for i in range(len(pro_pre['published_at'])): pro_pre['published_at'][i] = (pro_pre['published_at'][i].date() - datetime.date.today()).days (pro_pre['published_at'][0].date() - datetime.date.today()).days pro_pre['published_at'] = scaler.fit_transform(pro_pre[['published_at']]) pro_pre for i in range(len(pro_pre['category_id'])): pro_pre['category_id'][i] = pro_pre['category_id'][i][3:6] cat_dummy = pd.get_dummies(pro_pre['category_id'], prefix='cat') pro_pre = pd.concat([pro_pre,cat_dummy],axis=1) pro_pre order_check2 order 
order[['account_id','product_id']] = order[['account_id','product_id']].astype(str) order_check = Log[['account_id','product_id']].drop_duplicates() order_check1 = Log['account_id'].unique().tolist() order_check1 order_check2 = Log['product_id'].unique().tolist() order_check2 = [str(product) for product in order_check2] order_check2 order_check_fin = order[order['account_id'].isin(order_check1)] order_check_fin U1 = acc.sort_values(by=['gender','age','zip_code'])[50:][0:2] U1 U1 = acc_sort[14976:][0:2] U1 U1_list = acc_sort[14976:][0:2]['account_id'].values.tolist() U1_list U2 = acc.sort_values(by=['gender','age','zip_code'])[30000:][0:2] U2 U2 = acc_sort[30859:][0:2] U2 U2_list = acc.sort_values(by=['gender','age','zip_code'])[30000:][0:2]['account_id'].values.tolist() U2_list U2_list = acc_sort[30859:][0:2]['account_id'].values.tolist() U2_list U1_list = [int(U1) for U1 in U1_list] U2_list = [int(U2) for U2 in U2_list] [ide for ide in log['account_id'] if ide in U2_list] User = pd.concat([U1,U2]) User User Log = pd.DataFrame() for U1 in U1_list: Log = pd.concat([Order,log_pre[log_pre['account_id']==U1]],axis=0) for U2 in U2_list: Log = pd.concat([Order,log_pre[log_pre['account_id']==U2]],axis=0) Log Log['account_id']= Log['account_id'].astype(str) U = pd.merge(User, Log, on = 'account_id') U book_list = U['product_id'].unique().tolist() book_list pro_pre cat_list = [col for col in pro_pre.columns if (col[0:3] == 'cat') & (col !='category_id')] cat_list Book = pd.DataFrame() for bookid in book_list: Book = pd.concat([Book,pro_pre[pro_pre['product_id']==bookid]],axis=0) Book = pd.concat([Book[['product_id','published_at','shop_price']],Book[cat_list]],axis=1) Book U U U['product_id'] = U['product_id'].astype(str) U_fin = pd.merge(U,order_check_fin,how='left') U_fin = U_fin[0:54] U_fin U_fin['buy'] = np.nan for i in range(len(U_fin)): if U_fin['price'][i] == np.nan: U_fin['buy'][i] = 0 else: U_fin['buy'][i] = 1 U_fin['price'][0]
0.preprocessing/1.0sample_data_preprocess.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example 4. Karate club network # In this example, we fit SBMs to the famous Zachary's Karate Club network[1]. The university's karate club has split up into two groups due to the internal conflict. The available information is the social interactions of 34 participants outside the club. The task is to separate the individuals into groups using this data, and the members of each faction are known. As the number of connections significantly varies among the persons, it was demonstrated that DCSBM better describes this network as the standard SBM, those treat the node's degree as the main feature[2]. # # ![](images/karate_dcsbm.png) # #### Zachary's Karate club network divided into groups by DCSBM [2] # # [1] <NAME>, An information flow model for conflict and fission in small groups, Journal of Anthropological Research 33, 452-473 (1977). # # [2] <NAME> and <NAME>, Stochastic blockmodels and community structure in networks, Phys. Rev. E 83, 016107 (2011). import torch import numpy as np np.set_printoptions(precision=2) import pandas as pd import matplotlib.pyplot as plt from virgmo.vi_sbm import VI_SBM, VI_DCSBM from virgmo.graph_models import SBM, DCSBM, EdgesDataset import seaborn as sns sns.reset_orig() torch.manual_seed(42) # We start with the data import and aggregate the links into the adjacency matrix $A$. The true group assignments `z_true` are known. # + karate = pd.read_csv('../data/karate', sep=' ') N = 34 num_classes = 2 A = torch.zeros((N,N)) for e in np.array(karate): A[e[0]-1,e[1]-1] = 1 A[e[1]-1,e[0]-1] = 1 z_true = torch.tensor([ [0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0]]).t() # - # Here is the network's adjacency matrix $A$. 
# +
# BUG FIX: DataLoader is used below but was never imported in this notebook.
from torch.utils.data import DataLoader

sns.reset_orig()


def class_sizes(classes):
    """Size of every class, ordered by class index (was an assigned lambda)."""
    return np.array(pd.Series(classes.numpy()).value_counts().sort_index())


cs = class_sizes(z_true.argmax(dim=1))
order = z_true.argmax(dim=1).argsort()

fig, axs = plt.subplots(1, 2, figsize=(18, 9))
axs[0].imshow(A.numpy(), cmap='viridis')
ax = axs[1].imshow(A[order, :][:, order].numpy(), cmap='viridis')

# Red lines mark the class boundaries in the reordered adjacency matrix.
acc = -0.5
for i in cs[:-1]:
    acc += i
    axs[1].axvline(acc, color='r', linestyle='-', linewidth=1, alpha=0.9)
    axs[1].axhline(acc, color='r', linestyle='-', linewidth=1, alpha=0.9)

fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.83, 0.188, 0.02, 0.615])
fig.colorbar(ax, cax=cbar_ax, aspect=30)
plt.savefig('../examples/images/ex4_A.pdf')
plt.show()
# -

# ## SBM
# First, we try the standard SBM. The default prior distribution in `VIRGMo`
# is quite flat, but for this example we may use a sharper one. We assume that
# the sizes of both groups are similar, and that the nodes are connected more
# densely within the groups than between them.

# +
theta_p = torch.ones([num_classes]) * 5
# Prior on the connection-probability matrix: dense within groups, sparse between.
w_p = torch.tensor([
    [[9., 1.], [1., 9.]],
    [[1., 9.], [9., 1.]]
])

dataloader = DataLoader(EdgesDataset(A.float()), batch_size=N, shuffle=True,
                        num_workers=0)
vi = VI_SBM(num_nodes=N, num_classes=num_classes,
            priors={'theta_p': theta_p, 'B_p': w_p},
            init_values={'etas': None, 'thetas': None, 'Bs': None})
vi.multi_train(dataloader, epochs=20, lrs=[0.1, 0.05, 0.01], trials=20)
# -

best_trial = vi.get_multi_losses()[:, -1].argmin()  # trial with the smallest loss at the end
vi.load_state_dict(vi.state_dicts[best_trial])  # Load the parameters from the best trial
q_eta, q_theta, q_B = vi.constrained_params()

vi.multi_results[1][best_trial]  # Class probability of the best trial

print('True class probabilities:', z_true.sum(dim=0).float()/34)

# We see that the standard SBM struggles to find the correct class
# probabilities $\theta$. Let us look at the accuracy of the class assignments
# in all trials.
# Per-trial accuracy of the SBM class assignments against the known factions.
for i in range(len(vi.multi_results[0])):
    print(vi.class_accuracy(z_true, vi.multi_results[0][i]).item())

# It is clear that the standard SBM does not suit this task. Next, we try the
# DCSBM.

# ## DCSBM
# Here, we use the same prior as for the SBM. As in the previous examples we
# initialize $\delta$ with the observed degree distributions.

theta_p = torch.ones([num_classes]) * 5
w_p = torch.tensor([
    [[9., 1.], [1., 9.]],
    [[1., 9.], [9., 1.]]
])

# Initialize the degree-correction parameters from the normalized node degrees.
delta_init = torch.ones([N, 2])
delta_mu = A.sum(dim=1) / A.sum(dim=1).mean()
delta_init[:, 0] = delta_mu.log().clone()
# log() of a zero-degree node would produce -inf; verify none slipped in.
print('Contains infinities:', bool(torch.isinf(delta_init).sum().item()))

# +
# BUG FIX: DataLoader was never imported in this notebook; also, A is already a
# torch.Tensor, so wrapping it in torch.tensor(A) triggers a warning and an
# extra copy -- use A.float() directly, consistent with the SBM cell above.
from torch.utils.data import DataLoader

dataloader = DataLoader(EdgesDataset(A.float()), batch_size=N, shuffle=True,
                        num_workers=0)
vi = VI_DCSBM(num_nodes=N, num_classes=num_classes,
              priors={'theta_p': theta_p, 'B_p': w_p, 'delta_p': None},
              init_values={'etas': None, 'thetas': None, 'Bs': None,
                           'deltas': delta_init})
vi.multi_train(dataloader, epochs=20, lrs=[0.1, 0.05, 0.01], trials=10)
# -

best_trial = vi.get_multi_losses()[:, -1].argmin()  # trial with the smallest loss at the end
vi.load_state_dict(vi.state_dicts[best_trial])  # Load the parameters from the best trial
q_eta, q_theta, q_B, q_delta = vi.constrained_params()

vi.multi_results[1][best_trial]  # Class probability of the best trial

theta_true = z_true.sum(dim=0).float()/34
print('True class probabilities:', theta_true)

# Again, let us look at the class accuracy.

for i in range(len(vi.multi_results[0])):
    print(vi.class_accuracy(z_true, vi.multi_results[0][i]).item())

# The class assignments of the DCSBM are correct in the majority of the
# trials. Further, we may inspect the estimated $\theta$ and $B$ distributions.
# BUG FIX: Dirichlet and Beta are used below but were never imported.
from torch.distributions import Beta, Dirichlet

# The inferred class labels are arbitrary; permute them to match the true ones.
permutation = [1, 0]
q_theta_perm = q_theta.detach()[permutation]

num_samples = 1000
# Sample from the variational Dirichlet posterior of the class probabilities.
theta_samples = Dirichlet(q_theta_perm).sample([num_samples])

sns.set()
fig, axs = plt.subplots(1, num_classes, figsize=(18, 4), sharey=True)
bins, alpha = 20, 0.8
for i in range(num_classes):
    axs[i].hist(theta_samples[:, i], bins=bins, alpha=alpha)
    # Red line: the true class probability for comparison.
    axs[i].vlines(theta_true[i], 0, num_samples*0.15, colors='r')
fig.suptitle(r'Posterior class probabilities $\theta$', fontsize=18)
#plt.savefig('../examples/images/ex4_theta_dcsbm.pdf')
plt.show()

# +
# Same permutation applied to both class axes of the Beta-distributed B matrix.
q_B_perm = q_B.detach()[permutation, :, :][:, permutation, :]
B_samples = Beta(q_B_perm[:, :, 0], q_B_perm[:, :, 1]).sample([num_samples])

fig, axs = plt.subplots(num_classes, num_classes, figsize=(18, 18), sharey=True)
bins, alpha = 20, 0.8
for i in range(num_classes):
    for j in range(num_classes):
        axs[i, j].hist(B_samples[:, i, j], bins=bins, alpha=alpha)
fig.suptitle(r'Posterior connection probabilities $B$', fontsize=18)
#plt.savefig('../examples/images/ex4_B_dcsbm.pdf')
plt.show()
# -
examples/Example 4 - Karate club network.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
# -

# # Study on the Football Statistics and the Odds
# ### by <NAME>

# ## Abstract
# This paper studies how and when goals are scored, then compares the odds
# which bookies give to games with the real results. In the final part we use
# a couple of models to predict results.

# ## Introduction
# Statistics and football have been getting closer during the past decade. In
# this paper we use some of the statistics collected from _oddsportal_ to
# answer several interesting questions. When are most goals scored? Where and
# who are the best free-kick takers? Are the odds justified, and is there a
# better way to predict scores? The data gives us a lot of possibilities, but
# we will use a small portion of it so we can answer the given questions.

# ## Data Manipulation
# ### Data Collection
# The data is divided into 2 datasets; ```ginf.csv``` holds the data from
# _oddsportal_ with the odds for the games.
# The columns in the dataset are:
# * id_odsp - unique game identifier
# * link_odsp - link to oddsportal page
# * adv_stats - boolean if the game has detailed event data
# * data
# * league
# * country
# * season
# * ht - home team
# * at - away team
# * fthg - full time home goals
# * ftag - full time away goals
# * odd_h - home win odds
# * odd_d - draw odds
# * odd_a - away win odds
# * odd_over - over 2.5 goals odds
# * odd_under - under 2.5 goals odds
# * odd_bts - both teams to score odds
# * odd_bts_n - both teams not to score odds
#
# In ```events.csv``` is the data from the events. We will use it to compare
# with the data from ```ginf.csv``` and see if there are other factors which
# affect games. The columns in the dataset are:
#
# * id_odsp - unique game identifier
# * id_event - unique identifier of event (id_odsp + sort_order)
# * sort_order - chronological sequence of events in a game
# * time - minutes of the game
# * text - text commentary
# * event_type - primary event. 11 unique events
# * event_type2 - secondary event. 4 unique events
# * side - 1-Home, 2-Away
# * event_team - team that produced the event
# * opponent - team that the event happened against
# * player - name of the player involved in the main event
# * player2 - name of player involved in secondary event
# * player_in - substitute in
# * player_out - substitute out
# * shot_place - 13 possible placements of the shot
# * shot_outcome
# * is_goal
# * location - location on the pitch
# * bodypart
# * assist_method
# * situation
# * fast_break

# +
# Load the raw data and drop the columns that this study does not use.
odds_data = pd.read_csv('data/ginf.csv')
events_data = pd.read_csv('data/events.csv')

odds_data = odds_data.drop(['link_odsp', 'adv_stats', 'season', 'odd_over',
                            'odd_under', 'odd_bts', 'odd_bts_n'], axis=1)
events_data = events_data.drop(['fast_break', 'sort_order', 'assist_method',
                                'player2', 'text', 'player_in', 'player_out'],
                               axis=1)
# -

odds_data.head()

# Rename the columns to more readable names.
odds_data.columns = ['id', 'data', 'league', 'country', 'home_team',
                     'away_team', 'home_goals', 'away_goals', 'odd_home',
                     'odd_draw', 'odd_away']
# BUG FIX: the original named two columns 'show_place'/'show_outcome'; they
# hold the shot placement / shot outcome codes described in the dictionary
# below (no later cell referenced the misspelt names).
events_data.columns = ['id_game', 'id_event', 'time', 'primary_event',
                       'secondary_event', 'side', 'event_team', 'opponent',
                       'player', 'shot_place', 'shot_outcome', 'is_goal',
                       'location', 'bodypart', 'situation']

# Most of the stats are stored as integer codes and each code corresponds to
# an entry in the dictionary file, which holds the full meaning of the values
# in the columns.
encoding = pd.read_csv('data/dictionary.txt', sep='\t', index_col=0,
                       names=('id', 'event_type'), header=None)
primary_event = encoding[1:12]
secondary_event = encoding[14:18]
side = encoding[19:21]
shot_place = encoding[22:35]
shot_outcome = encoding[36:40]
location = encoding[41:60]
bodypart = encoding[61:64]
assist_method = encoding[65:70]
situation = encoding[71:75]  # renamed from the original misspelt 'situition'

primary_event

secondary_event

side

shot_place

shot_outcome

location

bodypart

assist_method

situation

# ## When Goals are Scored
# Let us see when the teams score most and whether this corresponds to the
# odds. The logical assumption would be that the goals are almost equally
# distributed.
# +
# Keep only the events that resulted in a goal.
events_data['is_goal'] = events_data['is_goal'].astype('bool')
goals = events_data[events_data['is_goal']]
# NOTE: the original also bound `substitude` to the same goal filter -- an
# apparent copy-paste slip; the variable was never used, so it is removed.

fig = plt.figure(figsize=(8, 6))
plt.hist(goals.time, width=1, bins=100, color='orange')
plt.xlabel('Minutes')
plt.ylabel('Number of goals')
plt.title('Number of goals per minute in games')
plt.show()
# -

# We can easily see spikes around the 45th and 90th minute; those are mainly
# because of the way data is reported in the games. Goals in the extra time
# are reported in the 45th and 90th minute respectively, and because of that
# the spikes exist. Given that, let us see how many more goals are scored in
# the second half compared to the first half.

goals_first_half = np.size(goals[goals.time <= 45])
# BUG FIX: the original used `goals.time <= 90`, which counts *all* goals up
# to minute 90 (i.e. both halves), so the printed ratio was total/first-half.
# The second half is 45 < time <= 90.
goals_second_half = np.size(goals[(goals.time > 45) & (goals.time <= 90)])
print((goals_second_half/goals_first_half))

# Out of those goals the distribution of home/away goals is as follows:

fig = plt.figure(figsize=(8, 6))
plt.hist(goals[goals['side']==1]['time'], width=1, bins=100, color='green',
         label='home goals', alpha=0.5)
plt.hist(goals[goals['side']==2]['time'], width=1, bins=100, color='red',
         label='away goals', alpha=0.5)
plt.xlabel('Minutes')
plt.ylabel('Number of goals')
plt.title('Number of goals (by home/away side) against Time during match')
plt.legend()
plt.show()

# For every minute, more goals are scored by the home team than by the away
# team. This supports the general notion that the home team has the advantage.
# On the other hand it completely dismisses our assumption of near-equal goals
# in the first and second half: in the second half there are far more goals.

# ### How goals are scored
# An interesting statistic is also how the goals are scored.

def show_pie_chart_bodyparts(goals_df, title='Percentage of bodyparts for goals'):
    """ Shows a pie chart as a subplot.
    @input goals_df - dataframe with the goals. Must have the column bodypart
    @input title: default = 'Percentage of bodyparts for goals' - title of the pie chart
    @return - none
    """
    plt.subplot(2,1,1)
    labels=['Right Foot', 'Left Foot', 'Headers']
    # bodypart codes: 1 = right foot, 2 = left foot, 3 = head (see dictionary)
    sizes=[goals_df[goals_df['bodypart']==1].shape[0],
           goals_df[goals_df['bodypart']==2].shape[0],
           goals_df[goals_df['bodypart']==3].shape[0]]
    colors=['cyan', 'grey', 'pink']
    plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', startangle=60)
    plt.axis('equal')
    plt.title(title, fontsize=14, fontweight="bold")
    fig=plt.gcf()
    fig.set_size_inches(10,10)
    plt.show()

show_pie_chart_bodyparts(goals)

# The logic is quite clear, as most of the players are right-footed. The
# percentages look a bit different when we see how they go in the different
# leagues:

# BUG FIX: the original used goals.assign(league=odds_data['league']), which
# pairs rows by positional index -- odds_data is indexed per *game* while
# goals is indexed per *event*, so leagues were attached to the wrong goals.
# Join the league through the game identifier instead.
goals = goals.merge(odds_data[['id', 'league']], left_on='id_game',
                    right_on='id', how='left').drop(columns='id')
grouped_goals = goals.groupby('league')
# (removed the original unused `index = ['Right foot', 'Left foor', 'Head']`)
ax = grouped_goals.bodypart.value_counts().unstack(0).plot.bar()
ax.legend(['Germany', 'England', 'France', 'Italy', 'Spain'])
plt.xticks(np.arange(3), ('Right Foot', 'Left Foot', 'Head'))
plt.title('Goals grouped by bodypart for the five leagues')
plt.ylabel('Percentage of the goals')
plt.xlabel('Bodypart')
plt.show()

# Surprisingly, most goals with the head are scored in France and Spain, while
# in countries famous for a more "physical" approach, such as England and
# Germany, the statistics are very different. This confirms the data in the
# research made by <NAME>, <NAME> and <NAME> -
# [Player valuation in European football](https://www.ida.liu.se/~nikca89/papers/mlsa18-football.pdf).
# If we look into the situations which lead to a goal, we might see more
# clearly why this result has happened.
# +
plt.subplot(2,1,1)
# situation codes: 1 = open play, 2 = set piece, 3 = corner, 4 = direct free kick
labels=['Open Play', 'Set Piece(Excluding direct Free kick)','Corners','Direct Free Kick']
sizes=[goals[goals['situation']==1].shape[0],
       goals[goals['situation']==2].shape[0],
       goals[goals['situation']==3].shape[0],
       goals[goals['situation']==4].shape[0]]
colors=['cyan','grey','blue','yellow']
plt.pie(sizes,labels=labels,colors=colors,autopct='%1.1f%%',startangle=60)
plt.axis('equal')
plt.title('Percentage of each situation for goals',fontsize=14,fontweight='bold')
fig=plt.gcf()
fig.set_size_inches(10,10)

ax = grouped_goals.situation.value_counts().unstack(0).plot.bar()
ax.legend(['Germany', 'England', 'France', 'Italy', 'Spain'])
# BUG FIX: the original xticks call had broken quoting
# ('Corners","Direct Free Kick' was a single garbled string) and used
# np.arange(5) for the 4 situation categories.
plt.xticks(np.arange(4), ('Open Play', 'Set Piece', 'Corners', 'Direct Free Kick'))
plt.title('Goals grouped by situation for the five leagues')
plt.ylabel("Percentage of the goals")
plt.xlabel('Situation')
plt.show()
# -

# Again in Spain and France a lot of goals are scored from set pieces and
# corners, so crossings from open play are not so common, and because of that
# most goals are scored with the head. Italy has very few goals scored from
# set pieces and corners, which confirms the lower number of goals scored with
# the head. From this data it also seems that Spain has the best direct
# free-kick takers, as most of the goals from free kicks are scored there.
#
# Let us find the best free-kick takers and see if this is true.

free_kicks=events_data[(events_data['situation']==4)]
scored_free_kicks = free_kicks[free_kicks['is_goal']]
# Top five scorers from direct free kicks.
top_five_players = scored_free_kicks.groupby('player')['player'].count().nlargest(5)
plt.xlabel('goals')
top_five_players.plot.barh()
plt.show()

# The domination in free kicks is also visible in the player statistics: the
# top two players are from Spain and the other three are from Italy (in the
# data, <NAME> still plays for Real Madrid). Let us see the conversion rate
# for the top 5 players and which one is the most deadly from free kicks.
# +
def free_kicks_conversion_rate(player):
    '''
    How many of the free kicks which a player takes are converted into goals.
    @input player
    @return percentage of the free-kicks which convert into goals.
    '''
    taken = free_kicks[free_kicks['player'] == player]
    converted = taken[taken['is_goal']]
    rate = converted.shape[0] / taken.shape[0]
    return (player, rate)


# Conversion rate for each of the five most prolific free-kick scorers.
all_players = [free_kicks_conversion_rate(name)
               for name in top_five_players.index.values.tolist()]

df = pd.DataFrame(all_players, columns=['player', 'conversion rate'])
df.plot(kind='barh', x='player', legend=False)
plt.xlabel('conversion rate')
plt.show()
# -

# Although both players from Spain have scored more goals, the players from
# Italy have a far better conversion rate and prove to be much more potent
# from free kicks.

# ### Odds Comparison
# Odds are interpreted as the amount you will receive back for every 1 USD
# you bet on that result. For example, if the odds for a home win are 4, you
# will receive 4 USD for every 1 USD you bet on a home win. Thus, from the
# perspective of the bookmaker, they would set a lower odd for the result they
# predict. The following function returns the bookmakers' predicted result of
# a match, based on the lowest odds among the three results of a particular
# match.
def odds_pred_result(odds_home, odd_draw, odd_away):
    '''
    Return the prediction implied by the bookies' odds: the outcome with the
    strictly lowest odds.
    @input odds_home, odd_draw, odd_away
    @return 'Home Win' / 'Draw' / 'Away Win', or None when the lowest odds are
            shared by two outcomes (kept from the original behaviour: a tie is
            "no prediction" and counts as a wrong guess in check_prediction).
    '''
    if odds_home < odd_draw and odds_home < odd_away:
        return ('Home Win')
    elif odd_draw < odds_home and odd_draw < odd_away:
        return ('Draw')
    elif odd_away < odd_draw and odd_away < odds_home:
        return ('Away Win')
    return None


# And this function will return the actual result:

# +
def actual_result(home_goals, away_goals):
    '''
    @input home_goals, away_goals
    @return string which indicates the result
    '''
    if home_goals > away_goals:
        return ('Home Win')
    elif home_goals == away_goals:
        return ('Draw')
    elif home_goals < away_goals:
        return ('Away Win')


def actual_result_encode(home_goals, away_goals):
    '''
    @input home_goals, away_goals
    @return integer which indicates the result (1=home win, 2=draw, 3=away win)
    '''
    if home_goals > away_goals:
        return (1)
    elif home_goals == away_goals:
        return (2)
    elif home_goals < away_goals:
        return (3)
# -

# We now compare the chances of predicting the result of the game correctly
# just by looking at the odds.

def check_prediction(data):
    '''
    Share of games whose odds-implied prediction matches the actual result.
    @input data - dataframe with odd_home/odd_draw/odd_away and
                  home_goals/away_goals columns
    @return formatted percentage string, e.g. '53.27400000% correct'
    @raises ValueError on an empty dataframe (avoids division by zero)
    '''
    if data.shape[0] == 0:
        raise ValueError('check_prediction needs a non-empty dataframe')
    correct = 0
    # itertuples replaces the original O(n) per-row slicing with .item() calls.
    for row in data.itertuples(index=False):
        predicted = odds_pred_result(row.odd_home, row.odd_draw, row.odd_away)
        actual = actual_result(row.home_goals, row.away_goals)
        if predicted == actual:
            correct += 1
    # BUG FIX: the original formatted the raw fraction (e.g. 0.53) but labelled
    # it '% correct'; scale to an actual percentage.
    return str('%.8f' % (100 * correct / data.shape[0])) + str('% correct')

check_prediction(odds_data)

# So, if we try to predict results purely on the basis of the odds, we will be
# right in just a little more than half of the cases.

# ### Another way of Predicting
# We will try to use the odds and the differences among the odds to predict
# the result of a match.
# Feature matrix: goal columns plus the three outcome odds (the goal columns
# are dropped again below once the differences are computed).
# NOTE: the original assigned `x_var = odds_data.iloc[:,6:11]` twice; the
# duplicate is removed, and .copy() avoids SettingWithCopyWarning on the
# column inserts below.
x_var = odds_data.iloc[:, 6:11].copy()
x_var.head()

# Encode the actual result of every game (1=home win, 2=draw, 3=away win).
# zip over the two columns replaces the original O(n) per-row slicing loop.
result = [actual_result_encode(home, away)
          for home, away in zip(odds_data['home_goals'], odds_data['away_goals'])]
y = pd.DataFrame(result)

# Absolute pairwise differences between the odds as extra features.
x_var['diff_h_d'] = abs(x_var['odd_home'] - x_var['odd_draw'])
x_var['diff_d_a'] = abs(x_var['odd_draw'] - x_var['odd_away'])
x_var['diff_h_a'] = abs(x_var['odd_home'] - x_var['odd_away'])
x_var = x_var.drop(['home_goals', 'away_goals'], axis=1)
x_var.head()

# We will visualise the results of our predictions using a confusion matrix.
# It shows how many of our guesses were right and how many were wrong.

def confusion_matrix_model(model_used):
    '''
    Generates a labelled confusion matrix (DataFrame) for the used model,
    evaluated on the training data.
    NOTE: relies on the module-level x_train / y_train created below.
    @input model_used - a fitted sklearn classifier
    '''
    cm = confusion_matrix(y_train, model_used.predict(x_train))
    cm = pd.DataFrame(cm)
    cm.columns = ['Predicted Home Win', 'Predicted Draw', 'Predicted Away Win']
    cm.index = ['Actual Home Win', 'Actual Draw', 'Actual Away Win']
    return cm

x_train, x_test, y_train, y_test = train_test_split(x_var, y, test_size=0.2,
                                                    random_state=0)

# #### Logistic Regression

log_reg = LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=10000)
log_reg.fit(x_train, y_train.values.ravel())
print('Logistic Regression Accuracy: {}'.format(accuracy_score(y_train, log_reg.predict(x_train))))
confusion_matrix_model(log_reg)

# The logistic regression did not give us a much better percentage compared to
# the odds. It is true that our dataset is not so big, so if we add cross
# validation, maybe the accuracy will improve. We will use K-Fold cross
# validation with 10 bins.
# Shared 10-fold splitter so every model is scored on the same partitions.
k_fold = KFold(n_splits=10, shuffle=True, random_state=0)


def cross_validate_score(model):
    """Mean 10-fold cross-validated accuracy of *model* on the training data."""
    return np.mean(cross_val_score(model, x_train, y_train.values.ravel(),
                                   cv=k_fold, scoring="accuracy"))


print(f'Logistic Regression Cross Validation Accuracy: {cross_validate_score(log_reg)}')

# Given that the cross-validation has made tests on all of our data, the
# accuracy metric of the second try should be closer to the truth, and we can
# see that the regression model is even more error prone.

# #### Decision Tree Regression
# A decision tree builds regression or classification models in the form of a
# tree structure. It breaks a dataset down into smaller and smaller subsets
# while an associated decision tree is incrementally developed. The final
# result is a tree with decision nodes and leaf nodes. As suggested by <NAME>
# in the article
# [Decision Trees Are Usually Better Than Logistic Regression](https://www.displayr.com/decision-trees-are-usually-better-than-logistic-regression/Decision),
# decision trees, although more memory expensive, tend to predict better than
# logistic regression. Let us see whether this holds for our dataset.

decision_tree = DecisionTreeClassifier()
decision_tree.fit(x_train, y_train)
print(f'Decision Tree Regression Accuracy (without cross validation): {accuracy_score(y_train, decision_tree.predict(x_train))}')
print(f'Decision Tree Regression Cross Validation Accuracy: {cross_validate_score(decision_tree)}')
confusion_matrix_model(decision_tree)

# For our dataset without cross validation the score is really high, but under
# cross validation the average over the ten folds is much lower, showing that
# in our case the tree may even be more error prone.
# #### Support Vector Machine
# The objective of the support vector machine algorithm is to find the
# hyperplane with the maximum margin in an N-dimensional space (N — the number
# of features) that distinctly classifies the data points.

svm_reg = svm.SVC(gamma='auto')
svm_reg.fit(x_train, y_train.values.ravel())
print(f'SVM Regression Accuracy (without cross validation): {accuracy_score(y_train, svm_reg.predict(x_train))}')
print(f'SVM Regression Cross Validation Accuracy: {cross_validate_score(svm_reg)}')
confusion_matrix_model(svm_reg)

# #### K-Nearest Neighbour Regression
# K Nearest Neighbour is an algorithm that stores all the available cases and
# classifies new data based on a similarity measure: a data point is assigned
# the class of its neighbours.

KNN = KNeighborsClassifier(n_neighbors=100)
KNN.fit(x_train, y_train.values.ravel())
print(f'KNN Regression Accuracy (without cross validation):{accuracy_score(y_train, KNN.predict(x_train))}')
print(f'KNN Regression Cross Validation Accuracy: {cross_validate_score(KNN)}')
confusion_matrix_model(KNN)

# The predictions are still worse than those made from the odds alone, so
# there is still no better way of predicting the games.
DataScience/z_project/Football Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Initial setup

# + executionInfo={"elapsed": 375, "status": "ok", "timestamp": 1646079030153, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gju19TlDpXtCMx8eR63MmP-uY5vmiu9noZCmgzY=s64", "userId": "01394071077629115142"}, "user_tz": -60} id="KBW-jkcl7T9U"
import sys
sys.path.append("../")

# import libraries
from os import path, listdir
import numpy as np
import pandas as pd
import seaborn
import utils.data_utils as data_utils
import matplotlib.pyplot as plt

# + executionInfo={"elapsed": 3, "status": "ok", "timestamp": 1646079040494, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gju19TlDpXtCMx8eR63MmP-uY5vmiu9noZCmgzY=s64", "userId": "01394071077629115142"}, "user_tz": -60} id="MTMcu0d9OKhg"
# display all columns in command outputs (e.g. head())
pd.set_option('display.max_columns', None)

# + [markdown] id="8w1ylQ7JNnVi"
# # EDA
# -

result = data_utils.load_financial(encode_ticker=True, raw=False)
print(result.describe())
print(result.head())
print(result.shape)
# BUG FIX: the original wrapped this in print(); DataFrame.info() prints its
# report itself and returns None, so the old cell also printed "None".
result.info()

# +
# This block prints the correlation table and heatmap for the result df
print(result.corr())
seaborn.heatmap(result.corr());
# -


def _plot_histogram(series, title, facecolor, value_range):
    """Plot a styled 100-bin histogram of *series* restricted to *value_range*.

    Extracted helper: the original repeated this cell three times with only
    the column, colour and range changing.
    """
    plt.figure(figsize=(7, 7))
    # NOTE(review): 'seaborn-whitegrid' was removed in matplotlib >= 3.6
    # (renamed 'seaborn-v0_8-whitegrid') -- confirm the pinned version.
    plt.style.use('seaborn-whitegrid')
    plt.hist(series, bins=100, facecolor=facecolor, edgecolor='#169acf',
             linewidth=0.5, range=value_range)
    plt.title(title)
    plt.xlabel('Bins')
    plt.ylabel('Values')
    plt.show()


_plot_histogram(result['SPX_Close'], 'SPX_Close', '#2ab0ff', (-2, 2))

_plot_histogram(result['Close'], 'Close', '#33ff99', (-5e-04, -3e-04))

_plot_histogram(result['Volume'], 'Volume', '#ff8000', (-0.002, 0.002))

# +
# Save file in csv format
#normalised_result.to_csv('data/normalised_result.csv')
data_visualisation/EDA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="KesQRvxkm1PH" import os PROJECT = "PROJECT" # REPLACE WITH YOUR PROJECT ID BUCKET = "BUCKET" # REPLACE WITH A BUCKET NAME (PUT YOUR PROJECT ID AND WE CREATE THE BUCKET ITSELF NEXT) REGION = "us-central1" # REPLACE WITH YOUR REGION e.g. us-central1 # do not change these os.environ["PROJECT"] = PROJECT os.environ["BUCKET"] = BUCKET os.environ["REGION"] = REGION # + colab={} colab_type="code" id="TLTq9A_dm1PL" outputId="5619ad91-841f-41ee-dc52-fd248fa2ad1f" language="bash" # gcloud config set project $PROJECT # gcloud config set compute/region $REGION # + colab={} colab_type="code" id="L4MZZN-Om1PQ" # # %%bash # OUTDIR=gs://${BUCKET}/taxifare/trained_model/yellow_trips # JOBNAME=taxifare_yellow_trips_$(date -u +%y%m%d_%H%M%S) # # echo $OUTDIR $REGION $JOBNAME # # Clear the Cloud Storage Bucket used for the training job # gsutil -m rm -rf $OUTDIR # gcloud ml-engine jobs submit training $JOBNAME \ # --region=$REGION \ # --module-name=trainer.task \ # --package-path=${PWD}/cloud_composer_automated_ml_pipeline_taxifare_module/trainer \ # --job-dir=$OUTDIR \ # --staging-bucket=gs://$BUCKET \ # --scale-tier=BASIC \ # --runtime-version="1.13" \ # -- \ # --train_data_paths=gs://$BUCKET/taxifare/data/yellow_trips/train-* \ # --eval_data_paths=gs://$BUCKET/taxifare/data/yellow_trips/valid-* \ # --output_dir=$OUTDIR \ # --train_steps=500 # + colab={} colab_type="code" id="HxR6rmGBm1PT" language="bash" # cd cloud_composer_automated_ml_pipeline_taxifare_module # touch README.md # python setup.py sdist # + colab={} colab_type="code" id="fjzhe-U2m1PV" language="bash" # gsutil cp cloud_composer_automated_ml_pipeline_taxifare_module/dist/taxifare-0.1.tar.gz gs://$BUCKET/taxifare/code/ # + [markdown] colab_type="text" 
id="O241YkN7m1PX" # *** # # Part Two: Setup a scheduled workflow with Cloud Composer # In this section you will complete a partially written training.py DAG file and copy it to the DAGS folder in your Composer instance. # + [markdown] colab_type="text" id="BczwG3zsm1PY" # ## Copy your Airflow bucket name # 1. Navigate to your Cloud Composer [instance](https://console.cloud.google.com/composer/environments?project=)<br/><br/> # 2. Select __DAGs Folder__<br/><br/> # 3. You will be taken to the Google Cloud Storage bucket that Cloud Composer has created automatically for your Airflow instance<br/><br/> # 4. __Copy the bucket name__ into the variable below (example: us-central1-composer-08f6edeb-bucket) # + colab={} colab_type="code" id="1r6tYEaDm1PZ" AIRFLOW_BUCKET = "AIRFLOW_BUCKET" # REPLACE WITH AIRFLOW BUCKET NAME os.environ["AIRFLOW_BUCKET"] = AIRFLOW_BUCKET # + [markdown] colab_type="text" id="7viNmxubm1Pb" # ## Complete the training.py DAG file # Apache Airflow orchestrates tasks out to other services through a [DAG (Directed Acyclic Graph)](https://airflow.apache.org/concepts.html) file which specifies what services to call, what to do, and when to run these tasks. DAG files are written in python and are loaded automatically into Airflow once present in the Airflow/dags/ folder in your Cloud Composer bucket. # # Execute the code cells to create the files. # - # ## Multi # + # %%writefile airflow/dags/taxifare_multi.py # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """DAG definition for taxifare automated pipeline.""" import airflow from airflow import DAG # Reference for all available airflow operators: # https://github.com/apache/incubator-airflow/tree/master/airflow/contrib/operators from airflow.contrib.operators.bigquery_check_operator import BigQueryCheckOperator from airflow.contrib.operators.bigquery_operator import BigQueryOperator from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import BranchPythonOperator from airflow.operators.dummy_operator import DummyOperator from airflow.hooks.base_hook import BaseHook from airflow.contrib.operators.mlengine_operator import MLEngineTrainingOperator, MLEngineModelOperator, MLEngineVersionOperator from airflow.models import TaskInstance import datetime import logging def _get_project_id(): """Get project ID from default GCP connection.""" extras = BaseHook.get_connection("google_cloud_default").extra_dejson key = "extra__google_cloud_platform__project" if key in extras: project_id = extras[key] else: raise ("Must configure project_id in google_cloud_default " "connection from Airflow Console") return project_id PROJECT_ID = _get_project_id() # Data set constants, used in BigQuery tasks. You can change these # to conform to your data. # Specify your source BigQuery project, dataset, and table names SOURCE_BQ_PROJECT = "nyc-tlc" SOURCE_DATASET_TABLE_NAMES = "yellow.trips,green.trips_2014,green.trips_2015".split(",") # Specify your destination BigQuery dataset DESTINATION_DATASET = "taxifare" # GCS bucket names and region, can also be changed. BUCKET = "gs://" + PROJECT_ID + "-bucket" REGION = "us-east1" # # The code package name comes from the model code in the wals_ml_engine # # directory of the solution code base. 
PACKAGE_URI = BUCKET + "/taxifare/code/taxifare-0.1.tar.gz" JOB_DIR = BUCKET + "/jobs" default_args = { "owner": "airflow", "depends_on_past": False, "start_date": airflow.utils.dates.days_ago(2), "email": ["<EMAIL>"], "email_on_failure": True, "email_on_retry": False, "retries": 5, "retry_delay": datetime.timedelta(minutes=5) } # Default schedule interval using cronjob syntax - can be customized here # or in the Airflow console. # Specify a schedule interval in CRON syntax to run once a day at 2100 hours (9pm) # Reference: https://airflow.apache.org/scheduler.html schedule_interval = "00 21 * * *" # Title your DAG dag = DAG( "taxifare_multi", default_args=default_args, schedule_interval=None ) dag.doc_md = __doc__ # # # Task Definition # # for model in SOURCE_DATASET_TABLE_NAMES: # BigQuery data query bql=""" SELECT (tolls_amount + fare_amount) AS fare_amount, EXTRACT(DAYOFWEEK FROM pickup_datetime) * 1.0 AS dayofweek, EXTRACT(HOUR FROM pickup_datetime) * 1.0 AS hourofday, pickup_longitude AS pickuplon, pickup_latitude AS pickuplat, dropoff_longitude AS dropofflon, dropoff_latitude AS dropofflat, passenger_count*1.0 AS passengers, CONCAT(CAST(pickup_datetime AS STRING), CAST(pickup_longitude AS STRING), CAST(pickup_latitude AS STRING), CAST(dropoff_latitude AS STRING), CAST(dropoff_longitude AS STRING)) AS key FROM `{0}.{1}` WHERE trip_distance > 0 AND fare_amount >= 2.5 AND pickup_longitude > -78 AND pickup_longitude < -70 AND dropoff_longitude > -78 AND dropoff_longitude < -70 AND pickup_latitude > 37 AND pickup_latitude < 45 AND dropoff_latitude > 37 AND dropoff_latitude < 45 AND passenger_count > 0 AND rand() < 0.00001 """ bql = bql.format(SOURCE_BQ_PROJECT, model) bql_train = "SELECT * EXCEPT (key) FROM({0}) WHERE ABS(MOD(FARM_FINGERPRINT(key), 5)) < 4".format(bql) bql_eval = "SELECT * EXCEPT (key) FROM({0}) WHERE ABS(MOD(FARM_FINGERPRINT(key), 5)) = 4".format(bql) # Complete the BigQueryOperator task to truncate the table if it already exists before writing 
# Reference: https://airflow.apache.org/integration.html#bigqueryoperator bq_train_data_op = BigQueryOperator( task_id="bq_train_data_{}_task".format(model.replace(".","_")), bql=bql_train, destination_dataset_table="{}.{}_train_data".format(DESTINATION_DATASET, model.replace(".","_")), write_disposition="WRITE_TRUNCATE", # specify to truncate on writes use_legacy_sql=False, dag=dag ) bq_eval_data_op = BigQueryOperator( task_id="bq_eval_data_{}_task".format(model.replace(".","_")), bql=bql_eval, destination_dataset_table="{}.{}_eval_data".format(DESTINATION_DATASET, model.replace(".","_")), write_disposition="WRITE_TRUNCATE", # specify to truncate on writes use_legacy_sql=False, dag=dag ) sql = """ SELECT COUNT(*) FROM [{0}:{1}.{2}] """ # Check to make sure that the data tables won"t be empty bq_check_train_data_op = BigQueryCheckOperator( task_id="bq_check_train_data_{}_task".format(model.replace(".","_")), sql=sql.format(PROJECT_ID, DESTINATION_DATASET, model.replace(".","_") + "_train_data"), dag=dag ) bq_check_eval_data_op = BigQueryCheckOperator( task_id="bq_check_eval_data_{}_task".format(model.replace(".","_")), sql=sql.format(PROJECT_ID, DESTINATION_DATASET, model.replace(".","_") + "_eval_data"), dag=dag ) # BigQuery training data export to GCS bash_remove_old_data_op = BashOperator( task_id="bash_remove_old_data_{}_task".format(model.replace(".","_")), bash_command="if gsutil ls {0}/taxifare/data/{1} 2> /dev/null; then gsutil -m rm -rf {0}/taxifare/data/{1}/*; else true; fi".format(BUCKET, model.replace(".","_")), dag=dag ) # Takes a BigQuery dataset and table as input and exports it to GCS as a CSV train_files = BUCKET + "/taxifare/data/" bq_export_gcs_train_csv_op = BigQueryToCloudStorageOperator( task_id="bq_export_gcs_train_csv_{}_task".format(model.replace(".","_")), source_project_dataset_table="{}.{}_train_data".format(DESTINATION_DATASET, model.replace(".","_")), destination_cloud_storage_uris=[train_files + 
"{}/train-*.csv".format(model.replace(".","_"))], export_format="CSV", print_header=False, dag=dag ) eval_files = BUCKET + "/taxifare/data/" bq_export_gcs_eval_csv_op = BigQueryToCloudStorageOperator( task_id="bq_export_gcs_eval_csv_{}_task".format(model.replace(".","_")), source_project_dataset_table="{}.{}_eval_data".format(DESTINATION_DATASET, model.replace(".","_")), destination_cloud_storage_uris=[eval_files + "{}/eval-*.csv".format(model.replace(".","_"))], export_format="CSV", print_header=False, dag=dag ) # ML Engine training job job_id = "taxifare_{}_{}".format(model.replace(".","_"), datetime.datetime.now().strftime("%Y%m%d%H%M%S")) output_dir = BUCKET + "/taxifare/trained_model/{}".format(model.replace(".","_")) job_dir = JOB_DIR + "/" + job_id training_args = [ "--job-dir", job_dir, "--train_data_paths", train_files, "--eval_data_paths", eval_files, "--output_dir", output_dir, "--train_steps", str(500), "--train_batch_size", str(32), "--eval_steps", str(500), "--eval_batch_size", str(32), "--nbuckets", str(8), "--hidden_units", "128,32,4" ] # Reference: https://airflow.apache.org/integration.html#cloud-ml-engine ml_engine_training_op = MLEngineTrainingOperator( task_id="ml_engine_training_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, job_id=job_id, package_uris=[PACKAGE_URI], training_python_module="trainer.task", training_args=training_args, region=REGION, scale_tier="BASIC", runtime_version="1.13", python_version="3.5", dag=dag ) MODEL_NAME = "taxifare_" MODEL_VERSION = "v1" MODEL_LOCATION = BUCKET + "/taxifare/saved_model/" bash_remove_old_saved_model_op = BashOperator( task_id="bash_remove_old_saved_model_{}_task".format(model.replace(".","_")), bash_command="if gsutil ls {0} 2> /dev/null; then gsutil -m rm -rf {0}/*; else true; fi".format(MODEL_LOCATION + model.replace(".","_")), dag=dag ) bash_copy_new_saved_model_op = BashOperator( task_id="bash_copy_new_saved_model_{}_task".format(model.replace(".","_")), bash_command="gsutil 
-m rsync -d -r `gsutil ls {0}/export/exporter/ | tail -1` {1}".format(output_dir, MODEL_LOCATION + model.replace(".","_")), dag=dag ) # Create model on ML-Engine bash_ml_engine_models_list_op = BashOperator( task_id="bash_ml_engine_models_list_{}_task".format(model.replace(".","_")), xcom_push=True, bash_command="gcloud ml-engine models list --filter='name:{0}'".format(MODEL_NAME + model.replace(".","_")), dag=dag ) def check_if_model_already_exists(templates_dict, **kwargs): cur_model = templates_dict["model"].replace(".","_") ml_engine_models_list = kwargs["ti"].xcom_pull(task_ids="bash_ml_engine_models_list_{}_task".format(cur_model)) logging.info("check_if_model_already_exists: {}: ml_engine_models_list = \n{}".format(cur_model, ml_engine_models_list)) create_model_task = "ml_engine_create_model_{}_task".format(cur_model) dont_create_model_task = "dont_create_model_dummy_branch_{}_task".format(cur_model) if len(ml_engine_models_list) == 0 or ml_engine_models_list == "Listed 0 items.": return create_model_task return dont_create_model_task check_if_model_already_exists_op = BranchPythonOperator( task_id="check_if_model_already_exists_{}_task".format(model.replace(".","_")), templates_dict={"model": model.replace(".","_")}, python_callable=check_if_model_already_exists, provide_context=True, dag=dag ) ml_engine_create_model_op = MLEngineModelOperator( task_id="ml_engine_create_model_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, model={"name": MODEL_NAME + model.replace(".","_")}, operation="create", dag=dag ) create_model_dummy_op = DummyOperator( task_id="create_model_dummy_{}_task".format(model.replace(".","_")), trigger_rule="all_done", dag=dag ) dont_create_model_dummy_branch_op = DummyOperator( task_id="dont_create_model_dummy_branch_{}_task".format(model.replace(".","_")), dag=dag ) dont_create_model_dummy_op = DummyOperator( task_id="dont_create_model_dummy_{}_task".format(model.replace(".","_")), trigger_rule="all_done", dag=dag ) # 
Create version of model on ML-Engine bash_ml_engine_versions_list_op = BashOperator( task_id="bash_ml_engine_versions_list_{}_task".format(model.replace(".","_")), xcom_push=True, bash_command="gcloud ml-engine versions list --model {0} --filter='name:{1}'".format(MODEL_NAME + model.replace(".","_"), MODEL_VERSION), dag=dag ) def check_if_model_version_already_exists(templates_dict, **kwargs): cur_model = templates_dict["model"].replace(".","_") ml_engine_versions_list = kwargs["ti"].xcom_pull(task_ids="bash_ml_engine_versions_list_{}_task".format(cur_model)) logging.info("check_if_model_version_already_exists: {}: ml_engine_versions_list = \n{}".format(cur_model, ml_engine_versions_list)) create_version_task = "ml_engine_create_version_{}_task".format(cur_model) create_other_version_task = "ml_engine_create_other_version_{}_task".format(cur_model) if len(ml_engine_versions_list) == 0 or ml_engine_versions_list == "Listed 0 items.": return create_version_task return create_other_version_task check_if_model_version_already_exists_op = BranchPythonOperator( task_id="check_if_model_version_already_exists_{}_task".format(model.replace(".","_")), templates_dict={"model": model.replace(".","_")}, python_callable=check_if_model_version_already_exists, provide_context=True, dag=dag ) OTHER_VERSION_NAME = "v_{0}".format(datetime.datetime.now().strftime("%Y%m%d%H%M%S")[0:12]) ml_engine_create_version_op = MLEngineVersionOperator( task_id="ml_engine_create_version_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, model_name=MODEL_NAME + model.replace(".","_"), version_name=MODEL_VERSION, version={ "name": MODEL_VERSION, "deploymentUri": MODEL_LOCATION + model.replace(".","_"), "runtimeVersion": "1.13", "framework": "TENSORFLOW", "pythonVersion": "3.5", }, operation="create", dag=dag ) ml_engine_create_other_version_op = MLEngineVersionOperator( task_id="ml_engine_create_other_version_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, 
model_name=MODEL_NAME + model.replace(".","_"), version_name=OTHER_VERSION_NAME, version={ "name": OTHER_VERSION_NAME, "deploymentUri": MODEL_LOCATION + model.replace(".","_"), "runtimeVersion": "1.13", "framework": "TENSORFLOW", "pythonVersion": "3.5", }, operation="create", dag=dag ) ml_engine_set_default_version_op = MLEngineVersionOperator( task_id="ml_engine_set_default_version_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, model_name=MODEL_NAME + model.replace(".","_"), version_name=MODEL_VERSION, version={"name": MODEL_VERSION}, operation="set_default", dag=dag ) ml_engine_set_default_other_version_op = MLEngineVersionOperator( task_id="ml_engine_set_default_other_version_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, model_name=MODEL_NAME + model.replace(".","_"), version_name=OTHER_VERSION_NAME, version={"name": OTHER_VERSION_NAME}, operation="set_default", dag=dag ) # Build dependency graph, set_upstream dependencies for all tasks bq_check_train_data_op.set_upstream(bq_train_data_op) bq_check_eval_data_op.set_upstream(bq_eval_data_op) bash_remove_old_data_op.set_upstream([bq_check_train_data_op, bq_check_eval_data_op]) bq_export_gcs_train_csv_op.set_upstream([bash_remove_old_data_op]) bq_export_gcs_eval_csv_op.set_upstream([bash_remove_old_data_op]) ml_engine_training_op.set_upstream([bq_export_gcs_train_csv_op, bq_export_gcs_eval_csv_op]) bash_remove_old_saved_model_op.set_upstream(ml_engine_training_op) bash_copy_new_saved_model_op.set_upstream(bash_remove_old_saved_model_op) bash_ml_engine_models_list_op.set_upstream(ml_engine_training_op) check_if_model_already_exists_op.set_upstream(bash_ml_engine_models_list_op) ml_engine_create_model_op.set_upstream(check_if_model_already_exists_op) create_model_dummy_op.set_upstream(ml_engine_create_model_op) dont_create_model_dummy_branch_op.set_upstream(check_if_model_already_exists_op) dont_create_model_dummy_op.set_upstream(dont_create_model_dummy_branch_op) 
bash_ml_engine_versions_list_op.set_upstream([dont_create_model_dummy_op, create_model_dummy_op]) check_if_model_version_already_exists_op.set_upstream(bash_ml_engine_versions_list_op) ml_engine_create_version_op.set_upstream([bash_copy_new_saved_model_op, check_if_model_version_already_exists_op]) ml_engine_create_other_version_op.set_upstream([bash_copy_new_saved_model_op, check_if_model_version_already_exists_op]) ml_engine_set_default_version_op.set_upstream(ml_engine_create_version_op) ml_engine_set_default_other_version_op.set_upstream(ml_engine_create_other_version_op) # - # ## Module # + # %%writefile airflow/dags/module/taxifare_module.py # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""DAG definition for taxifare automated pipeline.""" import airflow from airflow import DAG # Reference for all available airflow operators: # https://github.com/apache/incubator-airflow/tree/master/airflow/contrib/operators from airflow.hooks.base_hook import BaseHook from airflow.models import TaskInstance import datetime from module import preprocess from module import training from module import deploy def _get_project_id(): """Get project ID from default GCP connection.""" extras = BaseHook.get_connection("google_cloud_default").extra_dejson key = "extra__google_cloud_platform__project" if key in extras: project_id = extras[key] else: raise ("Must configure project_id in google_cloud_default " "connection from Airflow Console") return project_id # Constants # Get project ID and GCS bucket PROJECT_ID = _get_project_id() BUCKET = "gs://" + PROJECT_ID + "-bucket" # Specify your source BigQuery dataset and table names SOURCE_DATASET_TABLE_NAMES = "yellow.trips,green.trips_2014,green.trips_2015".split(",") # Where to write out data in GCS DATA_DIR = BUCKET + "/taxifare/data/" # Base model parameters MODEL_NAME = "taxifare_" MODEL_VERSION = "v1" MODEL_LOCATION = BUCKET + "/taxifare/saved_model/" default_args = { "owner": "airflow", "depends_on_past": False, "start_date": airflow.utils.dates.days_ago(2), "email": ["<EMAIL>"], "email_on_failure": True, "email_on_retry": False, "retries": 5, "retry_delay": datetime.timedelta(minutes=5) } # Default schedule interval using cronjob syntax - can be customized here # or in the Airflow console. 
# Specify a schedule interval in CRON syntax to run once a day at 2100 hours (9pm) # Reference: https://airflow.apache.org/scheduler.html schedule_interval = "00 21 * * *" # Title your DAG dag = DAG( "taxifare_module", default_args=default_args, schedule_interval=None ) dag.doc_md = __doc__ # # # Task Definition # # for model in SOURCE_DATASET_TABLE_NAMES: (bq_export_gcs_train_csv_op, bq_export_gcs_eval_csv_op) = preprocess.preprocess_tasks( model, dag, PROJECT_ID, BUCKET, DATA_DIR) (ml_engine_training_op, bash_copy_new_saved_model_op) = training.training_tasks( model, dag, PROJECT_ID, BUCKET, DATA_DIR, MODEL_NAME, MODEL_VERSION, MODEL_LOCATION) (bash_ml_engine_models_list_op, check_if_model_version_already_exists_op, ml_engine_create_version_op, ml_engine_create_other_version_op) = deploy.deploy_tasks( model, dag, PROJECT_ID, MODEL_NAME, MODEL_VERSION, MODEL_LOCATION) # Build dependency graph, set_upstream dependencies for all tasks ml_engine_training_op.set_upstream([bq_export_gcs_train_csv_op, bq_export_gcs_eval_csv_op]) bash_ml_engine_models_list_op.set_upstream(ml_engine_training_op) ml_engine_create_version_op.set_upstream([bash_copy_new_saved_model_op, check_if_model_version_already_exists_op]) ml_engine_create_other_version_op.set_upstream([bash_copy_new_saved_model_op, check_if_model_version_already_exists_op]) # + # %%writefile airflow/dags/module/preprocess.py from airflow.contrib.operators.bigquery_operator import BigQueryOperator from airflow.contrib.operators.bigquery_check_operator import BigQueryCheckOperator from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator from airflow.operators.bash_operator import BashOperator def preprocess_tasks(model, dag, PROJECT_ID, BUCKET, DATA_DIR): # Constants # Specify your source BigQuery project, dataset, and table names SOURCE_BQ_PROJECT = "nyc-tlc" SOURCE_DATASET_TABLE_NAMES = "yellow.trips,green.trips_2014,green.trips_2015".split(",") # Specify your destination BigQuery dataset 
DESTINATION_DATASET = "taxifare" # BigQuery data query bql=""" SELECT (tolls_amount + fare_amount) AS fare_amount, EXTRACT(DAYOFWEEK FROM pickup_datetime) * 1.0 AS dayofweek, EXTRACT(HOUR FROM pickup_datetime) * 1.0 AS hourofday, pickup_longitude AS pickuplon, pickup_latitude AS pickuplat, dropoff_longitude AS dropofflon, dropoff_latitude AS dropofflat, passenger_count*1.0 AS passengers, CONCAT(CAST(pickup_datetime AS STRING), CAST(pickup_longitude AS STRING), CAST(pickup_latitude AS STRING), CAST(dropoff_latitude AS STRING), CAST(dropoff_longitude AS STRING)) AS key FROM `{0}.{1}` WHERE trip_distance > 0 AND fare_amount >= 2.5 AND pickup_longitude > -78 AND pickup_longitude < -70 AND dropoff_longitude > -78 AND dropoff_longitude < -70 AND pickup_latitude > 37 AND pickup_latitude < 45 AND dropoff_latitude > 37 AND dropoff_latitude < 45 AND passenger_count > 0 AND rand() < 0.00001 """ bql = bql.format(SOURCE_BQ_PROJECT, model) bql_train = "SELECT * EXCEPT (key) FROM({0}) WHERE ABS(MOD(FARM_FINGERPRINT(key), 5)) < 4".format(bql) bql_eval = "SELECT * EXCEPT (key) FROM({0}) WHERE ABS(MOD(FARM_FINGERPRINT(key), 5)) = 4".format(bql) # Complete the BigQueryOperator task to truncate the table if it already exists before writing # Reference: https://airflow.apache.org/integration.html#bigqueryoperator bq_train_data_op = BigQueryOperator( task_id="bq_train_data_{}_task".format(model.replace(".","_")), bql=bql_train, destination_dataset_table="{}.{}_train_data".format(DESTINATION_DATASET, model.replace(".","_")), write_disposition="WRITE_TRUNCATE", # specify to truncate on writes use_legacy_sql=False, dag=dag ) bq_eval_data_op = BigQueryOperator( task_id="bq_eval_data_{}_task".format(model.replace(".","_")), bql=bql_eval, destination_dataset_table="{}.{}_eval_data".format(DESTINATION_DATASET, model.replace(".","_")), write_disposition="WRITE_TRUNCATE", # specify to truncate on writes use_legacy_sql=False, dag=dag ) sql = """ SELECT COUNT(*) FROM [{0}:{1}.{2}] """ # Check to 
make sure that the data tables won"t be empty bq_check_train_data_op = BigQueryCheckOperator( task_id="bq_check_train_data_{}_task".format(model.replace(".","_")), sql=sql.format(PROJECT_ID, DESTINATION_DATASET, model.replace(".","_") + "_train_data"), dag=dag ) bq_check_eval_data_op = BigQueryCheckOperator( task_id="bq_check_eval_data_{}_task".format(model.replace(".","_")), sql=sql.format(PROJECT_ID, DESTINATION_DATASET, model.replace(".","_") + "_eval_data"), dag=dag ) # BigQuery training data export to GCS bash_remove_old_data_op = BashOperator( task_id="bash_remove_old_data_{}_task".format(model.replace(".","_")), bash_command="if gsutil ls {0}/taxifare/data/{1} 2> /dev/null; then gsutil -m rm -rf {0}/taxifare/data/{1}/*; else true; fi".format(BUCKET, model.replace(".","_")), dag=dag ) # Takes a BigQuery dataset and table as input and exports it to GCS as a CSV bq_export_gcs_train_csv_op = BigQueryToCloudStorageOperator( task_id="bq_export_gcs_train_csv_{}_task".format(model.replace(".","_")), source_project_dataset_table="{}.{}_train_data".format(DESTINATION_DATASET, model.replace(".","_")), destination_cloud_storage_uris=[DATA_DIR + "{}/train-*.csv".format(model.replace(".","_"))], export_format="CSV", print_header=False, dag=dag ) bq_export_gcs_eval_csv_op = BigQueryToCloudStorageOperator( task_id="bq_export_gcs_eval_csv_{}_task".format(model.replace(".","_")), source_project_dataset_table="{}.{}_eval_data".format(DESTINATION_DATASET, model.replace(".","_")), destination_cloud_storage_uris=[DATA_DIR + "{}/eval-*.csv".format(model.replace(".","_"))], export_format="CSV", print_header=False, dag=dag ) # Build dependency graph, set_upstream dependencies for all tasks bq_check_train_data_op.set_upstream(bq_train_data_op) bq_check_eval_data_op.set_upstream(bq_eval_data_op) bash_remove_old_data_op.set_upstream([bq_check_train_data_op, bq_check_eval_data_op]) bq_export_gcs_train_csv_op.set_upstream(bash_remove_old_data_op) 
bq_export_gcs_eval_csv_op.set_upstream(bash_remove_old_data_op) return (bq_export_gcs_train_csv_op, bq_export_gcs_eval_csv_op) # + # %%writefile airflow/dags/module/training.py import datetime from airflow.contrib.operators.mlengine_operator import MLEngineTrainingOperator from airflow.operators.bash_operator import BashOperator def training_tasks(model, dag, PROJECT_ID, BUCKET, DATA_DIR, MODEL_NAME, MODEL_VERSION, MODEL_LOCATION): # Constants # The code package name comes from the model code in the module directory REGION = "us-east1" PACKAGE_URI = BUCKET + "/taxifare/code/taxifare-0.1.tar.gz" JOB_DIR = BUCKET + "/jobs" # ML Engine training job job_id = "taxifare_{}_{}".format(model.replace(".","_"), datetime.datetime.now().strftime("%Y%m%d%H%M%S")) train_files = DATA_DIR + "{}/train-*.csv".format(model.replace(".","_")) eval_files = DATA_DIR + "{}/eval-*.csv".format(model.replace(".","_")) output_dir = BUCKET + "/taxifare/trained_model/{}".format(model.replace(".","_")) job_dir = JOB_DIR + "/" + job_id training_args = [ "--job-dir", job_dir, "--train_data_paths", train_files, "--eval_data_paths", eval_files, "--output_dir", output_dir, "--train_steps", str(500), "--train_batch_size", str(32), "--eval_steps", str(500), "--eval_batch_size", str(32), "--nbuckets", str(8), "--hidden_units", "128,32,4" ] # Reference: https://airflow.apache.org/integration.html#cloud-ml-engine ml_engine_training_op = MLEngineTrainingOperator( task_id="ml_engine_training_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, job_id=job_id, package_uris=[PACKAGE_URI], training_python_module="trainer.task", training_args=training_args, region=REGION, scale_tier="BASIC", runtime_version="1.13", python_version="3.5", dag=dag ) bash_remove_old_saved_model_op = BashOperator( task_id="bash_remove_old_saved_model_{}_task".format(model.replace(".","_")), bash_command="if gsutil ls {0} 2> /dev/null; then gsutil -m rm -rf {0}/*; else true; fi".format(MODEL_LOCATION + 
model.replace(".","_")), dag=dag ) bash_copy_new_saved_model_op = BashOperator( task_id="bash_copy_new_saved_model_{}_task".format(model.replace(".","_")), bash_command="gsutil -m rsync -d -r `gsutil ls {0}/export/exporter/ | tail -1` {1}".format(output_dir, MODEL_LOCATION + model.replace(".","_")), dag=dag ) # Build dependency graph, set_upstream dependencies for all tasks bash_remove_old_saved_model_op.set_upstream(ml_engine_training_op) bash_copy_new_saved_model_op.set_upstream(bash_remove_old_saved_model_op) return (ml_engine_training_op, bash_copy_new_saved_model_op) # + # %%writefile airflow/dags/module/deploy.py import datetime import logging from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import BranchPythonOperator from airflow.operators.dummy_operator import DummyOperator from airflow.contrib.operators.mlengine_operator import MLEngineModelOperator, MLEngineVersionOperator def deploy_tasks(model, dag, PROJECT_ID, MODEL_NAME, MODEL_VERSION, MODEL_LOCATION): # Constants OTHER_VERSION_NAME = "v_{0}".format(datetime.datetime.now().strftime("%Y%m%d%H%M%S")[0:12]) # Create model on ML-Engine bash_ml_engine_models_list_op = BashOperator( task_id="bash_ml_engine_models_list_{}_task".format(model.replace(".","_")), xcom_push=True, bash_command="gcloud ml-engine models list --filter='name:{0}'".format(MODEL_NAME + model.replace(".","_")), dag=dag ) def check_if_model_already_exists(templates_dict, **kwargs): cur_model = templates_dict["model"].replace(".","_") ml_engine_models_list = kwargs["ti"].xcom_pull(task_ids="bash_ml_engine_models_list_{}_task".format(cur_model)) logging.info("check_if_model_already_exists: {}: ml_engine_models_list = \n{}".format(cur_model, ml_engine_models_list)) create_model_task = "ml_engine_create_model_{}_task".format(cur_model) dont_create_model_task = "dont_create_model_dummy_branch_{}_task".format(cur_model) if len(ml_engine_models_list) == 0 or ml_engine_models_list == "Listed 0 
items.": return create_model_task return dont_create_model_task check_if_model_already_exists_op = BranchPythonOperator( task_id="check_if_model_already_exists_{}_task".format(model.replace(".","_")), templates_dict={"model": model.replace(".","_")}, python_callable=check_if_model_already_exists, provide_context=True, dag=dag ) ml_engine_create_model_op = MLEngineModelOperator( task_id="ml_engine_create_model_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, model={"name": MODEL_NAME + model.replace(".","_")}, operation="create", dag=dag ) create_model_dummy_op = DummyOperator( task_id="create_model_dummy_{}_task".format(model.replace(".","_")), trigger_rule="all_done", dag=dag ) dont_create_model_dummy_branch_op = DummyOperator( task_id="dont_create_model_dummy_branch_{}_task".format(model.replace(".","_")), dag=dag ) dont_create_model_dummy_op = DummyOperator( task_id="dont_create_model_dummy_{}_task".format(model.replace(".","_")), trigger_rule="all_done", dag=dag ) # Create version of model on ML-Engine bash_ml_engine_versions_list_op = BashOperator( task_id="bash_ml_engine_versions_list_{}_task".format(model.replace(".","_")), xcom_push=True, bash_command="gcloud ml-engine versions list --model {0} --filter='name:{1}'".format(MODEL_NAME + model.replace(".","_"), MODEL_VERSION), dag=dag ) def check_if_model_version_already_exists(templates_dict, **kwargs): cur_model = templates_dict["model"].replace(".","_") ml_engine_versions_list = kwargs["ti"].xcom_pull(task_ids="bash_ml_engine_versions_list_{}_task".format(cur_model)) logging.info("check_if_model_version_already_exists: {}: ml_engine_versions_list = \n{}".format(cur_model, ml_engine_versions_list)) create_version_task = "ml_engine_create_version_{}_task".format(cur_model) create_other_version_task = "ml_engine_create_other_version_{}_task".format(cur_model) if len(ml_engine_versions_list) == 0 or ml_engine_versions_list == "Listed 0 items.": return create_version_task return 
create_other_version_task check_if_model_version_already_exists_op = BranchPythonOperator( task_id="check_if_model_version_already_exists_{}_task".format(model.replace(".","_")), templates_dict={"model": model.replace(".","_")}, python_callable=check_if_model_version_already_exists, provide_context=True, dag=dag ) ml_engine_create_version_op = MLEngineVersionOperator( task_id="ml_engine_create_version_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, model_name=MODEL_NAME + model.replace(".","_"), version_name=MODEL_VERSION, version={ "name": MODEL_VERSION, "deploymentUri": MODEL_LOCATION + model.replace(".","_"), "runtimeVersion": "1.13", "framework": "TENSORFLOW", "pythonVersion": "3.5", }, operation="create", dag=dag ) ml_engine_create_other_version_op = MLEngineVersionOperator( task_id="ml_engine_create_other_version_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, model_name=MODEL_NAME + model.replace(".","_"), version_name=OTHER_VERSION_NAME, version={ "name": OTHER_VERSION_NAME, "deploymentUri": MODEL_LOCATION + model.replace(".","_"), "runtimeVersion": "1.13", "framework": "TENSORFLOW", "pythonVersion": "3.5", }, operation="create", dag=dag ) ml_engine_set_default_version_op = MLEngineVersionOperator( task_id="ml_engine_set_default_version_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, model_name=MODEL_NAME + model.replace(".","_"), version_name=MODEL_VERSION, version={"name": MODEL_VERSION}, operation="set_default", dag=dag ) ml_engine_set_default_other_version_op = MLEngineVersionOperator( task_id="ml_engine_set_default_other_version_{}_task".format(model.replace(".","_")), project_id=PROJECT_ID, model_name=MODEL_NAME + model.replace(".","_"), version_name=OTHER_VERSION_NAME, version={"name": OTHER_VERSION_NAME}, operation="set_default", dag=dag ) # Build dependency graph, set_upstream dependencies for all tasks check_if_model_already_exists_op.set_upstream(bash_ml_engine_models_list_op) 
ml_engine_create_model_op.set_upstream(check_if_model_already_exists_op) create_model_dummy_op.set_upstream(ml_engine_create_model_op) dont_create_model_dummy_branch_op.set_upstream(check_if_model_already_exists_op) dont_create_model_dummy_op.set_upstream(dont_create_model_dummy_branch_op) bash_ml_engine_versions_list_op.set_upstream([dont_create_model_dummy_op, create_model_dummy_op]) check_if_model_version_already_exists_op.set_upstream(bash_ml_engine_versions_list_op) ml_engine_set_default_version_op.set_upstream(ml_engine_create_version_op) ml_engine_set_default_other_version_op.set_upstream(ml_engine_create_other_version_op) return (bash_ml_engine_models_list_op, check_if_model_version_already_exists_op, ml_engine_create_version_op, ml_engine_create_other_version_op) # - # ## Subdag # + # %%writefile airflow/dags/subdag/taxifare_subdag.py # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""DAG definition for taxifare automated pipeline.""" import airflow from airflow import DAG # Reference for all available airflow operators: # https://github.com/apache/incubator-airflow/tree/master/airflow/contrib/operators from airflow.operators.subdag_operator import SubDagOperator from airflow.hooks.base_hook import BaseHook from airflow.models import TaskInstance import datetime from subdag import preprocess from subdag import training from subdag import deploy def _get_project_id(): """Get project ID from default GCP connection.""" extras = BaseHook.get_connection("google_cloud_default").extra_dejson key = "extra__google_cloud_platform__project" if key in extras: project_id = extras[key] else: raise ("Must configure project_id in google_cloud_default " "connection from Airflow Console") return project_id # Constants # Get project ID and GCS bucket PROJECT_ID = _get_project_id() BUCKET = "gs://" + PROJECT_ID + "-bucket" # Specify your source BigQuery dataset and table names SOURCE_DATASET_TABLE_NAMES = "yellow.trips,green.trips_2014,green.trips_2015".split(",") # Where to write out data in GCS DATA_DIR = BUCKET + "/taxifare/data/" # Base model parameters MODEL_NAME = "taxifare_" MODEL_VERSION = "v1" MODEL_LOCATION = BUCKET + "/taxifare/saved_model/" default_args = { "owner": "airflow", "depends_on_past": False, "start_date": airflow.utils.dates.days_ago(2), "email": ["<EMAIL>"], "email_on_failure": True, "email_on_retry": False, "retries": 5, "retry_delay": datetime.timedelta(minutes=5) } # Default schedule interval using cronjob syntax - can be customized here # or in the Airflow console. 
# Specify a schedule interval in CRON syntax to run once a day at 2100 hours (9pm)
# Reference: https://airflow.apache.org/scheduler.html
schedule_interval = "00 21 * * *"

# Title your DAG
DAG_NAME = "taxifare_subdag"

# NOTE(review): the CRON string above is defined but never used — the DAG is
# created with schedule_interval=None, so it only runs when triggered manually.
# Presumably deliberate for testing; pass `schedule_interval` here to enable
# the daily 21:00 run — TODO confirm.
dag = DAG(
    DAG_NAME,
    default_args=default_args,
    schedule_interval=None
)

dag.doc_md = __doc__

#
#
# Task Definition
#
#

# One preprocess -> training -> deploy pipeline per source table, each stage
# wrapped in its own SubDAG so the parent DAG stays readable.
for model in SOURCE_DATASET_TABLE_NAMES:
    subdag_preprocess_op = SubDagOperator(
        task_id="subdag_preprocess_{}_task".format(model.replace(".","_")),
        subdag=preprocess.preprocess_tasks(
            model,
            DAG_NAME,
            "subdag_preprocess_{}_task".format(model.replace(".","_")),
            default_args,
            PROJECT_ID,
            BUCKET,
            DATA_DIR),
        dag=dag
    )

    subdag_training_op = SubDagOperator(
        task_id="subdag_training_{}_task".format(model.replace(".","_")),
        subdag=training.training_tasks(
            model,
            DAG_NAME,
            "subdag_training_{}_task".format(model.replace(".","_")),
            default_args,
            PROJECT_ID,
            BUCKET,
            DATA_DIR,
            MODEL_NAME,
            MODEL_VERSION,
            MODEL_LOCATION),
        dag=dag
    )

    subdag_deploy_op = SubDagOperator(
        task_id="subdag_deploy_{}_task".format(model.replace(".","_")),
        subdag=deploy.deploy_tasks(
            model,
            DAG_NAME,
            "subdag_deploy_{}_task".format(model.replace(".","_")),
            default_args,
            PROJECT_ID,
            MODEL_NAME,
            MODEL_VERSION,
            MODEL_LOCATION),
        dag=dag
    )

    # Build dependency graph, set_upstream dependencies for all tasks
    subdag_training_op.set_upstream(subdag_preprocess_op)
    subdag_deploy_op.set_upstream(subdag_training_op)

# +
# %%writefile airflow/dags/subdag/preprocess.py
from airflow import DAG
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_check_operator import BigQueryCheckOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.operators.bash_operator import BashOperator


def preprocess_tasks(model, parent_dag_name, child_dag_name, default_args, PROJECT_ID, BUCKET, DATA_DIR):
    """Build the preprocessing SubDAG for one source table.

    Samples and cleans taxi trips from the public `nyc-tlc` BigQuery project
    into `<DESTINATION_DATASET>.<model>_train_data` / `_eval_data` tables
    (hash-split 80/20 on a row key), sanity-checks that both tables are
    non-empty, then exports them to GCS as headerless CSV shards under
    DATA_DIR.

    Returns the inner DAG, named "<parent_dag_name>.<child_dag_name>" as
    SubDagOperator requires.
    """
    # Create inner dag
    dag = DAG(
        "{0}.{1}".format(parent_dag_name, child_dag_name),
        default_args=default_args,
        schedule_interval=None
    )

    # Constants
    # Specify your source BigQuery project, dataset, and table names
    SOURCE_BQ_PROJECT = "nyc-tlc"
    SOURCE_DATASET_TABLE_NAMES = "yellow.trips,green.trips_2014,green.trips_2015".split(",")

    # Specify your destination BigQuery dataset
    DESTINATION_DATASET = "taxifare"

    # BigQuery data query.
    # `key` concatenates time and coordinates so FARM_FINGERPRINT gives a
    # deterministic train/eval split; `rand() < 0.00001` downsamples the table.
    bql="""
    SELECT
        (tolls_amount + fare_amount) AS fare_amount,
        EXTRACT(DAYOFWEEK FROM pickup_datetime) * 1.0 AS dayofweek,
        EXTRACT(HOUR FROM pickup_datetime) * 1.0 AS hourofday,
        pickup_longitude AS pickuplon,
        pickup_latitude AS pickuplat,
        dropoff_longitude AS dropofflon,
        dropoff_latitude AS dropofflat,
        passenger_count*1.0 AS passengers,
        CONCAT(CAST(pickup_datetime AS STRING), CAST(pickup_longitude AS STRING), CAST(pickup_latitude AS STRING), CAST(dropoff_latitude AS STRING), CAST(dropoff_longitude AS STRING)) AS key
    FROM
        `{0}.{1}`
    WHERE
        trip_distance > 0
        AND fare_amount >= 2.5
        AND pickup_longitude > -78
        AND pickup_longitude < -70
        AND dropoff_longitude > -78
        AND dropoff_longitude < -70
        AND pickup_latitude > 37
        AND pickup_latitude < 45
        AND dropoff_latitude > 37
        AND dropoff_latitude < 45
        AND passenger_count > 0
        AND rand() < 0.00001
    """

    bql = bql.format(SOURCE_BQ_PROJECT, model)

    # 80/20 split on the fingerprint of the row key (buckets 0-3 train, 4 eval).
    bql_train = "SELECT * EXCEPT (key) FROM({0}) WHERE ABS(MOD(FARM_FINGERPRINT(key), 5)) < 4".format(bql)
    bql_eval = "SELECT * EXCEPT (key) FROM({0}) WHERE ABS(MOD(FARM_FINGERPRINT(key), 5)) = 4".format(bql)

    # Complete the BigQueryOperator task to truncate the table if it already exists before writing
    # Reference: https://airflow.apache.org/integration.html#bigqueryoperator
    bq_train_data_op = BigQueryOperator(
        task_id="bq_train_data_{}_task".format(model.replace(".","_")),
        bql=bql_train,
        destination_dataset_table="{}.{}_train_data".format(DESTINATION_DATASET, model.replace(".","_")),
        write_disposition="WRITE_TRUNCATE",  # specify to truncate on writes
        use_legacy_sql=False,
        dag=dag
    )

    bq_eval_data_op = BigQueryOperator(
        task_id="bq_eval_data_{}_task".format(model.replace(".","_")),
        bql=bql_eval,
        destination_dataset_table="{}.{}_eval_data".format(DESTINATION_DATASET, model.replace(".","_")),
        write_disposition="WRITE_TRUNCATE",  # specify to truncate on writes
        use_legacy_sql=False,
        dag=dag
    )

    # Row-count query used by the check operators below.
    # NOTE(review): uses legacy-SQL bracket syntax [project:dataset.table],
    # while the extraction above runs with use_legacy_sql=False — presumably
    # BigQueryCheckOperator defaults to legacy SQL here; confirm.
    sql = """
    SELECT
        COUNT(*)
    FROM
        [{0}:{1}.{2}]
    """

    # Check to make sure that the data tables won"t be empty
    bq_check_train_data_op = BigQueryCheckOperator(
        task_id="bq_check_train_data_{}_task".format(model.replace(".","_")),
        sql=sql.format(PROJECT_ID, DESTINATION_DATASET, model.replace(".","_") + "_train_data"),
        dag=dag
    )

    bq_check_eval_data_op = BigQueryCheckOperator(
        task_id="bq_check_eval_data_{}_task".format(model.replace(".","_")),
        sql=sql.format(PROJECT_ID, DESTINATION_DATASET, model.replace(".","_") + "_eval_data"),
        dag=dag
    )

    # BigQuery training data export to GCS.
    # Clears any previous CSV shards first (the `if gsutil ls` guard keeps the
    # task green when the folder does not exist yet).
    bash_remove_old_data_op = BashOperator(
        task_id="bash_remove_old_data_{}_task".format(model.replace(".","_")),
        bash_command="if gsutil ls {0}/taxifare/data/{1} 2> /dev/null; then gsutil -m rm -rf {0}/taxifare/data/{1}/*; else true; fi".format(BUCKET, model.replace(".","_")),
        dag=dag
    )

    # Takes a BigQuery dataset and table as input and exports it to GCS as a CSV
    bq_export_gcs_train_csv_op = BigQueryToCloudStorageOperator(
        task_id="bq_export_gcs_train_csv_{}_task".format(model.replace(".","_")),
        source_project_dataset_table="{}.{}_train_data".format(DESTINATION_DATASET, model.replace(".","_")),
        destination_cloud_storage_uris=[DATA_DIR + "{}/train-*.csv".format(model.replace(".","_"))],
        export_format="CSV",
        print_header=False,
        dag=dag
    )

    bq_export_gcs_eval_csv_op = BigQueryToCloudStorageOperator(
        task_id="bq_export_gcs_eval_csv_{}_task".format(model.replace(".","_")),
        source_project_dataset_table="{}.{}_eval_data".format(DESTINATION_DATASET, model.replace(".","_")),
        destination_cloud_storage_uris=[DATA_DIR + "{}/eval-*.csv".format(model.replace(".","_"))],
        export_format="CSV",
        print_header=False,
        dag=dag
    )

    # Build dependency graph, set_upstream dependencies for all tasks:
    # extract -> check -> clear old CSVs -> export both splits.
    bq_check_train_data_op.set_upstream(bq_train_data_op)
    bq_check_eval_data_op.set_upstream(bq_eval_data_op)
    bash_remove_old_data_op.set_upstream([bq_check_train_data_op, bq_check_eval_data_op])
    bq_export_gcs_train_csv_op.set_upstream(bash_remove_old_data_op)
    bq_export_gcs_eval_csv_op.set_upstream(bash_remove_old_data_op)

    return dag

# +
# %%writefile airflow/dags/subdag/training.py
import datetime

from airflow import DAG
from airflow.contrib.operators.mlengine_operator import MLEngineTrainingOperator
from airflow.operators.bash_operator import BashOperator


def training_tasks(model, parent_dag_name, child_dag_name, default_args, PROJECT_ID, BUCKET, DATA_DIR, MODEL_NAME, MODEL_VERSION, MODEL_LOCATION):
    """Build the training SubDAG for one source table.

    Submits an AI Platform (ML Engine) training job over the CSVs exported by
    the preprocess SubDAG, then replaces the saved model under MODEL_LOCATION
    with the newest export produced by the job.

    Returns the inner DAG, named "<parent_dag_name>.<child_dag_name>".
    """
    # Create inner dag
    dag = DAG(
        "{0}.{1}".format(parent_dag_name, child_dag_name),
        default_args=default_args,
        schedule_interval=None
    )

    # Constants
    # The code package name comes from the model code in the module directory
    REGION = "us-east1"
    PACKAGE_URI = BUCKET + "/taxifare/code/taxifare-0.1.tar.gz"
    JOB_DIR = BUCKET + "/jobs"

    # ML Engine training job.
    # NOTE(review): job_id embeds the wall-clock time at DAG *parse* time, not
    # at task run time, so it changes on every scheduler re-parse — presumably
    # acceptable for this tutorial; confirm if runs must be re-triggerable.
    job_id = "taxifare_{}_{}".format(model.replace(".","_"), datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
    train_files = DATA_DIR + "{}/train-*.csv".format(model.replace(".","_"))
    eval_files = DATA_DIR + "{}/eval-*.csv".format(model.replace(".","_"))
    output_dir = BUCKET + "/taxifare/trained_model/{}".format(model.replace(".","_"))
    job_dir = JOB_DIR + "/" + job_id

    # Command-line flags forwarded to trainer.task.
    training_args = [
        "--job-dir", job_dir,
        "--train_data_paths", train_files,
        "--eval_data_paths", eval_files,
        "--output_dir", output_dir,
        "--train_steps", str(500),
        "--train_batch_size", str(32),
        "--eval_steps", str(500),
        "--eval_batch_size", str(32),
        "--nbuckets", str(8),
        "--hidden_units", "128,32,4"
    ]

    # Reference: https://airflow.apache.org/integration.html#cloud-ml-engine
    ml_engine_training_op = MLEngineTrainingOperator(
        task_id="ml_engine_training_{}_task".format(model.replace(".","_")),
        project_id=PROJECT_ID,
        job_id=job_id,
        package_uris=[PACKAGE_URI],
        training_python_module="trainer.task",
        training_args=training_args,
        region=REGION,
        scale_tier="BASIC",
        runtime_version="1.13",
        python_version="3.5",
        dag=dag
    )

    # Delete the previously deployed saved model, if any (guarded so a missing
    # folder does not fail the task).
    bash_remove_old_saved_model_op = BashOperator(
        task_id="bash_remove_old_saved_model_{}_task".format(model.replace(".","_")),
        bash_command="if gsutil ls {0} 2> /dev/null; then gsutil -m rm -rf {0}/*; else true; fi".format(MODEL_LOCATION + model.replace(".","_")),
        dag=dag
    )

    # Copy the newest exported SavedModel (last entry of `gsutil ls`) into the
    # serving location.
    bash_copy_new_saved_model_op = BashOperator(
        task_id="bash_copy_new_saved_model_{}_task".format(model.replace(".","_")),
        bash_command="gsutil -m rsync -d -r `gsutil ls {0}/export/exporter/ | tail -1` {1}".format(output_dir, MODEL_LOCATION + model.replace(".","_")),
        dag=dag
    )

    # Build dependency graph, set_upstream dependencies for all tasks
    bash_remove_old_saved_model_op.set_upstream(ml_engine_training_op)
    bash_copy_new_saved_model_op.set_upstream(bash_remove_old_saved_model_op)

    return dag

# +
# %%writefile airflow/dags/subdag/deploy.py
import datetime
import logging

from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.contrib.operators.mlengine_operator import MLEngineModelOperator, MLEngineVersionOperator


def deploy_tasks(model, parent_dag_name, child_dag_name, default_args, PROJECT_ID, MODEL_NAME, MODEL_VERSION, MODEL_LOCATION):
    """Build the deployment SubDAG for one source table.

    Creates the ML Engine model if it does not exist yet (branching on the
    output of a `gcloud ml-engine models list` task), then either creates the
    named MODEL_VERSION or, if that version already exists, a timestamped
    OTHER_VERSION_NAME, and finally marks the created version as default.

    Returns the inner DAG, named "<parent_dag_name>.<child_dag_name>".
    """
    # Create inner dag
    dag = DAG(
        "{0}.{1}".format(parent_dag_name, child_dag_name),
        default_args=default_args,
        schedule_interval=None
    )

    # Constants
    # Timestamped fallback version name, fixed at DAG parse time.
    OTHER_VERSION_NAME = "v_{0}".format(datetime.datetime.now().strftime("%Y%m%d%H%M%S")[0:12])

    # Create model on ML-Engine.
    # xcom_push=True makes the command output available to the branch callable.
    bash_ml_engine_models_list_op = BashOperator(
        task_id="bash_ml_engine_models_list_{}_task".format(model.replace(".","_")),
        xcom_push=True,
        bash_command="gcloud ml-engine models list --filter='name:{0}'".format(MODEL_NAME + model.replace(".","_")),
        dag=dag
    )

    def check_if_model_already_exists(templates_dict, **kwargs):
        """Branch callable: return the task_id to follow based on whether the
        `gcloud ml-engine models list` output (pulled from XCom) is empty."""
        cur_model = templates_dict["model"].replace(".","_")
        ml_engine_models_list = kwargs["ti"].xcom_pull(task_ids="bash_ml_engine_models_list_{}_task".format(cur_model))
        logging.info("check_if_model_already_exists: {}: ml_engine_models_list = \n{}".format(cur_model, ml_engine_models_list))
        create_model_task = "ml_engine_create_model_{}_task".format(cur_model)
        dont_create_model_task = "dont_create_model_dummy_branch_{}_task".format(cur_model)
        if len(ml_engine_models_list) == 0 or ml_engine_models_list == "Listed 0 items.":
            return create_model_task
        return dont_create_model_task

    check_if_model_already_exists_op = BranchPythonOperator(
        task_id="check_if_model_already_exists_{}_task".format(model.replace(".","_")),
        templates_dict={"model": model.replace(".","_")},
        python_callable=check_if_model_already_exists,
        provide_context=True,
        dag=dag
    )

    ml_engine_create_model_op = MLEngineModelOperator(
        task_id="ml_engine_create_model_{}_task".format(model.replace(".","_")),
        project_id=PROJECT_ID,
        model={"name": MODEL_NAME + model.replace(".","_")},
        operation="create",
        dag=dag
    )

    # Dummy join nodes so the two branches re-converge; trigger_rule="all_done"
    # lets the downstream version-listing task run whichever branch was taken.
    create_model_dummy_op = DummyOperator(
        task_id="create_model_dummy_{}_task".format(model.replace(".","_")),
        trigger_rule="all_done",
        dag=dag
    )

    dont_create_model_dummy_branch_op = DummyOperator(
        task_id="dont_create_model_dummy_branch_{}_task".format(model.replace(".","_")),
        dag=dag
    )

    dont_create_model_dummy_op = DummyOperator(
        task_id="dont_create_model_dummy_{}_task".format(model.replace(".","_")),
        trigger_rule="all_done",
        dag=dag
    )

    # Create version of model on ML-Engine
    bash_ml_engine_versions_list_op = BashOperator(
        task_id="bash_ml_engine_versions_list_{}_task".format(model.replace(".","_")),
        xcom_push=True,
        bash_command="gcloud ml-engine versions list --model {0} --filter='name:{1}'".format(MODEL_NAME + model.replace(".","_"), MODEL_VERSION),
        dag=dag
    )

    def check_if_model_version_already_exists(templates_dict, **kwargs):
        """Branch callable: create MODEL_VERSION when it is absent, otherwise
        fall back to creating the timestamped OTHER_VERSION_NAME."""
        cur_model = templates_dict["model"].replace(".","_")
        ml_engine_versions_list = kwargs["ti"].xcom_pull(task_ids="bash_ml_engine_versions_list_{}_task".format(cur_model))
        logging.info("check_if_model_version_already_exists: {}: ml_engine_versions_list = \n{}".format(cur_model, ml_engine_versions_list))
        create_version_task = "ml_engine_create_version_{}_task".format(cur_model)
        create_other_version_task = "ml_engine_create_other_version_{}_task".format(cur_model)
        if len(ml_engine_versions_list) == 0 or ml_engine_versions_list == "Listed 0 items.":
            return create_version_task
        return create_other_version_task

    check_if_model_version_already_exists_op = BranchPythonOperator(
        task_id="check_if_model_version_already_exists_{}_task".format(model.replace(".","_")),
        templates_dict={"model": model.replace(".","_")},
        python_callable=check_if_model_version_already_exists,
        provide_context=True,
        dag=dag
    )

    ml_engine_create_version_op = MLEngineVersionOperator(
        task_id="ml_engine_create_version_{}_task".format(model.replace(".","_")),
        project_id=PROJECT_ID,
        model_name=MODEL_NAME + model.replace(".","_"),
        version_name=MODEL_VERSION,
        version={
            "name": MODEL_VERSION,
            "deploymentUri": MODEL_LOCATION + model.replace(".","_"),
            "runtimeVersion": "1.13",
            "framework": "TENSORFLOW",
            "pythonVersion": "3.5",
        },
        operation="create",
        dag=dag
    )

    ml_engine_create_other_version_op = MLEngineVersionOperator(
        task_id="ml_engine_create_other_version_{}_task".format(model.replace(".","_")),
        project_id=PROJECT_ID,
        model_name=MODEL_NAME + model.replace(".","_"),
        version_name=OTHER_VERSION_NAME,
        version={
            "name": OTHER_VERSION_NAME,
            "deploymentUri": MODEL_LOCATION + model.replace(".","_"),
            "runtimeVersion": "1.13",
            "framework": "TENSORFLOW",
            "pythonVersion": "3.5",
        },
        operation="create",
        dag=dag
    )

    ml_engine_set_default_version_op = MLEngineVersionOperator(
        task_id="ml_engine_set_default_version_{}_task".format(model.replace(".","_")),
        project_id=PROJECT_ID,
        model_name=MODEL_NAME + model.replace(".","_"),
        version_name=MODEL_VERSION,
        version={"name": MODEL_VERSION},
        operation="set_default",
        dag=dag
    )

    ml_engine_set_default_other_version_op = MLEngineVersionOperator(
        task_id="ml_engine_set_default_other_version_{}_task".format(model.replace(".","_")),
        project_id=PROJECT_ID,
        model_name=MODEL_NAME + model.replace(".","_"),
        version_name=OTHER_VERSION_NAME,
        version={"name": OTHER_VERSION_NAME},
        operation="set_default",
        dag=dag
    )

    # Build dependency graph, set_upstream dependencies for all tasks
    check_if_model_already_exists_op.set_upstream(bash_ml_engine_models_list_op)
    ml_engine_create_model_op.set_upstream(check_if_model_already_exists_op)
    create_model_dummy_op.set_upstream(ml_engine_create_model_op)
    dont_create_model_dummy_branch_op.set_upstream(check_if_model_already_exists_op)
    dont_create_model_dummy_op.set_upstream(dont_create_model_dummy_branch_op)
    bash_ml_engine_versions_list_op.set_upstream([dont_create_model_dummy_op, create_model_dummy_op])
    check_if_model_version_already_exists_op.set_upstream(bash_ml_engine_versions_list_op)
    ml_engine_create_version_op.set_upstream(check_if_model_version_already_exists_op)
    ml_engine_create_other_version_op.set_upstream(check_if_model_version_already_exists_op)
    ml_engine_set_default_version_op.set_upstream(ml_engine_create_version_op)
    ml_engine_set_default_other_version_op.set_upstream(ml_engine_create_other_version_op)

    return dag

# + [markdown] colab_type="text" id="2-YF4aOUm1Pl"
# ### Copy local Airflow DAG file and plugins into the DAGs folder

# + colab={} colab_type="code" id="KAd9I3icm1Pm" language="bash"
# gsutil -m cp -r airflow/dags/* gs://${AIRFLOW_BUCKET}/dags # overwrite if it exists

# + [markdown] colab_type="text" id="0ixqSbrmm1Po"
# 1. Navigate to your Cloud Composer [instance](https://console.cloud.google.com/composer/environments?project=)<br/><br/>
#
# 2. Trigger a __manual run__ of your DAG for testing<br/><br/>
#
# 3. Ensure your DAG runs successfully (all nodes outlined in dark green and 'success' tag shows)
# -
courses/machine_learning/asl/open_project/cloud_composer_automated_ml_pipeline_taxifare/cloud_composer_automated_ml_pipeline_taxifare.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Template Network Definitions # <i>Version 2</i> # ## References # 1. ["Maintaining Knowledge about Temporal Intervals" by <NAME>](https://cse.unl.edu/~choueiry/Documents/Allen-CACM1983.pdf) - Allen's original paper (PDF) # 1. [Allen's Interval Algebra](https://www.ics.uci.edu/~alspaugh/cls/shr/allen.html) or [here](https://thomasalspaugh.org/pub/fnd/allen.html) - summarizes Allen's algebra of proper time intervals # 1. [W3C Time Ontology in OWL](https://www.w3.org/TR/owl-time/) - temporal vocabulary used here is based on the W3C vocabulary of time # 1. [bitsets Python package](https://bitsets.readthedocs.io/en/stable/) - used to implement Algebra relation sets and operations # 1. [NetworkX Python package](http://networkx.github.io/) - used to represent directed graph of constraints # 1. [Python format string syntax](https://docs.python.org/3/library/string.html#format-string-syntax) - used in Algebra summary method # 1. [Spatial Ontology](https://www.w3.org/2017/sdwig/bp/) - I'm still looking for a standard spatial vocabulary; maybe start here # 1. 
[Qualitative Spatial Relations (QSR) Library](https://qsrlib.readthedocs.io/en/latest/index.html) - an alternative library to the one defined here # ## Dependencies import os import qualreas as qr import numpy as np path = os.path.join(os.getenv('PYPROJ'), 'qualreas') #pt_alg = qr.Algebra(os.path.join(path, "Algebras/LinearPointAlgebra.json")) #pt_alg = qr.Algebra(os.path.join(path, "Algebras/RightBranchingPointAlgebra.json")) pt_alg = qr.Algebra(os.path.join(path, "Algebras/LeftBranchingPointAlgebra.json")) # ## Create Algebra Elements using 4-Point Networks # ### Define 4-Point Network Generator # + active="" # class Four_Point(qr.Network): # '''Create four Temporal Entities that represent time points and use them # to express two independent intervals. For example, (s1,e1) and (s2,e2), # where s1 < e1 and s2 < e2, represents two proper intervals. Using '<|=' # instead of '<', would represent two intervals where one or both might # be points. Return the network and the four temporal entities.''' # # def __init__(self, algebra, name, lessthanstr, startname="StartPt", endname="EndPt"): # self.algebra = algebra # self.lessthan = algebra.relset(lessthanstr) # # Start & End Points of Interval 1 # start1 = qr.TemporalEntity(["Point"], name=startname+"1") # end1 = qr.TemporalEntity(["Point"], name=endname+"1") # # Start & End Points of Interval 2 # start2 = qr.TemporalEntity(["Point"], name=startname+"2") # end2 = qr.TemporalEntity(["Point"], name=endname+"2") # super().__init__(algebra, name) # self.add_constraint(start1, end1, self.lessthan, verbose=False) # self.add_constraint(start2, end2, self.lessthan, verbose=False) # # def get_points(self): # return [start1, end1, start2, end2] # + active="" # def four_point_network(alg, lessthan_symbol, startname="StartPt", endname="EndPt", # verbose=False): # '''Create four Temporal Entities that represent time points and use them # to express two independent intervals. 
# For example, (s1,e1) and (s2,e2),
# where s1 < e1 and s2 < e2, represents two proper intervals. Using '<|='
# instead of '<', would represent two intervals where one or both might
# be points. Return the network and the four temporal entities.'''
#     net = qr.Network(alg, "Four Point Network")
#     lessthan = alg.relset(lessthan_symbol)
#     # Start & End Points of Interval 1
#     start1 = qr.TemporalEntity(["Point"], name=startname+"1")
#     end1 = qr.TemporalEntity(["Point"], name=endname+"1")
#     # Start & End Points of Interval 2
#     start2 = qr.TemporalEntity(["Point"], name=startname+"2")
#     end2 = qr.TemporalEntity(["Point"], name=endname+"2")
#     net.add_constraint(start1, end1, lessthan, verbose)
#     net.add_constraint(start2, end2, lessthan, verbose)
#     entities = [start1, end1, start2, end2]
#     return net, entities

# + active=""
# def constraint_matrix_to_list(net, entities=None):
#     if not entities:
#         entities = net.nodes
#     result = []
#     for a in entities:
#         row = []
#         for b in entities:
#             row.append(str(net.edges[a, b]['constraint']))
#         result.append(row)
#     return result

# + active=""
# # Map 4-Point Network "Signatures" to Typical Names
# key_name_mapping = {
#     '<,<,<,<': 'B',    '>,>,>,>': 'BI',
#     '>,<,>,<': 'D',    '<,<,>,>': 'DI',
#     '=,<,>,=': 'E',    '=,=,=,=': 'PE',
#     '>,<,>,=': 'F',    '<,<,>,=': 'FI',
#     '<,<,=,<': 'M',    '>,=,>,>': 'MI',
#     '<,<,>,<': 'O',    '>,<,>,>': 'OI',
#     '=,<,>,<': 'S',    '=,<,>,>': 'SI',
#     '>,=,>,=': 'PF',   '<,<,=,=': 'PFI',
#     '=,<,=,<': 'PS',   '=,=,>,>': 'PSI',
#     '<,<,>,r~': 'RO',  '<,<,r~,r~': 'RB',
#     '=,<,>,r~': 'RS',  '>,<,>,r~': 'ROI',
#     '>,r~,>,r~': 'RBI', 'r~,r~,r~,r~': 'R~',
#     'l~,<,>,<': 'LO',  'l~,<,>,=': 'LF',
#     'l~,<,>,>': 'LOI', 'l~,l~,>,>': 'LBI',
#     'l~,<,l~,<': 'LB', 'l~,l~,l~,l~': 'L~'
# }
# -

# Build a 4-point network over the chosen point algebra; "<|=" allows either
# represented interval to degenerate to a point.
foobar = qr.FourPoint(pt_alg, "Foobar", "<|=")
foobar.summary()

# +
# Viewing the network as a matrix, 'elem13', below, refers to the element in row 1 col 3,
# and so on for 'elem23', etc. The matrix is 4x4, so if we partition it into four 2x2
# matrices, then the two partitions on the diagonal represent two intervals and the two
# off-diagonal partitions represent how those two intervals relate to each other.
# Also, the off-diagonal 2x2 partitions are converse transposes of each other.
# Oh, and the intervals represented by the diagonal partitions could be intervals,
# proper intervals, or points.

def generate_consistent_networks(alg, lessthan="<", startname="StartPt", endname="EndPt", verbose=False):
    """Enumerate all assignments of the four cross-interval constraints and
    return a dict mapping Allen-style relation names to the 4-point networks
    that survive constraint propagation (i.e., are consistent)."""
    consistent_nets = dict()
    for elem13 in alg.elements:
        for elem23 in alg.elements:
            for elem14 in alg.elements:
                for elem24 in alg.elements:
                    four_pt_net_name = elem13 + ',' + elem23 + ',' + elem14 + ',' + elem24
                    #net, pts = four_point_network(alg, lessthan, startname, endname)
                    net = qr.FourPoint(alg, four_pt_net_name, lessthan, startname, endname)
                    #pt1, pt2, pt3, pt4 = pts
                    pt1, pt2, pt3, pt4 = net.get_points()
                    rs13 = alg.relset(elem13)
                    rs23 = alg.relset(elem23)
                    rs14 = alg.relset(elem14)
                    rs24 = alg.relset(elem24)
                    net.add_constraint(pt1, pt3, rs13)
                    net.add_constraint(pt2, pt3, rs23)
                    net.add_constraint(pt1, pt4, rs14)
                    net.add_constraint(pt2, pt4, rs24)
                    # propagate() returns falsy when the network is inconsistent.
                    if net.propagate():
                        elem_key = ",".join([str(rs13), str(rs14), str(rs23), str(rs24)])
                        # NOTE(review): this lookup is unguarded — if elem_key is
                        # missing from qr.key_name_mapping it raises KeyError here,
                        # before the "UNKNOWN" branch below can ever report it.
                        consistent_nets[qr.key_name_mapping[elem_key]] = net
                        if verbose:
                            print("==========================")
                            if elem_key in qr.key_name_mapping:
                                print(qr.key_name_mapping[elem_key])
                            else:
                                print("UNKNOWN")
                            #print(np.matrix(constraint_matrix_to_list(net, pts)))
                            print(np.matrix(net.to_list()))
    print(f"\n{len(consistent_nets)} consistent networks")
    return consistent_nets
# -

# ### Generating a 4-Point Network that Represents 2 Intervals

#net4pt = four_point_network(pt_alg, "=|<")
net4pt = qr.FourPoint(pt_alg, "FourPointNetwork", "=|<")
pts = net4pt.get_points()
net4pt.summary()

for pt in pts:
    print(pt)

# ## Derive Algebra Elements

# A 4-point network, like that generated above, only has constraints specified so that the first two points
# define an interval, and same for the second two points. No constraints are specified between the two
# implied intervals (e.g., no constraint between StartPt1/EndPt1 and StartPt2/EndPt2). Depending on which
# point algebra is used there are either 3^4 (81) or 4^4 (256) different ways the unassigned constraint
# pairs can be made. The function, <i>generate_consistent_networks</i> tries all of these possibilities and
# returns the ones that are consistent. Doing this for the linear point algebra ('<', '=', '>') results in
# 13 consistent networks that correspond to Allen's Temporal Algebra of Proper Time Intervals. Using
# ('<|=', '=', '>|=') results in 18 consistent networks that are a superset of Allen's relations that
# includes 5 additional relations that integrate points into the algebra. Using ('<|=', '=', '>|=', '~'),
# where '~' is either the left-incomparable or right-incomparable relation of the left- or right-branching
# time point algebra will result in 24 consistent networks that integrate points into a left- or
# right-branching time interval algebra.

consistent_nets = generate_consistent_networks(pt_alg, lessthan="=|<", verbose=True)

consistent_nets

#def print_as_matrix(net, entities=None):
before = consistent_nets['B']
print(np.matrix(before.to_list()))

# +
def ontology_classes(net, start, end):
    """The constraints between the start and end points of a temporal entity
    determine whether it belongs to the class of Point, ProperIntervals, or both.
    Return a list containing the class names for the input network, net."""
    class_list = []
    constr = net.edges[net.get_entity(start), net.get_entity(end)]['constraint']
    # '=' between start and end permits a degenerate (point) entity;
    # '<' permits a proper interval. Both may hold for an "either" entity.
    if '=' in constr:
        class_list.append('Point')
    if '<' in constr:
        class_list.append('ProperInterval')
    return class_list


def domain_and_range(four_point_network, startname="StartPt", endname="EndPt"):
    """Return a tuple, (domain, range), for the interval/point relation
    represented by the input 4-point network."""
    return (ontology_classes(four_point_network, startname+"1", endname+"1"),
            ontology_classes(four_point_network, startname+"2", endname+"2"))
# -

for rel in consistent_nets:
    print(f"{rel}: {domain_and_range(consistent_nets[rel])}")

for rel in consistent_nets:
    print(f"{rel}: {consistent_nets[rel].name}")
Notebooks/Obsolete_Maybe/template_networks_v2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from orphics import sehgal, maps
import healpy as hp
from pixell import utils, enmap, curvedsky, enplot, wcsutils
import os
import numpy as np
import matplotlib.pyplot as plt
import lmdb
from cosmikyu import datasets, transforms, config, stats
from cosmikyu import utils as cutils

# %matplotlib inline
# %load_ext autoreload
# %autoreload 2

# +
data_dir = config.default_data_dir
sehgal_dir = os.path.join(data_dir, 'sehgal')

def data_path(x):
    # Resolve a file name inside the sehgal data directory.
    return os.path.join(sehgal_dir, x)

SDS_validation = datasets.SehgalDataSet(sehgal_dir, "validation281220_fromcat", transforms=[], dummy_label=False)

# Stack every 128x128 validation cutout side-by-side along the last axis:
# data[c] is a (128, 128*nsamples) strip for component c.
data = np.zeros((5, 128, 128*len(SDS_validation)))
compts = ["kappa", "ksz", "tsz", "ir_pts", "rad_pts"]
for i in range(len(SDS_validation)):
    sidx = 128*i
    data[...,sidx: sidx+128] = SDS_validation[i]

def sehgal_path(x):
    # Same as data_path; kept as a separate alias used by the save calls below.
    return os.path.join(sehgal_dir, x)

#enplot.pshow(data[:,:128,:128])

# +
zfact = 1

def log_normalize(data):
    """Signed-log transform of data["emap"] in place, scaled by its std;
    records the scale in data["info"]["lognorm_std"]."""
    emap = data["emap"]
    info = data["info"]
    loc = np.where(emap!=0)
    # NOTE(review): this first std (over nonzero pixels) is immediately
    # overwritten by the full-map std on the next line, so it is dead code.
    # If the nonzero-masked std was the intent, this is a latent bug — confirm.
    std = np.std(emap[loc])
    std = np.std(emap)
    info["lognorm_std"] = std.copy()
    # log1p-style mapping applied symmetrically to positive and negative pixels.
    loc = np.where(emap>=0)
    emap[loc] = np.log(emap[loc]/std+1)
    loc = np.where(emap<0)
    emap[loc] = -1*np.log(np.abs(emap[loc]/std)+1)
    data["emap"] = emap
    return data

def meansub(data):
    """Subtract the map mean; records it in data["info"]["meansub_mean"]."""
    emap = data["emap"]
    info = data["info"]
    mean = np.mean(emap)
    info["meansub_mean"] = mean.copy()
    data["emap"] = emap - mean
    return data

# NOTE(review): the default `zfact = zfact` binds the module-level value (1)
# at definition time; changing the global later will NOT affect this default.
def z_normalize(data, zfact = zfact, ignore_zero =False):
    """Z-score data["emap"] (optionally over nonzero pixels only), divided by
    an extra factor zfact; records mean/std/zfact in data["info"]."""
    emap = data["emap"]
    info = data["info"]
    if not ignore_zero:
        std = np.std(emap)
        mean = emap.mean()
    else:
        loc = np.where(emap!=0)
        std = np.std(emap[loc])
        mean = emap[loc].mean()
    info["znorm_mean"] = mean
    info["znorm_std"] = std
    info["znorm_zfact"] = zfact
    data["emap"] = (emap-mean)/(std*zfact)
    return data

def shrink(data):
    """Rescale data["emap"] into roughly (-1, 1) by its max magnitude * 1.1;
    records the factor in data["info"]["shrink_fact"]."""
    emap = data["emap"]
    info = data["info"]
    factor = np.max(np.abs(np.array([emap.min(), emap.max()])))*1.1
    info["shrink_fact"] = factor
    data["emap"] = emap/factor
    return data

def minmax(data):
    """Affinely map data["emap"] onto [-1, 1]; records min/max/midpoint in
    data["info"]."""
    emap = data["emap"]
    info = data["info"]
    maxval, minval = emap.max(),emap.min()
    #maxval = np.max(np.abs([maxval, minval]))
    info["minmax_min"] = minval
    info["minmax_max"] = maxval
    valrange = (maxval-minval)
    midval = (maxval+minval)/2
    info["minmax_mean"] = midval
    data["emap"] = (emap-midval)/valrange*2
    return data

freq_idx = 148

# Per-component normalization recipe: gaussian-like fields are mean-subtracted
# and z-scored; point-source-like fields are signed-log transformed first.
ns = {"kappa": lambda x: (z_normalize(meansub(x))),
      "ksz": lambda x: (z_normalize(meansub(x))),
      "ir_pts": lambda x: (z_normalize(log_normalize(x),ignore_zero=True)),
      "rad_pts": lambda x: (z_normalize(log_normalize(x),ignore_zero=True)),
      "tsz": lambda x: (z_normalize(log_normalize(x),ignore_zero=True)),
     }

nbins = 10000
norm_info_validation = {}
compts = ["kappa", "ksz", "tsz", "ir_pts", "rad_pts"]
# For each component: histogram the raw strip, apply its normalization, and
# histogram the result; collect the recorded normalization constants.
for i, compt_idx in enumerate(compts[:]):
    #if i < 2: continue
    print(compt_idx)
    storage = {}
    storage["emap"] = data[i].copy()
    storage["info"] = {}
    minval, maxval, mean = storage["emap"].min(), storage["emap"].max(), storage["emap"].mean()
    print(minval, maxval, mean)
    MB = stats.FastBINNER(minval, maxval, nbins)
    bin_center, hist = MB.bin(data[i].copy())
    fig = plt.figure(figsize=(10, 5))
    plt.plot(bin_center, hist/np.sum(hist), label=compt_idx)
    plt.legend()
    plt.axvline(x=1, ls="--", color="k")
    plt.axvline(x=-1, ls="--", color="k")
    plt.yscale("log")
    plt.show()
    storage = ns[compt_idx](storage)
    norm_info_validation[compt_idx] = storage["info"]
    MB = stats.FastBINNER(-30, 30, nbins)
    print(np.min(storage["emap"]), np.max(storage["emap"]))
    bin_center, hist = MB.bin(storage["emap"])
    fig = plt.figure(figsize=(10, 5))
    plt.plot(bin_center, hist/np.sum(hist), label=compt_idx)
    plt.axvline(x=1, ls="--", color="k")
    plt.axvline(x=-1, ls="--", color="k")
    plt.axhline(y=1e-5, ls="--", color="k")
    plt.legend()
    plt.xlim(-5,5)
    plt.yscale("log")
    plt.show()

# +
for idx in norm_info_validation.keys():
    print(idx, norm_info_validation[idx])
np.savez(data_path("281220_logz_normalization_info_validation.npz"), **norm_info_validation)

# +
# Apply the saved normalization to the test split and histogram the result.
norm_info_file = data_path("281220_logz_normalization_info_validation.npz")
SDN = transforms.SehgalDataNormalizerScaledLogZShrink(norm_info_file)
SDS_test = datasets.SehgalDataSet(sehgal_dir, "test281220_fromcat", transforms=[SDN], dummy_label=False)
nsample = len(SDS_test)
data = np.zeros((5, 128, 128*nsample))
SDS_test
nbins = 10000
for i in range(nsample):
    if i % 5000 == 0: print(i)
    sidx = 128*i
    data[...,sidx: sidx+128] = SDS_test[i]
print(data.min(), data.max(), data.mean())
print("start binning")
MB = stats.FastMultBinner((-15,15), nbins, data.shape[0])
MB.bin(data)
ret = MB.get_info()
out = {}
# Re-key the per-channel histograms by component name before saving.
for key in range(5):
    print(key)
    out[SDN.channel_idxes[key]] = ret[key].copy()
ret = out
np.savez(sehgal_path("281220_normalized_histogram_test_{}.npz".format(nbins)), **out)
# -
notebooks/281120_sehgal_dataset_prep_step3_normalization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !kubectl create -f hyperparameter-tuning/random-search-job.yaml # # Check out the Parameter Optimizations tab # # ![Successful Hyperparameter Tuning](https://raw.githubusercontent.com/PipelineAI/site/master/assets/img/successful-hyperparameter-tuning.png) # # ![Successful Hyperparameter Tuning](https://raw.githubusercontent.com/PipelineAI/site/master/assets/img/successful-hyperparameter-tuning-2.png)
kubeflow/notebooks/11_Hyper_Parameter_Tuning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Good Coding Style # # (Credit Chapter 2, Newman). # # Usually many ways to write a programme to do what you need to do. # # e.g. # # * you choose the variable names # * lists or arrays? # * how to break code into user defined functions? # # Well written code: # # * a simple structure # * easy to read and understand # * ideally, run fast # # Poorly written code: # # * convoluted or unnecessarily long # * difficult to follow # * may run slowly # # Making code easy to understand is incredibly important (not just to make marking it easier, but for your own future use, and if anyone else needs to edit your code in future). # # ## Top Tips for Good Coding Style # # # ### 1. Include comments in your programs. # # You may think this is less important with Notebooks, but if you wish to copy just the code box, having comments to remind yourself of what the variable mean, or what is happening where is very useful. # # ```python # #This is a comment # print("This would write a comment out which is also useful") # ``` # # ### 2. Use meaningful variable names. # # e.g. Use $E$ for energy and $t$ for time. # # Use full words if necessary # # ### 3. Use the right types of variables. # # Integer type for integers (etc). # # ### 4. Import functions first. # # Start your code with a set of import statements. # # e.g. # # ```python # import numpy as np # import scipy as sci # ``` # # ### 5. Give your constants names. # # For standard constants make use of the scipy.constants module. # # For custom values have a section at the start of the code to define them (makes it easier to change later). # # # ### 6. Employ user-defined functions, where appropriate. # # Make your code shorter by writing a function for any extended operation which will repeat many times. 
#
# However - avoid overuse: simple operations that can be represented by just a
# line or two of code are better kept in the main body.
#
# Normally put all functions at the top of the code (not mixed in with the main
# body of the code).
#
# ### 7. Print out partial results and updates throughout your program.
#
# Particularly important for code that runs for a long time.
#
# ```python
# for n in range(1000000):
#     if n%1000==0:
#         print("Step",n)
# ```
#
# ### 8. Lay out your programs clearly.
#
# Split your code into logical blocks.
#
# Make use of white space — blank lines have no syntactic meaning in Python, so
# use them freely to separate logical sections.
#
# Split long lines:
#
# ```python
# energy = mass*(vx**2 + vy**2)/2 + mass*g*y \
#          + moment_of_inertia*omega**2/2
# ```
#
# ### 9. Don’t make your programs unnecessarily complicated.
#
# (A general rule for life as well as coding!)
#
# Keep your code as short and simple as the problem allows — but never at the
# expense of clarity.
IntroductiontoPython/Good Coding Style.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="8fnxlkI_3Mxb" # #code for task1 & task3 # + id="KUKue_Q7Iy8h" # %tensorflow_version 1.x from google.colab import auth auth.authenticate_user() project_id = 'woven-fountain-288502' # !gcloud config set project {project_id}4/uQFWNACaDu4HXj5iBtuZWia5G7YQ3XZ34IEhuPCsQpyME_bHeWq-4gM # !gsutil ls gs://tangliang-commit # + id="DolYiXeuJZ68" from google.colab import drive drive.mount('/content/drive') # + id="fEahQu4K1ALH" # cd /content/drive/MyDrive # + id="bmnDqAmN_SPa" # cd dstc10/task1/code # + id="MOg1hZnGA_IC" # !pip install -r requirement.txt # + id="6R0tF4W9BESR" import stanza stanza.download('en') # + [markdown] id="5LUmC_bf3k8C" # #preprocess of step1 # + id="-gPAk3W0BgkY" # !/bin/bash run_step1_preprocess.sh # + id="fqhrYfjp23YV" # !/bin/bash run_step1_preprocess_teststd.sh # + [markdown] id="Nuv4kK7G3roz" # #generate feature # + id="k_CjnQwYCbvK" # !/bin/bash run_step1_feature.sh # + id="1gKQDBcC26X1" # !/bin/bash run_step1_preprocess_teststd.sh # + [markdown] id="G2PvFhPB3xea" # #finetune & predict # + id="32_OyrhjCgtE" # !/bin/bash run_step1_finetune.sh # + id="EePnnO1629cu" # !/bin/bash run_step1_finetune_teststd.sh # + [markdown] id="D0lL1Qm_32AC" # embedding & format final result # + id="WyQL_26fCjAi" # !/bin/bash run_step1_predict.sh # + id="0MdVaEuZ2_w8" # !/bin/bash run_step1_predict_teststd.sh # + [markdown] id="eZazIICY38kK" # #same step for step2 # + colab={"base_uri": "https://localhost:8080/"} id="ihImM1AvdsiT" outputId="33aa19ea-4eae-44ea-c9de-4eb436a2e568" # cd ../../task3/code # + id="p52j8ZfVdyLd" # !/bin/bash run_step3_preprocess.sh # + id="ApnOmNF33CTG" # !/bin/bash run_step3_preprocess_teststd.sh # + id="y7_8cVekdyLe" # !/bin/bash run_step3_feature.sh # + id="SJNJxed_3Eul" # !/bin/bash run_step3_feature_teststd.sh # + 
id="71RK8whbdyLe" # !/bin/bash run_step3_finetune.sh # + id="j4iDo3BB3ILk" # !/bin/bash run_step3_finetune_teststd.sh # + id="kVEYR97PdyLf" # !/bin/bash run_step3_predict.sh # + id="4-rOXGWB3KU0" # !/bin/bash run_step3_predict_teststd.sh
task1/dstc10_track3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import ipywidgets as widgets from IPython.display import display ''' -width -height -background_color -border_color -border_width -border_style -font_style -font_weight -font_size -font_family ''' # + button = widgets.Button( description = 'Hello World!', width = 100, # Intergers are interpreted as pixel measurements. height = '2em', # em is valid HTML unit of measurement. color = 'lime', # Colors can be set by name. background_color = '#0022FF', # Can also be set by color code. border_color = 'cyan', ) display(button) # + from IPython.display import display float_range = widgets.FloatSlider() string = widgets.Text(value = 'hi') container = widgets.Box(children = [float_range, string]) container.border_color = 'red' container.border_style = 'dotted' container.border_width = 3 display(container) #displays the container and all of its children. 
# + #Accordion name1 = widgets.Text(description = 'Location:') zip1 = widgets.BoundedIntText(description = 'Zip:', min = 0, max = 99999) page1 = widgets.Box(children = [name1, zip1]) name2 = widgets.Text(description = 'Location:') zip2 = widgets.BoundedIntText(description = 'Zip:', min = 0, max = 99999) page2 = widgets.Box(children = [name2, zip2]) accord = widgets.Accordion(children = [page1, page2], width = 400) display(accord) accord.set_title(0, 'From') accord.set_title(1, 'To') # + #TabWidget name = widgets.Text(description = 'Name:', padding = 4) color = widgets.Dropdown(description = 'Color', padding = 4, options = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet']) page1 = widgets.Box(children = [name, color], padding = 4) age = widgets.IntSlider(description = 'Age:', padding = 4, min = 0, max = 120, value = 50) gender = widgets.RadioButtons(description = 'Gender:', padding = 4, options = ['Male', 'Female']) page2 = widgets.Box(children = [age, gender], padding = 4) tabs = widgets.Tab(children = [page1, page2]) display(tabs) tabs.set_title(0, 'Name') tabs.set_title(1, 'Details') # - #Alignment display(widgets.Text(description="a:")) display(widgets.Text(description="aa:")) display(widgets.Text(description="aaa:")) display(widgets.Text(description="aaaaaaaaaaaaaaaaaa:")) display(widgets.Text()) #Flex Boxes buttons = [widgets.Button(description=str(i)) for i in range(3)] display(*buttons) container = widgets.HBox(children=buttons) display(container) #Visibility w1 = widgets.Label(value="First line") w2 = widgets.Label(value="Second line") w3 = widgets.Label(value="Third line") display(w1, w2, w3) w2.visible=None w2.visible=False w2.visible=True # + form = widgets.VBox() first = widgets.Text(description="First:") last = widgets.Text(description="Last:") student = widgets.Checkbox(description="Student:", value=False) school_info = widgets.VBox(visible=False, children=[ widgets.Text(description="School:"), widgets.IntText(description="Grade:", 
min=0, max=12) ]) pet = widgets.Text(description="Pet:") form.children = [first, last, student, school_info, pet] display(form) def on_student_toggle(name, value): if value: school_info.visible = True else: school_info.visible = False student.on_trait_change(on_student_toggle, 'value') # -
Introduction to Python/GUI - Widget Styling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Correlação # Correlação é qualquer associação estatística entre um par de variáveis. Quanto mais correlacionadas estão duas variáveis, mais "alinhamento" há entre elas. Isto é, uma análise de correlação fornece um número que resume o grau de relacionamento linear entre duas variáveis. Introduziremos este assunto com alguns conceitos fundamentais. # # ## Associação entre variáveis e causalidade # # A associação entre duas variáveis pode ocorrer de duas formas: # # - _correlacional_: neste caso, não há interferência alguma sobre as variáveis observadas. As variáveis são _aleatórias_ e seus comportamentos ocorrem conforme a "natureza" determina. Por exemplo, o exercício físico e a queima calórica possuem correlação positiva, pois quanto mais intensamente nos exercitamos, mais queimamos caloria. # # - _experimental_: neste caso, uma das variáveis é controlada e esta interfere sobre a outra. Isto é, uma variável A é a _causa_ (variável independente) e a outra, B, o _efeito_ (variável independente). Uma sentença lógica do tipo "se A, então B" estabelece a idéia de _causalidade_. Por exemplo: quando cientistas estudam a administração de fármacos em um organismo, eles analisam os efeitos dessa droga naquele organismo. Logo, a droga é a causa; a resposta orgânica, o efeito. # # A causalidade é um assunto muito mais delicado e possui fortes implicações filosóficas, pois, em geral, para se concluir que B é consequência de A, é necessário ter uma _prova_ disso. Porém, nem sempre é possível provar muitas relações de causa/efeito que presenciamos. Para uma discussão no âmbito filosófico, consulte este [post](https://philosophyterms.com/causality/). 
# # ### Correlação e dependência linear # # Podemos interpretar a _correlação_ também pelo ponto de vista de "dependência linear". Duas variáveis perfeitamente correlacionadas são similares a dois vetores paralelos, ou seja, linearmente dependentes. Por outro lado, duas variáveis totalmente não correlacionadas são similares a dois vetores perpendiculares, ou seja, linearmente independentes. # ## Escore padronizado # # Medir correlações envolve desafios. Um deles surge quando as variáveis que queremos comparar não estão expressas na mesma unidade. Por exemplo, você pode desejar correlacionar alturas medidas em centímetros e pesos medidos em quilogramas. Por outro lado, ainda que as unidades sejam as mesmas, o segundo desafio aparece quando as variáveis provêm de distribuições diferentes. # # As duas soluções comuns para lidar com esses problemas são: # # 1. Transformar todos os valores para um _escore padronizado_. # 2. Transformar todos os valores para ranques baseados em percentis. # # Como veremos adiante, a primeira solução leva-nos ao _coeficiente de correlação de Pearson_; a segunda, ao _coeficiente de Spearman_. # # ```{note} # Outros nomes para o escore padronizado são _escore Z_, _teste Z_ ou _Z-score_. # ``` # # Para converter uma série $X$ de valores $x_i$ em uma escala padronizada de escores, subtraímos a média dos dados e dividimos esta diferença pelo desvio padrão. Isto é: # # $$z_i = \dfrac{x_i - \mu}{\sigma},$$ # # onde $\mu$ é a média e $\sigma$ o desvio padrão. # # Ao dividir os desvios (numerador) pelo desvio padrão, na verdade, estamos _normalizando_ o desvio, de modo que os valores $z_i$ da nova série $Z$ são adimensionais (não possuem unidades), possuam média 0 e variância 1. A série $Z$ herda a "forma" de $X$. # Primeiro, importemos os módulos que usaremos. 
import pandas as pd import numpy as np import matplotlib.pyplot as plt import scipy.stats as sts # Vejamos exemplos: # dataframe dfp = pd.DataFrame({'Idade': np.array([20,19,21,22,20]), 'Peso': np.array([55,80,62,67,73]), 'Altura': np.array([162,178,162,165,171]), 'IMC':np.array([20.96, 25.25, 23.62, 24.61, 24.96])}, index=['Ana','João','Maria','Pedro','Túlio']) dfp # Vamos calcular o _z-score_ para todas as _Series_ do _DataFrame_. def zScore(df,colname): s = df[colname] return (s - s.mean())/s.std(ddof=0) # ddof = 0 para dividir por N # + # cria novo dataframe de z-scores Z = {} for c in dfp.columns: Z[c + ':Z-score'] = zScore(dfp,c) dfpz = pd.DataFrame(Z) dfpz # - # Comentários: # # - Os _z-score_ ajudam a entender se uma observação específica é comum ou excepcional: # - _z-score_ < 0 representam valores abaixo da média; # - _z-score_ > 0 representam valores acima da média; # - _z-score_ da média é 0, uma vez que ela é ponto médio; # - A soma dos _z-score_ = 0; # - _z-scores_ com valores positivos extremamente altos indicam uma distribuição com _assimetria à direita_ (mais sobre isso adiante); # - _z-scores_ com valores negativos extremamente altos indicam uma distribuição com _assimetria à esquerda_ (mais sobre isso adiante); # - se |_z-score_| > 2, a distribuição é incomum ou excepcional. # Z-Score das séries têm soma nula dfpz.sum(axis=0) # Calculando o z-score por função predefinida. # + Z2 = {} for c in dfp.columns: Z2[c + ':Z-score'] = sts.zscore(dfp[c]) dfpz2 = pd.DataFrame(Z2,index=dfp.index) dfpz2 # - # ambos os métodos dão resultados idênticos (dfpz == dfpz2).all() # ### Plot de _z-scores_ # # A plotagem dos _z-scores_ pode ser feita diretamente com `plot` a partir da _Series_ $Z$ de interesse. dfpz['Peso:Z-score'].plot(marker='o',ls=''); dfpz['IMC:Z-score'].plot(marker='o',ls='',color='g'); # Comentários: # # - A partir desses plots, vemos claramente qual discente está "acima da", "abaixo da" ou "na" média perante a variável escolhida. 
# ## Covariância # # Já vimos que a variância de um conjunto de dados mede quanto uma variável desvia-se do valor médio, seja em nível de amostra ou de população. Quando queremos compreender como duas variáveis variam juntas, aplicamos o conceito de _covariância_. Se $X$ e $Y$ são duas _Series_, a covariância entre ambas é dada por # # $$\textrm{cov}(X,Y) = \frac{1}{n}\sum_{i=1}^n(x_i - \mu_X)(y_i - \mu_Y),$$ # # onde $n$ é o número de elementos na série (igual em ambas) e $\mu_X$ ($\mu_Y$) é a média de $X$($Y$). # # Notemos que a covariância é uma "média" do produto dos desvios. # **Exemplo:** vamos criar uma função para o cálculo da variância. # covariância def cov(df,colname1,colname2): s1,s2 = df[colname1],df[colname2] return np.dot( s1 - s1.mean(), s2 - s2.mean() )/(len(s1)-1) # Testemos a covariância entre as variáveis de nosso _DataFrame_ de estudo. cov(dfp,'Altura','Peso'), cov(dfp,'Idade','Peso'), cov(dfp,'Idade','Altura') # Comentários: # - Esses cálculos mostram que variações de _altura_ e _peso_ interferem consideravelmente uma na outra "na mesma direção". # - Porém, não notamos o mesmo comportamento para _idade_ e _peso_ ou para _idade_ e _altura_. # Podemos checar o cálculo de nossa função com a função `var` do _pandas_, sabendo que $\text{cov}(X,X) = \text{var}(X) = S^2(X), \, \forall X$. cov(dfp,'Altura','Altura'), dfp['Altura'].var() # Outra forma de calcular a covariância é usar o método `cov` de uma `pandas.Series`. dfp['Altura'].cov(dfp['Peso']) # ### Matriz de covariâncias # Podemos usar a função `numpy.cov()` para computar a covariância entre duas _Series_ $X$ e $Y$. Para tanto, devemos passar a matriz $[X \ \ Y]$ como parâmetro para a função. 
# # A resposta é uma _matriz de covariâncias_ 2x2 cujas entradas são: # # $$\begin{bmatrix} # \text{cov}(X,X) & \text{cov}(X,Y) \\ # \text{cov}(Y,X) & \text{cov}(Y,Y) # \end{bmatrix}$$ X, Y = dfp['Altura'], dfp['Peso'] np.cov(np.array([X,Y])) # ## Correlação # # Uma das dificuldades conhecidas da covariância é a sua interpretação. Uma vez que ela é dada pelo produto das unidades de suas entradas, muito frequentemente será inviável atribuir significado ao número. Por exemplo, se a unidade de $X$ for quilogramas e $Y$ for anos, a unidade da covariância seria quilogramas vezes anos. Um meio de solucionar este problema é dividir o produto da covariância pelo desvio padrão de cada série de dados, assim formando o conceito de _correlação_, dado por: # # $$\rho(X,Y) = \frac{1}{n}\sum_{i=1}^n\frac{(x_i - \mu_X)}{\sigma_X}\frac{(y_i - \mu_Y)}{\sigma_Y}.$$ # # Em outras palavras, a correlação é a soma do produto de escores padronizados. # # ### Coeficiente de correlação de Pearson # # Se os desvios forem retirados da somatória, a expressão torna-se: # # $$\rho(X,Y) = \frac{1}{\sigma_X \sigma_Y}\frac{1}{n}\sum_{i=1}^n(x_i - \mu_X)(y_i - \mu_Y) = \frac{\textrm{cov}(X,Y)}{{\sigma_X \sigma_Y}}.$$ # # O número $\rho$ é chamado de _coeficiente de correlação de Pearson_, ou simplesmente _correlação de Pearson_, e vale que $-1 \leq \rho \leq 1$. A magnitude de $\rho$ determina a _força de correlação_ entre as variáveis. Em geral, a seguinte interpretação é utilizada: # # - $\rho = 1$: as variáveis são perfeitamente correlacionadas. # - $\rho = 0$: as variáveis são correlacionadas de alguma forma, mas **não** linearmente. Neste sentido, $\rho$ subestimará a força da dependência linear. # - $\rho = -1$: idem, porém negativamente. # # Na finalidade de predizer algo nos dados, correlação máxima negativa, assim como positva, podem ser igualmente boas. No mundo real, correlações perfeitas são raridade, mas estimativas "quase" perfeitas podem ser feitas. 
# # ```{info} # O coeficiente de correlação de Pearson é devido ao estatístico Karl Pearson. # ``` # # A {numref}`correlation` mostra _datasets_ que possuem correlação linear e não-linear, acompanhados dos valores de $\rho$. Como se observa na linha superior, $\rho = \pm 1$ mostram plotagens de retas com inclinação positiva ou negativa, ao passo que valores intermediários mostram "manchas" que variam de formas elípticas inclinadas à esquerda ou à direita e circular. Na linha inferior, vemos diversos casos em que $\rho=0$, mas "manchas" com estruturas "não lineares" bem definidas são distinguíveis. # # ```{figure} ../figs/12/correlation.png # --- # width: 600px # name: correlation # --- # Datasets com diferentes correlações de Pearson. Fonte: Wikipedia. # ``` # **Exemplo:** A tabela a seguir contém dados coletados na administração de um zoológico para alguns dias do mês de abril de 2021. Nesta ordem, a tabela mostra o número de visitantes no zoológico, o número de tickets de estacionamento adquiridos e a temperatura média contabilizados por dia. # # | Visitantes | Tickets | Temperatura | # |-------------:|------------------:|------------------:| # | 1580 | 8 | 35 | # | 1230 | 6 | 38 | # | 1950 | 9 | 32 | # | 890 | 4 | 26 | # | 1140 | 6 | 31 | # | 1760 | 9 | 36 | # | 1650 | 10 | 38 | # | 1470 | 3 | 30 | # | 390 | 1 | 21 | # | 1460 | 9 | 34 | # | 1000 | 7 | 36 | # | 1030 | 6 | 32 | # | 740 | 2 | 25 | # | 1340 | 6 | 37 | # | 1150 | 7 | 34 | # # O arquivo com os dados está disponível [aqui]('../database/visitantes-zoo.csv'). Vamos buscar correlações nos dados. # Carregando o arquivo: zoo = pd.read_csv('../database/visitantes-zoo.csv'); zoo # Para calcular a correlação de Pearson entre duas séries, podemos usar a função `pearsonr()` do módulo `scipy.stats`. # # ```{note} # O segundo argumento de `pearsonr()` é o [_valor p_](https://pt.wikipedia.org/wiki/Valor-p). 
# + corr1,_ = sts.pearsonr(zoo['Visitantes'],zoo['Tickets:Parking']); corr2,_ = sts.pearsonr(zoo['Visitantes'],zoo['Temperatura (C)']); corr1,corr2 # - # Comentários: # # - O coeficiente de Pearson mostra que há uma "força" de correlação não desprezível entre o número de visitantes e tickets vendidos. # - Esta correlação é menor para a faixa de temperaturas médias. # A correlação pode também ser calculada através do método `corr` de uma _Series_ do pandas. zoo['Visitantes'].corr(zoo['Tickets:Parking']) zoo['Visitantes'].corr(zoo['Temperatura (C)']) # #### Correlações pareadas # # Usando o método `pandas.DataFrame.corrwith()` é possível calcular correlações pareadas entre colunas de um _DataFrame_ ou linhas de outra _Series_ ou _DataFrame_. # # No exemplo abaixo, passamos uma _Series_ como argumento. A resposta são os mesmos valores obtidos anteriormente, porém na forma de uma _Series_. O valor unitário é devido à correlação da variável com ela própria. zoo.corrwith(zoo['Visitantes']) # ### Gráfico de dispersão # # Antes de calcular cegamente o valor de $\rho$ para séries de dados, é interessante fazer um gráfico de _dispersão_ (_scatter plot_) entre as variáveis. Podemos fazer isto com o `matplotlib.pyplot.plot()` e tipo de marcador `o` ou com `matplotlib.pyplot.scatter()`. fig,ax = plt.subplots(1,2,figsize=(10,3)) # plot 1 ax[0].plot(zoo['Visitantes'],zoo['Tickets:Parking'],'o',label=f'corr={round(corr1,2)}') ax[0].set_xlabel('No. visitantes'); ax[0].set_ylabel('Tickets de estacionamento'); ax[0].legend() # plot 2 ax[1].plot(zoo['Visitantes'],zoo['Temperatura (C)'],'or',label=f'corr={round(corr2,2)}') ax[1].set_xlabel('No. 
visitantes'); ax[1].set_ylabel('Temperatura (C)'); ax[1].legend(); # Reproduzindo com `plt.scatter`: plt.scatter(zoo['Visitantes'],zoo['Tickets:Parking']); # ## Leituras recomendadas # # - [Paradoxo de Simpson](https://pt.wikipedia.org/wiki/Paradoxo_de_Simpson) # - [statsmodels](https://www.statsmodels.org/stable/index.html) # - [Outliers, o que são e como tratá-los em uma análise de dados?](https://www.aquare.la/o-que-sao-outliers-e-como-trata-los-em-uma-analise-de-dados/)
_build/html/_sources/ipynb/12-correlacao.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import geopandas as gpd import geoviews as gv import cartopy.crs as ccrs gv.extension('matplotlib') # %output fig='svg' dpi=120 # - # ## Define data # + tiles = gv.tile_sources.Wikipedia # Project data to Web Mercator nybb = gpd.read_file(gpd.datasets.get_path('nybb')) poly_data = nybb.to_crs(ccrs.GOOGLE_MERCATOR.proj4_init) polys = gv.Polygons(poly_data, vdims='BoroName', crs=ccrs.GOOGLE_MERCATOR) # - # ## Plot # %%opts Polygons [fig_size=200 color_index='BoroName'] (cmap='tab20') WMTS [zoom=10] tiles * polys
examples/gallery/matplotlib/new_york_boroughs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np


def step_gradient1(points, learning_rate, m, c):
    """Vectorized single step of gradient descent for y = m*x + c.

    points: (N, 2) array whose first column is x and second column is y.
    Returns the updated (m, c) after one step of size learning_rate.
    """
    x = points[:, 0]
    y = points[:, 1]
    # dJ/dm = -2/N * sum((y - m*x - c) * x)
    m_slope = (((y - m * x - c) * x).sum()) * (-2 / len(points))
    new_m = m - learning_rate * m_slope
    # dJ/dc = -2/N * sum(y - m*x - c)
    c_slope = ((y - m * x - c).sum()) * (-2 / len(points))
    new_c = c - learning_rate * c_slope
    return new_m, new_c


def step_gradient(points, learning_rate, m, c):
    """Loop-based single step of gradient descent (same math as
    step_gradient1, kept for pedagogical comparison)."""
    m_slope = 0
    c_slope = 0
    M = len(points)
    for i in range(M):
        x = points[i, 0]
        y = points[i, 1]
        m_slope += (-2 / M) * (y - m * x - c) * x
        c_slope += (-2 / M) * (y - m * x - c)
    new_m = m - learning_rate * m_slope
    new_c = c - learning_rate * c_slope
    return new_m, new_c


def gd(points, learning_rate, num_iterations):
    """Run num_iterations steps of gradient descent from (m, c) = (0, 0),
    printing the cost after each step; returns the final (m, c)."""
    m = 0
    c = 0
    for i in range(num_iterations):
        m, c = step_gradient(points, learning_rate, m, c)
        print(i, "cost: ", cost(points, m, c))
    return m, c


def cost(points, m, c):
    """Mean squared error of the line y = m*x + c over all points.

    BUG FIXES vs. the original:
    - y was read from column 0 (the x column); it must be column 1.
    - total_cost was assigned (=) instead of accumulated (+=), so only
      the last point contributed to the returned cost.
    """
    total_cost = 0
    M = len(points)
    for i in range(M):
        x = points[i, 0]
        y = points[i, 1]
        total_cost += (1 / M) * ((y - m * x - c) ** 2)
    return total_cost


def run():
    """Load the dataset and fit a line with gradient descent."""
    # Loading moved out of module scope so importing this file has no
    # side effects and does not require data.csv to exist.
    data = np.loadtxt("data.csv", delimiter=",")
    learning_rate = 0.0001
    num_iteration = 100
    m, c = gd(data, learning_rate, num_iteration)
    print(m, c)


if __name__ == "__main__":
    run()
GradientDescent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Practical work 8: Split and Merge # # In this practical work, we implement and test the split and merge algorithm. # + ### Usefull libraries from PIL import Image import numpy as np import matplotlib.pyplot as plt ### Data img_test = np.full((64,64),150,dtype=np.uint8) img_test[32:48,16:16+32] = 100 img_test[8:24,40:56] = 50 angio = np.array(Image.open('img/angiogra.png')) cam = np.array(Image.open('img/cameraman.png')) muscle = np.array(Image.open('img/muscle.png')) prisme = np.array(Image.open('img/prisme.png')) seiche = np.array(Image.open('img/seiche.png')) ### Usefull functions def neighbors(b,K): """ blockStat*list[blockStat]->list[blockStat] returns the list of neighbors of b and elements of K """ def belongsTo(x,y,a): """ int*int*BlockStat -> bool Test if pixel (x,y) belongs to block a """ return x>=a[0] and y>=a[1] and x<a[0]+a[2] and y<a[1]+a[3] def areNeighbors(a,b): """ BlockStat**2 -> bool Test if a and b are neighbors """ if a[2]>b[2] and a[3]>b[3]: a,b=b,a x,y = a[0]+a[2]//2,a[1]+a[3]//2 return belongsTo(x+a[2],y,b) or belongsTo(x-a[2],y,b) or belongsTo(x,y+a[3],b) or belongsTo(x,y-a[3],b) N = [] for n in K: if areNeighbors(b,n): N.append(n) return N def imshow(I, title=None, size=500, axis=False): """ display an image, with title, size, and axis """ plt.figure(figsize=(size//80, size//80)) # plt.gray() plt.imshow(I) if not axis: plt.axis('off') if title: plt.title(title) plt.show() # - # ## Exercise 1 # # ### Question 1 # Write the recursive function ``split()`` discussed in tutorial work. # It takes as input the image, a region, a predicate, and a variable number of arguments. 
# The region is a Python formal type ``Block`` defined by: # ```python # type Block = tuple[int**4] # ``` # The function ``split()`` returns a quadtree, a Python formal type, recursivelly defined by: # ```python # type QuadTree = list[(QuadTree**4|Block)] # ``` # # The predicate is a Python function with the following signature: # ```python # Array*Block*...->bool # ``` # It can take a variable number of parameters which correspond to the parameters required by the predicate. # + # type Block = tuple[int**4] # type QuadTree = list[(QuadTree**4|Block)] def split(I,R,*args): """ Array*Block*(Array*Block*...->bool)*... -> 4-aire Performs a quadtree splitting of image I drived by a predicate """ x,y,n,m = R ndiv = n//2 mdiv = m//2 if predsplit(I,R,*args): no = split(I, (x, y, ndiv,mdiv), *args) ne = split(I, (x, y+mdiv,ndiv,mdiv), *args) so = split(I, (x+ndiv,y, ndiv,mdiv), *args) se = split(I, (x+ndiv,y+mdiv,ndiv,mdiv), *args) return [no,ne,so,se] return [R] # - # ### Question 2 # Write the function ```predsplit(I,B,*args)``` with signature: # ```python # Array*Block*... -> bool # ``` # that returns True if the standard deviation of image ``I`` computed in region ``B`` is greater than the first value of argument ``*args`` (it can be accessed simply by ``*args[0]``). def predsplit(I,R,*args): """ Array*Block*... -> bool """ x,y,n,m = R std = I[x:x+n,y:y+m].std() return std > args[0] and n > args[1] and m > args[2] # Write the function ``listRegions()`` which applies a depth-first search on the quadtree given as parameter, and returns the list of the leaves of the quadtree. # # Some recalls about lists in Python; # - Initialization: `L = []` (empty list) # - Add a element `a` into a list `L`: `L.append(a)` def listRegions(L): """ QuadTree -> list[Block] """ res = [] if len(L) == 1: res.append(L[0]) else: for l in L: res += listRegions(l) return res # ### Question 4 # Test your codes on the synthetic image ``img_test`` seen in tutorial work. 
# Print the value returned by ``split()`` as well as the one returned by ``listRegions()``.

# +
seuil_predsplit = 0
seuil_n = 2
seuil_m = 2
R_init = [0, 0, img_test.shape[0], img_test.shape[1]]
tree = split(img_test, R_init, seuil_predsplit, seuil_n, seuil_m)
listreg = listRegions(tree)
print(tree)
print("=================")
print(listreg)
# -

# ### Question 5
# Write the function ```drawRegions(L,I)``` which takes as arguments a list of regions, an image, and returns an image where the boundaries of each region have been traced with red color. Indication: the returned image is a hypermatrix of dimension 3, the third dimension is of size 3 and encodes the red, green and blue components of a RGB colorspace. Test the function on the previous example.

def drawRegions(LL,I):
    """
    list[Block]*Array -> Array
    Return an RGB (float) copy of grayscale image I in which the boundary
    of every block of LL is traced in red.

    FIXES vs. the original:
    - the image size was hard-coded to 64x64 (`if x+n >= 64`); bounds are
      now clamped to the actual shape of I, so any image size works;
    - the gray image was copied channel by channel with a triple Python
      loop; np.stack does the same in one vectorized call;
    - only channel 0 was zeroed, which paints borders cyan; the spec asks
      for red, i.e. (255, 0, 0).
    """
    u, v = I.shape
    new_I = np.stack([I, I, I], axis=2).astype(float)
    for x, y, n, m in LL:
        # Clamp the far edges so blocks touching the border stay in range.
        xe = min(x + n, u - 1)
        ye = min(y + m, v - 1)
        for can, val in ((0, 255.0), (1, 0.0), (2, 0.0)):
            new_I[x:xe, y, can] = val   # left column
            new_I[xe, y:ye, can] = val  # bottom row
            new_I[x, y:ye, can] = val   # top row
            new_I[x:xe, ye, can] = val  # right column
    return new_I

# +
borderimg = drawRegions(listreg, img_test)
print(borderimg.shape)
plt.imshow(img_test)
plt.imshow(borderimg, alpha=0.5)
plt.show()
# -

# ### Question 6
# Add a Gaussian noise with standard deviation 5 to the image ``img_test``.
# Apply the quadtree splitting on the noisy image by adjusting the threshold to obtain the same result as in the previous question.
# Which threshold value should be chosen? Does this make sense to you?
#
# Hint: use the Numpy function ``random.randn()`` which generates random values according to a normal distribution (Gaussian distribution of null mean and variance 1). To obtain realizations of a Gaussian distribution of standard deviation $\sigma$, it is sufficient to multiply by $\sigma$ the realizations of a normal distribution.
# +
from numpy import random

sigma = 5

def apply_gauss(I, sigma):
    """
    Array*float -> Array
    Return a float copy of I corrupted by additive zero-mean Gaussian noise
    of standard deviation sigma.

    BUG FIX: the original used random.rand() (uniform on [0, 1)), which adds
    a positive bias instead of Gaussian noise; the exercise explicitly asks
    for randn() (standard normal) scaled by sigma. The per-pixel double loop
    is also replaced by a single vectorized draw.
    """
    n, m = I.shape
    return I + sigma * random.randn(n, m)

img_test_gauss = apply_gauss(img_test, sigma)
imshow(img_test_gauss)

# +
seuil_predsplit = sigma  # sigma pour retrouver la meme chose
seuil_n = 2
seuil_m = 2
R_init = [0, 0, img_test_gauss.shape[0], img_test_gauss.shape[1]]
tree = split(img_test_gauss, R_init, seuil_predsplit, seuil_n, seuil_m)
listreg = listRegions(tree)
borderimg = drawRegions(listreg, img_test)
plt.imshow(img_test_gauss)
plt.imshow(borderimg, alpha=0.5)
plt.show()
# -

# ## Exercise 2
#
# Experiment the split algorithm on the 4 natural images provided. For each image try to find the threshold that seems to you visually the best. Display the number of regions obtained after splitting.

# ## Exercise 3
# ### Question 1
# Modify the function ``listRegions(L)`` to make it a function ``listRegionsStat(L,I)`` which computes the list of leaves of the quadtree ``L``. Each element of this list will be enriched with three scalar values: the first being the size, the second the mean and the third the variance of pixel values of the block in the image ``I``. This function then returns a list whose elements have the following formal type:
# ```python
# type BlockStat = tuple[int**4,int,float**2]
# ```
#
# The first four values are those of the ``Block`` type, the fifth is the size of the block (in number of pixels) and the last two values are the mean and variance calculated over the region.
# +
# type BlockStat = tuple[int**4,int,float**2]

def listRegionsStat(L,I):
    """
    QuadTree*Array -> list[BlockStat]
    Depth-first list of the leaves of quadtree L, each enriched with
    (size, mean, variance) computed on image I.

    BUG FIXES vs. the original:
    - the leaf block was unpacked from L instead of L[0];
    - res.append was called with three arguments (a TypeError) and would
      have produced two separate list elements instead of one BlockStat;
    - the recursion called listRegions, which drops the statistics.
    """
    res = []
    if len(L) == 1:
        # Leaf: a singleton list containing one Block.
        x, y, n, m = L[0]
        zone = I[x:x+n, y:y+m]
        res.append((x, y, n, m, n*m, float(zone.mean()), float(zone.var())))
    else:
        for child in L:
            res += listRegionsStat(child, I)
    return res
# -

# ### Question 2
# In the remainder, the formal type is considered:
# ```python
# type Region = list[BlocStats]
# ```
# A region, as seen during the tutorial work, is therefore a list of blocks. Write the predicate ``predmerge(b,R,*args)`` as seen in tutorial work. This function returns ``True`` if the ``b`` block should merge into the ``R`` region. If a merge happens, then the first item of ``R`` will have its statistics updated to describe the statistics of the region ``R`` merged with `b`.

def predmerge(b,R,*args):
    """
    BlocsStat*Region*... -> bool
    Decide whether block b should be merged into region R; if so, R[0] is
    replaced with the statistics of the merged region. args[0] is the
    homogeneity threshold on the variance change.

    BUG FIXES vs. the original:
    - R (a list of BlockStat) was unpacked as if it were a single Block;
    - the statistics were recomputed from undefined globals I and std
      instead of being read from the BlockStat tuples;
    - the comparison was inverted (it returned True when the variance
      JUMPED by more than the threshold) and R[0] was never updated.
    """
    x1, y1, h1, w1, n1, mean1, var1 = R[0]
    x2, y2, h2, w2, n2, mean2, var2 = b
    new_n = n1 + n2
    # Exact pooled mean/variance of the union of the two pixel sets.
    new_mean = (n1*mean1 + n2*mean2) / new_n
    new_var = (n1*(var1 + mean1**2) + n2*(var2 + mean2**2)) / new_n - new_mean**2
    # Merge only when the region stays homogeneous.
    if abs(new_var - var1) <= args[0]:
        R[0] = (x1, y1, h1, w1, new_n, new_mean, new_var)
        return True
    return False

# ### Question 3
# Using `predmerge()` and `neighbors()` functions, given at the beginning of the notebook, write the function ``merge()`` discussed in tutorial work (exercise 7.6).
#
# Recalls on Python lists:
# - Remove an element `a` from a list `L`: `L.remove(a)`
# - Test if `a` belongs to a list `L`: `a in L`
# - Iterate the elements of a list `L`: `for a in L:`
# - Access to an element of a list: as with numpy arrays

def merge(S,I,*args):
    """
    QuadTree*Array*(BlockStat*Region*...->bool) -> list[Region]
    Merge the leaves of quadtree S (computed on image I) into a list of
    homogeneous regions, region-growing through neighboring blocks.

    BUG FIXES vs. the original:
    - `if len(K) > 0` is now a `while`: the original built only ONE region
      and returned, leaving every other leaf unprocessed;
    - the candidate `bn` is tested by predmerge (the original re-tested the
      seed block `b` on every iteration);
    - newly discovered neighbors are added to the candidate worklist N;
      the original did `K.append(neighbors(...))`, pushing a whole LIST
      into K and corrupting it.
    """
    K = listRegionsStat(S, I)
    L = []
    while len(K) > 0:
        # Seed a new region with the first unassigned leaf.
        b = K[0]
        R = [b]
        K.remove(b)
        # Worklist of candidate neighbors of the growing region.
        N = neighbors(b, K)
        while N:
            bn = N.pop(0)
            if bn not in K:
                continue  # already merged through another path
            if predmerge(bn, R, *args):
                R.append(bn)
                K.remove(bn)
                N.extend(neighbors(bn, K))
        L.append(R)
    return L

# ### Question 4
# Test the previous functions using the synthetic image `img_test`. In particular, check that merge() returns a list of 3 elements (i.e. 3 regions).

QT = split(img_test, R_init, seuil_predsplit, seuil_n, seuil_m)
# FIX: merge takes (quadtree, image, threshold); the original passed the
# predmerge function where the image was expected.
M = merge(QT, img_test, seuil_predsplit)
print(M)
# assert(len(M) == 3)

# ### Question 5
# Write a function ``regions(LR,shape)`` that takes as arguments a list of regions (such as returned by the function ``merge()``) and an image size, and returns an image of the regions. Each region will be colored with the gray level corresponding to the average of the region. The ``shape`` parameter gives the size of the image to be produced.
#
# Test the function on the previous example.

def regions(LR,shape):
    """
    list[Region]*tuple[int,int] -> Array
    TODO(exercise): left as a stub for the student to complete.
    """

# ## Exercise 4: experiments
# ### Question 1
# Test the function ``merge()`` on the images ``angio``, ``cam``, ``muscle``, ``prisme`` and ``seiche``. Try to produce the best segmentations.

# ### Question 2
# The result of the merge algorithm highly depends on how you visit the regions. One can then sort the leaves of the quadtree, for example, from the smallest to the largest blocks, or the opposite (use the Python function ``sorted()``).
# The same question arises when calculating the set of neighbors of the merged region. Should they be sorted? If yes, according to which criteria? their size? their proximity?
# Obviously there is no universal answer but it should be adapted to each type of problem. # Do some tests to see the influence of these sortings on the result of the merger. # ### Question 3 (bonus) # Imagine and experiment alternative predicates for both the split and the merge steps. It is possible to use edges-based predicates, and also to combine with variance-based predicates. #
S1/BIMA/TME/TME8/TME8/.ipynb_checkpoints/TME8-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: python38 # language: python # name: python38 # --- # + # default_exp shopping # - import sys sys.path # # Shopping # simulates shopping with python code. # this is designed to test the backend and allow conversion to frontend #export from villaProductSdk.products import ProductSdk from villaBackendSdk.basket import BasketSdk from dataclasses_json import dataclass_json from dataclasses import dataclass, field from awsSchema.apigateway import Response, Event from nicHelper.wrappers import add_class_method, add_method, add_static_method from typing import List, Optional from s3bz.s3bz import S3 import pandas as pd import json, zlib from requests import post, get import pandas as pd import requests from requests import post from PIL import Image from io import BytesIO # ## browsing Items # + #export class Basket: def __init__(self, basketId = ''): self.basketId = basketId def setBasketId(self, basketId): self.basketId = basketId class Order: def __init__(self, orderId = ''): self.orderId = orderId def setOrderId (self, orderId): self.orderId = orderId class Browser: def __init__( self, searchEndpoint = 'https://8av9li9v82.execute-api.ap-southeast-1.amazonaws.com/Prod/orismaSearch/', branch = 'dev-manual', basketId = '', orderId = '' ): self.searchEndpoint = searchEndpoint self.productSdk = ProductSdk(branch = branch) self.Basket = Basket self.basket = Basket() self.Order = Order self.order = Order(orderId) # - ## crate a test object browser = Browser() # ### Getting all database items # + #export @add_method(Browser) def getAllItems(self, url = 'https://fgeag95vy6.execute-api.ap-southeast-1.amazonaws.com/Prod/all/'): r = get(url) if r.status_code != 200: raise Exception(f'request for signedUrl failed with error {r.json()}') presignedUrl = r.json()['url'] r = get(presignedUrl) if r.status_code != 200: 
raise Exception (f'signedUrl download failed with error {r.json()}') jsonItems = zlib.decompress(r.content).decode() return pd.read_json(jsonItems, orient='split') # - # %%time browser.getAllItems().head() # ### Search for items @add_method(Browser) def search(self, searchQuery:str)-> List[str]: endpoint = self.searchEndpoint queries = { 'data_t3':searchQuery } response = get(endpoint, params=queries) result = response.json() try: resultList = pd.DataFrame(result)['pr_code'].to_list() except: print(result) return resultList # %%time resultList = browser.search('rose') resultList[:10] # ### get info for searched item @add_method(Browser) def getProductInfo(self,products:list)->(List[dict],List[dict]): # products = [] # errors = [] items=self.productSdk.queryList(products) # for result in resultList: # if (item := self.productSdk.(result)).get('error'): # products.append(item) # else: # errors.append(result) print(f'found {len(items)} products out of {len(products)} products') return items # %time products = browser.getProductInfo(resultList) # %time productsDf = pd.DataFrame(products) print(productsDf.shape) productsDf.head() # ## Basket #export ## general functions basketRootUrl = 'https://ad3pw0tze2.execute-api.ap-southeast-1.amazonaws.com/Prod' basketEndpoints = lambda path: f'{basketRootUrl}/{path}' @add_class_method(Basket) def generalInvoke(cls, path:str, payload:dict, successHandler = lambda x: f'function successful {x}', failureHandler = lambda x: f'function failed due to {x}'): r:requests.Response = post(url = basketEndpoints(path), json = payload) responsePayload = r.json() if r.status_code == 200: print(successHandler(responsePayload)) else: print(failureHandler(responsePayload)) return responsePayload # ### Create a new basket #export @dataclass_json @dataclass class Create: basketId:str basketName:str ownerId:str branchId:str @add_class_method(Basket) def createBasket(self, basketId, basketName, ownerId, branchId): payload:dict = 
Create(basketId=basketId, basketName = basketName, ownerId=ownerId, branchId=branchId).to_dict() return self.generalInvoke(path='create', payload=payload) data = { 'basketId': '0987654321', 'basketName': 'hello', 'ownerId': '1234', 'branchId': '123' } browser.basket.createBasket(**data) # ### add/remove products to basket # to remove just put a negative addition # + #export @dataclass_json @dataclass class Item: sku: str quantity: int @dataclass_json @dataclass class Add: basketId:str items:List[Item] @add_class_method(Basket) def addToBasket(self, basketId:str, items:List[Item]): payload:dict = Add(basketId=basketId, items=items).to_dict() return self.generalInvoke(path='add', payload=payload) # + items = [ {'sku': '12345r', 'quantity': 12}, {'sku': 'asf', 'quantity': 3} ] browser.basket.addToBasket(basketId='0987654321', items = items) # - # ### update quantity in the basket # ignores the values previously in the basket and set the new values #export @dataclass_json @dataclass class Update: basketId:str ownerId:Optional[str] = field(default_factory=str) items:List[Item] = field(default_factory=list) @add_class_method(Basket) def update(self, basketId:str, ownerId:str, items:List[Item]): payload = Update(basketId=basketId, ownerId=ownerId, items=items).to_dict() return self.generalInvoke(path='update', payload=payload) items = [ {'sku': '12345r', 'quantity': 12}, {'sku': 'asf', 'quantity': 3} ] browser.basket.update(basketId='0987654321', ownerId= '1234', items = items) @add_method(Order) def createOrder(self, userId:str, skus:[str]): self.orderId = 'randomOrderId' print(f'created order {self.orderId}') browser.order.createOrder(userId='123', skus = ['123', '123']) @add_method(Order) def checkOut(self, skus:[str]): print(f'add {skus} to order {self.orderId}') browser.order.checkOut(['0000009', '9999913']) # ## Get images def showImage(im: Image): width, height = im.size return im.resize((int(100* width/height),100)) @add_method(Browser) def getLargeImage(self, 
sku:str)->Image: url = 'http://d19oj5aeuefgv.cloudfront.net' imageContent = requests.get(f'{url}/{sku}').content return Image.open(BytesIO(imageContent),formats=['PNG']) sku = '0189194' # %time largeImage:Image = browser.getLargeImage(sku) print(f'image is size {largeImage.size}') showImage(largeImage) @add_method(Browser) def getSmallImage(self, sku:str)->Image: url = 'https://d1vl5j0v241n75.cloudfront.net' imageContent = requests.get(f'{url}/{sku}').content return Image.open(BytesIO(imageContent),formats=['PNG']) # %time smallImage = browser.getSmallImage(sku) showImage(smallImage)
shopping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip install psycopg2-binary import psycopg2 dir(psycopg2) # + dbname = 'dicbrdqkflh99' user = 'vxnwetvoobravp' password = '<PASSWORD>' host = 'ec2-54-83-202-132.compute-1.amazonaws.com' port = '5432' # - # Establishing Connection to Heroku App pg_conn = psycopg2.connect(dbname = dbname, user = user, password = password, host = host ) #Checking Connection pg_conn pg_curs = pg_conn.cursor() # + #Reading in Smaller Training Set import pandas as pd brsm = pd.read_csv('br_smaller.csv') brsm.head(10) # + #Renaming False Index Columns to Drop brsm = brsm.rename(columns = {'Unnamed: 0':'zero'}) brsm = brsm.rename(columns = {'Unnamed: 0.1':'one'}) brsm.head(10) # - #Dropping index columns: brsm = brsm.drop(columns = 'zero') brsm = brsm.drop(columns = 'one') brsm.head(10) #Making new DF into csv brsm.to_csv("br_smaller.csv") brsm = pd.read_csv('br_smaller.csv') brsm = brsm.rename(columns = {'Unnamed: 0':'zero'}) brsm = brsm.drop(columns = 'zero') brsm.to_csv("br_smaller.csv", index=False) brsm = pd.read_csv('br_smaller.csv') brsm.head(10) pg_conn.close() # Establishing Connection to Heroku App pg_conn = psycopg2.connect(dbname = dbname, user = user, password = password, host = host ) #Checking Connection pg_conn pg_curs = pg_conn.cursor() book_table = """ CREATE TABLE book ( id SERIAL PRIMARY KEY, webpage BIGINT, title VARCHAR(300), author VARCHAR(100), description VARCHAR(25000), rating REAL, num_ratings VARCHAR(30), num_reviews VARCHAR(30), isbn VARCHAR(110), isbn13 VARCHAR(110), binding VARCHAR(100), edition VARCHAR(125), num_pages VARCHAR(100), published_on VARCHAR(150), genres VARCHAR(300) ); """ pg_curs.execute(book_table) insert_book = """ INSERT INTO book ( webpage, title, author, description, rating, num_ratings, num_reviews, isbn, isbn13, 
# Load every row of the books dataframe into the Postgres `book` table.
# Coerce the webpage id column to int first so psycopg2 binds it as BIGINT.
brsm.n = brsm.n.astype(int)

for idx, row in brsm.iterrows():
    webpage = row['n']
    title = row['title']
    author = row['author']
    description = row['descrip']
    rating = row['rating']
    num_ratings = row['num_ratings']
    num_reviews = row['num_reviews']
    isbn = row['isbn']
    isbn13 = row['isbn13']
    binding = row['binding']
    edition = row['edition']
    num_pages = row['pages']
    published_on = row['published_on']
    genres = row['genres']
    # BUG FIX: the original passed the literal strings 'title', 'author', ...
    # as query parameters, so every inserted row stored the column names
    # instead of the actual data. Pass the extracted variables instead.
    pg_curs.execute('''
        INSERT INTO book (
        webpage,
        title,
        author,
        description,
        rating,
        num_ratings,
        num_reviews,
        isbn,
        isbn13,
        binding,
        edition,
        num_pages,
        published_on,
        genres)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''',
        (webpage, title, author, description, rating, num_ratings,
         num_reviews, isbn, isbn13, binding, edition, num_pages,
         published_on, genres))

# Commit on the connection so the inserts are persisted, then release the cursor.
pg_conn.commit()
pg_curs.close()
notebooks/.ipynb_checkpoints/Database-notebook-checkpoint.ipynb
def primeFactors(n):
    """Return the prime factorisation of n as an ascending list (with repeats).

    Example: primeFactors(28) -> [2, 2, 7]; values < 2 yield an empty list.
    """
    result = []
    # Pull out every factor of two first so only odd candidates remain.
    while n % 2 == 0:
        result.append(2)
        n //= 2
    candidate = 3
    while candidate * candidate <= n:
        while n % candidate == 0:
            result.append(candidate)
            n //= candidate
        candidate += 2
    # Whatever survives trial division up to sqrt(n) is itself prime.
    if n > 2:
        result.append(n)
    return result
def trailingZeros4(n):
    """Count trailing zeros of n! without computing the factorial.

    Uses Legendre's formula: the zeros equal the number of factors of 5 in
    n!, i.e. floor(n/5) + floor(n/25) + ... Returns -1 for negative n
    (error sentinel, matching the other variants in this notebook).
    """
    if n < 0:
        return -1
    total = 0
    power = 5
    # Each iteration adds how many multiples of 5**k fit below n.
    while power <= n:
        total += n // power
        power *= 5
    return total
def search3(mat, target):
    """Search a row- and column-sorted matrix, starting at the top-right.

    From that corner every step left discards a larger value and every step
    down discards a smaller one, giving O(rows + cols) time. Returns the
    position as [row, col], or -1 when target is absent.
    """
    rows, cols = len(mat), len(mat[0])
    r, c = 0, cols - 1
    while r < rows and c >= 0:
        current = mat[r][c]
        if current == target:
            return [r, c]
        if current > target:
            c -= 1  # everything below in this column is even larger
        else:
            r += 1  # everything left in this row is even smaller
    return -1
def fact(n):
    """Return n! computed iteratively."""
    res = 1
    for i in range(1, n + 1):
        res = res * i
    return res

def nCr(n, r):
    """Binomial coefficient C(n, r).

    BUG FIX: the original `fact(n)//fact(r)*fact(n-r)` divided by fact(r)
    and then MULTIPLIED by fact(n-r); the denominator must be the full
    product fact(r) * fact(n-r).
    """
    return fact(n) // (fact(r) * fact(n - r))

def uniquePath3(i, j, x, y):
    """Closed-form count of monotone lattice paths from (i, j) to (x-1, y-1).

    BUG FIX: the original `nCr(x+y-2-,y-1)` was a syntax error and ignored
    the start cell. The remaining moves are (x-1-i) downs and (y-1-j)
    rights; the answer is the number of ways to place the rights among them.
    """
    return nCr(x + y - 2 - i - j, y - 1 - j)

i, j = 1, 1
x, y = 2, 3
uniquePath3(i, j, x, y)
Math.ipynb
def get_result(name: str):
    """Sum the 'weak' counters per test across all runs in a stats JSON file.

    The file maps run numbers ('1', '2', ...) plus a 'randomSeed' entry to
    dicts of per-test stats; the set of tests is taken from run '1'. Prints
    each test name with its aggregated 'weak' total.
    """
    with open(name, 'r') as stats_file:
        data = json.load(stats_file)
    # Seed the totals with the tests present in the first run.
    totals = {test: 0 for test in data['1']}
    # Drop the seed entry so only run records remain (KeyError if absent,
    # same as before).
    data.pop('randomSeed')
    for run in data.values():
        for test_name, stat in run.items():
            totals[test_name] += stat['weak']
    for test_name, total in totals.items():
        print(test_name, total)
WebGPU_Run_Statistics/survey.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/aapte11/DS-Sprint-02-Storytelling-With-Data/blob/master/LS_DS_123_Make_explanatory_visualizations.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="FrApBPWKIxPs" colab_type="text" # _Lambda School Data Science_ # # # Choose appropriate visualizations # # # Recreate this [example by FiveThirtyEight:](https://fivethirtyeight.com/features/al-gores-new-movie-exposes-the-big-flaw-in-online-movie-ratings/) # # ![](https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png?w=575) # # Using this data: # # https://github.com/fivethirtyeight/data/tree/master/inconvenient-sequel # # ### Stretch goals # # Recreate more examples from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/). 
# # For example: # - [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) ([`altair`](https://altair-viz.github.io/gallery/index.html#maps)) # - [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) ([`statsmodels`](https://www.statsmodels.org/stable/index.html)) # + [markdown] id="aDdcOUJ-q4OU" colab_type="text" # ## Data Wrangling and Exploration # + id="zt1eGRhBIxPw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 131} outputId="69bfa62d-4c3c-44a2-a7b5-72e526bf4690" import pandas as pd import matplotlib.pyplot as plt url = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/inconvenient-sequel/ratings.csv' df = pd.read_csv(url) # df.head() df.timestamp = pd.to_datetime(df.timestamp) df.timestamp.describe() # + id="XHolxXRfdjNj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 348} outputId="d62a0faa-d2c5-42e9-c772-b7b795d3d44d" df.set_index('timestamp', inplace = True) df.head() # + id="0d3EWIvkMVpI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="273d1b2e-e855-4282-b1e1-0b39f51e1cf1" length, width = df.shape print(df.shape) print(length*width) # + id="IIj3Fb_BeBYR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 342} outputId="a446383c-4224-4b80-a86c-4e5ef0d4eca8" # Use Value Counts rather than Unique df.category.value_counts() # + id="tGDtYvgBMa-r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 98} outputId="351c44bc-82d8-4444-eba0-af40c9f8136d" df.columns # + [markdown] id="a5ns9B5oqpMm" colab_type="text" # ## Suggested Correct Version of this problem # + id="HGECCk9SgtFl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="88711115-6872-4d1a-8f14-0d49a67c159c" cols2 = ['1_pct','2_pct', '3_pct', '4_pct', '5_pct', '6_pct', '7_pct', '8_pct', '9_pct', '10_pct'] 
pct_votes = df[cols2].tail(1) pct_votes # + id="Rbn25pR0lLnZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 76} outputId="c1dc8cf3-8767-4c7c-80b6-252693a74de8" # Why inplace = True fails is beyond me, better to make a copy pct_votes_new = pct_votes.reset_index(drop=True) pct_votes_new # + id="kGUkvzOchSgv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 335} outputId="0f54e280-0d0d-4cdd-9583-41a77461d45b" pct_votes_new.rename(columns={"1_pct": "1", "2_pct": "2", "3_pct": "3", "4_pct": "4", "5_pct": "5", "6_pct": "6", "7_pct": "7", "8_pct": "8", "9_pct": "9", "10_pct": "10",}, inplace = True) pct_votes_new = pct_votes_new.T pct_votes_new # + id="Je9Va7B-hgor" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 455} outputId="4b83bbf0-77d9-4607-d575-b31063d59b57" # fig, ax = plt.subplots() plt.style.use("fivethirtyeight") # fig = plt.figure(); # fig.patch.set_alpha(0.0) ax = pct_votes_new.plot.bar(color = '#EC713B', width = 0.9) # All bars are the same color and width ax.tick_params(labelrotation = 0, colors = 'gray') y_ticks = [0,10,20,30,40] ax.set_yticks(y_ticks) ax.legend().set_visible(False) ax.set_facecolor("white") ax.set_ylabel("Percent of total votes", fontweight='bold') ax.set_xlabel("Rating", fontweight='bold') ax.text(-1.5,45,s="'An Inconvenient Sequel: Truth to Power' is divisive", fontsize=18, weight='bold') ax.text(-1.5,42,s="IMDb Ratings for the film as of Aug. 29", fontsize=16) # ax.set_title("'An Inconvenient Sequel: Truth to Power' is divisive \n IMDb Ratings for the film as of Aug. 
29", loc = 'left') # ax.spines['bottom'].set_color('white') # ax.spines['top'].set_color('white') # ax.spines['right'].set_color('white') # ax.spines['left'].set_color('white') # ax.tick_params(axis = 'x', color = 'lightgray') # ax.tick_params(axis = 'y', color = 'blue') # ax.yaxis.label.set_color('black') # ax.xaxis.label.set_color('black') # ax.patch.set_facecolor('white') plt.show() # plt.savefig('538.png', transparent=True) # + [markdown] id="h3PqUL3sqhi5" colab_type="text" # ## Alternate Attempt at this problem # + id="6leA3oRbNd6Q" colab_type="code" colab={} # Slicing out relevant columns cols = ['1_votes', '2_votes', '3_votes', '4_votes', '5_votes', '6_votes', '7_votes', '8_votes', '9_votes', '10_votes'] df_new = df[cols] sum_votes = pd.Series(df_new.sum()) # + id="t57Zx90pVdCl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 196} outputId="8941d38e-3276-4baf-c85d-787feabf0103" sum_votes = 100 * sum_votes / sum_votes.sum() sum_votes.rename(index={"1_votes": "1", "2_votes": "2", "3_votes": "3", "4_votes": "4", "5_votes": "5", "6_votes": "6", "7_votes": "7", "8_votes": "8", "9_votes": "9", "10_votes": "10",}, inplace = True) sum_votes # + id="SP3pvP1bV5uY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 428} outputId="5f8ea8aa-b8d6-4c7e-8b4c-dadadae11a8c" plt.style.use("fivethirtyeight") ax = sum_votes.plot.bar(color = '#EC713B', width = 0.9) ax.tick_params(labelrotation = 0, colors = 'gray') y_ticks = [0,10,20,30,40] ax.set_yticks(y_ticks) ax.legend().set_visible(False) ax.set_facecolor("white") ax.set_ylabel("Percent of total votes", fontweight='bold') ax.set_xlabel("Rating", fontweight='bold') ax.text(-1.5,45,s="'An Inconvenient Sequel: Truth to Power' is divisive", fontsize=18, weight='bold') ax.text(-1.5,42,s="IMDb Ratings for the film as of Aug. 29", fontsize=18) # ax.set_title("'An Inconvenient Sequel: Truth to Power' is divisive \n IMDb Ratings for the film as of Aug. 
29", loc = 'left') # ax.spines['bottom'].set_color('white') # ax.spines['top'].set_color('white') # ax.spines['right'].set_color('white') # ax.spines['left'].set_color('white') # ax.tick_params(axis = 'x', color = 'lightgray') # ax.tick_params(axis = 'y', color = 'blue') # ax.yaxis.label.set_color('black') # ax.xaxis.label.set_color('black') plt.show() # + [markdown] id="XM0dJJodOIf3" colab_type="text" # ## STRETCH GOAL # + id="TjterHVvOLb4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 240} outputId="67e3bb6f-5a68-4a2c-9691-473e6219439f" # Bad Drivers - https://github.com/fivethirtyeight/data/tree/master/bad-drivers url2 = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/bad-drivers/bad-drivers.csv' driver_df = pd.read_csv(url2) driver_df.head() # + id="RsxYPYSlOmjD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 326} outputId="ef72c2a9-639f-4312-d797-546b7b59e688" driver_df.describe() # + id="7CLts_U7Qpix" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 163} outputId="d70df1bd-fb59-4a58-876a-82bdfedb4b94" driver_df.columns # + id="YrCdNdN7PaSs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 220} outputId="a4b103b5-4fd1-4ec3-de6a-8605044309fb" driver_df.rename(columns = {'Number of drivers involved in fatal collisions per billion miles' : 'no_fatal', 'Percentage Of Drivers Involved In Fatal Collisions Who Were Speeding' : 'pct_fatal', 'Percentage Of Drivers Involved In Fatal Collisions Who Were Alcohol-Impaired': 'pct_fatal_alcohol', 'Percentage Of Drivers Involved In Fatal Collisions Who Were Not Distracted': 'pct_fatal_nodistraction', 'Percentage Of Drivers Involved In Fatal Collisions Who Had Not Been Involved In Any Previous Accidents' : 'pct_fatal_noprev', 'Car Insurance Premiums ($)' : 'ins_premiums', 'Losses incurred by insurance companies for collisions per insured driver ($)' : 'ins_losses'}, inplace = True) driver_df.head() # + 
id="TrXX3f8ZPaVf" colab_type="code" colab={} # import altair as alt # from vega_datasets import data # + id="Mz7tWwnjUSkR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 878} outputId="c6f87e8d-171c-4195-920f-a023402d5082" no_fatal = driver_df.no_fatal.sort_values() no_fatal # + id="_zWHdxbbTqUm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 515} outputId="d64cfaab-fa3a-4265-9911-40c9f99e424a" plt.style.use("fivethirtyeight") # fig = plt.figure(); # fig.patch.set_alpha(0.0) ax = no_fatal.plot.bar(width = 0.9) # All bars are the same color and width (color = '#EC713B') ax.tick_params(labelrotation = 90, colors = 'gray') y_ticks = [0,5,10,15,20,25] ax.set_yticks(y_ticks) ax.legend().set_visible(False) ax.set_facecolor("white") ax.set_ylabel("Drivers involved in fatal collisions", fontweight='bold') ax.set_xlabel("State", fontweight='bold') ax.text(-1.5,28,s="'Drivers involved in fatal collisions per billion miles by State", fontsize=18, weight='bold') # ax.text(-1.5,42,s="IMDb Ratings for the film as of Aug. 29", fontsize=16) # ax.set_title("'An Inconvenient Sequel: Truth to Power' is divisive \n IMDb Ratings for the film as of Aug. 29", loc = 'left') # ax.spines['bottom'].set_color('white') # ax.spines['top'].set_color('white') # ax.spines['right'].set_color('white') # ax.spines['left'].set_color('white') # ax.tick_params(axis = 'x', color = 'lightgray') # ax.tick_params(axis = 'y', color = 'blue') # ax.yaxis.label.set_color('black') # ax.xaxis.label.set_color('black') # ax.patch.set_facecolor('white') plt.show() # + id="60E9DGoZVVcf" colab_type="code" colab={}
LS_DS_123_Make_explanatory_visualizations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # EOPF Product Data Structure samples # This is a simple tutorial to show you how to build a Generic Product Data Structure and display it using our template <br> # Do not hesitate to modify/update the code and do new suggestions # # + import xarray as xr import glob from utils import display, get_ds_variables as get_ds from EOProductDataStructure import EOProductBuilder, EOVariableBuilder, EOGroupBuilder # - # # Let's start building a new Product Data Structure # # 1. Create an instance of EOProduct # + # Adding global attributes global_attributes = [ "product_name", "creation_time", "start_time", "stop_time", "footprint", "absolute_orbit_number", "ac_subsampling_factor", "al_subsampling_factor", "comment", "contact", "history", "institution", "references", "resolution", "source", "title"] attrs = {att: 'TBD' for att in global_attributes} # Create an instance of EOProduct , give it a name p = EOProductBuilder(name="S3 OLCI L1 Product Data Structure", coords=EOGroupBuilder('coords'), attrs=attrs) # display this empty product display(p.compute()) # - # # 2. Adding Groups to our Product # + # adding groups to our product radiances = EOGroupBuilder("radiances", coords=EOGroupBuilder('coordinates')) tie_points = EOGroupBuilder("tie_points") removed_pixels = EOGroupBuilder("removed_pixels") instrument_data = EOGroupBuilder("instrument_data") #p.groups.append(radiances) #p.groups.append(tie_points) p.groups += [radiances,tie_points,removed_pixels,instrument_data] # again display the product display(p.compute()) # - # # 3. 
Adding Dimensions/Coordinates and Variables to Groups # + # adding attributes to radiances group radiances.attrs['description'] = 'Radiances Data Group' # adding dimensions radiances.dims = ['rows','columns'] # ======================= Coordinates ======================= for name in ["latitude","longitude","altitude","time_stamp"]: coord = EOVariableBuilder(name, default_attrs=True) # set default_attrs to False to disable default attributes coord.dtype = "int32" coord.dims = radiances.dims radiances.coords.variables.append(coord) # ======================= Variables ======================= for r in range(21): variable = EOVariableBuilder("Oa%02d_radiance" % (r+1), default_attrs = True) variable.dtype = "uint16" variable.dims = radiances.dims radiances.variables.append(variable) display(p.compute()) # - # # 4. Adding Metadata p.metadatas = ['xdfumanifest.json','metadata.file'] display(p.compute()) # # 5. Example of S3 product conversion # Path ot S3 product path_to_product = glob.glob("data/S3A_OL_1*.SEN3")[0] path_to_product # # 5.1 Define a new Data Structure # + # Groups definition groups = {} groups['radiances'] = get_ds(path_to_product, names=["Oa%02d_radiance" %r for r in range(21)] + ["time_coordinates","geo_coordinates"], process_coords=True) groups['tie_points'] = get_ds(path_to_product, names=["tie_geo_coordinates","tie_meteo","tie_geometries"], process_coords=True) groups['removed_pixels'] = get_ds(path_to_product, names=["removed_pixels"], process_coords=True) groups['instrument_data'] = get_ds(path_to_product, names=["instrument_data"], process_coords=True) groups['quality_flags'] = get_ds(path_to_product, names=["qualityFlags"], process_coords=True) # Create a new EOProduct instance product = EOProductBuilder("S3 OLCI L1 Product Data Structure", coords=EOGroupBuilder('coords')) # do the same work as before product.metadatas = ["xfdumanifest.json"] # global attributes #get them from a netcdf file ds = xr.open_dataset(path_to_product + 
"/Oa01_radiance.nc",decode_times=False,mask_and_scale=False) product.attrs.update(ds.attrs) # ==================== Product groups setting ======================== for group_name, ds in groups.items(): ds = groups[group_name] group = EOGroupBuilder(group_name, coords=EOGroupBuilder('coordinates')) group.attrs["description"] = f"{group_name} Data Group" group.dims = ds.dims for cname, cood in ds.coords.items(): variable = EOVariableBuilder( cname, dtype=cood.dtype, dims=cood.dims, attrs=cood.attrs, default_attrs = False, ) group.coords.variables.append(variable) for v, var in ds.variables.items(): if v not in ["latitude","longitude","altitude","time_stamp"]: variable = EOVariableBuilder( v, dtype=var.dtype, dims=var.dims, attrs=var.attrs, default_attrs = False ) group.variables.append(variable) product.groups.append(group) # - display(product.compute()) # # New Proposal : Put all the coordinates in the same group => Coordinates # + p = product rads = p.groups[0] tps = p.groups[1] #Create a dedicated coordiante group , call it Coordinates coordinates = EOGroupBuilder("Coordinates") coordinates.variables = rads.coords.variables for c in rads.coords.variables: for d in c.dims: if d not in coordinates.dims: coordinates.dims.append(d) for c in tps.coords.variables: c.name = f'tie_{c.name}' for d in c.dims: if d not in coordinates.dims: coordinates.dims.append(d) coordinates.variables += tps.coords.variables rads.coords = None tps.coords = None p.coords = coordinates p.attrs['metadata_files'] = '[xfdumanfist.json]' # - display(p.compute())
eopf-notebooks/eopf_product_data_structure/eopf_product_structure.ipynb
def product(fracs):
    """Multiply an iterable of Fractions.

    Returns the (numerator, denominator) pair of the product in lowest
    terms (Fraction keeps itself reduced). Robustness fix: an empty
    iterable now yields (1, 1), the multiplicative identity, instead of
    the TypeError that `reduce` without an initializer raised.
    """
    total = Fraction(1)
    for frac in fracs:
        total *= frac
    return total.numerator, total.denominator
hacker-rank/Python/Python Functionals/Reduce Function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # PCMark benchmark on Android

# The goal of this experiment is to run benchmarks on a Pixel device running Android with an EAS kernel and collect results. The analysis phase will consist in comparing EAS with other schedulers, that is comparing *sched* governor with:
#
# - interactive
# - performance
# - powersave
# - ondemand
#
# The benchmark we will be using is ***PCMark*** (https://www.futuremark.com/benchmarks/pcmark-android). You will need to **manually install** the app on the Android device in order to run this Notebook.
#
# When opening PCMark for the first time you will need to install the work benchmark from inside the app.

import logging
from conf import LisaLogging
LisaLogging.setup()

# +
# %pylab inline

import copy
import os
from time import sleep
from subprocess import Popen
import pandas as pd

# Support to access the remote target
import devlib
from env import TestEnv

# Support for trace events analysis
from trace import Trace

# Support for FTrace events parsing and visualization
import trappy
# -

# ## Test environment setup
#
# For more details on this please check out **examples/utils/testenv_example.ipynb**.
#
# In case more than one Android device is connected to the host, you must specify the ID of the device you want to target in `my_target_conf`. Run `adb devices` on your host to get the ID. Also, you have to specify the path to your Android SDK in ANDROID_HOME.

# Setup a target configuration
my_target_conf = {

    # Target platform and board
    "platform"     : 'android',

    # Add target support
    "board"        : 'pixel',

    # Device ID
    "device"       : "HT6670300102",

    "ANDROID_HOME" : "/home/vagrant/lisa/tools/android-sdk-linux/",

    # Define devlib modules to load
    "modules"      : [
        'cpufreq'   # enable CPUFreq support
    ],
}

my_tests_conf = {

    # Folder where all the results will be collected
    "results_dir" : "Android_PCMark",

    # Platform configurations to test
    "confs" : [
        {
            "tag"            : "pcmark",
            "flags"          : "ftrace",        # Enable FTrace events
            "sched_features" : "ENERGY_AWARE",  # enable EAS
        },
    ],
}

# Initialize a test environment using:
#   the provided target configuration (my_target_conf)
#   the provided test configuration (my_tests_conf)
te = TestEnv(target_conf=my_target_conf, test_conf=my_tests_conf)
target = te.target

# ## Support Functions

# This set of support functions will help us running the benchmark using different CPUFreq governors.

# +
def set_performance():
    """Set the 'performance' governor on every CPU of the target."""
    target.cpufreq.set_all_governors('performance')

def set_powersave():
    """Set the 'powersave' governor on every CPU of the target."""
    target.cpufreq.set_all_governors('powersave')

def set_interactive():
    """Set the 'interactive' governor on every CPU of the target."""
    target.cpufreq.set_all_governors('interactive')

def set_sched():
    """Set the 'sched' (EAS) governor on every CPU of the target."""
    target.cpufreq.set_all_governors('sched')

def set_ondemand():
    """Set the 'ondemand' governor and pin its sampling rate to the
    minimum supported value on each online CPU."""
    target.cpufreq.set_all_governors('ondemand')

    for cpu in target.list_online_cpus():
        tunables = target.cpufreq.get_governor_tunables(cpu)
        target.cpufreq.set_governor_tunables(
            cpu,
            'ondemand',
            **{'sampling_rate' : tunables['sampling_rate_min']}
        )
# -

# +
# CPUFreq configurations to test
confs = {
    'performance' : {
        'label' : 'prf',
        'set'   : set_performance,
    },
    #'powersave' : {
    #    'label' : 'pws',
    #    'set'   : set_powersave,
    #},
    'interactive' : {
        'label' : 'int',
        'set'   : set_interactive,
    },
    #'sched' : {
    #    'label' : 'sch',
    #    'set'   : set_sched,
    #},
    #'ondemand' : {
    #    'label' : 'odm',
    #    'set'   : set_ondemand,
    #}
}

# The set of results for each comparison test
results = {}
# -

# +
# Check if PCMark is available on the device
def check_packages(pkgname):
    """Raise RuntimeError if `pkgname` is not installed on the target.

    Uses `pm list packages` on the device; any failure of the grep
    pipeline (no match) is reported as a missing package.
    """
    try:
        target.execute('pm list packages -f | grep -i {}'.format(pkgname))
    except Exception:
        raise RuntimeError('Package: [{}] not available on target'.format(pkgname))

# Check for specified PKG name being available on target
check_packages('com.futuremark.pcmark.android.benchmark')
# -

# +
# Function that helps run a PCMark experiment
def pcmark_run(exp_dir):
    """Run one PCMark 'work' pass on the target and harvest its scores.

    Parameters
    ----------
    exp_dir : str
        Host directory where log.txt and score.txt are written.

    Returns
    -------
    str
        Path to the score file (one "PCMA_*_SCORE value" line per score).
    """
    # Unlock device screen (assume no password required)
    target.execute('input keyevent 82')
    # Start PCMark on the target device
    target.execute('monkey -p com.futuremark.pcmark.android.benchmark -c android.intent.category.LAUNCHER 1')
    # Wait few seconds to make sure the app is loaded
    sleep(5)
    # Flush entire log
    target.clear_logcat()
    # Run performance workload (assume screen is vertical)
    target.execute('input tap 750 1450')
    # Wait for completion (10 minutes in total) and collect log
    log_file = os.path.join(exp_dir, 'log.txt')
    # Wait 5 minutes before starting to collect, so the log captures the
    # second half of the run
    sleep(300)
    # Start collecting the log. NOTE: the command must be a proper argv
    # list ('adb', 'logcat', ...); the original single 'adb logcat'
    # element combined with shell=True silently dropped the filter specs.
    with open(log_file, 'w') as log:
        logcat = Popen(['adb', 'logcat',
                        'com.futuremark.pcmandroid.VirtualMachineState:*',
                        '*:S'],
                       stdout=log)
        # Wait an additional five minutes for the benchmark to complete
        sleep(300)
        # Terminate logcat
        logcat.kill()

    # Get scores from logcat
    score_file = os.path.join(exp_dir, 'score.txt')
    os.popen('grep -o "PCMA_.*_SCORE .*" {} | sed "s/ = / /g" | sort -u > {}'.format(log_file, score_file))

    # Close application
    target.execute('am force-stop com.futuremark.pcmark.android.benchmark')

    return score_file
# -

# +
# Function that helps run PCMark for different governors
def experiment(governor, exp_dir):
    """Run PCMark under `governor` and return its scores.

    Parameters
    ----------
    governor : str
        Key into the `confs` dict; its 'set' callable configures CPUFreq.
    exp_dir : str
        Host directory for this experiment's artifacts.

    Returns
    -------
    dict
        {'dir': exp_dir, 'scores': {score_name: float}}
    """
    os.system('mkdir -p {}'.format(exp_dir));

    logging.info('------------------------')
    logging.info('Run workload using %s governor', governor)
    confs[governor]['set']()

    ### Run the benchmark ###
    score_file = pcmark_run(exp_dir)

    # Save the score as a dictionary
    scores = dict()
    with open(score_file, 'r') as f:
        lines = f.readlines()
        for l in lines:
            info = l.split()
            scores.update({info[0] : float(info[1])})

    # return all the experiment data
    return {
        'dir'    : exp_dir,
        'scores' : scores,
    }
# -

# ## Run PCMark and collect scores

# Run the benchmark in all the configured governors
for governor in confs:
    test_dir = os.path.join(te.res_dir, governor)
    res = experiment(governor, test_dir)
    results[governor] = copy.deepcopy(res)

# After running the benchmark for the specified governors we can show and plot the scores:

# +
# Create results DataFrame
data = {}
for governor in confs:
    data[governor] = {}
    # .items() (not the Py2-only .iteritems()) works on both Python 2 and 3
    for score_name, score in results[governor]['scores'].items():
        data[governor][score_name] = score

df = pd.DataFrame.from_dict(data)
df
# -

df.plot(kind='bar', rot=45, figsize=(16,8),
        title='PCMark scores vs SchedFreq governors');
ipynb/deprecated/examples/android/benchmarks/Android_PCMark.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- import pandas as pd import krisk.plot as kk # Use this when you want to nbconvert the notebook (used by nbviewer) from krisk import init_notebook; init_notebook() # Before we added talk about each of these features. There's something in common, the way the element position themselves, this are position (`x_pos`,`y_pos`), `align`, and `orientation`. # ## Position # Everytime we add toolbox, title, or legend, we almost always want to reposition those elements. # When default to auto for both x-axes and y-axes, # # * The title will position at top-left corner. # * The legend will position at top-center. # * The toolbox will position at top-left corner. # As we can see from this criteria, there's definitely collision for title and toolbox, and legend if the title is longer. # Take a look at this example, df = pd.read_csv('../krisk/tests/data/gapminderDataFiveYear.txt',sep='\t') p = kk.bar(df,'year',y='pop',how='mean',c='continent') p.set_size(width=800) p.set_title('GapMinder Average Population Across Continent') p.set_toolbox(save_format='png',restore=True) # As we can see, we have to reposition the coordinate accordingly, specifically for the title since it collide with both toolbox and legend. p.set_title('GapMinder Average Population Across Continent',y_pos='-7%') # Notice that the coordinate for x_pos and y_pos has negative element. The way krisk started the position is from bottom-left corner, and based on percentage of the pixel. If on the negative element, then it will be the other way around, with x position started at the right, and y position started at the top. # # # Beside coordinate number, krisk support `auto`, `center`, `left`-`right` (x position), and `top`-`bottom` (y position). 
# ## Orientation # We can set orientation for legend and toolbox. Below we see the legend oriented vertically and reposition nicely in the chart. p.set_legend(orient='vertical',x_pos='12%',y_pos='-16%') # ## Legend and Title # For legend and title we have discussed all the optional arguments there is. We can see docstring for `set_title` and `set_legend` below. help(p.set_title) help(p.set_legend) # ## Toolbox # Beside align, position, and orientation, Toolbox has different set of options that we can interact with the chart. First the easiest, `restore`,`save_format`, and panning. # ### Restore, Save, and Zoom p.set_toolbox(restore=True, save_format='png', data_zoom=True) # The `restore` option will return your plot to original form. This is intended after you edit the plot, you may want to reset it back. The save_format is nice feature that you can use to download as image after editing the plots. # ### Data View p.set_toolbox(data_view=False, restore=True) p.set_size(width=800) # The `data_view` is another convenience function to let readers see table data that construct the plot. If True, it's set to read_only. Set it to False will let readers modify the data, hence changing the plot. # ### Magic Type p.set_toolbox(magic_type=['line','bar'],restore=True) # `magic_type` let your plot changed as you explore different options to visualize your data. Here the exact same plot is used, but using `line` to change the visualization.
notebooks/legend-title-toolbox.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Import necessary packages
import numpy as np
import gurobipy as grb
import matplotlib.pyplot as plt
import pyqubo
import dimod
from dwave.system import DWaveSampler
import dwave.inspector
from dwave.system.composites import FixedEmbeddingComposite
from minorminer import find_embedding

# +
# Wall-clock budget (seconds) for each Gurobi solve; -1 disables the limit.
global maxOptTimsSeconds
maxOptTimsSeconds = -1

def callbackTerminate(model, where):
    # Gurobi callback: abort the optimization once the elapsed runtime
    # exceeds maxOptTimsSeconds (only when the budget is positive).
    global maxOptTimsSeconds
    if where != grb.GRB.Callback.POLLING:
        t = model.cbGet(grb.GRB.Callback.RUNTIME)
        if t >= maxOptTimsSeconds and maxOptTimsSeconds > 0:
            model.terminate()
# -

""" Creation of relaxed MILP model with constraints (subproblem 1) """
def _create_milp_model(T, m, n, A, F, b, gamma, c, eta, V, R):
    # Build the relaxed MILP (subproblem 1) of the irrigation problem.
    # T: planning periods; m: sub-areas; n: crops.
    # A, F, b: (m, n, T)-shaped coefficient arrays; eta: (m, T) inflow;
    # V: reservoir capacity per period; R: (m, T) net-revenue constants.
    # Returns (model, X, Y, BSWm, BSWp, S, tau) so the caller can query
    # variable values after optimization.
    """ Prepare the index for decision variables """
    tauVars = [(i) for i in range(m)]
    sVars = [(t) for t in range(T)]
    yVars = [(i, j, t) for i in range(m) for j in range(n) for t in range(T)]
    xVars = [(i, t) for i in range(m) for t in range(T)]

    """ Create model """
    model = grb.Model("IrrigationOptGurobi")

    """ Create decision variables """
    S = model.addVars(sVars, vtype=grb.GRB.CONTINUOUS, lb=0, name="S")
    tau = model.addVars(tauVars, vtype=grb.GRB.BINARY, name="tau")
    Y = model.addVars(yVars, vtype=grb.GRB.BINARY, name="Y")
    X = model.addVars(xVars, vtype=grb.GRB.CONTINUOUS, lb=0, name="X")
    BSWp = model.addVars(xVars, vtype=grb.GRB.CONTINUOUS, lb=0, name="BSWp")
    BSWm = model.addVars(xVars, vtype=grb.GRB.CONTINUOUS, lb=0, name="BSWm")

    # Create net revenue (now set as constants R array)
    # R = np.zeros((m,T),dtype=object)
    # for t in range(T):
    #     for i in range(m):
    #         R[i,t] += (sum(b[i,j,t] * F[i,j,t] * A[i,j,t] for j in range(n)) -
    #                    gamma * X[i,t] * sum(A[i,j,t] for j in range(n)) -
    #                    c[i] * BSWm[i,t] * sum(A[i,j,t] for j in range(n)) -
    #                    c[i] * BSWp[i,t] * sum(A[i,j,t] for j in range(n)))

    """ Create the objective function """
    # Sum of pairwise differences of water allocation per unit net revenue
    # between sub-areas, maximized below.
    # NOTE(review): the i / i_prime ranges overlap (indices 1..m-2 appear in
    # both), so some pairs are counted twice -- confirm this matches the
    # intended fairness measure.
    objFunc = 0.0
    for t in range(T):
        for i in range(0, m - 1):
            for i_prime in range(1, m):
                objFunc += (X[i, t] * sum(A[i, j, t] for j in range(n)) / R[i, t] -
                            X[i_prime, t] * sum(A[i_prime, j, t] for j in range(n)) / R[i_prime, t])
    model.setObjective(objFunc, sense=grb.GRB.MAXIMIZE)

    """ Create constraints """
    # 1. reservoir water balance
    # Exactly one (sub-area, crop) selection per period.
    model.addConstrs((grb.quicksum(Y[i, j, t] for i in range(m) for j in range(n)) == 1 for t in range(T)),
                     name="constraint for Y")
    for t in range(1, T):
        model.addConstr(lhs=S[t], sense=grb.GRB.EQUAL,
                        rhs=S[t-1] + sum(eta[i, t] for i in range(m)) * sum(A[i, j, t] for i in range(m) for j in range(n)) - sum(X[i, t] * sum(A[i, j, t] for j in range(n)) for i in range(m)),
                        name="reservoir water balance")
    # 2. water allocation availability
    for t in range(T):
        model.addConstr(lhs=sum(X[i, t] * sum(A[i, j, t] for j in range(n)) for i in range(m)),
                        sense=grb.GRB.LESS_EQUAL,
                        rhs=S[t] + sum(eta[i, t] for i in range(m)) * sum(A[i, j, t] for i in range(m) for j in range(n)),
                        name="water allocation availability")
    # 3. reservoir storage capability
    for t in range(T):
        model.addConstr(lhs=S[t], sense=grb.GRB.LESS_EQUAL, rhs=V[t],
                        name="reservoir storage capability")

    # # 4. water distribution availability --> quantum part
    # model.add_constraints((sum(Y[i,j,t]*A[i,j,t] for j in range(n)) <= X[i,t]*sum(A[i,j,t] for j in range(n)) + tau[i]*BSWp[i,t]*sum(A[i,j,t] for j in range(n)) + (tau[i]-1)*BSWm[i,t]*sum(A[i,j,t] for j in range(n)) for i in range(m) for t in range(T)), names="water distribution availability")

    # 4. transaction constraint
    model.addConstrs((BSWm[i, t] * sum(A[i, j, t] for j in range(n)) <= X[i, t] * sum(A[i, j, t] for j in range(n)) - sum(Y[i, j, t] * A[i, j, t] for j in range(n)) for i in range(m) for t in range(T)),
                     name="transaction constraint")
    # 5. transaction conservation
    # NOTE(review): BSWp * BSWm == 0 is a non-convex quadratic equality;
    # Gurobi requires the NonConvex parameter (v9+) to accept it -- confirm
    # the installed Gurobi handles this model.
    model.addConstrs((BSWp[i, t] * BSWm[i, t] == 0 for i in range(m) for t in range(T)),
                     name="transaction conservation")

    return model, X, Y, BSWm, BSWp, S, tau

""" Feasibility by DWave quantum model """
def _feasi_by_quantum(m, n, ET, Y_star, X_star, BSWm_star, BSWp_star, eta):
    # Thin wrapper: build the QUBO feasibility model (subproblem 2) from the
    # fixed classical solution.
    model = _create_model(m, n, ET, Y_star, X_star, BSWm_star, BSWp_star, eta)
    # """ Write a log file, cannot call in main() function """
    # output_file = os.getcwd() + "/logs/hybrid/2nd/hybrid-milp-dwave-" + file_name + ".log"
    # model.setParam(grb.GRB.Param.LogFile, output_file)
    return model

""" Creation of quantum annealing model with constraints """
def _create_model(m, n, ET, Y_star, X_star, BSWm_star, BSWp_star, eta):
    # Compile a pyqubo Hamiltonian over binary switches tau_i with the
    # classical solution (X_star, Y_star, BSWm_star, BSWp_star) held fixed.
    # NOTE(review): reads the module-level array A (not a parameter) and only
    # evaluates period t = 1; the index A[i,i,t] (crop index equal to area
    # index) looks inconsistent with the surrounding A[i,j,t] terms -- confirm.
    tau = {i: pyqubo.Binary("tau_(%d)" % i) for i in range(m)}
    """ Partial solution from classical solver"""
    print("partial solution: {},{},{},{}".format(X_star, Y_star, BSWm_star, BSWp_star))
    t = 1
    H = 0
    """ Hamilton function"""
    for i in range(m):
        H += -((1 - tau[i]) * (X_star[i, t] + eta[i, t] - sum(ET[i, j, t] * A[i, i, t] for j in range(n))) + \
               (sum(Y_star[i, j, t] * A[i, j, t] for j in range(n))) - (sum(Y_star[i, j, t] * A[i, j, t] for j in range(n))) * \
               (X_star[i, t] * sum(A[i, j, t] for j in range(n)) + tau[i] * BSWp_star[i, t] * sum(A[i, j, t] for j in range(n)) + (tau[i] - 1) * BSWm_star[i, t] * sum(A[i, j, t] for j in range(n))))
    #H += pyqubo.Constraint((sum(tau[i] for i in range(m)) - 1)**2, label="constraint1")
    #H += pyqubo.Constraint((sum(tau[i]*tau[i+1] for i in range(m-1)) - 1), label="constraint2")
    """ Create model """
    model = H.compile()
    return model

# put your TOKEN here
# ("<PASSWORD>" is a scrubbed placeholder; replace it with a real D-Wave
# Leap API token before running the sampler.)
token = "<PASSWORD>"

""" optimal irrigation problem by hybrid method of Gurobi MILP and D-Wave QPU """
""" Solve the model and formulate the result """
def solve(T, m, n, A, F, b, gamma, c, eta, V, ET):
    # Hybrid loop: solve the relaxed MILP with Gurobi, then check the relaxed
    # constraint on the D-Wave annealer; on failure, add an integer cut and
    # repeat. Returns (objVal, solved).
    # NOTE(review): R is read from module scope (not a parameter), so solve()
    # only works after the data cell below defines R -- confirm intended.
    solved = False
    k = 0
    milp_model, X, Y, BSWm, BSWp, S, tau = _create_milp_model(T, m, n, A, F, b, gamma, c, eta, V, R)
    objVal = None
    """ Hybrid strategy """
    try:
        while not solved:
            print("-----------------------------------------ITERATION:--------------------------------", k+1)
            # milp_model.update()
            # milp_model.tune()
            # milp_model.optimize(callbackTerminate)
            # load quadratic program from gurobi model
            milp_model.optimize(callbackTerminate)
            if k > 0:
                print("After getting integer cuts %i:" %(k + 1))
            if milp_model.status == grb.GRB.Status.OPTIMAL:
                print("Value of obj func: {}".format(milp_model.objVal))
                X_star = milp_model.getAttr("X", X)
                print("Initial water: {}".format(X_star))
                Y_star = milp_model.getAttr("X", Y)
                print("Water used: {}".format(Y_star))
                BSWm_star = milp_model.getAttr("X", BSWm)
                print("Amount water bought: {}".format(BSWm_star))
                BSWp_star = milp_model.getAttr("X", BSWp)
                print("Amount water sold: {}".format(BSWp_star))
                """ Check the feasibility subproblem by DWave model """
                model = _feasi_by_quantum(m, n, ET, Y_star, X_star, BSWm_star, BSWp_star, eta)
                qubo, offset = model.to_qubo()
                bqm = dimod.BinaryQuadraticModel.from_qubo(qubo, offset)
                solver = DWaveSampler(token=token, solver="DW_2000Q_6")
                print("Solver used: {}".format(solver))
                # Minor-embed the QUBO onto the QPU topology once, then reuse
                # the fixed embedding for sampling.
                __, target_edgelist, target_adjacency = solver.structure
                emb = find_embedding(qubo, target_edgelist, verbose=1)
                sampler = FixedEmbeddingComposite(solver, emb)
                # sampler = EmbeddingComposite(solver)
                response = sampler.sample(bqm, num_reads=100, chain_strength=4.0)
                sample = response.first.sample
                # next_sequence = pyqubo.solve_qubo(qubo)
                print("Sample result: {}".format(sample))
                for datum in response.data(['sample', 'energy', 'num_occurrences']):
                    print(datum.sample, "Energy: ", datum.energy, "Occurrences: ", datum.num_occurrences)
                # # Inspector
                # dwave.inspector.show(response)
                solved = True
                df = response.to_pandas_dataframe(sample_column=True)
                print(df)
                plt.show(df.hist(column="energy"))
                objVal = milp_model.objVal
            else:
                # NOTE(review): getAttr("X", ...) requires a stored solution;
                # on an infeasible model this raises GurobiError (caught
                # below), so the integer-cut branch may never actually run.
                assign = milp_model.getAttr("X", X)
                """ Infeasible """
                k += 1
                """ Adding integer cuts """
                # 1. Creation of set of elements x[i,m] == 1
                # NOTE(review): `assign` holds float values, so
                # `assign[(i,t)].x` looks like it would raise AttributeError;
                # also `for m in range(T)` below shadows the sub-area count m
                # -- confirm both.
                set_xim = [i for i in range(m) for t in range(T) if assign[(i,t)].x == 1]
                # 2. Add constraint to MILP model
                milp_model.addConstrs((grb.quicksum([assign[(i,t)] for i in set_xim for t in range(T) if assign[(i,t)].x == 1]) <= len(set_xim) - 1 for m in range(T)),
                                      name="integer cut")
    except grb.GurobiError as e:
        print("Error code " + str(e.errno) + ": " + str(e))
    return objVal, solved

# +
# Map Gurobi status codes to their symbolic names (for readable logging).
StatusDict = {getattr(grb.GRB.Status, s): s for s in dir(grb.GRB.Status) if s.isupper()}

""" Read data """
m = 3  # 3 sub-areas
n = 4  # number of crops
T = 2  # 2 stages of planning and management
# Create A_(i)^(t) matrix (t = 0, first period, i = type of sub-area)
A = np.array([[[277, 120], [280, 200], [148, 232], [228, 250]],
              [[161, 100], [162, 120], [86, 80], [133, 120]],
              [[111, 80], [112, 145], [59, 56], [91, 100]]])
# col 0 = paddy, col 1 = wheat, col 2 = bean, col 3 = oilseed rape
c = [-1635, -21868, -10287, -13635]
F = np.array([[[277, 120], [280, 200], [148, 232], [228, 250]],
              [[161, 100], [162, 120], [86, 80], [133, 120]],
              [[111, 80], [112, 145], [59, 56], [91, 100]]])
b = np.array([[[277, 120], [280, 200], [148, 232], [228, 250]],
              [[161, 100], [162, 120], [86, 80], [133, 120]],
              [[111, 80], [112, 145], [59, 56], [91, 100]]])
gamma = 1.0
eta = np.ones((m,T),dtype=float)
R = np.array([[3000, 44532], [23145, 21324], [875565, 212324]])
V = [1000, 1000]
ET = np.array([[[277, 120], [280, 200], [148, 232], [228, 250]],
               [[161, 100], [162, 120], [86, 80], [133, 120]],
               [[111, 80], [112, 145], [59, 56], [91, 100]]])

objVal, solved = solve(T, m, n, A, F, b, gamma, c, eta, V, ET)
if solved:
    print("Water allocation difference: {}".format(objVal))
Boltz/irrigation_opt_hybrid.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MsPASS Getting Started Tutorial # ## *<NAME> and Yinzhi (I<NAME>* # ## Preliminaries # This tutorial assumes you have already done the following: # 1. Installed docker. # 2. Run the commmand `docker pull wangyinz/mspass_tutorial` # 3. Launched docker using the tutorial container. # 4. Connected the container to get this tutorial running. # # Our installation manual describes how to do that so we assume that was completed for you to get this far. # # Note MsPASS can also be run from a local copy of MsPASS installed through pip. The only difference is in launching jupyter-notebook to get this tutorial running. None of the tutorial should depend upon which approach you are using. Further, if either approach was not done correctly you can expect python errors at the first import of a mspasspy module. # ## Download data with obspy # ### Overview of this section # MsPASS leans heavily on obspy. In particular, in this section we will use obspy's web services functions to download waveform data, station metadata, and source metadata. The approach we are using here is to stage these data to your local disk. The dataset we will assemble is the mainshock and X days of larger aftershocks of the Tohoku earthquake. The next section then covers how we import these data into the MsPASS framework to allow them to be processed. # ### Select, download, and save source data in MongoDB # As noted we are focusing on the Tohoku earthquake and its aftershocks. That earthquake's origin time is approximately March 11, 2011, at 5:46:24 UTC. The ISC epicenter is 38.30N, 142.50E. We will then apply obspy's *get_events* function with the following time and area filters: # 1. Starttime March 11, 2011, 1 hour before the origin time. # 2. 
End time 7 days after the mainshock origin time. # 3. Epicenters within + or - 3 degrees Latitude # 4. Epicenters within + or - 3 degrees of Longitude. # 5. Only aftershocks larger than 6.5 # Here is the incantation in obspy to do that: # + from obspy import UTCDateTime from obspy.clients.fdsn import Client client=Client("IRIS") t0=UTCDateTime('2011-03-11T05:46:24.0') starttime=t0-3600.0 endtime=t0+(7.0)*(24.0)*(3600.0) lat0=38.3 lon0=142.5 minlat=lat0-3.0 maxlat=lat0+3.0 minlon=lon0-3.0 maxlon=lon0+3.0 minmag=6.5 cat=client.get_events(starttime=starttime,endtime=endtime, minlatitude=minlat,minlongitude=minlon, maxlatitude=maxlat,maxlongitude=maxlon, minmagnitude=minmag) print(cat) # - # We can save these easily into MongoDB for use in later processing with this simple command. from mspasspy.db.database import Database from mspasspy.db.client import DBClient dbclient=DBClient() db=Database(dbclient,'getting_started',db_schema='mspass_lite.yaml', md_schema='mspass_lite.yaml') n=db.save_catalog(cat) print('number of event entries saved in source collection=',n) # ### Select, download, and save station metadata to MongoDB # We use a very similar procedure to download and save station data. We again use obspy but in this case we use their *get_stations* function to construct what they call an "Inventory" object containing the station data. inv=client.get_stations(network='TA',starttime=starttime,endtime=endtime, format='xml',channel='BH?',level='response') net=inv.networks x=net[0] sta=x.stations print("Number of stations retrieved=",len(sta)) print(inv) # The output shows we just downloaded the data form 446 TA stations that were running during this time period. Note a detail is if you want full response information stored in the database you need to specify *level='response'* as we have here. The default is never right. You need to specify level as at least "channel". 
# # We will now save these data to MongoDB with a very similar command to above: ret=db.save_inventory(inv,verbose=True) print('save_inventory returned value=',ret) # We turned on verbose mode so the output is quite large, but we did that because it demonstrates what this Database method is doing. It takes apart the complicated Inventory object obspy created from the stationxml data we downloaded and turns the result into a set of documents saved in the two main station metadata collections in MongoDB: (1) *site* for station information and (2) *channel* that contains most of the same data as *site* but add a set of important additional information (notably component orientation and response data). # # We turn now to the task of downloading the waveform data. # ### Download waveform data # The last download step for this tutorial is the one that will take the most time and consume the most disk space; downloading the waveform data. To keep this under control we keep only a waveform section spanning most of the body waves. We won't burden you with the details of how we obtained the following rough numbers we use to define the waveform downloading parameters: # # 1. The approximate distance from the mainshock epicenter to the center of the USArray in 2011 is 86.5 degrees. # 2. P arrival is expected about 763 s after the origin time # 3. S arrival is expected about 1400 s after the origin time # # Since we have stations spanning the continent we will use the origin time of each event +P travel time (763 s) - 4 minutes as the start time. For the end time we will use the origin time + S travel time (1400 s) + 10 minutes. # # This process will be driven by origin times from the events we downloaded earlier. We could drive this by using the obspy *Catalog* object created above, but because saved the event data to the database we will use this opportunity to illustrate how that data is managed in MsPASS. # # First, let's go over the data we saved in MongoDB. 
We saved these data in a *collection* we call *source*. For those familiar with relational databases a MongoDB "collection" plays a role similar to a table (relation) in a relational database. A "collection" contains one or more "documents". A "document" in MongoDB is analagous to a single tuple in a relational database. The internal structure of a MongoDB is, however, very different being represented by binary storage of name-value pairs in a format they call BSON because the structure can be represented in human readable form as a common format today called JSON. A key point for MsPASS to understand is the BSON (JSON) documents stored in MongoDB map directly into a python dict container. We illustrate that in the next box by printing the event hypocenter data we downloaded above and then stored in MongoDB: dbsource=db.source cursor=dbsource.find() # This says to retrieve and iterator overall all source documents # The Cursor object MongoDB's find function returns is iterable print('Event in tutorial dataset') evlist=list() for doc in cursor: lat=doc['lat'] lon=doc['lon'] depth=doc['depth'] origin_time=doc['time'] mag=doc['magnitude'] # In MsPASS all times are stored as epoch times. obspy's UTCDateTime function easily converts these to # a readable form in the print statment here but do that only for printing or where required to # interact with obspy print(lat,lon,depth,UTCDateTime(origin_time),mag) evlist.append([lat,lon,depth,origin_time,mag]) # Notice that we use python dict syntax to extract attributes like latitude ('lat' key) from "document" which acts like a python dict. # # We saved the core metadata attributes in a python list, *evlist*, to allow us to reduce the volume of data we will retrieve. We'll keep just the three biggest events; the mainshock and the two 7+ aftershocks. We do that in the block below using a set container to define keepers. The approach is a bit obscure and not the most efficient. 
We could just use the list we just built to drive the processing in the next box, but we use the approach there to illustrate another example of how to loop over documents retrieved from MongoDB - a common thing you will need to do to work with MongoDB in MsPASS. # # With that the loop below is similar to the simple print loop above, BUT requires an obscure parameter not usually discussed in the MongoDB documentation. The problem we have to deal with is that the obspy web service downloader we are going to call in the loop below will take a while to complete (likely over an hour). Long running processes interacting with MongoDB and using a "Cursor" object (the thing *find* returned) can fail and throw confusing messages from a timeout problem. That is, a job will mysteriously fail with a message that does not always make the fundamental problem clear. The solution is to create what some books call an "immortal cursor". You will see this in the next box that does the waveform downloading as this line: # ``` # cursor=dbsource.find({},no_cursor_timeout=True) # ``` # where here we use an explicit "find all" with the (weird) syntax of "{}" and make the cursor immortal by setting no_cursor_timeout to True. # # With that background here is the script to download data. You might want to go grab a cup of coffee while this runs as it will take a while. 
# + from mspasspy.util.converter import Trace2TimeSeries #db=Database(dbclient,'getting_started') # We have to redefine the client from obspy to use their so called bulk downloader from obspy.clients.fdsn import RoutingClient client = RoutingClient("iris-federator") # This uses the (admitted obscure) approach of a set container of keepers to reduce the download time keepers=set() keepers.add(4) keepers.add(7) keepers.add(10) cursor=db.source.find({},no_cursor_timeout = True) count=0 start_offset=763.0-4*60.0 end_offset=1400.0+10*60.0 i=0 for doc in cursor: if i in keepers: origin_time=doc['time'] # We need the ObjectId of the source to provide a cross reference to link our # waveform data to the right source. Better to do this now than later as association # can be a big challenge id=doc['_id'] print('Starting on event number',i,' with origin time=',UTCDateTime(origin_time)) stime=origin_time+start_offset etime=origin_time+end_offset strm=client.get_waveforms( starttime=UTCDateTime(stime), endtime=UTCDateTime(etime), network='TA', channel='BH?', location='*' ) # The output of get_waveforms is an obspy Stream object. Stream objects are iterable # so we work through the group, converting each to a MsPASS TimeSeries, and then saving # the results to the database. for d in strm: d_mspass=Trace2TimeSeries(d) # Here is where we save the id linked to the source collection. # We use a MsPASS convention that such metadata have name collection_id d_mspass['source_id'] = id #print('Saving data for sta=',d_mspass['sta'],' and chan=',d_mspass['chan']) db.save_data(d_mspass) count += 1 else: print('Skipping event number ',i,' with origin time=',UTCDateTime(origin_time)) i += 1 print('Number of waveforms saved=',count) # - # ## Quick Look for QC # MsPASS has some basic graphics capabilities to display its standard data types. 
We refer the reader to a the *BasicGraphics* tutorial for more details, but for now we just illustrate using the plotting for a quick look as a basic QC to verify we have what we were looking for. # # The previous step used *save_data* to save the data we downloaded to MongoDB. How it is stored is a topic for later, but here we'll retrieve some of that data and plot it to illustrate the value of abstracting the read and write operations. Reading is slightly more complicated than writing one atomic object as we did above. The reason is that we often want to do a database select operation to limit what we get. This example illustrates some basics on MongoDB queries. The reader is referred to extensive external documentation on MongoDB (books and many online sources) on this topic. # # Let's first grab the data for one station from all three events we saved. This is a good illustration of the basic query mechanism used by MongoDB. from mspasspy.ccore.seismic import TimeSeriesEnsemble sta='234A' ensemble=TimeSeriesEnsemble() ensemble['sta']=sta query={ 'sta' : sta } n=db.wf_TimeSeries.count_documents(query) print('Trying to retrieve ',n,' TimeSeries objects for station=',sta) curs=db.wf_TimeSeries.find(query) for doc in curs: # With the current mspass_lite schema these extra options are required d=db.read_data(doc) ensemble.member.append(d) print('Success: number of members in this ensemble=',len(ensemble.member)) # Notice that like *save_data* the default usage of *read_data* is about as simple as it gets; you just point the reader at the document abstraction (the *doc* symbol) that defines a particular waveform and it is retrieved. # # A second point this example is a key concept in MsPASS. We define two data objects as *Atomic* that we refer to as *TimeSeries* and *Seismogram*. We are working with *TimeSeries* objects here because they were born from single channel records downloaded with FDSN web services. 
Later in this tutorial we will convert the data now defined by the wf_TimeSeries collection to *Seismogram* objects and write them into a wf_Seismogram collection. We call *Timeseries* and *Seismogram* object "Atomic" because for most processing they should be considered a single thing. (For more experienced programs, note that in reality like atoms these data objects are made of subatomic particles with a class inheritance structure but the energy barrier to pull them apart is significant.) Many functions, however, need the concept we call an *Ensemble*. These are a collection of atomic objects that have a generic relationship. Examples are "shot gathers", "CMP gathers", and "common receiver gathers" concepts used in seismic reflection processing. An *Ensemble* is a container that can be any of those. Your workflow needs to be aware at all times what is in any ensemble and be sure the requirements of an algorithm are met. (e.g. in reflection processing you will junk if you think an ensemble is a NMO corrected CMP gather and the data are actually a raw shot gather). # # There are two named types of ensemble containers; one for each atomic data object. They are called *TimeSeriesEnsemble* and *SeismogramEnsemble* for containers of *TimeSeries* and *Seismogram* object respectively. In both cases the seismic data components are stored in simple list-like container defined with the symbol *member*. The container also can act a bit like a python dict to store global metadata related to the ensemble. We will refer to that container here as the *ensemble metadata*. Our example above can be thought of as a common receiver gather for TA station 234A. Hence, we post that name with the key *sta* with the line `ensemble['sta']=sta`. That model should be the norm for any ensemble. That is, the ensemble metadata should normally contain a set of one or more key-value pairs that at least provide a hint at the ensemble contents. 
# Here that is the station name, but we could add other data like the station's coordinates.  We defer that to below where that data becomes necessary.
#
# With that background let's plot these data.  Here we illustrate the use of basic plotting routines in MsPASS but we note any python graphic package can be used for plotting these data if you understand the data structures.  We provide the *SeismicPlotter* class here as a convenience.  A valuable addition for community development is extensions of our basic graphics module or alternative plotting modules.

from mspasspy.graphics import SeismicPlotter
# normalize=True turns on automatic amplitude scaling (off by default)
plotter=SeismicPlotter(normalize=True)
# TODO default wtvaimg has a bug and produces a 0 height plot - wtva works for now
plotter.change_style('wtva')
plotter.plot(ensemble)

# Again, about as simple as it gets.  This and the earlier examples illustrate a key design goal we had for MsPASS:  make the package as simple to use for beginners as possible.  The only trick we used here was to turn on automatic scaling (`normalize=True` line in the constructor) that is turned off by default.  Scaling is essential here since we are mixing data from events with different magnitudes.
#
# The plot shows the mainshock record as the top 3 signals.  Below that are sets of 3 signals from the other 2 large events in our data subset defined as "keepers".  Notice how the 3 sets of signals are offset in time.  The data are plotted that way because the data time stamp is coordinated universal time (UTC) and they are being plotted in their actual timing position.  The topic of a UTC time standard and how MsPASS handles this is a unique feature of MsPASS and is the topic of the next section.
#
# The SeismicPlotter has a fair amount of additional functionality.  See the BasicGraphics tutorial to learn some of that functionality.

# ## UTC and Relative Time
# A unique feature of MsPASS is that we aimed to make it generic by supporting multiple time standards.
# MsPASS currently supports two time standards we refer to with the name keys *UTC* and *Relative*.  The first, *UTC*, is well understood by all seismologists who work with any modern data.  UTC is a standard abbreviation for coordinated universal time, which is the time standard used on all modern data loggers.  It is important to recognize that unlike obspy we store all UTC times internally as "unix epoch times".  Epoch times are the number of seconds in UTC since the first instant of the year 1970.  We use only epoch times internally as it vastly simplifies storage of time attributes since they can be stored as a standard python float (always a 64 bit real number in python) that causes no complications in storage to MongoDB.  It also vastly simplifies computing time differences, which is a very common thing in data processing.  To convert UTC times to a human readable form we suggest using the obspy UTCDateTime class as we did above.  The inverse (converting a UTC date string to an epoch time) is simple with the timestamp method of UTCDateTime.  Some wrapped obspy functions require UTCDateTime objects but all database times are stored as floats.  Most obspy functions, like the web service functions we used above, use the UTCDateTime class to define times.  The point is to be cautious about what time arguments mean to different functions.
#
# The idea of a *Relative* time is well known to anyone who has done seismic reflection processing.  Experienced SAC users will also understand the concept through a different mechanism that we generalize.  Time 0 for seismic reflection data ALWAYS means the time that the "shot" was fired.  That is a type example of what we mean by *Relative* time.  Earthquake data can be converted to the same concept by setting time zero for each signal to the origin time of the event.  SAC users will recognize that idea as the case of the "O" definition of the data's time stamp.
# Our *Relative* time, in fact, is a generalization of SAC's finite set of definitions for the time stamp for one of their data files like Tn, B, O, etc.  *Relative* just means the data are relative to some arbitrary time stamp that we refer to internally as *t0_shift*.  It is the user's responsibility to keep track of what *t0_shift* means for your data and whether that reference is rational for the algorithm being run.  TimeSeries and Seismogram objects keep track of *t0_shift* and the reference can be shifted to a different time if desired through combinations of three different methods:  *rtoa* (switch from Relative to Absolute=UTC), *ator* (switch from Absolute(UTC) to Relative), and *shift* that is used to apply a relative time shift.
#
# In this next block we take the ensemble we created above and apply a time shift to put 0 at the predicted P wave arrival time.

# We make a deep copy with this mechanism so we can restore raw data later
enscpy=TimeSeriesEnsemble(ensemble)
# Shift each member from UTC to relative time with time 0 at its old start time.
# enumerate replaces the hand-maintained counter that was used only for labeling output.
for i,d in enumerate(ensemble.member):
    print('member ',i,' input t0 time=',UTCDateTime(d.t0))
    d.ator(d.t0)
    # message fixed:  original said "how has time 0"
    print('member ',i,' now has time 0=',d.t0)

# Notice how the times changed from an offset from the origin time we used for downloading to 0.  We can see this effect graphically in the next box.

plotter.plot(ensemble)

# Note that now the time axis starts at 0, BUT that is Relative time.  Here that time is a fixed offset from the origin time we obtained from the hypocenter for each event.
#
# Those signals are ugly because these aftershocks are buried in the long period coda of the mainshock.  That should be clear from the UTC time plot we first made where the bottom six signals overlap, a common situation that presents a huge problem in some processing frameworks.  Let's plot this again with a bandpass filter applied.
from mspasspy.algorithms.signals import filter
filter(ensemble,'bandpass',freqmin=0.2,freqmax=2.0)
plotter.plot(ensemble)

# Notice that we can now see the P and S waves even for the two aftershocks in this extended short period band.
#
# ## Windowing Data
#
# The time scale we have in the plot above is largely useless; it is just an arbitrary offset from the origin time for each event.  In this section we will illustrate the common processing step in dealing with teleseismic data where we need to extract a smaller time window around a phase of interest.
#
# First, as we just saw the aftershocks are a special problem because they are buried in low frequency coda of the mainshock.  We will thus focus for the rest of this tutorial on the mainshock.  First, let's load the vertical components of the mainshock into a working ensemble.  To do that, we first have to define a query method to extract only the data we want.  This will be a step you will nearly always need to address in handling teleseismic data.  The previous examples and the steps below are a start, but we reiterate that if you become a serious user of MsPASS you will need to become familiar with the pymongo API.  Our documentation indirectly covers many of the essentials, but MongoDB is a large, heavily-used package with a lot of features.
#
# With that lecture, the key data we will use is the start times printed in the box above that ran the ator method.  Here is the algorithm.  See comments below the run box that provide the lessons to be learned from this example:

t_to_query=UTCDateTime('2011-03-11T05:55:06.000000')
# This is a MongoDB range query.  We allow a +- 5 s slop because the start
# times are always slightly irregular.
# we also do an exact match test for BHZ.  That works for these data but
# more elaborate queries with wildcards are a subject best left to MongoDB documentation
dt_for_test=5.0
tmin=t_to_query-dt_for_test
tmax=t_to_query+dt_for_test
query={'starttime' : {'$gt': tmin.timestamp,'$lt' : tmax.timestamp }}
# query is a python dict so we can add an additional criteria like this
query['chan']='BHZ'
print('Using this query')
print(query)
n=db.wf_TimeSeries.count_documents(query)
print('Mainshock ensemble size from time query=',n)
# We could just use that but let's get the special key source_id we set above
# and query with it as a more unambiguous query
doc=db.wf_TimeSeries.find_one(query)
srcid=doc['source_id']
idquery={'source_id' : srcid}
idquery['chan']='BHZ'   # add same equality test of BHZ
n=db.wf_TimeSeries.count_documents(idquery)
print('Mainshock ensemble size using id query=',n)
# Now build our working ensemble.  To keep graphics clean we limit the
# number retrieved to the first 15 stations.  Illustrates another pymongo function
ensemble=TimeSeriesEnsemble(15)
# Set the source_id into the ensemble metadata - this will now be a common source(shot)
# gather so appropriate to post it like this.  Might normally also add other source
# metadata there but they aren't needed for this tutorial
ensemble['source_id']=doc['source_id']
cursor=db.wf_TimeSeries.find(idquery).limit(15)
for doc in cursor:
    d=db.read_data(doc)
    ensemble.member.append(d)
plotter.plot(ensemble)

# Some things you should learn from this example working from the top down:
# 1.  With pymongo a query is constructed as a python dict.  Equality matches are implied by constructs like `query['chan']='BHZ'`.  The starttime construct is more elaborate but note the key to the dict is *starttime* and the value associated with the key is itself a dict.  Note that the concept of what MongoDB calls a "document" maps exactly into a python dict container.  MongoDB would call the dict container that is the value associated with *starttime* a "subdocument".
The construct used for the *starttime* query uses MongoDB operators as the keys in the subdocument. The operators are keywords that begin with the dollar sign ($) symbol. There is a long list of operators that can be found in various MongoDB online sources [like this one.](https://docs.mongodb.com/manual/reference/operator/query/>) # 2. We constructed our query in two steps. That isn't required. We did it just to illustrate that a query can often best be constructed from a core set that can be defined with an incantation using curly brackets, colons, constants, and python variables. It is sometimes useful to add constructs like the line for *chan* inside conditionals to handle different situation. That programming trick is not discussed in most tutorials. # 3. We create a new TimeSeriesEnsemble with a different form of the constructor. That constructor initializes the container setting aside 15 slots for TimeSeries. It provides a minor improvement in efficiency. All core data objects in MsPASS are implemented in C++ to improve performance. Note that any symbol loaded with the path defined by *mspasspy.ccore* means the thing being accessed was written in C++ with python wrappers. We do not currently have a clean mechanism for creating complete sphynx documentation pages for the C++ code. Most of the API can be inferred from the C++ doxygen web page found [here.](https://wangyinz.github.io/mspass/cxx_api/mspass.html#mspass-namespace) # 4. Our example intentionally issues a second (redundant) query for the data of interest. We do that strictly for the educational value this provides. In this case they are exactly equivalent. The two queries illustrate two ways to alternative mechanism (with these data) to assemble a common source gather (Ensemble). The second query uses a special entity used extensively in MongoDB called a [ObjectID](https://docs.mongodb.com/manual/reference/method/ObjectId/). 
# The second query using a *source_id* value works only because we loaded that cross-reference key with the data.  If you inspect the earlier boxes you will see we downloaded the data looping over a list of sources.  We defined *source_id* by extracting the ObjectID of the document holding the source information in the *source collection* referenced by the key `"_id"` of the linking source document.  (i.e. *source_id* is the same as the value associated with a document in the source collection with the key *_id*.)
# In MsPASS the standard model for data is that source and receiver information are stored in what MongoDB calls the [normalized data model](https://docs.mongodb.com/manual/core/data-model-design/).  The *read_data* method called in the loop loading the ensemble as the last step in this box will, by default, automatically load source information when it sees the magic key *source_id*.  The same mechanism is used for receiver data, but in its current state that cannot yet be done because we haven't yet built the required cross referencing.  The next step handles that inline, but for efficiency any workflow that requires receiver metadata would need a mechanism to associate each wf_TimeSeries document to a document in the *channel* collection.
#
# The last thing the above box does is plot the data.  We see the data are again in UTC time and the data are not aligned because there is no "moveout correction".  The next step is then to convert the data to relative time and align the data on the predicted P wave arrival time.  The first step is to associate each TimeSeries object with the receiver metadata of the instrument that recorded the data.  Because these data came from an FDSN data center (IRIS) a given channel of data is uniquely defined by the four magic SEED code names referred to as network, station, channel, and location.  In the standard MsPASS schema these are shortened to *net*, *sta*, *chan*, and *loc* respectively.
# Here we use a preprocessing function called *get_seed_channel* to retrieve the basic station metadata and load that data into each member of the working ensemble.

# Next step is to load source and station coordinates
from mspasspy.preprocessing.seed.ensembles import load_source_data_by_id
for d in ensemble.member:
    load_source_data_by_id(db,d)
    #print(d)
    net=d['net']
    sta=d['sta']
    chan=d['chan']
    # loc is sometimes undefined
    if d.is_defined('loc'):
        loc=d['loc']
    else:
        loc=None
    # copy coordinate and orientation attributes from the channel document
    # into this member's metadata
    doc=db.get_seed_channel(net,sta,chan,loc,time=d.t0)
    d['channel_lat']=doc['lat']
    d['channel_lon']=doc['lon']
    d['channel_elev']=doc['elev']
    d['channel_hang']=doc['hang']
    d['channel_vang']=doc['vang']
    d['channel_id']=doc['_id']

# +
from mspasspy.algorithms.window import WindowData
from mspasspy.algorithms.signals import filter
from mspasspy.ccore.seismic import TimeWindow
from obspy.taup import TauPyModel
model = TauPyModel(model="iasp91")
from obspy.geodetics import gps2dist_azimuth,kilometers2degrees
# This is a time window in time relative to P arrival around
# which we cut each waveform
cutwin=TimeWindow(-10.0,300.0)
# enumerate replaces the hand-maintained index counter of the original version
for i,d in enumerate(ensemble.member):
    filter(d,'bandpass',freqmax=2.0,freqmin=0.005)
    stalat=d['channel_lat']
    stalon=d['channel_lon']
    srclat=d['source_lat']
    srclon=d['source_lon']
    depth=d['source_depth']
    otime=d['source_time']
    georesult=gps2dist_azimuth(srclat,srclon,stalat,stalon)
    # obspy's function we just called returns distance in m in element 0 of a tuple
    # their travel time calculator uses degrees so we need this conversion
    dist=kilometers2degrees(georesult[0]/1000.0)
    arrivals=model.get_travel_times(source_depth_in_km=depth,distance_in_degree=dist,phase_list=['P'])
    # Arrivals are returned in time order 0 is always the first arrival
    # This computes arrival time as an epoch time and shifts the data to put 0 at that time
    d.ator(otime+arrivals[0].time)
    #d=WindowData(d,cutwin)
    ensemble.member[i]=WindowData(d,cutwin)
plotter.plot(ensemble)
# -

# The data are now aligned with 0 set as the
predicted arrival time. For seismologists this plot mostly illustrates the complex source-time function of this earthquake. One way to clarify this is to calculate the full set of arrivals for this event. An easy way to do that is to just recycle the last coordinates used. We turn off the restriction to only calculate the P arrival time and print the default phase list returned by the tau-p calculator. That will show you that the first major secondary phase, PP, does not arrive until around relative time 225 s. Hence, the common signal between 0 and 225 is mainly a reflection of the source, not structure. arrivals=model.get_travel_times(source_depth_in_km=depth,distance_in_degree=dist) print(arrivals) # ## Creating Seismogram Objects # MsPASS considers two data types to be atomic seismic data. What we call a *TimeSeries* is a single channel of data. As we saw above a unique seed combination of the codes net, sta, chan, and loc define a single channel data stream. In all seismic data processing we usually cut out sections of data as a chunk to be dealt with as a single entity. There are some algorithms where the model of a single channel of data is meaningless or useless. Data recorded by a three-component seismic station is a case in point; the components have a fundamental relationship that for some applications make them indivisible. Two examples most seismologists will be familiar with are particle motion measurements and conventional receiver functions. Both require an input of three-component data to make any sense. For this reason we distinguish a differerent atomic object we call a *Seismogram* to define data that is by definition a three-component set of recordings. # # The problem of assembling three-component data from raw data is not at all trivial. 
Today most data loggers produce multiple sample rate representations of the same data stream and observatory data like GSN stations frequently have multiple, three-component (3C) sensors at the same approximate location. It is not at all uncommon to have 24 or more channels defining the same data with different sensors and different sample rates. Active source multichannel data, by which I mean older cable systems were the data were multiplexed, present a different problem assembling three-component data as a map between channel number and component is required to put the components in the right place for each receiver position. The primary reason we define a separate data object for 3C versus scalar data is too allow algorithms that depend upon 3C data to not be burdened with that complexity. If the workflow you need for your research requires 3C data you should alway think of your workflow as four distinct steps: # 1. Importing raw data as TimeSeries. For MsPASS that always means selecting and cutting out time windows that define what part of what signals you need to work with. The example above cutting data down to a P wave window is an example, but the concept is generic. # 2. Populating the metadata that define at least two fundamental properties of all data channels to be handled: (1) at least relative amplitudes between components and (2) orientations of the components in space. Our example below will illustrate an example of how this needs to be done. # 3. A generic process we call *bundle*. Bundling is descriptive because the process requires the three components that define a particular sensor to be joined together an put into the single container we call a *Seismogram*. # 4. Save the bundled data in some form. In MsPASS the preferred form is storage under the control of MongoDB and MsPASS, but such data could also be exported in some output format for handling by some external package. 
# # # In this tutorial we already completed step 1 and part of step 2. What is missing from step 2 is that the wf_Seismogram records are currently incomplete. To see that we will use the *find_one* method of MongoDB to get a quick peek at the first record (document) in our data set of TimeSeries objects: from bson import json_util doc=db.wf_TimeSeries.find_one() print(json_util.dumps(doc,indent=2)) # There is a lot of stuff there, but it is educational to see the full set of attributes stored currently in each document in the wf_TimeSeries collection. Notice the attribute *source_id*. Look above and you will see how we set that attribute earlier. It is a MongoDB *ObjectId* used as we discussed above to identify a unique document in the *source* collection. We used it above to get source coordinates for computing predicted arrival times we used to window the data around times relative to the predicted P wave arrival time. # # The problem we face now is that if you look at all the stuff above, there is no information at all about the receiver except the SEED unique name keys we tag with the keys net, sta, chan, and loc. (These short forms were taken from Antelope. Obspy users should note they are identical to the stats entries they tag with the full englisn words network, station, channel, and location.) These provide a solution for miniseed/seed data that we will exploit below. That is, with seed data those four name keys in combination with a UTC time stamp are guaranteed to yield one and only one channel of data. We caution the user, however, that the seed convention is not at all generic. It works only because the "S" in the "SEED" acronym is "Standard" and any data obtained from an FDSN data center should obey that standard. For other data there are no such rules and the process of linking wf_TimeSeries data to channel collections will require an alternative solution. 
We anticipate alternatives could become part of MsPASS in the future, but for the present the seed model is the only generic solution to linking data in a wf_TimeSeries collection to the correct document in channel. # # The next box creates those links using two cross-references: one for the *channel* collection (needed for orientation and calibration data) and one for what we call *site* which contains only location information. The *site* collection is the standard way we normalize *Seismogram* objects when read from the database. # For this tutorial it is convenient to build that link at the same time as that for *channel* so it can be # passed to Seismogram objects we will create below. db=Database(dbclient,'getting_started') cursor=db.wf_TimeSeries.find({}) for doc in cursor: wfid=doc['_id'] net=doc['net'] sta=doc['sta'] chan=doc['chan'] #loc=doc['loc'] loc="" stime=doc['starttime'] chandoc=db.get_seed_channel(net,sta,chan,loc,time=stime) if chandoc==None: print('No matching document found in channel for ',net,sta,chan,loc,UTCDateTime(stime)) print('skipped') continue chanid=chandoc['_id'] sitedoc=db.get_seed_site(net,sta,time=stime) if sitedoc==None: print('No matching document found in site for ',net,sta,UTCDateTime(stime)) print('skipped') continue siteid=sitedoc['_id'] db.wf_TimeSeries.update_one({'_id' : wfid}, {'$set' : {'channel_id':chanid}}) db.wf_TimeSeries.update_one({'_id' : wfid}, {'$set' : {'site_id' : siteid}}) # You likely found that took a little time because the algorithm was doing a lot of database transaction on three different collections: wf_TimeSeries, site, and channel. We could have speeded the performance a bit by building an index, but that a topic better discussed in other tutorials were it is more essential. The complexity of the above algorithm is that to handle each wf_TimeSeries document we have to query channel and site and then run an update operation on wf_TimeSeries. 
# We can get a better feel for this from the following:

# Count the documents in each of the three collections touched by the loop above
nwf=db.wf_TimeSeries.count_documents({})
nchan=db.channel.count_documents({})
nsite=db.site.count_documents({})
print("Number of documents in current collections:")
print("wf_TimeSeries: ",nwf)
print("channel: ",nchan)
print("site: ",nsite)

# To understand the cost of the operation above realize 3933 times the algorithm has to query the channel (contains 1338 documents to scan) and site (contains 446 documents to scan) and then the update has to locate the right record in wf_TimeSeries.  Thus each of the 3 sizes above are multipliers in the number of transactions required to do this operation.  We point this out because an important thing all new users must recognize is that as a database size grows it may be necessary to more deeply understand how such things work to realize why what you thought might finish quickly runs for hours.  As noted above, using the right indices can vastly improve performance.  This example also emphasizes how parallelization of database transactions provided through MongoDB can be an important performance gain.  We touch on this below, but the full details are an advanced topic.
# + from mspasspy.algorithms.bundle import bundle_seed_data from mspasspy.util.Undertaker import Undertaker from mspasspy.ccore.seismic import TimeSeriesEnsemble db=Database(dbclient,'getting_started') srcids=db.wf_TimeSeries.distinct('source_id') stedronsky=Undertaker(db) for sid in srcids: query={'source_id' : sid } nd=db.wf_TimeSeries.count_documents(query) cursor=db.wf_TimeSeries.find( {'source_id' : sid} ) ensemble=TimeSeriesEnsemble(nd) for doc in cursor: d=db.read_data(doc,normalize=['channel','source']) ensemble.member.append(d) #print(d) print('Number of TimeSeries objects for this source=',len(ensemble.member)) ens3c=bundle_seed_data(ensemble) print('Number of (3C) Seismogram object saved for this source=',len(ens3c.member)) [living,bodies]=stedronsky.bring_out_your_dead(ens3c) print('number of bundled Seismogram=',len(living.member)) print('number of killed Seismogram=',len(bodies.member)) for i in range(len(bodies.member)): d=bodies.member[i] net=d['net'] sta=d['sta'] print('Errors posted for net=',net,' station=',sta) for e in d.elog.get_error_log(): print(e.algorithm,e.badness,e.message) db.save_ensemble_data(ens3c) # - n=db.Seismogram.count_documents({}) print('Total number of seismograms objects now in db=',n) # ## Parallel Processing # This section will apply a workflow to the entire downloaded data set. Pretty basic one driven by wf_TimeSeries collection. Thinking: # 1. parallel reader # 2. detrend # 3. filter # 4. cut around P times (means might need a block earlier to post arrival_time to each wf document. Might be a good lesson in foreign undefined metadata.
sage-gage_2021/getting_started.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.6.10 64-bit (''tf-gpu'': conda)'
#     name: python3
# ---

'''
What is WordNet ?
WordNet is a lexical database for the English language, which was created by Princeton, and is part of the NLTK corpus.
You can use WordNet alongside the NLTK module to find the meanings of words, synonyms, antonyms, and more.
Let's cover some examples.
'''
from nltk.corpus import wordnet

# All synsets (synonym sets) that contain the word "program"
syns = wordnet.synsets("program")
print(syns)

# For each synset show its first lemma name, gloss definition, and usage examples
for w in syns:
    print(w.lemmas()[0].name(),": ", w.definition())
    print("Example : ", w.examples())
    print("------------------------------------------------------------------")

# +
# Collect every synonym lemma of "good" and, where present, its antonyms
synonyms = []
antonyms = []

for syn in wordnet.synsets("good"):
    for l in syn.lemmas():
        synonyms.append(l.name())
        if l.antonyms():
            antonyms.append(l.antonyms()[0].name())

print(set(synonyms))
print(set(antonyms))

# +
# Wu-Palmer semantic similarity between "ship" and several other nouns
w1 = wordnet.synset("ship.n.01")
w2 = wordnet.synset("boat.n.01")
w3 = wordnet.synset("car.n.01")
w4 = wordnet.synset("cat.n.01")
w5 = wordnet.synset("cactus.n.01")

print(w1.wup_similarity(w2))
print(w1.wup_similarity(w3))
print(w1.wup_similarity(w4))
print(w1.wup_similarity(w5))

# +
#Additional
# -

'''Synsets and Lemmas
In WordNet, similar words are grouped into a set known as a Synset (short for Synonym-set). Every Synset has a name, a part-of-speech, and a number. The words in a Synset are known as Lemmas.
Getting Synsets
The function wordnet.synsets('word') returns an array containing all the Synsets related to the word passed to it as the argument.
'''
print(wordnet.synsets('room'))

# +
#The method returned five Synsets; four have the name ’room’ and are nouns, while the last one’s name is ’board’ and is a verb.
# The output also suggests that the word ‘room’ has a total of five meanings or contexts.
# -

'''Hyponyms
A Hyponym is a specialisation of a Synset. It can be thought of as a child (or derived) class in inheritance.
The function hyponyms() returns an array containing all the Synsets which are Hyponyms of the given Synset:
'''
print(wordnet.synset('calendar.n.01').hyponyms())

'''
Hypernyms
A Hypernym is a generalisation of a Synset (i.e. the opposite of a Hyponym).
An array containing all Hypernyms of a Synset is returned by hypernyms():
'''
print(wordnet.synset('solar_calendar.n.01').hypernyms())
9-Day2-WordNet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.10.0 64-bit (''adventofcode-mOkh6lsX'': pipenv)' # language: python # name: python3 # --- # # Streaming binary format parsing # # * <https://adventofcode.com/2021/day/16> # # Part 1 is primarily a stream parsing task; given an input stream of hex digits, parse the stream into packets of data. # # I decided to unpack the hex value as a byte string; in Python the `bytes` type has a convenient [`fromhex()` method](https://docs.python.org/3/library/stdtypes.html#bytes.fromhex), and I made sure to pad the end of the input transmission string with an extra `0` if it didn't contain a multiple of 2 hex digits; that turned out not to be necessary for both the test and puzzle inputs. The reason I did this is because you can then index into the `bytes` object and Python gives you a 8-bit integer, ideal for applying bitwise operations to to extract the desired bits. # # Next, the class encapsulating the stream (the stream *reader*) tracks the position in the input stream of bytes, as well as an offset into the current byte so we can read data at the bit level. This requires that you use [bit shifting](https://en.wikipedia.org/wiki/Bitwise_operation#Bit_shifts) to move the target bits over to the right, as well as [bit masking](https://en.wikipedia.org/wiki/Mask_(computing)#Masking_bits_to_0) to turn off any other bits present in the byte. To make these operations that little bit more efficient I pre-compute those masks and shift counts based on the current bit offset into the byte. Of course, to read a given number of bits might well require that you look at multiple consecutive bytes, so the `StreamReader.read_bits()` method uses a loop and left-shifts to build up the resulting integer value from multiple bytes, as needed. 
If this was a real-world project, the reader would need to handle a transmission or read buffer, and handle the transmission or file running out of data prematurely, but for AoC we can ignore such trivial error-handling concerns. # # To read each type of packet (literal or operator), the stream reader delegates to the `BasePacket` class, passing in the stream reader. This class reads the 3 bits for the version + the 3 bits for the packet type, then *dispatches* to the right packet subclass to handle reading the rest of the data. Given that part 1 was hand-wavy about the operator types not mattering _yet_ I anticipated that there'd be another 7 types of operator to handle later on, so I created a dispatch system based on registration via the [`object.__init_subclass__()` class hook](https://docs.python.org/3/reference/datamodel.html#object.__init_subclass__). This hook is called whenever a subclass of the class with such a hook is created, and allows you to set class-specific parameters via keyword arguments in a [`class <ClassName>(bases, ...)` statement](https://docs.python.org/3/reference/compound_stmts.html#class). If a specific packet subtype uses `type=<type_id>` in the class definition it'll be used for by the base packet class to read packet data, falling back to the generic `BaseOperatorPacket` class for unknown packet types that I am sure part 2 will tell us more about. This model lets you nicely compartmentalize reading of the packet types and their data, as well as handle reading the contained sub-packets for operators. # # Reading a literal then is simply a function of reading 5 bits in a loop, using the 4 right-most bits to build up the integer value (left-shifting the accumulating value by 4, add the new 4 bits after masking off the 5th bit), until the most-significant bit is 0. 
# # As isn't that un-common with data transmission formats that have been around for a while, there are multiple ways of expressing how much data you need to expect to read for recursively-embedded packets. The `1` variant (11 bits with a packet count) lets us just loop *count* times and ask the stream reader to read the next packet, but for the `0` variant (15 bits counting out the number of bits the contained packets will take up) requires that all packets track their own size. As you read packets, you can then track if you have read enough data for all child packets that the operator packet covers. Tracking the size of a given packet is not hard, of course, just a little finnicky as you need to account for the initial 6 bits with the version number and type id, plus whatever bits are necessary to implement the packet type. # # Finally, a (cached) property takes care of exposing the version number sum; for literal packets, just return the `version` attribute, for operator packets, the version sum is that of their own `version` value plus the sum of all child `version_sum` values. 
# +
from __future__ import annotations

from dataclasses import dataclass
from functools import cached_property
from typing import ClassVar, Final, Iterator, TypeVar, TypeAlias

# TypeVar so classmethod constructors are typed as returning the subclass.
T = TypeVar("T", bound="BasePacket")
# A BITS packet type id is a 3-bit integer (0-7).
TypeId: TypeAlias = int


@dataclass(frozen=True)
class BasePacket:
    # packet type id handled by this (sub)class; set via __init_subclass__
    type: ClassVar[TypeId]
    # dispatch table to read specific packet types, keyed by type id
    types: ClassVar[dict[TypeId, type[BasePacket]]] = {}

    def __init_subclass__(cls: T, type: TypeId | None = None, **kwargs) -> None:
        """Register a subclass for dispatch on a specific packet type_id"""
        if type is not None:
            cls.type = type
            BasePacket.types[type] = cls
        super().__init_subclass__(**kwargs)

    version: int  # 3-bit packet version header
    size: int  # in bits, total for this packet including any sub-packets

    @cached_property
    def version_sum(self) -> int:
        """Sum of the version values for this packet and any sub-packets"""
        return self.version

    @cached_property
    def expression_value(self) -> int:
        """Part 2 expression value for this packet"""
        raise NotImplementedError

    @classmethod
    def _read(cls: type[T], version: int, stream: StreamReader) -> T:
        """Read data for a single packet from the stream

        The version and type have already been read; subclasses should
        implement how each packet type's data is to be read from the stream,
        and create the specific packet instance.

        """
        raise NotImplementedError

    @classmethod
    def from_stream(cls, stream: StreamReader) -> BasePacket:
        """Read the next packet from the stream reader

        Dispatches to specific packet types based on the type_id read from
        the stream, falling back to BaseOperatorPacket if no specific type
        subclass is found.

        """
        version, type_id = stream.read_bits(3), stream.read_bits(3)
        return BasePacket.types.get(type_id, BaseOperatorPacket)._read(version, stream)


@dataclass(frozen=True)
class BaseOperatorPacket(BasePacket):
    # the sub-packets contained in this operator packet
    children: tuple[BasePacket, ...]

    @property
    def version_sum(self) -> int:
        """Own version plus the version sums of all child packets"""
        return self.version + sum(child.version_sum for child in self.children)

    def __len__(self) -> int:
        """Number of child packets contained"""
        return len(self.children)

    def __iter__(self) -> Iterator[int]:
        """Part 2: iterate over the expression value of each child packet"""
        yield from (child.expression_value for child in self.children)

    def __getitem__(self, i: int) -> int:
        """Part 2: get the expression value of the ith child packet"""
        return self.children[i].expression_value

    @classmethod
    def _read(cls: type[T], version: int, stream: StreamReader) -> T:
        """Read operator packet and contained child packets from the stream

        The number of subpackets read is determined from the first bit
        following the version and type_id bits. If 0, read a 15-bit length
        value from the stream, then read child packets until their total bit
        size is equal to that length value. If 1, read an 11-bit child packet
        count, then read child packet count number of packets from the
        stream.

        """
        length_type_id = stream.read_bits(1)
        size = 7  # version + type + flag bits
        subpackets = []
        if length_type_id == 0:
            # length is in bits
            packet_length = stream.read_bits(15)
            size += 15 + packet_length
            while packet_length:
                sub = next(stream)
                packet_length -= sub.size
                subpackets.append(sub)
            # child packet sizes must consume the advertised bit length exactly
            assert packet_length == 0
        else:
            # length is in packets
            packet_count = stream.read_bits(11)
            size += 11
            for _ in range(packet_count):
                sub = next(stream)
                size += sub.size
                subpackets.append(sub)
        return cls(version, size, tuple(subpackets))


@dataclass(frozen=True)
class LiteralPacket(BasePacket, type=4):
    value: int  # the decoded literal integer

    @cached_property
    def expression_value(self) -> int:
        """Part 2 expression value for this packet is the literal value"""
        return self.value

    @classmethod
    def _read(cls, version: int, stream: StreamReader) -> LiteralPacket:
        """Read a literal value packet

        Reads groups of 5 bits, containing a continuation bit and 4 bits for
        the literal value. Reading continues until the continuation bit is 0.

        """
        value = 0
        size = 6  # length of version + type bits
        while True:
            chunk = stream.read_bits(5)
            size += 5
            # low 4 bits extend the value; bit 5 (0x10) is the continuation flag
            value = (value << 4) | (chunk & 0xF)
            if not chunk & 0x10:
                break
        return cls(version, size, value)


# pre-computed bitmasks and right-shift amount to extract a certain number of
# bits from a byte, given the bit offset (0-7) and the number of bits to extract
# (1-8).
MASKS: Final[dict[tuple[int, int], tuple[int, int]]] = {
    (offset, count): ((2 ** count - 1) << (8 - offset - count), 8 - offset - count)
    for offset in range(8)
    for count in range(1, 9)
    if count + offset <= 8
}


class StreamReader:
    """Big-endian bit-stream reader over an in-memory bytes buffer"""

    def __init__(self, stream: bytes) -> None:
        self.stream = stream
        self.pos = 0  # byte position in stream
        self.bit_pos = 0  # offset into current byte (0-7)

    @classmethod
    def from_string(cls, s: str) -> StreamReader:
        """Create a reader from a string of hexadecimal digits"""
        # bytes.fromhex() requires an even number of hex digits; if the input
        # doesn't align to full bytes, append a 0 nibble as padding (the
        # packet parser never consumes the trailing padding bits).
        if len(s) % 2:
            s += "0"
        return cls(bytes.fromhex(s))

    def read_bits(self, count: int) -> int:
        """Read the next count bits from the stream as a single integer"""
        result = 0
        bpos, pos, stream = self.bit_pos, self.pos, self.stream
        while count:
            # take as many bits as still fit in the current byte
            bcount = min(count, 8 - bpos)
            count -= bcount
            result <<= bcount
            mask, shift = MASKS[bpos, bcount]
            result |= (stream[pos] & mask) >> shift
            bpos += bcount
            if bpos == 8:
                # current byte exhausted, move on to the next one
                bpos = 0
                pos += 1
        self.bit_pos, self.pos = bpos, pos
        return result

    def __iter__(self) -> Iterator[BasePacket]:
        return self

    def __next__(self) -> BasePacket:
        """Read the next packet from the stream"""
        return BasePacket.from_stream(self)


# part 1 examples from the puzzle description: hex input -> expected version sum
tests = {
    "D2FE28": 6,
    "38006F45291200": 9,
    "EE00D40C823060": 14,
    "8A004A801A8002F478": 16,
    "620080001611562C8802118E34": 12,
    "C0015000016115A2E0802F182340": 23,
    "A0016C880162017C3686B18A3D4780": 31,
}
for test_input, expected in tests.items():
    assert next(StreamReader.from_string(test_input)).version_sum == expected

# +
import aocd

transmission = aocd.get_data(day=16, year=2021)
print("Part 1:", next(StreamReader.from_string(transmission)).version_sum)
# -

# # Part 2: executing the operators
#
# Now that we can decode packets, we can define the operators; all I had to do was provide implementations of each operator type. I did have to update part 1 to give my classes a notion of *expression values*, and I expanded the `BaseOperatorPacket` class with some additional Python magic methods to implement indexing and iteration over the expression values of the contained child packets, but I could have done the same with mixin classes; it would just have been a lot more verbose.
# +
import operator
from functools import reduce


class OpSum(BaseOperatorPacket, type=0):
    """Type 0 operator: sum of all sub-packet values."""

    @cached_property
    def expression_value(self) -> int:
        return sum(value for value in self)


class OpProduct(BaseOperatorPacket, type=1):
    """Type 1 operator: product of all sub-packet values."""

    @cached_property
    def expression_value(self) -> int:
        return reduce(operator.mul, (value for value in self))


class OpMinimum(BaseOperatorPacket, type=2):
    """Type 2 operator: smallest sub-packet value."""

    @cached_property
    def expression_value(self) -> int:
        return min(value for value in self)


class OpMaximum(BaseOperatorPacket, type=3):
    """Type 3 operator: largest sub-packet value."""

    @cached_property
    def expression_value(self) -> int:
        return max(value for value in self)


class OpGreaterThan(BaseOperatorPacket, type=5):
    """Type 5 operator: 1 if the first sub-packet value exceeds the second, else 0."""

    @cached_property
    def expression_value(self) -> int:
        assert len(self.children) == 2
        first, second = self[0], self[1]
        return 1 if first > second else 0


class OpLessThan(BaseOperatorPacket, type=6):
    """Type 6 operator: 1 if the first sub-packet value is below the second, else 0."""

    @cached_property
    def expression_value(self) -> int:
        assert len(self.children) == 2
        first, second = self[0], self[1]
        return 1 if first < second else 0


class OpEqualTo(BaseOperatorPacket, type=7):
    """Type 7 operator: 1 if both sub-packet values are equal, else 0."""

    @cached_property
    def expression_value(self) -> int:
        assert len(self.children) == 2
        first, second = self[0], self[1]
        return 1 if first == second else 0


# part 2 examples from the puzzle description: hex input -> expected value
expression_tests = {
    "C200B40A82": 3,
    "04005AC33890": 54,
    "880086C3E88112": 7,
    "CE00C43D881120": 9,
    "D8005AC2A8F0": 1,
    "F600BC2D8F": 0,
    "9C005AC2F8F0": 0,
    "9C0141080250320F1802104A08": 1,
}
for hex_input, outcome in expression_tests.items():
    packet = next(StreamReader.from_string(hex_input))
    assert packet.expression_value == outcome
# -

print("Part 2:", next(StreamReader.from_string(transmission)).expression_value)

# # Comparison with real-world data streams
#
# Today's exercise is quite a good model for how real-world binary formats work. They usually do not pack data into such odd bit counts however; computers work much more efficiently with data packed into (powers of 2 of multiples of) bytes. Individual bytes can still contain multiple pieces of information such as 1 bit flags or several smaller integer values.
# # ## Continuation flags # # The continuation bit set in the literal value format is exactly how a range of variable-width encodings work, such as [UTF-8](https://en.wikipedia.org/wiki/UTF-8). Some formats, like UTF-8, use multiple bits to handle continuation signalling (in UTF-8, the number of bits for this purpose is *variable too*, from 1 to 5). The advantage of such a format is that you can encode complex data into a much more compact form than if you used a fixed number of bytes for every possible value you encode, but the disadvantage is that you can't just index into a stream to get to a specific Nth value. For UTF-8, you can encode ASCII text (e.g. the majority of textual computer data in the western world) in just 1 byte per character, increasing to 2 bytes for most [Latin-script alphabets](https://en.wikipedia.org/wiki/Latin-script_alphabet), while 3 bytes covers the remainder of the [Unicode Basic Multilingual Plane](https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane) (BMP) and you only need 4 bytes for Unicode data beyond the BMP. But, if you need to index into a UTF-8 byte string, you'll have to look at the first 4 bits of a lot of the bytes between the start of such a stream and the Nth codepoint you want to skip to (rule of thumb: left-most-bit not set? Then move on to the next byte, otherwise count consecutive left-most bits that are `1` and skip that many times minus 1). # # ## Sub-packet size expressed in total length or a count # # The operator packets record either the total length of the subpackets (in bits, real-world formats are far more likely to record a byte count), or a number of packets. For formats with a variable packet length, those two numbers have very different consequences for how you read such a stream, and both have advantages and disadvantages. Because of this many formats will use both, depending on what kind of information is encoded! 
# # A fixed size lets you skip over to the next packet in one big step, while a packet count requires that you read each sub-packet in turn, or at least enough to be able to skip the rest of that packet. But, a fixed size requires that the _encoder_ knows what size it is going to be sending, _up front_, and not all data streams lend themselves to this. For a streaming video format, for example, the encoder might not know how well later video data will compress and so won't know how many packets it'll end up sending. But such an encoder will pull in video data to encode in chunks, and may well know that it is going to send N frames of video, each of variable size, and tell the decoder on the other end to expect N sub packets.
2021/Day 16.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # EDA
# ## Import the dataset, explore and summarize it

# load the necessary python modules
import matplotlib.pyplot as plt
import matplotlib
import pickle
import pandas as pd
import numpy as np
from IPython.display import display
# %matplotlib notebook

### Load the dictionary containing the dataset. This code taken from poi_id.py script provided by udacity.
# NOTE(review): pickle.load is fine on this course-provided file, but never
# unpickle untrusted data.
with open("final_project_dataset.pkl", "r") as data_file:
    data_dict = pickle.load(data_file)

# get some initial stats for the project report.
# data_dict maps person name -> dict of features (including the 'poi' label).
print("Total Number of persons: %d"%len(data_dict.keys()))
print("Total Number of features: %d"%len(list(data_dict.values())[0]))
print("Total Number of POIs: %d"%sum([1 if x['poi'] else 0 for x in data_dict.values()]))

# Python 2 print statement: list every person name (dict key) in the dataset.
print data_dict.keys()

# +
# converting the dictionary dataset to a pandas dataframe
enron_df = pd.DataFrame.from_dict(data_dict)
# Removing entries belonging to Total and THE TRAVEL AGENCY IN THE PARK as they are non persons
del enron_df['TOTAL']
del enron_df['THE TRAVEL AGENCY IN THE PARK']
# transpose so rows are persons and columns are features; coerce non-numeric
# entries (e.g. 'NaN' strings) to real NaN values
enron_df = enron_df.transpose()
enron_df_num = enron_df.apply(pd.to_numeric, errors='coerce')
# Removing the email_address from the dataset as it's a non-numeric feature and won't seem to have much use right now.
del enron_df_num['email_address']
enron_df_num.describe()
# -

len(enron_df_num)

# We are left with 144 records now in our dataframe.
# Also, the summary of the data sets shows a very large standard deviation for some of the features
# and some missing data for others. We will drop some of these features as below.

# drop sparsely-populated payment features before computing correlations
del enron_df_num['loan_advances']
del enron_df_num['restricted_stock_deferred']
del enron_df_num['director_fees']

# Feature selections
data_corr_list = enron_df_num.corr()
print('\nCorrelations between features to POI:\n ' +str(data_corr_list['poi']))

# Features ‘exercised_stock_options’, ‘total_stock_value’, and ‘bonus’ have the highest correlation to POI, in descending order.

# +
#Get rid of label
del enron_df_num['poi']
poi = enron_df['poi']

#Create new features: aggregate and ratio features built from the raw
#financial figures and email counts
enron_df_num['stock_sum'] = enron_df_num['exercised_stock_options'] +\
    enron_df_num['total_stock_value'] +\
    enron_df_num['restricted_stock']
enron_df_num['stock_ratio'] = enron_df_num['exercised_stock_options']/enron_df_num['total_stock_value']
enron_df_num['money_total'] = enron_df_num['salary'] +\
    enron_df_num['bonus'] -\
    enron_df_num['expenses']
enron_df_num['money_ratio'] = enron_df_num['bonus']/enron_df_num['salary']
enron_df_num['email_ratio'] = enron_df_num['from_messages']/(enron_df_num['to_messages']+enron_df_num['from_messages'])
enron_df_num['poi_email_ratio_from'] = enron_df_num['from_poi_to_this_person']/enron_df_num['to_messages']
enron_df_num['poi_email_ratio_to'] = enron_df_num['from_this_person_to_poi']/enron_df_num['from_messages']

#Fill in NA values with the column mean
enron_df_num = enron_df_num.fillna(enron_df_num.mean())
#Scale to 1-0 (min/max normalization per column)
enron_df_num = (enron_df_num-enron_df_num.min())/(enron_df_num.max()-enron_df_num.min())
# -

# univariate feature ranking with SelectKBest's default scoring function
from sklearn.feature_selection import SelectKBest
selector = SelectKBest()
selector.fit(enron_df_num,poi.tolist())
scores = {enron_df_num.columns[i]:selector.scores_[i] for i in range(len(enron_df_num.columns))}
sorted_features = sorted(scores,key=scores.get, reverse=True)
for feature in sorted_features:
    print('Feature %s has value %f'%(feature,scores[feature]))

# +
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.grid_search import RandomizedSearchCV, GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import precision_score, recall_score, accuracy_score
from sklearn.cross_validation import StratifiedShuffleSplit
import scipy
import warnings
warnings.filterwarnings('ignore')

gnb_clf = GridSearchCV(GaussianNB(),{}) #No params to tune for naive bayes, use for convenience
svc_clf = SVC()
svc_search_params = {'C': scipy.stats.expon(scale=1),
    'gamma': scipy.stats.expon(scale=.1),
    'kernel': ['linear','poly','rbf'],
    'class_weight':['balanced',None]}
svc_search = RandomizedSearchCV(svc_clf,
    param_distributions=svc_search_params,
    n_iter=25)
tree_clf = DecisionTreeClassifier()
tree_search_params = {'criterion':['gini','entropy'],
    'max_leaf_nodes':[None,25,50,100,1000],
    'min_samples_split':[2,3,4],
    'max_features':[0.25,0.5,0.75,1.0]}
tree_search = GridSearchCV(tree_clf,
    tree_search_params,
    scoring='recall')

search_methods = [gnb_clf,svc_search,tree_search]
average_accuracies = [[0],[0],[0]]
average_precision = [[0],[0],[0]]
average_recall = [[0],[0],[0]]

# stratified splits keep the rare POI class represented in every fold
num_splits = 10
train_split = 0.9
indices = list(StratifiedShuffleSplit(poi.tolist(),
    num_splits,
    test_size=1-train_split,
    random_state=0))

best_features = None
max_score = 0
best_classifier = None
num_features = 0
# grow the feature set one ranked feature at a time; for each size, tune each
# of the three classifiers once, then cross-validate it over all splits
for num_features in range(1,len(sorted_features)+1):
    features = sorted_features[:num_features]
    feature_df = enron_df_num[features]
    for classifier_idx in range(3):
        sum_values = [0,0,0]  # accumulated accuracy / precision / recall
        #Only do parameter search once, too wasteful to do a ton
        search_methods[classifier_idx].fit(feature_df.iloc[indices[0][0],:],
            poi[indices[0][0]].tolist())
        classifier = search_methods[classifier_idx].best_estimator_
        for split_idx in range(num_splits):
            train_indices, test_indices = indices[split_idx]
            train_data = (feature_df.iloc[train_indices,:],poi[train_indices].tolist())
            test_data = (feature_df.iloc[test_indices,:],poi[test_indices].tolist())
            classifier.fit(train_data[0],train_data[1])
            predicted = classifier.predict(test_data[0])
            # NOTE(review): sklearn metrics take (y_true, y_pred); passing
            # (predicted, truth) effectively swaps precision and recall -- verify.
            sum_values[0]+=accuracy_score(predicted,test_data[1])
            sum_values[1]+=precision_score(predicted,test_data[1])
            sum_values[2]+=recall_score(predicted,test_data[1])
        avg_acc,avg_prs,avg_recall = [val/num_splits for val in sum_values]
        average_accuracies[classifier_idx].append(avg_acc)
        average_precision[classifier_idx].append(avg_prs)
        average_recall[classifier_idx].append(avg_recall)
        # keep the best (precision+recall)/2 subject to the project's minimum
        # 0.3 precision and 0.3 recall requirement
        score = (avg_prs+avg_recall)/2
        if score>max_score and avg_prs>0.3 and avg_recall>0.3:
            max_score = score
            best_features = features
            best_classifier = search_methods[classifier_idx].best_estimator_

print('Best classifier found is %s \n\
with score (recall+precision)/2 of %f\n\
and feature set %s'%(str(best_classifier),max_score,best_features))
# -
udacity/enron/ud120-projects-master/final_project/Enron Data Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # FAQ # Q: Can I use Maelstrom for _X_ star? # A: It depends! Phase modulation is most useful when the oscillation frequencies exceed 60 Hz, and when the oscillations do not change over the course of the orbit. Most stars that have been used with phase modulation are pulsars, $\delta$ Scuti stars, and subdwarf B stars. # ---- # Q: What SNR should my pulsation be for it to be useful? # A: In general, the pulsation should be clear of any nearby peaks and exceed a SNR of 5. See the Maelstrom paper for a more detailed analysis. # ---- # Q: Will this work for ground-based data? # A: No, I have not yet found any cases where Maelstrom has worked on ground-based data. So far, all analyses have used the _Kepler_ or _TESS_ satellites.
docs/notebooks/FAQ.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### ■ Pythonの基本プログラミング レベル1 # ##### ★実行は、セルを選択して、Shiftキーを押しながらEnterキーを押します。 # ##### ★セルの左側にある角かっこ[ ]の中の数字が変わると、実行されたことになります。 # ##### ★実行結果は、すぐ下の欄に表示されます。 # ##### ★同じ結果になるか確認してみてください。 # ##### ★下に問題が並んでいるので、やってみてください。 # ##### ★セルは、このタブの左上の+のアイコンで追加でき、ハサミのアイコンで削除できます。 # ##### # #### 【1】単純な計算 足し算です。 # ##### セルに1+1を入力して、Shiftキーを押しながらEnterキーを押します。 # ##### 1+2の計算結果が下の欄に表示されます。 1+2 # ↓下のセルに1+2を入力して、Shiftキーを押しながらEnterキーを押します。上と同じになるか確認してみてください。 # #### 【2】単純な計算 足し算です。もうひとつやってみましょう。 2+3 # ↓下のセルに2+3を入力して、Shiftキーを押しながらEnterキーを押し、計算結果が同じになるか確認してください。 # #### 【3】引き算です。 # ##### ↓下のセルに5-3を入力して、Shiftキーを押しながらEnterキーを押し、計算結果が同じになるか確認してください。 5-3 # ##### ★続けていきましょう。 # #### 【4】かけ算です。かけ算は * です。アスタリスクです。 4*6 # #### 【5】 割り算、割り算は / です。スラッシュです。 20/4 # #### 【6】 Print文 # ##### Print文は、'' ダブルコーテーションで囲まれた文字を、すぐ『下の欄』に表示します。 # ##### 文字列は、' シングルこーテーションで囲みます。 # ##### ( かっこは、Shiftキーを押しながら8を押して入力します。) # ##### ( 全角入力だとうまく入力できないので、ALTキーを押しながら半角キーを押して半角入力にしてください) # ##### ' シングルクォーテーションは、Shiftキーを押しながら7を押して入力します。 print('hello python') # #### 【7】 値の代入 # ##### 変数xに1を代入します。 # ##### = は代入です。左のxに右の1を代入します。 # ##### 続けて、変数xの内容をprint文で表示してみます。 x = 1 print(x) # #### 【8】 値の代入 # ##### 変数xに、1+2の計算結果を代入します。続けて変数xの内容をprint文で表示してみます。 x = 1 + 2 print(x) # #### 【9】 計算 x = 10000 * 1.1 print(x) # #### 【10】複数行にわたる計算です。 # ##### xに10000を代入し、x*1.1をyに代入する、のように、複数行にわたっています。 x = 10000 y = x * 1.1 print(y) # #### 【11】複数行にわたる計算です。 # ##### xに1を代入し、yに2を代入し、zにx+yの結果を代入する、のように文が複数行にわたっています。 x=1 y=2 z=x+y print(z) # #### 【12】複数行にわたる計算 # #### 変数kingakuに10000を代入しています。 # #### kingaku=10000のkingakuは、変数名で自由につけてかまいません。変更することもできます。 kingaku=10000 shouhizei=0.1 shiharai = kingaku * (1 + shouhizei) print(shiharai) # #### 【14】小数点以下を切り捨ててみます。int関数です。 # ##### integerが整数であることに由来した関数名です。 en = int(shiharai) print(en) # #### 【15】 
if文です。条件文と呼ばれるものです。 # ##### xに1を代入します。=が1つだけだと代入です。 # ##### xが1に一致する時に、'xは1です'という文字列を表示します。 # ##### == と2つある方がイコールの意味になります。xが1に一致する時に・・・ということです。 # ##### :コロンは、if文の最後につける決まりです。 # ##### print文の前には、空白4文字が入っています。インデントと呼ばれるものです。 # ##### インデントがあることで、コンピュータは、if文の中の処理なんだなとわかるわけです。 # ##### インデント(空白4文字)がないと、print文は、if文と無関係とみなされてしまいます。 x=1 if x == 1: print('xは1です') # #### 【16】 if文 # ##### x=2で、xに2を代入します。 # ##### =イコールは代入です。xに2を代入します。 # ##### !=は否定で、「ではない時に」となります。「xが1に一致しないときに」 x=2 if x != 1: print('xは1ではありません') # #### 【17】 if文で、条件が2つ以上ある場合は、elifを使います、 # ##### 最初にxに2を代入しています。 # ##### x==1の時は、文字列'xは1です'を表示し、 # ##### x==2の時は、文字列'xは2です'を表示します。 # ##### elifは、else ifの略から来ていてelseは「その他の」という意味です。 # ##### x==1の時は、'xは1です'と表示し、x==2の時は、'xは2です'と表示します。 # ##### 最初にx==2を代入しているので、print文は、'xは2です' が選択されます。 x = 2 if x == 1: print('xは1です') elif x==2: print('xは2です') # #### 【18】 if文で文字列を処理してみます。 # ##### プログラムの中で文字列を扱います。①プログラムのコード(xやifなどの文字列)と②普通の文字列(helloなど)を区別するために、②の文字列は ' シングルクォーテーションで囲みます。 'hello' # ##### プログラムで使う単語(予約語といいます)や記号 : コロンなどは、【半角入力】が必要です。 # ##### 全角と半角の切り替えは、キーボードの【半角/全角】キーを押してください。 x = 'hello' if x == 'hello': print('xはhelloです') # #### 【19】配列の代入 []はリスト(配列)です。 # ##### 変数xには、数字1つだけでなく、いくつかの数字をまとめて格納できます。配列[]の中に入れて書きます。 # ##### ,カンマで区切って入力します。[]はブラケット演算子といいます。 # ##### また、コンピュータは、数字を0から数えます。0から数え始める点は気をつけてください。 x=[0,1,2,3,4] print(x) # #### 【20】for文です。for文は、繰り返しの処理を書く時に使います。 # ##### この例では、for文は、print文を繰り返し実行しています。 # ##### 変数 i に0を代入して、print文を実行 # ##### 変数 i に1を代入して、print文を実行 # ##### 変数 i に2を代入して、print文を実行 # ##### 変数 i に3を代入して、print文を実行 # ##### 変数 i に4を代入して、print文を実行 # ##### のように、print文を繰り返し実行しています。 # ##### 配列[0,1,2,3,4]の中身を順番にprint文で下の欄に出力してみましょう。 # ##### ひとつひとつ順番に出力されるので、縦に出力されます。 # for i in [0,1,2,3,4]: print(i) # #### 【21】リスト(配列)の代入 # ##### 変数xに、文字のリスト(配列)を代入してみます。 x = ['red','green','blue'] print(x) # #### 【22】スライスは、配列から中身(要素という)を取り出す処理です。 # ##### スライスは、配列で順番を表す番号(インデックス)を指定します。 # ##### コンピュータは、0番目から数え始めます。この点は、注意しましょう。 # ##### 'red'は、0番目 # ##### 'green'は、1番目 # ##### 'blue'は、2番目 # 
##### です。 x[0] x[1] x[2] # #### 【23】スライス # ##### ★★★スライスは本当にややこしいので気を付けてください★★★ # ##### 'a'は0番、'b'は1番、'c'は2番、'd'は3番、'e'は4番 # ##### コンピュータは0番から数え始めます。 # ##### x[1:3]は、1番目の'b'と、2番目の'c'だけを抽出します。 # ##### x[1:3]は、0番目の'a'は飛ばし、1番目の'b'と、2番目の'c'を抽出しますが、3番目の’d’では抽出処理を抜けるので抽出されません。 # ##### x[1:3]は、「1番から始まり3番で抽出処理を抜ける」と覚えます。 # ##### ややこしいですが、こうなってしまっているので仕方がありません。そしてよく使います。 # ##### x[1:3]は、1番と2番である'b''c'です。 x = ['a','b','c','d','e'] x[1:3] # #### 【24】アンパック # ##### 2つ以上の変数に、いっぺんに数字を代入します。 # ##### x = 1 xに1を代入する # ##### y = 2 yに2を代入する # ##### という内容をいっぺんに行います。 x, y = 1, 2 print(x) print(y) # #### 【26】print文 # ##### , カンマでつなげて、2つ以上の内容を表示してみます。 print('hello', 'Python ')
.ipynb_checkpoints/Python-Level01-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Importing required libraries

import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import seaborn as sns
import scipy.stats as stats
sns.set(style="ticks", color_codes=True)
from sklearn.preprocessing import StandardScaler

# # Reading the dataset and storing in df

# NOTE(review): absolute Windows path; the backslashes happen to survive here,
# but a raw string (r'...') or forward slashes would be safer.
df = pd.read_csv('D:\data analytics\IDS Project.csv')

# # Displaying the first five rows in each column

df.head()

# # Checking if any columns have Null or Missing values

# TO CHECK FOR NULL VALUES IN ALL COLUMNS
null_columns=df.columns[df.isnull().any()]
df[null_columns].isnull().sum()

# # Checking total number of rows and type of data each column contains

df.info()

df.isnull().sum(axis = 0)

# ## We see that the total charges column is seen to have object data where it should have numeric data

# +
# coerce TotalCharges to numeric; unparseable entries become NaN
df.TotalCharges = pd.to_numeric(df.TotalCharges,errors = 'coerce')
# -

# ## Dropping the customer id column since we don't need it

df.drop(["customerID"],axis=1,inplace = True)

# ## Checking total proportion of people who churned vs those who didn't

# +
plt.figure(figsize= (10,6))
df.assign(dummy = 1).groupby(['dummy','Churn']).size().unstack().plot(kind='bar',stacked=True,legend=True)
plt.title('proportion of people who churned')
#label for stacked bar plot
plt.xlabel('Churn')
# disable ticks in the x axis
plt.xticks([])
plt.show()
df['Churn'].value_counts()
# -

# ## The percentage of people who churned is approximately 36%

# ## Calculating number of male and female churners

female_indices = df[df['gender'] == 'Female']
print(female_indices['Churn'].value_counts())
male_indices = df[df['gender'] == 'Male']
print(male_indices['Churn'].value_counts())

# ## Plotting proportion of females and males who churned separately

# +
#for the female customers
female_indices.assign(dummy = 1).groupby(['dummy','Churn']).size().unstack().plot(kind='bar',stacked=True,legend=True)
plt.title('proportion of females who churned')
#label for stacked bar plot
plt.xlabel('Churn')
# disable ticks in the x axis
plt.xticks([])
plt.show()
# hard-coded counts taken from the value_counts() output above
print('proportion=',939/2549 )

#for the male customers
male_indices.assign(dummy = 1).groupby(['dummy','Churn']).size().unstack().plot(kind='bar',stacked=True,legend=True)
plt.title('proportion of males who churned')
#label for stacked bar plot
plt.xlabel('Churn')
# disable ticks in the x axis
plt.xticks([])
plt.show()
print('proportion=',930/2625 )
# -

# The number of female customers who churned is slightly more than that of the male customers.
# However the proportion of women who churned is more than the proportion of men who churned.

# ## Countplot to see how many senior citizens churned

plt.figure(figsize= (10,6))
sns.countplot(x="SeniorCitizen",hue="Churn",data = df,palette = "hls")

# We see that out of all the telco customers the proportion of senior citizens is much smaller compared to the non-senior citizen group.
# Churning is seen to be more among the senior citizens compared to the non-senior citizens.

# ## Boxplot to check relation between Monthly charges and Churning

plt.figure(figsize= (10,6))
sns.boxplot(x="Churn", y="MonthlyCharges", data=df)

df.groupby(['Churn'])['MonthlyCharges'].median().values

# We see that the median Monthly charges in those who churned is higher.

plt.figure(figsize= (10,6))
sns.boxplot(x="Churn", y="TotalCharges", data=df)

df.groupby(['Churn'])['TotalCharges'].median().values

# +
# df[['TotalCharges']] = StandardScaler().fit_transform(df[['TotalCharges']])
# -

# +
# keep only TotalCharges values within 2 standard deviations of the mean
mean = np.mean(df.TotalCharges)
sd = np.std(df.TotalCharges)
final_list = [x for x in df.TotalCharges if (x > mean - 2 * sd)]
final_list = [x for x in final_list if (x < mean + 2 * sd)]
np.shape(final_list)
# -

plt.boxplot(final_list)

sns.boxplot(x="Churn", y="TotalCharges", data=df)

# ## Plotting to check the relation between tenure distribution and churning

# +
fig, ax = plt.subplots(2, 1, figsize=(8,6), sharey=True, sharex=True)
sns.distplot(df[df['Churn']=="Yes"]["tenure"] , color="green", ax=ax[0])
sns.distplot(df[df['Churn']=="No"]["tenure"] , color="brown", ax=ax[1])
ax[0].set_xlabel('Churn - Yes', size=12)
ax[1].set_xlabel('Churn - No', size=12)
#ax.set_ylabel('Churn', size=14, color="green")
#ax[0].set_title('Tenure distribution', size=18, color="green")
fig.suptitle("Tenure distribution", fontsize=14)
plt.show()
# -

# We see that people who have been customers for lesser tenure tend to churn.

# ## Plotting a bar plot to check if Paperless billing affects churn rate

# +
sns.countplot(x="PaperlessBilling",hue="Churn",data = df)
# -

# We see that a large population of the people who churned had opted for paperless billing.
# The proportion of customers who didn't opt for paperless billing and churned is less.

# ## Checking if the type of contract customers have affects churning

plt.figure(figsize = (10,8))
ax = sns.countplot(x="Contract",hue="Churn",data = df,palette="husl")

# We see that customers who opt for month to month contract tend to churn.

# ## Barplot to relate monthly charges and churning

# +
fig, ax = plt.subplots(2, 1, figsize=(8,6), sharey=True, sharex=True)
sns.distplot(df[df['Churn']=="Yes"]["MonthlyCharges"] , color="blue", ax=ax[0])
sns.distplot(df[df['Churn']=="No"]["MonthlyCharges"] , color="red", ax=ax[1])
ax[0].set_xlabel('Churn - Yes', size=12)
ax[1].set_xlabel('Churn - No', size=12)
fig.suptitle("Monthly Charges distribution", fontsize=14)
plt.show()
# -

# Out of the customers who churned a lot of them had higher monthly charges (last month)

# ## Checking how payment method affects churning

plt.figure(figsize = (10,8))
ax = sns.countplot(x="PaymentMethod",hue="Churn",data = df,palette ="Set1")

# We see that clearly most customers who churn use Electronic checks to pay their bill.
# So most people might find it hard to adapt to the electronic check payment method.

# ## Bar plot to see if having a partner impacts churning

plt.figure(figsize = (10,8))
ax = sns.countplot(x="Partner",hue="Churn",data = df,palette = "inferno")

# There is a small correlation and we see that people who don't have a partner have churned.
# Customers who have partners plan for a more stable service.

# ## Seeing if people who have dependents tend to churn

plt.figure(figsize = (10,8))
ax = sns.countplot(x="Dependents",hue="Churn",data = df,palette="Paired")

# We see that most people who churned have no dependents, this shows that having no one else who depends on the service allows you to freely and frequently change or switch services.

# chi-squared independence tests: Churn vs gender, then Churn vs Dependents
contingency_table = pd.crosstab(df.Churn,df.gender)
contingency_table

stats.chi2_contingency(contingency_table)

contingency_table = pd.crosstab(df.Churn,df.Dependents)
contingency_table

stats.chi2_contingency(contingency_table)
.ipynb_checkpoints/ids project (2)-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from pathlib import Path from nltk.tokenize import sent_tokenize files = list(Path('../data/Manifestos/Original').glob('*.csv')) save_to = Path('../data/Manifestos/Processed') save_to.mkdir(exist_ok=True) party_mapping = {'51320':'Labour','51620':'Conservative'} # + out = [] for f in files: party, date = f.stem.split('_') year = date[:4] if party in ['51320','51620']: df = pd.read_csv(f) text = ' '.join([str(i) for i in list(df.text)]) sents = sent_tokenize(text) for s in sents: if len(s.split()) > 10: out.append((party_mapping[party],year,s)) #with open(save_to / (f.stem + '.txt'),'w') as out_txt: # out_txt.write(text) # - df_data = pd.DataFrame(out,columns=['party','year','sent']) df_data.head() df_data.shape len(df_data.sent.unique()) df_sampled = df_data.sample(frac=1.0).reset_index(drop=True) df_sampled.head() deduplicated = df_sampled.drop_duplicates(subset='sent') deduplicated.shape # + deduplicated['split'] = 'train' cutoff = int(deduplicated.shape[0]*.8) deduplicated.loc[cutoff:,'split'] = 'test' deduplicated['split'].value_counts() # - save_to = Path('../data/Manifestos/Classifcation') save_to.mkdir(exist_ok=True) deduplicated.to_csv(save_to / 'labour_conservative_sentences.csv')
utils/Process Manifesto Data for Sentence Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Object Oriented # Python is a object oriented language. Object Oriented Programming is a programming paradigm based on "objects" which contain data and code (or "attributs" and "methods"). Other programming paradigms are imperitive or functional programming. You can do both in python, and to some extend will mix these concepts all the time. A object oriented language per se treats all data as objects and provides methods on the data. Let's see how Python does this: # ## Variables and Objects # When assigning a variable, the intuitive idea is that the variable is a container for the actual data. In Python this is different. Whenever data is called, python will make an object from it: print (300) # The result you see is "300" in the output, but what happened is: Python built an object, gave it the value 300, and displayed it. Since there is no further assignemnt to this object, it is orphaned and will be destructed. So let's use it a bit differently: a=300 b=a c=300 print(id(a), id(b), id(c)) print(a, b) # We built an object with the value of 300 and assigned to the variable a (or we named it a). Then we took the name b and assigned to a, so both are now references to the *same* object. # With the function id() I can access the object identifier and you see that a and b refer to the same object. When I assigned c, Python made a new object, then gave it the value 300, and then assigned the name c to it, thus, the object identifier is a different one than for a and b. # A good way to understand what is meant with objects in programming is to think about objects in the world: consider your family has a cat named "Florence". 
Each member of your family might refer to her differently: one may say "flory", another one may call her "kittykittykitty" and another on simply as "cute fur ball". If we ask the neighbour, who is a passionate gardener and frequently has to deal with Florences leftovers between the plants, she might use "******* black devil". Florence though won't care much and will always be the same object in the world. # Think of variables as a reference to an object. # # As we have seen with the data types before, the functionality of variables depends on the data they are refering to. Hence the basic datatypes from python come with properties and methods which are frequently used. See the following: # """Florence likes to strive around the neighbouring gardens.""".count("a") # We have an object of the data type string and apparently we can use a method count() to do something that returns the number 2. You can pretty much guess that we count how often the letter "a" can be found within the string. But actually we counted how often one string is contained by another string. # If I assign a variable to the string object, I have access to all the methods provided by string models: # + a="""Florence likes to strive around the neighbouring gardens. Mrs. Lamberts does not appreciate this very much. Mr. Doolittle loves to have Florence around.""" a.count("Florence") # - # This concept may sound pretty challenging at the first glance, but it actually is pretty cool, because in the moment you create and object, you can ask this object about it's methods and properties and thus get a pretty good idea what's provided (of course you should still get familiar with reading a proper documentation). # Here in Jupyter you can assign a variable to an object, then type the variable added by "." and then press tab to get a list of properties and methods. Try it: a="""Florence is a black cat.""" a. # Don't worry, you don't have to immediately understand all of the methods. 
In fact, without reading the documentation it's rather hard to figure out what's useful. Try to get used to reading documentation though, it's really, really essential for programming. Have a look at <br> # https://docs.python.org/3/library/stdtypes.html#string-methods <br> # and compare it with the selection you get by typing <tab> in the upper box. # # ### Exercise # # 1. Explore the methods of data types float and integer. # 2. We have used the count() method to find how often a string is contained in another string. Can you find a string method that would search for "Florence" and return a string containing "Flory" ?
02_object_oriented.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:miniconda3]
#     language: python
#     name: conda-env-miniconda3-py
# ---

# ## Load libraries

# tensorflow backend
from os import environ
environ['KERAS_BACKEND'] = 'tensorflow'
# vae stuff
from chemvae.vae_utils import VAEUtils
from chemvae import mol_utils as mu
# import scientific py
import numpy as np
import pandas as pd
# rdkit stuff
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import PandasTools
# plotting stuff
import matplotlib.pyplot as plt
import matplotlib as mpl
from IPython.display import SVG, display
# %config InlineBackend.figure_format = 'retina'
# %matplotlib inline

# # Load a model

vae = VAEUtils(directory='../models/zinc_properties')

# # Using the VAE
#
# ## Decode/Encode
#
# Might not be perfect (it's probabilistic), try it several times.
#
# smiles <i class="fa fa-arrow-right" aria-hidden="true"></i> x <i class="fa fa-arrow-right" aria-hidden="true"></i> z <i class="fa fa-arrow-right" aria-hidden="true"></i> x_r <i class="fa fa-arrow-right" aria-hidden="true"></i> smiles_r

# +
# Round-trip a molecule through the VAE:
# canonical SMILES -> one-hot (x) -> latent vector (z) -> decoded one-hot -> SMILES.
smiles_1 = mu.canon_smiles('CSCC(=O)NNC(=O)c1c(C)oc(C)c1C')
X_1 = vae.smiles_to_hot(smiles_1, canonize_smiles=True)
z_1 = vae.encode(X_1)
X_r = vae.decode(z_1)

print('{:20s} : {}'.format('Input', smiles_1))
print('{:20s} : {}'.format('Reconstruction', vae.hot_to_smiles(X_r, strip=True)[0]))
print('{:20s} : {} with norm {:.3f}'.format('Z representation', z_1.shape, np.linalg.norm(z_1)))
# -

# ## property predictor

# Properties are predicted directly from the latent vector z_1.
print('Properties (qed,SAS,logP):')
y_1 = vae.predict_prop_Z(z_1)[0]
print(y_1)

# ## Decode several attempts
# VAE are probabilistic

# Sample points near z_1 and decode each several times; collect unique SMILES.
noise = 5.0
print('Searching molecules randomly sampled from {:.2f} std (z-distance) from the point'.format(noise))
df = vae.z_to_smiles(z_1, decode_attempts=100, noise_norm=noise)
print('Found {:d} unique mols, out of {:d}'.format(len(set(df['smiles'])), sum(df['count'])))
print('SMILES\n', df.smiles)
display(PandasTools.FrameToGridImage(df, column='mol', legendsCol='smiles', molsPerRow=5))
df.head()

# ## PCA of latent space
#
# Sample random points from the training set along with properties

Z, data, smiles = vae.ls_sampler_w_prop(size=50000, return_smiles=True)
prop_opt = 'qed'
prop_df = pd.DataFrame(data).reset_index()
prop_df['smiles'] = smiles
prop_df.head()

# Perform a PCA projection and color the points based on a property

# +
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler

# do pca and normalize coordinates to [0, 1] for plotting
Z_pca = PCA(n_components=2).fit_transform(Z)
Z_pca = MinMaxScaler().fit_transform(Z_pca)

df = pd.DataFrame(np.transpose((Z_pca[:, 0], Z_pca[:, 1])))
df.columns = ['x', 'y']
df[prop_opt] = prop_df[prop_opt]

plt.scatter(x=df['x'], y=df['y'], c=df[prop_opt], cmap='viridis',
            marker='.', s=10, alpha=0.5, edgecolors='none')
plt.show()
# -

# compare with t-SNE, will take some time

# +
from sklearn.manifold import TSNE

Z_tsne = TSNE(n_components=2).fit_transform(Z)
Z_tsne = MinMaxScaler().fit_transform(Z_tsne)

# BUG FIX: the original built the t-SNE frame as `f` but then assigned
# `df.columns = ['x','y']` on the 3-column PCA frame (length-mismatch error),
# and would have re-plotted the PCA projection. Build and plot the t-SNE
# coordinates, mirroring the PCA cell above.
df = pd.DataFrame(np.transpose((Z_tsne[:, 0], Z_tsne[:, 1])))
df.columns = ['x', 'y']
df[prop_opt] = prop_df[prop_opt]

plt.scatter(x=df['x'], y=df['y'], c=df[prop_opt], cmap='viridis',
            marker='.', s=10, alpha=0.5, edgecolors='none')
plt.show()
examples/intro_to_chemvae.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="59DLQSAO7PBm" # # Tutorial - Deep Learning for Genomics # # # # In this tutorial we will use a convolutional neuronal network to address a fairly basic but common problem in genomics. Given a set of sequences belonging to different classes, what are the characteristics in the DNA sequence that let us distinguish the classes. For example, given a set of promoter and enhancer sequences, we could ask if there are any patterns in the DNA that let us distinguish between the two. # # For a 'simple' example, think of two ChIP-seq experiments for two different transcription factors. After analysing the ChIP-seq data, we know at what positions in the genome the two factors bind and the majority of binding sites might be distinct. If we extract the underlying sequences and search for DNA patterns that are enriched in the respective sets, we get an idea what DNA sequences the transcription factor might bind and/or which co-factors influence their binding. # # Traditionally, people use motif discovery tools for finding overrepresented words and motifs. However, if we move to slightly more complicated questions these methods quickly reach their limits and machine learning approaches become more promising. # # A more complicated question: If we have multiple sets of enhancers that are active in different tissues and cell types and we have the underlying sequences, can we figure out what sequence patterns are characteristic for what activity? And once we know that can we infer which factors are common and which are tissue specific? # # For our test dataset we have a simplfied, simulated version of such a task. We simulated 40,000 DNA sequences of length 200 bp. 
We split them into 4 enhancer classes and populated them with transcription factor binding motifs and other DNA patterns to make them distinguishable. However, some motifs are shared between classes, may overlap each other, and are not necessarily perfect matches to the textbook motifs. That's much more like how regulatory DNA actually looks :)! # # We will use keras to build and train a small convolutional neural network to classify our enhancer sequences. Once this network is trained well, we can then investigate how the network has learned to distinguish between the classes and try to relate this back to transcription factor motifs and so on. # # ------------ # # You can run the tutorial in this colab notebook or check out (and clone) the github repository (https://github.com/rschwess/tutorial_dl_for_genomics). From there you can either run everything in an interactive python or ipython session or (especially later when optimizing) just adjust and run python dl_intro.py in the terminal. # # ---------------- # # ## Set up # # We will start by downloading the tutorial data. There are more data available on github if you want to work on a slightly more difficult set.
# + id="68wKlyer7Hti" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605711741550, "user_tz": 0, "elapsed": 3922, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="4b514565-656a-410b-8499-780459ce232d" language="bash" # # # Download Example Data -------------------------------------------------------- # # rm -rf ./sample_data/ # # mkdir -p data # # rm -f ./data/* # # curl https://raw.githubusercontent.com/rschwess/tutorial_dl_for_genomics/master/data/pwm_seq_200bp_test_set.txt >./data/pwm_seq_200bp_test_set.txt # curl https://raw.githubusercontent.com/rschwess/tutorial_dl_for_genomics/master/data/pwm_seq_200bp_valid_set.txt >./data/pwm_seq_200bp_valid_set.txt # curl https://raw.githubusercontent.com/rschwess/tutorial_dl_for_genomics/master/data/pwm_seq_200bp_train_set.txt >./data/pwm_seq_200bp_train_set.txt # # + colab={"base_uri": "https://localhost:8080/", "height": 537} id="GhvTIDjQgde1" executionInfo={"status": "error", "timestamp": 1605705572479, "user_tz": 0, "elapsed": 10308, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="d9ecb99a-1d44-45de-d84c-f67a0d49f4d1" # SAFE TO IGNORE # if you want to mount your google drive # from google.colab import drive # drive.mount('/content/drive') # + id="WG23xMgboNNG" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": "OK"}}, "base_uri": "https://localhost:8080/", "height": 54} executionInfo={"status": "ok", "timestamp": 1579202102645, "user_tz": 0, "elapsed": 31856, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AAuE7mDx-QB_Sca5HG8eylKgHe_a7x9QI4Drs3fQZI0t=s64", "userId": "01462532515512744206"}} outputId="d6e2bdf8-9b75-4527-dd33-785f7bc9db84" # SAFE TO IGNORE # # for uploading local files # from google.colab import files # uploaded = files.upload() # + id="bnkSDL5RY1ci" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605711753709, "user_tz": 0, "elapsed": 1948, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="415855e2-9905-40c0-9d45-cffb53cec788" language="bash" # # # Create some directories and download helper scripts for later ---------------- # mkdir -p helper visualize # # curl https://raw.githubusercontent.com/rschwess/tutorial_dl_for_genomics/master/helper/functions_for_motif_plotting.R >./helper/functions_for_motif_plotting.R # curl https://raw.githubusercontent.com/rschwess/tutorial_dl_for_genomics/master/helper/plot_sequence_kernel_weights_per_dir.R >./helper/plot_sequence_kernel_weights_per_dir.R # curl https://raw.githubusercontent.com/rschwess/tutorial_dl_for_genomics/master/helper/plot_sequence_kernel_icms_per_dir.R >./helper/plot_sequence_kernel_icms_per_dir.R # # + [markdown] id="-7YlfP_RIPUd" # Lets start by looking at the data. We have 40,000 sequences and they are all labeled with their respective class. We already split them up into training, test and validation set. It is also a good idea to check if our classes are roughly equally distributed across our different sets. 
# + id="2xI_e2JEIPhX" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605711764387, "user_tz": 0, "elapsed": 937, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="59dc0006-8dc9-46ed-9d8d-9364bdb4854d" language="bash" # # # Check Data Layout ########################### # # # check lines per set # echo "Numbers:" # wc -l ./data/pwm* # # # check data format # echo -ne "\nFormat\n" # head -n 3 ./data/pwm_seq_200bp_test_set.txt # # # check class representation # echo -ne "\nClass Representations:\n" # # echo -ne "\nTraining:\n" # cut -f 1 ./data/pwm_seq_200bp_train_set.txt | sort | uniq -c # echo -ne "\nTest:\n" # cut -f 1 ./data/pwm_seq_200bp_test_set.txt | sort | uniq -c # echo -ne "\nValidation:\n" # cut -f 1 ./data/pwm_seq_200bp_valid_set.txt | sort | uniq -c # # # + [markdown] id="lpkUq8kjKv8s" # # Looks good, lets move straight in! We will use keras with tensorflow as its backend. Keras is ideal for quickly writing down and prototyping networks in just a few lines of code. The documentation site will be usefull throught the tutorial https://keras.io/. # # We import keras and the relevant layers and operations we need. 
# # + id="2XjUxMEwKwMp" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605711774793, "user_tz": 0, "elapsed": 5937, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="a5cbb14f-5d4f-4415-b53a-8691d4075f6d" # IMPORTING MODULES # %tensorflow_version 1.x import keras # from keras.models import Sequential # from keras.layers import Dense, Flatten # from keras.layers import Conv1D, MaxPooling1D from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout from tensorflow.keras.models import Sequential import numpy as np import os import matplotlib.pyplot as plt # + [markdown] id="OCzDfI2mK898" # I wrote two helper functions to convert the sequences into hot encoded sequences and a wrapper to read in and assemble the data. Feel free to skip over this but you might want to have a quick look and understand how we format the data. The hot encoding transforms the sequence into an X x 4 array whith rows corresponding to the sequence position and the columns representing the 4 DNA bases. The respective base column that matches the sequence at that position is 1 the rest 0. 
# #

# + id="XnubGHyAK9IA"
# Column index of each DNA base in the one-hot encoding (A, C, G, T order).
_BASE_COLUMNS = {'A': 0, 'C': 1, 'G': 2, 'T': 3}


# Helper Function get hotcoded sequence
def get_hot_coded_seq(sequence):
    """One-hot encode a DNA string.

    Returns a (len(sequence) x 4) float array; row i has a 1 in the column
    of the base at position i (columns = A, C, G, T). Characters outside
    ACGT (e.g. 'N') leave an all-zero row, matching the original behaviour.
    """
    hotsequence = np.zeros((len(sequence), 4))
    # Dict lookup replaces the original if/elif chain over the four bases.
    for i, base in enumerate(sequence):
        col = _BASE_COLUMNS.get(base)
        if col is not None:
            hotsequence[i, col] = 1
    # return the numpy array
    return hotsequence


# Helper function to read in the labels and seqs and store as hot encoded np array
def read_data(infile):
    """Read a tab-separated "<label>TAB<sequence>" file.

    Returns (hot_labels, hot_seqs): one-hot class labels of shape (n, 4)
    and one-hot sequences of shape (n, 200, 4).
    Assumes sequences are 200 bp and labels are integers 0..3.
    """
    seqs = []
    labels = []
    with open(infile, "r") as f:
        for line in f:  # the enumerate() index in the original was unused
            fields = line.rstrip().split("\t")
            seqs.append(fields[1])
            labels.append(fields[0])
    # convert class indices to one-hot label rows
    hot_labels = keras.utils.to_categorical(np.array(labels), num_classes=4)
    # pre-allocate, then fill with hot encoded sequences
    hot_seqs = np.zeros((len(seqs), 200, 4))
    for j, seq in enumerate(seqs):
        hot_seqs[j, ] = get_hot_coded_seq(seq)
    return hot_labels, hot_seqs

# + [markdown] id="l8J-AlxKLLSf"
# Now we can read in the data.
# # + id="FRT4zlKdLOlk" executionInfo={"status": "ok", "timestamp": 1605711801588, "user_tz": 0, "elapsed": 2407, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} # read data -------------------------------------------------------------------- train_file = "./data/pwm_seq_200bp_train_set.txt" train_labels, train_seqs = read_data(train_file) valid_file = "./data/pwm_seq_200bp_valid_set.txt" valid_labels, valid_seqs = read_data(valid_file) # + [markdown] id="EKscmL-6LUWo" # Lets check how the data looks like after we read it into python and hot encoded it. # + id="r0ajVoy3LTy4" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605711805557, "user_tz": 0, "elapsed": 983, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="03f2f93c-41df-4742-e167-027ee9af11b9" # Check Data in Python --------------------------------------------------------- # check shapes print("Train Seq Shape", train_seqs.shape) print("Train Label Shape", train_labels.shape) # check data format print("Labels Format:") print(train_labels[1:5]) print("Seq Format (first 10 bp):") print(train_seqs[1, 1:10,:]) # + [markdown] id="itcX25I4L6Gb" # # The labels have a one hot encoding where every column represents a different class and in this case only one class can be active at a time. # # The sequences have shape [sample x sequence_length x base]. We encod the 4 DNA bases as "channels" a term borrowed from image analysis you will encounter a lot when working with tensorlfow. For comparison a set of 2D images would have the dimensions [sample x pixel_rows x pixel_columns x colour_channels]. A grey scale picture would have only one channel while RGB images have three. 
We can thus think of our sequence as a 1D image with 4 channels. # + [markdown] id="-if6bqKaSE1x" # ## Building the Network # # We now define our network. We first set some global and network architecture options and put them all together in the keras sequential mode. The sequential mode is an easy wrapper for linearly stacked networks that makes your code even more concise. We just define the model to be sequential and than add/stack layer after layer. Here we use a simple convolutional architecture. # # * Our first layer is a 1D convolution over the input: # * We use a 1D convolution because we only want the filter to move along the sequence axis and map the channels to the hidden units. # * We start with 10 hidden units or filters or kernels which are all of length 5 (bp) # * We use the RELU activation function # * We also define the input shape and how to pad the input if necessary (see doc.) # * We next perform max pooling where we take the maximum of a window of 5 consecutive activation values # * This reduces the data dimension, thus simplifying the model and speeding up further computations # * But it also enforces some extend of positional invariance into our model. For example, if we have a match to transcription factor motif in our sequence, we don't necessarily care where exactly this motif lies and a few bp up- or downstream shouldn't make a difference to our predictions. # * We then "Flatten" the activation values to a 1 dimensional vector # * And apply a fully connected or "Dense" layer connecting every value in the 1D vector to every class prediction # * We use the sigmoid (softmax) activation function to perform effectively a multinomial logistic regression # # The number of hidden units, the size of the kernel, the pooling size, but also the number and types of layers we use in the network are usually called hyperparameters. 
The most work in DL usually comes down to finding the right hyperparameters that let our network training converge and that give us the best possible (at least the best we are able to find) accuracies. # # We set some reasonable choices to begin with. But your task will be to play with these hyperparameters and see how well you can tune the model with a few adjustments. # + id="L3eVN0jEL5Ev" executionInfo={"status": "ok", "timestamp": 1605712369525, "user_tz": 0, "elapsed": 891, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} # Define the Model Architecture ------------------------------------------------ # global options num_classes = 4 # network architecture options conv1_hidden_units = 5 conv1_filter_size = 3 maxpool1_width = 2 # construct the model ---------------------------------------------------------- model = Sequential() model.add(Conv1D(conv1_hidden_units, kernel_size=(conv1_filter_size), activation='relu', input_shape=(200, 4), padding='same')) model.add(MaxPooling1D(pool_size=maxpool1_width)) ## code examples to add dropout and additional layers ## to add dropout use: # model.add(Dropout(rate = 0.3)) # Dropout # # for a second layer add it like this: # model.add(Conv1D(conv1_hidden_units, kernel_size=(conv1_filter_size), activation='relu', padding='same')) # model.add(MaxPooling1D(pool_size=maxpool1_width)) model.add(Flatten()) model.add(Dense(num_classes, activation='softmax')) # + [markdown] id="IlU4fVZHSsdS" # Next we compile the model. We use adam as our optimizer. Since the classes are mutually exclusive we select the categorical_crossentropy as our loss function and we want to monitor the accuracy during training. # # We also print a summary of our network telling us the data shapes throught the network and sumarizing the number of trainable parameters in our model. 
# + id="OZPCH5WASxLr" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605712407387, "user_tz": 0, "elapsed": 463, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="cda5306a-545c-4320-e97c-fd42adba739c" # compile ---------------------------------------------------------------------- model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # print model summary ---------------------------------------------------------- model.summary() # + [markdown] id="PO8y82o2TFX9" # ## Training # # Now that we have our model set up we can train it. We feed the model with our training sequences and labels, we define a batch size (since we are training in batch mode) and set the number of epochs (cycles through the training data) we want to train for. Five epochs should be fine for us feel free to ramp this up a bit and see if you get improvements or if the learning plateus quickly. 
# + id="MhvQukq1TPXU" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605712420741, "user_tz": 0, "elapsed": 11947, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="fca8b245-2e99-4992-ee51-f2e7c8f10fa4" # Training Options batch_size = 100 epochs = 10 # Train ------------------------------------------------------------------------ history = model.fit(train_seqs, train_labels, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(valid_seqs, valid_labels)) # For fun you can change the runtime type to cpu only and # compare how long the model takes there # then extrapolate to millions of data examples # + [markdown] id="j_X-3DfOQhrS" # # + [markdown] id="srhT4-NcTWzz" # You will notice that we stored the training output in a **history** variable. We can use this variable to monitor the loss function and accuracy over the training progress. 
# + id="aBkXtzyoTXCI" colab={"base_uri": "https://localhost:8080/", "height": 573} executionInfo={"status": "ok", "timestamp": 1605712424407, "user_tz": 0, "elapsed": 821, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="02b8f302-1343-479e-ccc1-04d9a09ee917" # Plot Training and Validation Loss ------------------------------ plt.figure() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation']) plt.show() # Plot Training and Validation Accuracy ------------------------------ plt.figure() plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation']) plt.show() # + [markdown] id="J3sAKG3hV_CQ" # Looks alright, the training as well as the validation accuracy is climbing from epoch to epoch and slows down a little more after every epoch. It is now your task to find better hyperparameters for our network and training procedure to see how high up you can get the accuracy. # # Tipps: # # * Should we train longer? # * Do we need more hidden layers? # * Could we max pool over the entire sequence to get one output per filter? # * Would a second layer be beneficial? # * You could also bias the network architechture with some biological knowledge: How long are transcription factor binding motifs in general and what would be an appropriate filter_width then? # # (Hint: you can do pretty well in 5 - 10 epochs with minor tweaks!) # # + [markdown] id="Pl7Z7qITWNoP" # Evaluation and Prediction # # Once you are happy with you network performance or in case you want to jump ahead first and optimize later, we will evaluate our network on the held out validation data. 
Technically, we only optimized on the training data set but we always kept an eye on the validation data loss as well. We are discarding all nets that do well on the training but worse at the validation (overfitted), therefore we always have an intrinsic bias. The test data set is meant to have never been touched throughout the whole optimization process and we evaluate the perormance of our final model on this set to get an unbiased estimate of its performance. # + id="LKJCMoPPWdkq" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605711966082, "user_tz": 0, "elapsed": 475, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="a29d663f-ad5f-4640-9545-6a097208d498" # Evaluate --------------------------------------------------------------------- test_file = "./data/pwm_seq_200bp_test_set.txt" test_labels, test_seqs = read_data(test_file) score = model.evaluate(test_seqs, test_labels, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # + [markdown] id="HI7MBOL9Wfek" # # # Once we are happy with our network we obviously want to employ it as well. Lets say we have a new sequence we want to classify. 
# # + id="Aq185u3_Wiar" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605711968060, "user_tz": 0, "elapsed": 487, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="c0d03f65-dd4d-4acc-d1cb-5a45c2867a2a" # Predictions ------------------------------------------------------------------ # read test sequences again with open(test_file, "r") as f: seqs = [] labels = [] for i,l in enumerate(f): l = l.rstrip() l = l.split("\t") seqs.append(l[1]) labels.append(l[0]) # select a single sequence single_seq = seqs[0] single_label = labels[0] print("Sequence: " + single_seq) # hot encode hotseq = get_hot_coded_seq(single_seq) # calculate predictions single_prediction = model.predict(np.expand_dims(hotseq, axis=0)) print("\nClass Prediction \"Probability\":") print("\tClass 0 = %s" % single_prediction[0][0]) print("\tClass 1 = %s" % single_prediction[0][1]) print("\tClass 2 = %s" % single_prediction[0][2]) print("\tClass 3 = %s" % single_prediction[0][3]) # print the true class print("\nTrue Class: " + single_label) # + id="R8QUuQ7hWzSG" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605711977337, "user_tz": 0, "elapsed": 692, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="baa6374c-fb93-4a9f-bae2-0f9ec66aea06" # or just run all predictions for all_test_predictions = model.predict(test_seqs) print(all_test_predictions.shape) print(all_test_predictions[5:8]) # + [markdown] id="SmsbE-BWXbcN" # # ##Inspecting Learned Filters # # Now that we have a reasonably working model, we also want to inspect and see what the net has learned. 
In applications, we often don't care what the network has learned as long as it performs well and outperforms our competitors. For many research problems however, we are exactly interested in what the network has learned. What features distinguish a cat from a dog or if it comes to decision making (e.g. health care or self driving cars), we obviously want to be able to understand and be able to justify why a certain decision has been chosen and learn how to correct missbehavior. # # In genomics we usually want to learn what sequence features distinguish the sequences from one another and map them back to biological properties and factors. The easiest way is to just plot the filter weights. In the first convolutional layer, our filters are just like position weight matrices, multiplying every base at every position with a learned weight and summing the value up (plus a bias and pipe it through the RELU activation function). Unfortunatly, this becomes less straight forward to interpret in deeper layers. There are ways of back engineering and learning the importance of filters in higher layers (e.g. https://github.com/kundajelab/deeplift) but we concern ourself only with the simple first layer here. # # We can get the weigths of the filters from the model, save them as .txt files and plot them out. I wrote a wrapper to plot the filter weigths for you in R. Run the code, check the filter_X.txt files and look at the plots and try to interpret them. In Colab runs you probaly want to download them. # # * Do any look like transciption factor binding sites you know? # * Do you recognize any sequence features that are not binding motifs? # * Can you simplify the sequences/ motifs from the plot an query them in a transcription factor binding motif database (http://jaspar.genereg.net/) # * What is your best bet: Which sequence motifs did we use for simulating the sequence classes? # * Check the input data. 
Split them up by class into text files with only the sequences one sequence per line (see example). Query them in standard motif analysis tools (e.g. http://rsat.sb-roscoff.fr/oligo-analysis_form.cgi or http://meme-suite.org/tools/meme). Do these tools find different or similar things? # # + id="ncB7-8g-Ybu3" executionInfo={"status": "ok", "timestamp": 1605711989506, "user_tz": 0, "elapsed": 602, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} # Inspect weights -------------------------------------------------------------- model_weights = model.get_weights() filter_weights = model_weights[0] # save conv filter weights for k in range(model_weights[0].shape[2]): # save single filter weights np.savetxt(("./visualize/filter_%s.txt" % k), filter_weights[:,:,k], delimiter="\t") # + id="fhlssg9kaIiy" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605712030118, "user_tz": 0, "elapsed": 12868, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="f5932a05-6514-437f-bc84-a4b3d306fb6b" language="bash" # # # Plot them using the supplied R script # Rscript ./helper/plot_sequence_kernel_weights_per_dir.R ./visualize ./visualize plot_weight 5 2.5 # + [markdown] id="M8QJwyRZa7NJ" # Now thay don't look the nice motifs with information content we are used to look at from ChIP-seq analysis and such. Our filters have positive and negative values all contributing to the "matching score" of a filter at a given position. In contrast, information content motifs don't have negative values just less informative bases that occur less often in a, let's say, binding site. 
Luckily there is a [transformation](https://www.biorxiv.org/content/10.1101/163220v2) we can apply to the filter weights to derive information content. For our purposes here, just use the helper script that implements the transformation. # + id="VdQK0t8vbsJn" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1605712063562, "user_tz": 0, "elapsed": 12122, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="a63b131f-f595-4d69-9fb1-31727d65ee54" language="bash" # # # Plot ICM-like motifs using the supplied R script # Rscript ./helper/plot_sequence_kernel_icms_per_dir.R ./visualize ./visualize plot_motif 5 2.5 # + [markdown] id="IIPoJryvdiwa" # A little wrapper to zip all plots and filter weights into a tar.gz. Download them from the files menu. # + id="eQC9DTgZdi8N" colab={"base_uri": "https://localhost:8080/", "height": 382} executionInfo={"status": "ok", "timestamp": 1589469139240, "user_tz": -60, "elapsed": 601, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="f9c68f31-2595-4342-ae96-dcc0b62e55ca" language="bash" # # tar czvf visualize.tar.gz ./visualize # + [markdown] id="T6-X-0dZc6IQ" # ## Visualize Importance # # A popular approach to visualize and start to interpret the importance of sequence features is the so-called **Saliency**. This term has been introduced to describe the gradient of the model output with respect to a sequence input. In other words, how much would the prediction change if we were to change a single base pair? Here, change means changing the *1* in the hot encoding to a marginally different value e.g. *1.00001*.
# # # I nicked this function and visualization was adapted from the James Zhou's [Primer on Genomics](https://colab.research.google.com/drive/17E4h5aAOioh5DiTo7MZg4hpL6Z_0FyWr#scrollTo=WNT_Au-dAP8a) colab notebook. Check it out and the paper is worth a read. # # # In summary, what we are doing here is calculating the gradient of the model ouput with respect to a single sequence input. We then multiply this gradient with the one hot endoded sequence, effectively zeroing all non present bases, and then sum this over the 4 output classes and trim everything below zero (for pure ease of visualization). # + id="6ocZEt_2eZcw" executionInfo={"status": "ok", "timestamp": 1605712094271, "user_tz": 0, "elapsed": 521, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} import tensorflow.keras.backend as K def compute_salient_bases(model, x): input_tensors = [model.input] gradients = model.optimizer.get_gradients(model.output, model.input) compute_gradients = K.function(inputs = input_tensors, outputs = gradients) gradients = compute_gradients([x])[0][0] x = np.squeeze(x, axis = 0) sal = np.clip(np.sum(np.multiply(gradients,x), axis=1),a_min=0, a_max=None) return sal # + id="vg5oB0RdgzgS" # use a single sequence again (index 3 works well) for selected_index in range(5): single_seq = seqs[selected_index] # hot encode hotseq = get_hot_coded_seq(single_seq) hotseq = np.expand_dims(hotseq, axis=0) # calculate predictions single_prediction = model.predict(hotseq) #print("Prediction: %s" % single_prediction) #print("True Label: %s" % labels[selected_index]) sal = compute_salient_bases(model, hotseq) #print(sal) plt.figure(figsize=[25,3]) barlist = plt.bar(np.arange(len(sal)), sal) plt.xlabel('Bases') plt.ylabel('Saliency values') plt.xticks(np.arange(len(sal)), list(single_seq)); plt.title('Saliency map for bases in one of the test sequences'); # + id="fTCrGCS6mMKP" 
colab={"base_uri": "https://localhost:8080/", "height": 397} executionInfo={"status": "ok", "timestamp": 1589360255091, "user_tz": -60, "elapsed": 3449, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gicz_Ftaghl9mN26kpu5FOZzZsa5x0qTv04nw-Q=s64", "userId": "01462532515512744206"}} outputId="49701031-5487-410b-9986-68253d1e4571"
# Re-plot the saliency map for the LAST sequence processed by the loop above
# (relies on `hotseq` and `single_seq` being left over in the global namespace).
sal = compute_salient_bases(model, hotseq)
print(sal)
plt.figure(figsize=[25,3])
barlist = plt.bar(np.arange(len(sal)), sal)
plt.xlabel('Bases')
plt.ylabel('Saliency values')
plt.xticks(np.arange(len(sal)), list(single_seq));
plt.title('Saliency map for bases in one of the test sequences');

# + [markdown] id="NKqSNnuhXkg4"
# ## Further
#
# On the GitHub page you will find links to a slightly more difficult set, using more motifs and 300 bp sequences.
# You can adapt the notebook to run on those.
#
Machine_learning_applications/practicals/DL_intro_gms2020.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %load_ext autoreload
# %autoreload 2

# +
import numpy as np
import json
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline

from src import SSD, AnchorGenerator, FeatureExtractor
from src.backbones import mobilenet_v1_base
# -

# # Generate anchors

# +
# TF1-style graph mode: build the anchor-generation graph from scratch.
tf.reset_default_graph()

params = json.load(open('config.json'))
input_pipeline_params = params['input_pipeline_params']
params = params['model_params']

width, height = input_pipeline_params['image_size']
# NCHW placeholder: [batch, 3 channels, height, width]
images = tf.placeholder(tf.float32, [None, 3, height, width])
is_training = False

def backbone(images, is_training):
    # MobileNet v1 feature extractor at full depth (multiplier 1.0)
    return mobilenet_v1_base(images, is_training, min_depth=8, depth_multiplier=1.0)

feature_extractor = FeatureExtractor(backbone, is_training)
anchor_generator = AnchorGenerator(
    min_scale=params['min_scale'], max_scale=params['max_scale'],
    aspect_ratios=params['aspect_ratios'],
    interpolated_scale_aspect_ratio=params['interpolated_scale_aspect_ratio'],
    reduce_boxes_in_lowest_layer=params['reduce_boxes_in_lowest_layer']
)
feature_maps = feature_extractor(images)
anchors = anchor_generator(feature_maps, image_size=(width, height))
anchor_grid_list = anchor_generator.anchor_grid_list
# -

feature_maps

anchors

anchor_generator.num_anchors_per_location

num_anchors_per_feature_map = anchor_generator.num_anchors_per_feature_map
num_anchors_per_feature_map

anchor_grid_list

with tf.Session() as sess:
    anchor_boxes = sess.run(anchor_grid_list)

# Inspect feature map 3 only.
# NOTE(review): the reshape assumes that grid is 6x10 cells with 7 anchors per
# location -- verify against num_anchors_per_feature_map for your config.
more_anchor_boxes = anchor_boxes[3]
# anchor_boxes = anchor_boxes.reshape((6, 10, 6, 4))
more_anchor_boxes = more_anchor_boxes.reshape((6, 10, 7, 4))

# # Show non clipped anchors

# +
# Boxes are stored as normalized (ymin, xmin, ymax, xmax); convert to
# pixel-space centers and sizes for plotting.
ymin, xmin, ymax, xmax = [more_anchor_boxes[:, :, :, i] for i in range(4)]
h, w = height*(ymax - ymin), width*(xmax - xmin)
cy, cx = height*ymin + 0.5*h, width*xmin + 0.5*w
centers = np.stack([cy, cx], axis=3)
anchor_sizes = np.stack([h, w], axis=3)

# +
fig, ax = plt.subplots(1, dpi=100, figsize=(int(5*width/height), 5))

# All anchors at one location share a center, so take anchor 0's centers;
# all locations share the same size set, so take location (0, 0)'s sizes.
unique_centers = centers[:, :, 0, :].reshape(-1, 2)
unique_sizes = anchor_sizes[0, 0, :, :]
i = 1  # highlight this center and draw its anchor boxes

for j, point in enumerate(unique_centers):
    cy, cx = point
    color = 'g' if j == i else 'r'
    ax.plot([cx], [cy], marker='o', markersize=3, color=color)

cy, cx = unique_centers[i]
for box in unique_sizes:
    h, w = box
    xmin, ymin = cx - 0.5*w, cy - 0.5*h
    rect = plt.Rectangle(
        (xmin, ymin), w, h,
        linewidth=1.0, edgecolor='k', facecolor='none'
    )
    ax.add_patch(rect)

plt.xlim([0, width]);
plt.ylim([0, height]);
visualize_anchor_boxes.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.5.4
#     language: julia
#     name: julia-1.5
# ---

# # Generating simulated data using ODE models
# <NAME> (@sdwfrost), 2020-04-27
#
# ## Introduction
#
# In this notebook, different ways of generating the number of new cases per day are described.
#
# ## Libraries

using DifferentialEquations
using SimpleDiffEq
using DiffEqCallbacks
using Random
using Distributions
using Plots

# ## Method 1: Calculate cumulative infections and post-process
#
# A variable is included for the cumulative number of infections, $C$.

# In-place SIR vector field with an extra cumulative-incidence state.
# u = (S, I, R, C); p = (β, c, γ): infection probability, contact rate, recovery rate.
function sir_ode!(du,u,p,t)
    (S,I,R,C) = u
    (β,c,γ) = p
    N = S+I+R
    infection = β*c*I/N*S
    recovery = γ*I
    @inbounds begin
        du[1] = -infection
        du[2] = infection - recovery
        du[3] = recovery
        du[4] = infection   # C accumulates incidence and never decreases
    end
    nothing
end;

tmax = 40.0
δt = 1.0
tspan = (0.0,tmax)
obstimes = 1.0:δt:tmax;
u0 = [990.0,10.0,0.0,0.0]; # S,I,R,C
p = [0.05,10.0,0.25]; # β,c,γ

prob_ode = ODEProblem(sir_ode!,u0,tspan,p)
sol_ode_cumulative = solve(prob_ode,Tsit5(),saveat=δt);

# The cumulative counts are extracted.

out = Array(sol_ode_cumulative)
C = out[4,:];

# The new cases per day are calculated from the cumulative counts.

X = C[2:end] .- C[1:(end-1)];

# Although the ODE system is deterministic, we can add measurement error to the counts of new cases. Here, a Poisson distribution is used, although a negative binomial could also be used (which would introduce an additional parameter for the variance).

Random.seed!(1234);

Y = rand.(Poisson.(X));

bar(obstimes,Y)
plot!(obstimes,X)

# For this particular model, the decline in susceptibles matches the increase in infections. Here is a comparison of the two.

S = out[1,:]
Cpred = 990.0 .- S
Cdiff = Cpred .- C
plot(obstimes,Cdiff[2:end])

# Note that the difference between these two curves is at the limit of machine precision.
#
# ## Method 2: convert cumulative counts to daily counts using a callback
#
# In order to fit counts of new infections every time unit, we add a callback that sets $C$ to zero at the observation times. This will result in two observations (one with non-zero `C`, one with `C`=0) at each observation time. However, the standard saving behaviour is turned off, so we don't need to have a special saving callback.

# Reset the cumulative-incidence state (u[4]) at every observation time.
affect!(integrator) = integrator.u[4] = 0.0
cb_zero = PresetTimeCallback(obstimes,affect!);

# The callback that resets `C` is added to `solve`. Note that this requires `DiffEqCallbacks`. If multiple callbacks are required, then a `CallbackSet` can be passed instead.

sol_ode_cb = solve(prob_ode,Tsit5(),saveat=δt,callback=cb_zero);

# We cannot simply convert the solution to an `Array`, as this will give us duplicated timepoints when `C` is reset. Calling the solution with the observation times generates the output before the callback.

X_cb = sol_ode_cb(obstimes)[4,:];

Random.seed!(1234);

Y_cb = rand.(Poisson.(X_cb));

# Differences against Method 1 should be numerically negligible.
X_diff_cb = X_cb .- X
plot(obstimes,X_diff_cb)

Y_diff_cb = Y_cb .- Y
plot(obstimes,Y_diff_cb)

# ## Method 3: Use a delay differential equation to track daily counts

# As sir_ode!, but du[4] subtracts the incidence rate lagged by one time unit,
# so u[4] tracks incidence over the trailing unit interval.
function sir_dde!(du,u,h,p,t)
    (S,I,R,C) = u
    (β,c,γ) = p
    N = S+I+R
    infection = β*c*I/N*S
    recovery = γ*I
    e = oneunit(t)           # one time unit, in t's units
    history = h(p, t-e)*inv(e)
    @inbounds begin
        du[1] = -infection
        du[2] = infection - recovery
        du[3] = recovery
        du[4] = infection - history[4]
    end
    nothing
end;

# Pre-t0 history: all states zero.
function sir_history(p, t; idxs = 5)
    zero(t)
end;

prob_dde = DDEProblem(DDEFunction(sir_dde!), u0, sir_history, tspan, p; constant_lags = [1.0]);

sol_dde = solve(prob_dde,MethodOfSteps(Tsit5()));

X_dde = sol_dde(obstimes)[4,:];

Random.seed!(1234)
Y_dde = rand.(Poisson.(X_dde));

# The following plots show that there is a difference both in the underlying model output as well as the simulated (Poisson) data using the delay differential equation.
# Compare the DDE-based daily counts against Method 1's post-processed counts.
X_diff_dde = X_dde .- X
plot(X_diff_dde)

Y_diff_dde = Y_dde .- Y
plot(obstimes, Y_diff_dde)

# ## Summary
#
# All three methods are mathematically equivalent; the first method, while not directly producing daily counts of cases, results in fewer numerical issues and lends itself more easily to automatic differentiation.
notebook/ode_simdata/ode_simdata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="zLl9oP-RfrdI" colab_type="text" # ## **Setup of the OS** # `` # ''!'' in notebook means that you are executing commands in commad shell # # Update all packages of debian-based distributive of the Linux OS. # + id="7mFTn3l7fOLn" colab_type="code" outputId="048f3af8-9d52-40db-f353-74089ea5906c" colab={"base_uri": "https://localhost:8080/", "height": 102} # ! lsb_release -a # + id="OWOrJHDJaiYF" colab_type="code" outputId="27cca891-c8d1-4d20-f5e7-8552e010f0be" colab={"base_uri": "https://localhost:8080/", "height": 34} # !apt update -qq; # + id="XqtcOlsmBOjX" colab_type="code" outputId="c62da731-9ee2-4284-c5f3-f272aa2ef054" colab={"base_uri": "https://localhost:8080/", "height": 309} # !wget https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb; # + id="kI9HAxg-BXxO" colab_type="code" outputId="8f5718d0-f2f1-4bac-a99c-5efce342cd97" colab={"base_uri": "https://localhost:8080/", "height": 187} # !dpkg -i cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb; # + id="SeECu_MOBb1z" colab_type="code" outputId="40dd236a-0176-4e92-f859-820f5adeb76c" colab={"base_uri": "https://localhost:8080/", "height": 34} # !apt-key add /var/cuda-repo-8-0-local-ga2/7fa2af80.pub; # + id="EWj-8posi2qP" colab_type="code" colab={} # !apt-get update -qq; # + [markdown] id="J5iJMpgXi7Hu" colab_type="text" # Install packages for CUDA programming with C++ # + id="VovJ51TCjB09" colab_type="code" colab={} # !apt-get install cuda-10-0 gcc-5 g++-5 -y -qq; # + [markdown] id="6kk8rplbk0jS" colab_type="text" # Create symbolic links for compiler files # + id="TpWdZVLolBdj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="7b390401-7735-4a00-b373-ab945b9262be" # !ln -s /usr/bin/gcc-5 
/usr/local/cuda/bin/gcc; # !ln -s /usr/bin/g++-5 /usr/local/cuda/bin/g++; # + [markdown] id="1Neyi2-7li5c" colab_type="text" # Upload extension to notebook to work with CUDA C directly from notebook # + id="gkQ3ksTWlpyM" colab_type="code" outputId="9d0ede0a-994b-4e0b-b8e8-f72285bd74ec" colab={"base_uri": "https://localhost:8080/", "height": 153} # !pip install git+git://github.com/andreinechaev/nvcc4jupyter.git # + id="OVRIhnAtBnvC" colab_type="code" outputId="f57a2642-ac7a-4fd3-d673-c49dad2ba776" colab={"base_uri": "https://localhost:8080/", "height": 51} # %load_ext nvcc_plugin # + id="Ff6TJ2v5B7yy" colab_type="code" outputId="f56d8c70-ea84-4ba5-c8b2-1e09dab49cb7" colab={"base_uri": "https://localhost:8080/", "height": 51} # !ls /usr/local/cuda/samples/1_Utilities/deviceQuery/ # + id="pbSfXXlQB9F8" colab_type="code" outputId="082176d8-a59e-4e20-80d8-c44b63e62a3c" colab={"base_uri": "https://localhost:8080/", "height": 139} # !cd /usr/local/cuda/samples/1_Utilities/deviceQuery;make clean; make # + id="5fcfs1WtCAhz" colab_type="code" outputId="c171d7db-845f-412c-8a6f-9acd94fc39a8" colab={"base_uri": "https://localhost:8080/", "height": 765} # ! 
/usr/local/cuda/samples/1_Utilities/deviceQuery/deviceQuery # + id="X1YudrMPCMqE" colab_type="code" outputId="a607634b-f916-41a2-e379-5facf2bbd40c" colab={"base_uri": "https://localhost:8080/", "height": 306} # !nvidia-smi # + [markdown] id="9mrRnS1Lsaq8" colab_type="text" # Mount google drive with test image: # + id="murDhIypsc_F" colab_type="code" outputId="bcc2100e-0cbf-4809-fb31-34e3222bf142" colab={"base_uri": "https://localhost:8080/", "height": 34} from google.colab import drive drive.mount('/content/drive') # + id="-Gbui9jyWoVC" colab_type="code" outputId="ad6bf054-deaf-4ac8-d1fb-ebf11a6a086b" colab={"base_uri": "https://localhost:8080/", "height": 340} # !ls -al '/content/drive/My Drive' # + [markdown] id="UqNp8Qrctr7Y" colab_type="text" # Code for SUM on CPU # + id="vIDaCN0FtiM2" colab_type="code" outputId="331549e3-c715-4805-db59-4fbd54a5d6b1" colab={"base_uri": "https://localhost:8080/", "height": 34} # %%cu #include <stdio.h> #include <immintrin.h> #include <stdlib.h> #include <sys/times.h> #include <time.h> using namespace std; const float msec_const = 1000.0; struct BMPInfo { unsigned char* data; int size; }; BMPInfo readBMP(const char* filename) { int i; FILE* f = fopen(filename, "rb"); unsigned char info[54]; size_t a = fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&info[18]; int height = *(int*)&info[22]; int size = 3 * width * height; unsigned char* data = new unsigned char[size]; // allocate 3 bytes per pixel a = fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once fclose(f); for(i = 0; i < size; i += 3) { unsigned char tmp = data[i]; data[i] = data[i+2]; data[i+2] = tmp; } BMPInfo bmpInfo = {data, size}; return bmpInfo; } u_int64_t cpu_array_sum(unsigned char* array, unsigned long size) { u_int64_t result = 0; for (unsigned long i = 0; i < size; i++) { result += array[i]; } return result; } int main(int argc, char *argv[]) { 
clock_t start_t; clock_t end_t; clock_t clock_delta; double clock_delta_msec; const char *filename = "/content/drive/My Drive/Colab Notebooks/PerformanceEngineering/Homework2/1.bmp"; BMPInfo bmpInfo = readBMP(filename); unsigned long one_color_channel_data_size = bmpInfo.size / 3; unsigned char* one_color_channel_data = new unsigned char[one_color_channel_data_size]; for(unsigned long i = 0; i < one_color_channel_data_size; i++) { one_color_channel_data[i] = bmpInfo.data[3 * i]; } start_t = clock(); u_int64_t sum = cpu_array_sum(one_color_channel_data, one_color_channel_data_size); end_t = clock(); clock_delta = end_t - start_t; clock_delta_msec = (double) (clock_delta / msec_const); printf("Sum: \t %lu \t\n", sum); printf("CPU sum: \t %.6f ms \t\n", clock_delta_msec); free(one_color_channel_data); } # + [markdown] id="f4KJxfCcB9YR" colab_type="text" # Code for SUM on GPU # + id="OAqifl6JB-_7" colab_type="code" outputId="9bede308-3631-4c0e-ca56-3cddbb6cfa5f" colab={"base_uri": "https://localhost:8080/", "height": 34} # %%cu #include <stdio.h> #include <iostream> #include <fstream> #include <stdint.h> #include <stdlib.h> #include <sys/times.h> #include <time.h> using namespace std; #define BLOCK 1024 #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error at runtime: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) struct BMPInfo { unsigned char* data; int size; int width; int height; }; __global__ void sum_reduce_simple(u_int64_t *g_ivec, u_int64_t *g_ovec, int index, unsigned long size){ extern __shared__ u_int64_t sdata[]; //each thread load s one element from global to shared mem unsigned int tid = threadIdx.x; unsigned long i = index*BLOCK*BLOCK + blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { sdata[tid] = g_ivec[i]; __syncthreads(); // do reduction in shared mem for 
(unsigned int s=1; s < blockDim.x ; s *= 2) { if (tid % (2*s) == 0) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_ovec[BLOCK*index + blockIdx.x] = sdata[0]; } else { sdata[tid] = 0; } } BMPInfo readBMP(const char* filename) { int i; FILE* f = fopen(filename, "rb"); unsigned char info[54]; size_t a = fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&info[18]; int height = *(int*)&info[22]; int size = 3 * width * height; unsigned char* data = new unsigned char[size]; // allocate 3 bytes per pixel a = fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once fclose(f); for(i = 0; i < size; i += 3) { unsigned char tmp = data[i]; data[i] = data[i+2]; data[i+2] = tmp; } BMPInfo bmpInfo = {data, size, width, height}; return bmpInfo; } int main() { const char *filename = "/content/drive/My Drive/Colab Notebooks/PerformanceEngineering/Homework2/1.bmp"; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); u_int64_t *d_image, *h_image; u_int64_t *d_result, *h_result; //ALLOCATE HOST MEM BMPInfo bmpInfo = readBMP(filename); unsigned long size = bmpInfo.size / 3; h_image = new u_int64_t[size]; for(unsigned long i = 0; i < size; i++) { h_image[i] = (u_int64_t)(bmpInfo.data[3 * i]); } h_result = (u_int64_t *) malloc(sizeof(u_int64_t)); int full_grids_number = (int)(size / (double)(BLOCK*BLOCK)); int elements_left_after_grids = size % (BLOCK * BLOCK); int blocks_number = ceil(elements_left_after_grids / (double)BLOCK); int block_results_count = ceil(size / (double)BLOCK); //ALLOCATE MEM cudaMalloc(&d_image, size * sizeof(u_int64_t)); cudaMalloc(&d_result, block_results_count*sizeof(u_int64_t)); cudaCheckErrors("cudaMalloc fail \n"); cudaMemcpy(d_image, h_image, size*sizeof(u_int64_t), cudaMemcpyHostToDevice); cudaCheckErrors("CudaMEMCPY to DEVICE fail \n"); 
cudaEventRecord(start, 0); for (int i = 0; i<full_grids_number; i++) { sum_reduce_simple <<< BLOCK, BLOCK, BLOCK*sizeof(u_int64_t) >>> (d_image, d_result, i, size); } if (blocks_number > 0) { sum_reduce_simple <<< blocks_number, BLOCK, BLOCK*sizeof(u_int64_t) >>> (d_image, d_result, full_grids_number, size); } cudaCheckErrors("Kernel sum_reduce_simple CALL fail \n"); if (block_results_count > 1) { size = block_results_count; blocks_number = ceil(size / (double)BLOCK); sum_reduce_simple <<< blocks_number, BLOCK, BLOCK*sizeof(u_int64_t) >>> (d_result, d_result, 0, size); cudaCheckErrors("Kernel sum_reduce_simple CALL fail \n"); } if (blocks_number > 1) { size = blocks_number; sum_reduce_simple <<< 1, BLOCK, BLOCK*sizeof(u_int64_t) >>> (d_result, d_result, 0, size); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf ("Time for the sum_reduce_simple kernel: %f ms\n", time); cudaMemcpy(h_result, d_result, sizeof(u_int64_t), cudaMemcpyDeviceToHost); cudaCheckErrors("Memory copying result fail \n"); //FREE MEM cudaFree(d_image); cudaFree(d_result); cudaCheckErrors("cudaFree fail \n"); printf ("SUM is: %d\n",h_result[0]); free(h_image); free(h_result); return(0); } # + [markdown] id="L4dhNplqEjDy" colab_type="text" # Code for MIN on CPU # + id="OHLR4wUYDso0" colab_type="code" outputId="a2c8a9fb-b9cc-4933-85a9-075ac376a71e" colab={"base_uri": "https://localhost:8080/", "height": 34} # %%cu #include <stdio.h> #include <immintrin.h> #include <stdlib.h> #include <sys/times.h> #include <time.h> using namespace std; const float msec_const = 1000.0; struct BMPInfo { unsigned char* data; int size; }; BMPInfo readBMP(const char* filename) { int i; FILE* f = fopen(filename, "rb"); unsigned char info[54]; size_t a = fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&info[18]; int height = *(int*)&info[22]; int size = 3 * width * height; unsigned 
char* data = new unsigned char[size]; // allocate 3 bytes per pixel a = fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once fclose(f); for(i = 0; i < size; i += 3) { unsigned char tmp = data[i]; data[i] = data[i+2]; data[i+2] = tmp; } BMPInfo bmpInfo = {data, size}; return bmpInfo; } unsigned char cpu_array_min(unsigned char* array, unsigned long size) { unsigned char min = 255; for (unsigned long i = 0; i < size; i++) { if (array[i] < min) { min = array[i]; } } return min; } int main(int argc, char *argv[]) { clock_t start_t; clock_t end_t; clock_t clock_delta; double clock_delta_msec; const char *filename = "/content/drive/My Drive/Colab Notebooks/PerformanceEngineering/Homework2/1.bmp"; BMPInfo bmpInfo = readBMP(filename); unsigned long one_color_channel_data_size = bmpInfo.size / 3; unsigned char* one_color_channel_data = new unsigned char[one_color_channel_data_size]; for(unsigned long i = 0; i < one_color_channel_data_size; i++) { one_color_channel_data[i] = bmpInfo.data[3 * i]; } start_t = clock(); unsigned char min = cpu_array_min(one_color_channel_data, one_color_channel_data_size); end_t = clock(); clock_delta = end_t - start_t; clock_delta_msec = (double) (clock_delta / msec_const); printf("Min: \t %d \t\n", min); printf("CPU min: \t %.6f ms \t\n", clock_delta_msec); free(one_color_channel_data); } # + [markdown] id="QnrlbP0aDMTt" colab_type="text" # Code for MIN on GPU # + id="wYL_n3H4DN4X" colab_type="code" outputId="518b1c04-0315-4ba3-9202-91c0caaf74b5" colab={"base_uri": "https://localhost:8080/", "height": 34} # %%cu #include <stdio.h> #include <iostream> #include <fstream> #include <stdint.h> #include <stdlib.h> #include <sys/times.h> #include <time.h> using namespace std; #define BLOCK 1024 #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error at runtime: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ 
fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) struct BMPInfo { unsigned char* data; int size; int width; int height; }; __global__ void min_reduce_simple(unsigned char *g_ivec, unsigned char *g_ovec, int index, unsigned long size){ extern __shared__ unsigned char sdata[]; //each thread load s one element from global to shared mem unsigned int tid = threadIdx.x; unsigned long i = index*BLOCK*BLOCK + blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { sdata[tid] = g_ivec[i]; __syncthreads(); // do reduction in shared mem for (unsigned int s=1; s < blockDim.x ; s *= 2) { if ((tid % (2*s) == 0) && (sdata[tid] > sdata[tid + s])) { sdata[tid] = sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_ovec[BLOCK*index + blockIdx.x] = sdata[0]; } else { sdata[tid] = 255; } } BMPInfo readBMP(const char* filename) { int i; FILE* f = fopen(filename, "rb"); unsigned char info[54]; size_t a = fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&info[18]; int height = *(int*)&info[22]; int size = 3 * width * height; unsigned char* data = new unsigned char[size]; // allocate 3 bytes per pixel a = fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once fclose(f); for(i = 0; i < size; i += 3) { unsigned char tmp = data[i]; data[i] = data[i+2]; data[i+2] = tmp; } BMPInfo bmpInfo = {data, size, width, height}; return bmpInfo; } int main() { const char *filename = "/content/drive/My Drive/Colab Notebooks/PerformanceEngineering/Homework2/1.bmp"; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); unsigned char *d_image, *h_image; unsigned char *d_result, *h_result; //ALLOCATE HOST MEM BMPInfo bmpInfo = readBMP(filename); unsigned long size = bmpInfo.size / 3; h_image = new unsigned char[size]; for(unsigned long i = 0; i < size; i++) { h_image[i] = bmpInfo.data[3 * i]; } 
h_result = (unsigned char *) malloc(sizeof(unsigned char)); int full_grids_number = (int)(size / (double)(BLOCK*BLOCK)); int elements_left_after_grids = size % (BLOCK * BLOCK); int blocks_number = ceil(elements_left_after_grids / (double)BLOCK); int block_results_count = ceil(size / (double)BLOCK); //ALLOCATE MEM cudaMalloc(&d_image, size * sizeof(unsigned char)); cudaMalloc(&d_result, block_results_count*sizeof(unsigned char)); cudaCheckErrors("cudaMalloc fail \n"); cudaMemcpy(d_image, h_image, size*sizeof(unsigned char), cudaMemcpyHostToDevice); cudaCheckErrors("CudaMEMCPY to DEVICE fail \n"); cudaEventRecord(start, 0); for (int i = 0; i<full_grids_number; i++) { min_reduce_simple <<< BLOCK, BLOCK, BLOCK*sizeof(unsigned char) >>> (d_image, d_result, i, size); } if (blocks_number > 0) { min_reduce_simple <<< blocks_number, BLOCK, BLOCK*sizeof(unsigned char) >>> (d_image, d_result, full_grids_number, size); } cudaCheckErrors("Kernel min_reduce_simple CALL fail \n"); if (block_results_count > 1) { size = block_results_count; blocks_number = ceil(size / (double)BLOCK); min_reduce_simple <<< blocks_number, BLOCK, BLOCK*sizeof(unsigned char) >>> (d_result, d_result, 0, size); cudaCheckErrors("Kernel min_reduce_simple CALL fail \n"); } if (blocks_number > 1) { size = blocks_number; min_reduce_simple <<< 1, BLOCK, BLOCK*sizeof(unsigned char) >>> (d_result, d_result, 0, size); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf ("Time for the min_reduce_simple kernel: %f ms\n", time); cudaMemcpy(h_result, d_result, sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaCheckErrors("Memory copying result fail \n"); //FREE MEM cudaFree(d_image); cudaFree(d_result); cudaCheckErrors("cudaFree fail \n"); printf ("MIN is: %d\n",h_result[0]); free(h_image); free(h_result); return(0); } # + [markdown] id="BjXbD4VHEmbA" colab_type="text" # Code for Integral Image on CPU # + id="IISVC85gDszL" colab_type="code" 
outputId="59455cf2-fa31-420d-ed77-579a1c8d0961" colab={"base_uri": "https://localhost:8080/", "height": 34} # %%cu #include <stdio.h> #include <immintrin.h> #include <stdlib.h> #include <sys/times.h> #include <time.h> using namespace std; const float msec_const = 1000.0; struct BMPInfo { unsigned char* data; int size; int width; int height; }; BMPInfo readBMP(const char* filename) { int i; FILE* f = fopen(filename, "rb"); unsigned char info[54]; size_t a = fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&info[18]; int height = *(int*)&info[22]; int size = 3 * width * height; unsigned char* data = new unsigned char[size]; // allocate 3 bytes per pixel a = fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once fclose(f); for(i = 0; i < size; i += 3) { unsigned char tmp = data[i]; data[i] = data[i+2]; data[i+2] = tmp; } BMPInfo bmpInfo = {data, size, width, height}; return bmpInfo; } unsigned char* cpu_integral_image(unsigned char* array, unsigned long size, int width, int height) { unsigned char *result = (unsigned char *)malloc(size * sizeof(unsigned char)); for (unsigned long i = 0; i < size; i++) { result[i] = array[i]; } for (int i = 0; i < height; i++) { for (int j = 1; j < width; j++) { result[i * width + j] += result[i * width + j - 1]; } } for (int i = 0; i < width; i++) { for (int j = 1; j < height; j++) { result[j * width + i] += result[(j - 1) * width + i]; } } return result; } int main(int argc, char *argv[]) { clock_t start_t; clock_t end_t; clock_t clock_delta; double clock_delta_msec; const char *filename = "/content/drive/My Drive/Colab Notebooks/PerformanceEngineering/Homework2/1.bmp"; BMPInfo bmpInfo = readBMP(filename); unsigned long one_color_channel_data_size = bmpInfo.size / 3; unsigned char* one_color_channel_data = new unsigned char[one_color_channel_data_size]; for(unsigned long i = 0; i < one_color_channel_data_size; i++) { 
one_color_channel_data[i] = bmpInfo.data[3 * i];
    }
    start_t = clock();
    // NOTE(review): cpu_integral_image accumulates in unsigned char, so every
    // cell wraps mod 256 -- only the low byte of the true integral image is
    // computed, and the "Last elements" printed below are mod-256 values.
    unsigned char* result = cpu_integral_image(one_color_channel_data, one_color_channel_data_size, bmpInfo.width, bmpInfo.height);
    end_t = clock();
    clock_delta = end_t - start_t;
    clock_delta_msec = (double) (clock_delta / msec_const);
    printf("Last elements: %d, %d, %d\n", result[one_color_channel_data_size-3], result[one_color_channel_data_size-2], result[one_color_channel_data_size-1]);
    printf("CPU integral image: \t %.6f ms \t\n", clock_delta_msec);
    free(one_color_channel_data);
}

# + [markdown] id="_c2-gp5iFgWH" colab_type="text"
# Code for Integral Image on GPU

# + id="yXeOnCnGFicD" colab_type="code" outputId="c9ed2af3-f502-4ed2-d3f4-4a41a0de3cac" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %%cu
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <stdint.h>
#include <stdlib.h>
#include <sys/times.h>
#include <time.h>
using namespace std;

#define BLOCK 1024

// Abort with a diagnostic if the last CUDA API call / kernel launch failed.
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error at runtime: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

// Pixel data plus dimensions parsed from a 24-bit BMP file.
struct BMPInfo {
    unsigned char* data;
    int size;
    int width;
    int height;
};

// Row pass: one thread per image row, sequential in-place prefix sum along the row.
// NOTE(review): sums are held in unsigned char, so integral values wrap mod 256
// (matches the CPU version above, but neither computes a true integral image).
__global__ void integral_image_rows(unsigned char *g_ivec, int height, int width){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < height) {
        for (int j = 1; j < width; j++) {
            g_ivec[i * width + j] += g_ivec[i * width + j - 1];
        }
    }
}

// Column pass: one thread per image column, sequential in-place prefix sum down the column.
__global__ void integral_image_columns(unsigned char *g_ivec, int height, int width){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < width) {
        for (int j = 1; j < height; j++) {
            g_ivec[j * width + i] += g_ivec[(j - 1) * width + i];
        }
    }
}

// Read a 24-bit BMP: parse width/height from the 54-byte header, load the pixel
// payload, and swap B/R bytes so pixels are in RGB order.
BMPInfo readBMP(const char* filename) {
    int i;
    FILE* f = fopen(filename, "rb");
    unsigned char info[54];
    size_t a = fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header

    // extract image height and width from header
    int width = *(int*)&info[18];
    int height = *(int*)&info[22];

    int size = 3 * width * height;
    unsigned char* data = new unsigned char[size]; // allocate 3 bytes per pixel
    a = fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once
    fclose(f);

    for(i = 0; i < size; i += 3) {
        unsigned char tmp = data[i];
        data[i] = data[i+2];
        data[i+2] = tmp;
    }

    BMPInfo bmpInfo = {data, size, width, height};
    return bmpInfo;
}

// Extract one color channel, upload it, run the row pass then the column pass
// (both on the default stream, so the column pass starts only after the row
// pass completes), and report kernel time plus the last three result bytes.
int main() {
    const char *filename = "/content/drive/My Drive/Colab Notebooks/PerformanceEngineering/Homework2/1.bmp";
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    unsigned char *d_image, *h_image, *h_result;

    //ALLOCATE HOST MEM
    BMPInfo bmpInfo = readBMP(filename);
    unsigned long size = bmpInfo.size / 3;
    int width = bmpInfo.width;
    int height = bmpInfo.height;
    // NOTE(review): allocated with new[] but released with free() below --
    // mismatched allocator (undefined behavior); bmpInfo.data is never released.
    h_image = new unsigned char[size];
    h_result = new unsigned char[size];
    // take every 3rd byte: one color channel of the RGB image
    for(unsigned long i = 0; i < size; i++) {
        h_image[i] = bmpInfo.data[3 * i];
    }

    //ALLOCATE MEM
    cudaMalloc(&d_image, size * sizeof(unsigned char));
    cudaCheckErrors("cudaMalloc fail \n");
    cudaMemcpy(d_image, h_image, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaCheckErrors("CudaMEMCPY to DEVICE fail \n");

    cudaEventRecord(start, 0);
    int height_blocks = ceil(height / (double)BLOCK);
    integral_image_rows <<< height_blocks, BLOCK >>> (d_image, height, width);
    cudaCheckErrors("Kernel integral_image_rows CALL fail \n");
    int width_blocks = ceil(width / (double)BLOCK);
    integral_image_columns <<< width_blocks, BLOCK >>> (d_image, height, width);
    cudaCheckErrors("Kernel integral_image_columns CALL fail \n");
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf ("Time for the integral_image kernel: %f ms\n", time);

    cudaMemcpy(h_result, d_image, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaCheckErrors("Memory copying result fail \n");

    //FREE MEM
    cudaFree(d_image);
    cudaCheckErrors("cudaFree fail \n");
    printf("Last elements: %d, %d, %d\n", h_result[size-3], h_result[size-2], h_result[size-1]);
    free(h_image);
    free(h_result);
    return(0);
}
homework2_cuda/colab_cpu_gpu_operations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PyTorch Text Classifier # We will create a text classifier that can differentiate between 4 news categories. # This is adapted from [this tutorial](https://pytorch.org/tutorials/beginner/text_sentiment_ngrams_tutorial.html) # ! pip install torchtext torch-model-archiver # + import torch from torchtext.datasets import AG_NEWS from torchtext.data.utils import get_tokenizer from collections import Counter from torchtext.vocab import Vocab tokenizer = get_tokenizer('basic_english') train_iter = AG_NEWS(split='train') counter = Counter() for (label, line) in train_iter: counter.update(tokenizer(line)) vocab = Vocab(counter, min_freq=1) text_pipeline = lambda x: [vocab[token] for token in tokenizer(x)] label_pipeline = lambda x: int(x) - 1 # - # ### Generate data batch and iterator # [torch.utils.data.DataLoader](https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader) is recommended for PyTorch users (a tutorial is [here](https://pytorch.org/tutorials/beginner/data_loading_tutorial.html)). It works with a map-style dataset that implements the getitem() and len() protocols, and represents a map from indices/keys to data samples. It also works with an iterable datasets with the shuffle argumnent of False. # # Before sending to the model, `collate_fn` function works on a batch of samples generated from `DataLoader`. The input to `collate_fn` is a batch of data with the batch size in `DataLoader`, and `collate_fn` processes them according to the data processing pipelines declared previouly. Pay attention here and make sure that `collate_fn` is declared as a top level def. This ensures that the function is available in each worker. 
# # In this example, the text entries in the original data batch input are packed into a list and concatenated as a single tensor for the input of nn.EmbeddingBag. The offset is a tensor of delimiters to represent the beginning index of the individual sequence in the text tensor. Label is a tensor saving the labels of indidividual text entries. # # # + from torch.utils.data import DataLoader device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def collate_batch(batch): label_list, text_list, offsets = [], [], [0] for (_label, _text) in batch: label_list.append(label_pipeline(_label)) processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64) text_list.append(processed_text) offsets.append(processed_text.size(0)) label_list = torch.tensor(label_list, dtype=torch.int64) offsets = torch.tensor(offsets[:-1]).cumsum(dim=0) text_list = torch.cat(text_list) return label_list.to(device), text_list.to(device), offsets.to(device) train_iter = AG_NEWS(split='train') dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch) # - # The model is composed of the [nn.EmbeddingBag](https://pytorch.org/docs/stable/nn.html?highlight=embeddingbag#torch.nn.EmbeddingBag) layer plus a linear layer for the classification purpose. nn.EmbeddingBag with the default mode of “mean” computes the mean value of a “bag” of embeddings. Although the text entries here have different lengths, `nn.EmbeddingBag` module requires no padding here since the text lengths are saved in offsets. # # Additionally, since `nn.EmbeddingBag` accumulates the average across the embeddings on the fly, `nn.EmbeddingBag` can enhance the performance and memory efficiency to process a sequence of tensors. 
# ![img](img/text_sentiment_pytorch.png) # + from torch import nn class TextClassificationModel(nn.Module): def __init__(self, vocab_size=95812, embed_dim=64, num_class=4): super(TextClassificationModel, self).__init__() self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True) self.fc = nn.Linear(embed_dim, num_class) self.init_weights() def init_weights(self): initrange = 0.5 self.embedding.weight.data.uniform_(-initrange, initrange) self.fc.weight.data.uniform_(-initrange, initrange) self.fc.bias.data.zero_() def forward(self, text, offsets): embedded = self.embedding(text, offsets) return self.fc(embedded) # - # ### Initiate an instance # The `AG_NEWS` dataset has four labels and therefore the number of classes is four. # # 1. World # 2. Sports # 3. Business # 4. Sci/Tec # # We build a model with the embedding dimension of 64. The vocab size is equal to the length of the vocabulary instance. The number of classes is equal to the number of labels, # # train_iter = AG_NEWS(split='train') num_class = len(set([label for (label, text) in train_iter])) vocab_size = len(vocab) emsize = 64 model = TextClassificationModel(vocab_size, emsize, num_class).to(device) print("vocab size:", vocab_size) print("emsize:", emsize) print("num_classes:", num_class) # ### Define functions to train the model and evaluate results. 
# # + import time def train(dataloader): model.train() total_acc, total_count = 0, 0 log_interval = 500 start_time = time.time() for idx, (label, text, offsets) in enumerate(dataloader): optimizer.zero_grad() predited_label = model(text, offsets) loss = criterion(predited_label, label) loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1) optimizer.step() total_acc += (predited_label.argmax(1) == label).sum().item() total_count += label.size(0) if idx % log_interval == 0 and idx > 0: elapsed = time.time() - start_time print('| epoch {:3d} | {:5d}/{:5d} batches ' '| accuracy {:8.3f}'.format(epoch, idx, len(dataloader), total_acc/total_count)) total_acc, total_count = 0, 0 start_time = time.time() def evaluate(dataloader): model.eval() total_acc, total_count = 0, 0 with torch.no_grad(): for idx, (label, text, offsets) in enumerate(dataloader): predited_label = model(text, offsets) loss = criterion(predited_label, label) total_acc += (predited_label.argmax(1) == label).sum().item() total_count += label.size(0) return total_acc/total_count # - # ### Split the dataset and run the model # Since the original `AG_NEWS` has no valid dataset, we split the training dataset into train/valid sets with a split ratio of 0.95 (train) and 0.05 (valid). Here we use [torch.utils.data.dataset.random_split](https://pytorch.org/docs/stable/data.html?highlight=random_split#torch.utils.data.random_split) function in PyTorch core library. # # [CrossEntropyLoss](https://pytorch.org/docs/stable/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss) criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in a single class. It is useful when training a classification problem with C classes. [SGD](https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html) implements stochastic gradient descent method as the optimizer. The initial learning rate is set to 5.0. 
[StepLR](https://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#StepLR) is used here to adjust the learning rate through epochs. # # # + from torch.utils.data.dataset import random_split # Hyperparameters EPOCHS = 10 # epoch LR = 5 # learning rate BATCH_SIZE = 64 # batch size for training criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.SGD(model.parameters(), lr=LR) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1) total_accu = None train_iter, test_iter = AG_NEWS() train_dataset = list(train_iter) test_dataset = list(test_iter) num_train = int(len(train_dataset) * 0.95) split_train_, split_valid_ = \ random_split(train_dataset, [num_train, len(train_dataset) - num_train]) train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch) valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch) test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch) for epoch in range(1, EPOCHS + 1): epoch_start_time = time.time() train(train_dataloader) accu_val = evaluate(valid_dataloader) if total_accu is not None and total_accu > accu_val: scheduler.step() else: total_accu = accu_val print('-' * 59) print('| end of epoch {:3d} | time: {:5.2f}s | ' 'valid accuracy {:8.3f} '.format(epoch, time.time() - epoch_start_time, accu_val)) print('-' * 59) # - # ### Evaluate the model with test dataset # Checking the results of the test dataset… print('Checking the results of test dataset.') accu_test = evaluate(test_dataloader) print('test accuracy {:8.3f}'.format(accu_test)) # Test on a random news # Use the best model so far and test a golf news. 
# + ag_news_label = {1: "World", 2: "Sports", 3: "Business", 4: "Sci/Tec"} def predict(text, text_pipeline): with torch.no_grad(): text = torch.tensor(text_pipeline(text)) output = model(text, torch.tensor([0])) return output.argmax(1).item() + 1 ex_text_str = "<NAME>. – Four days ago, <NAME> was \ enduring the season’s worst weather conditions on Sunday at The \ Open on his way to a closing 75 at Royal Portrush, which \ considering the wind and the rain was a respectable showing. \ Thursday’s first round at the WGC-FedEx St. Jude Invitational \ was another story. With temperatures in the mid-80s and hardly any \ wind, the Spaniard was 13 strokes better in a flawless round. \ Thanks to his best putting performance on the PGA Tour, Rahm \ finished with an 8-under 62 for a three-stroke lead, which \ was even more impressive considering he’d never played the \ front nine at TPC Southwind." model = model.to("cpu") print("This is a %s news" %ag_news_label[predict(ex_text_str, text_pipeline)]) # - # ## Save the model # A PyTorch model requires slightly more pieces of information than a TensorFlow model to run correctly. # 1. The serialized parameters (weights and biases) of the model. # 2. The model class, including a prediction method. # # # #### Saving Model Parameters # The `model` variable contains parameters of the model, and PyTorch exposes a handy torch method called `save` to allow us to extract and store these numbers. # # *Note that if the definition of TextClassificationModel changed since training the model, you will get [a pickle error](https://stackoverflow.com/questions/1412787/picklingerror-cant-pickle-class-decimal-decimal-its-not-the-same-object). To fix this, restart the notebook and run all the cells again.* # torch.save(model, "my_model.pt") # #### Saving Model Class # For the model class, we save the model definition in a class in a separate file. 
#
# This model class must include a `predict_single` method that can take in your native input format (e.g. text, image bytes, number array) and return a human readable output. You can think of the `predict_single` method as where the preprocess, predict, and postprocess happens.
#
# Here is a simple file that contains a more complete version of our class definition above. Note how `predict_single` takes in a text format (`str`) and returns a string representation of the predicted class.

# Source text written to model_file.py for torch-model-archiver.
# NOTE(review): 'import torch.tensor' inside this file worked on the torch
# version used here but was removed in torch >= 1.11 — confirm before upgrading.
model_file = """
from torch import nn
import torch.tensor
from torchtext.datasets import AG_NEWS
from torchtext.data.utils import get_tokenizer
from collections import Counter
from torchtext.vocab import Vocab

tokenizer = get_tokenizer("basic_english")
train_iter = AG_NEWS(split="train")
counter = Counter()
for (label, line) in train_iter:
    counter.update(tokenizer(line))
vocab = Vocab(counter, min_freq=1)
text_pipeline = lambda x: [vocab[token] for token in tokenizer(x)]
label_pipeline = lambda x: int(x) - 1

ag_news_label = {1: "World", 2: "Sports", 3: "Business", 4: "Sci/Tec"}

class TextClassificationModel(nn.Module):

    def __init__(self, vocab_size=95812, embed_dim=64, num_class=4):
        super(TextClassificationModel, self).__init__()
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()
        self.tokenizer = text_pipeline

    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()

    def forward(self, tokens, offset):
        embedded = self.embedding(tokens, offset)
        return self.fc(embedded)

    def predict_single(self, input: str):
        tokenized = torch.tensor(text_pipeline(input))
        output = self.forward(tokenized, torch.tensor([0]))
        prediction = output.argmax(1).item() + 1
        return ag_news_label[prediction]
"""

# Persist the class definition so torch-model-archiver can package it.
with open("model_file.py", "w") as fout:
    fout.write(model_file)

# ! cat model_file.py

# Try to load the model from disk

# ---
# Now we are ready to create a model archive. For this, we will use the official [`torch-model-archiver`](https://github.com/pytorch/serve/blob/master/model-archiver/README.md#installation) library. The library also requires a `handler` argument, which is the entrypoint function for inference that the library will serve.
#
# A few handlers come out of the box with pytorch:
# - image_classifier
# - object_detector
# - text_classifier
# - image_segmenter
#
# See [handler](https://github.com/pytorch/serve/blob/master/model-archiver/README.md#handler) for more options.
#

# ! torch-model-archiver \
#     --model-file model_file.py \
#     --version 1.0 \
#     --model-name model \
#     --serialized-file my_model.pt \
#     --handler text_classifier \
#     --force

# ! cat model_file.py

type(model.state_dict())

# Save only the parameters (state dict), not the pickled class.
torch.save(model.state_dict(), "model_store.pt")

# +
# NOTE(review): this cell duplicates the class written into model_file above
# so the state dict can be loaded in a fresh session — keep the two in sync.
from torch import nn
import torch.tensor
from torchtext.datasets import AG_NEWS
from torchtext.data.utils import get_tokenizer
from collections import Counter
from torchtext.vocab import Vocab

tokenizer = get_tokenizer("basic_english")
train_iter = AG_NEWS(split="train")
counter = Counter()
for (label, line) in train_iter:
    counter.update(tokenizer(line))
vocab = Vocab(counter, min_freq=1)
text_pipeline = lambda x: [vocab[token] for token in tokenizer(x)]
label_pipeline = lambda x: int(x) - 1

ag_news_label = {1: "World", 2: "Sports", 3: "Business", 4: "Sci/Tec"}

class TextClassificationModel(nn.Module):

    def __init__(self, vocab_size=95812, embed_dim=64, num_class=4):
        super(TextClassificationModel, self).__init__()
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=True)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()
        self.tokenizer = text_pipeline

    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()

    def forward(self, tokens, offset):
        embedded = self.embedding(tokens, offset)
        return self.fc(embedded)

    def predict_single(self, input: str):
        tokenized = torch.tensor(text_pipeline(input))
        output = self.forward(tokenized, torch.tensor([0]))
        prediction = output.argmax(1).item() + 1
        return ag_news_label[prediction]
# -

import torch

# Rebuild the model with default hyperparameters and restore the trained weights.
model = TextClassificationModel()
model.load_state_dict(torch.load("model_store.pt"))
model.eval()

# Upload the model and its class file to a locally running easytensor server.
import easytensor
from easytensor.pytorch import upload_model as up_mod
easytensor.set_base_url("http://127.0.0.1:8000")
up_mod("test_pt_final", model, "model_file.py")
docs/examples/PyTorch Text Classifier With Model Archiver.ipynb