text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` #!/usr/bin/python # interpolate scalar gradient onto nedelec space import petsc4py import sys petsc4py.init(sys.argv) from petsc4py import PETSc from dolfin import * # from MatrixOperations import * import numpy as np import PETScIO as IO import common import scipy import scipy.io import time import scipy.sparse as sp import BiLinear as forms import IterOperations as Iter import MatrixOperations as MO import CheckPetsc4py as CP import Solver as S import MHDmatrixPrecondSetup as PrecondSetup import NSprecondSetup import MHDprec as MHDpreconditioner import gc import MHDmulti import MHDmatrixSetup as MHDsetup import HartmanChannel import ExactSol # import matplotlib.pyplot as plt #@profile m = 2 def PETScToScipy(A): data = A.getValuesCSR() sparseSubMat = sp.csr_matrix(data[::-1], shape=A.size) return sparseSubMat def savePETScMat(A, name1, name2): A_ = PETScToScipy(A) scipy.io.savemat(name1, mdict={name2: A_}) set_log_active(False) errL2u = np.zeros((m-1, 1)) errH1u = np.zeros((m-1, 1)) errL2p = np.zeros((m-1, 1)) errL2b = np.zeros((m-1, 1)) errCurlb = np.zeros((m-1, 1)) errL2r = np.zeros((m-1, 1)) errH1r = np.zeros((m-1, 1)) l2uorder = np.zeros((m-1, 1)) H1uorder = np.zeros((m-1, 1)) l2porder = np.zeros((m-1, 1)) l2border = np.zeros((m-1, 1)) Curlborder = np.zeros((m-1, 1)) l2rorder = np.zeros((m-1, 1)) H1rorder = np.zeros((m-1, 1)) NN = np.zeros((m-1, 1)) DoF = np.zeros((m-1, 1)) Velocitydim = np.zeros((m-1, 1)) Magneticdim = np.zeros((m-1, 1)) Pressuredim = np.zeros((m-1, 1)) Lagrangedim = np.zeros((m-1, 1)) Wdim = np.zeros((m-1, 1)) iterations = np.zeros((m-1, 1)) SolTime = np.zeros((m-1, 1)) udiv = np.zeros((m-1, 1)) MU = np.zeros((m-1, 1)) level = np.zeros((m-1, 1)) NSave = np.zeros((m-1, 1)) Mave = np.zeros((m-1, 1)) TotalTime = np.zeros((m-1, 1)) DimSave = np.zeros((m-1, 4)) dim = 2 ShowResultPlots = 'yes' MU[0] = 1e0 for xx in xrange(1, m): print xx level[xx-1] = xx + 0 nn = 2**(level[xx-1]) # Create mesh and define function space nn = int(nn) NN[xx-1] = 
nn/2 L = 10. y0 = 2. z0 = 1. # mesh, boundaries, domains = HartmanChannel.Domain(nn) mesh = UnitSquareMesh(nn, nn) parameters['form_compiler']['quadrature_degree'] = -1 order = 2 parameters['reorder_dofs_serial'] = False Velocity = VectorElement("CG", mesh.ufl_cell(), order) Pressure = FiniteElement("CG", mesh.ufl_cell(), order-1) Magnetic = FiniteElement("N1curl", mesh.ufl_cell(), order-1) Lagrange = FiniteElement("CG", mesh.ufl_cell(), order-1) VelocityF = VectorFunctionSpace(mesh, "CG", order) PressureF = FunctionSpace(mesh, "CG", order-1) MagneticF = FunctionSpace(mesh, "N1curl", order-1) LagrangeF = FunctionSpace(mesh, "CG", order-1) W = FunctionSpace(mesh, MixedElement( [Velocity, Pressure, Magnetic, Lagrange])) Velocitydim[xx-1] = W.sub(0).dim() Pressuredim[xx-1] = W.sub(1).dim() Magneticdim[xx-1] = W.sub(2).dim() Lagrangedim[xx-1] = W.sub(3).dim() Wdim[xx-1] = W.dim() print "\n\nW: ", Wdim[xx-1], "Velocity: ", Velocitydim[xx-1], "Pressure: ", Pressuredim[xx-1], "Magnetic: ", Magneticdim[xx-1], "Lagrange: ", Lagrangedim[xx-1], "\n\n" dim = [W.sub(0).dim(), W.sub(1).dim(), W.sub(2).dim(), W.sub(3).dim()] def boundary(x, on_boundary): return on_boundary FSpaces = [VelocityF, PressureF, MagneticF, LagrangeF] DimSave[xx-1, :] = np.array(dim) kappa = 1.0 Mu_m = 10.0 MU = 1.0 N = FacetNormal(mesh) IterType = 'Full' params = [kappa, Mu_m, MU] n = FacetNormal(mesh) u0, p0, b0, r0, Laplacian, Advection, gradPres, CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D( 4, 1) MO.PrintStr("Seting up initial guess matricies", 2, "=", "\n\n", "\n") BCtime = time.time() BC = MHDsetup.BoundaryIndices(mesh) MO.StrTimePrint("BC index function, time: ", time.time()-BCtime) Hiptmairtol = 1e-6 HiptmairMatrices = PrecondSetup.MagneticSetup( mesh, Magnetic, Lagrange, b0, r0, Hiptmairtol, params) MO.PrintStr("Setting up MHD initial guess", 5, "+", "\n\n", "\n\n") F_NS = -MU*Laplacian + Advection + gradPres - kappa*NS_Couple if kappa == 0.0: F_M = Mu_m*CurlCurl + gradR - 
kappa*M_Couple else: F_M = Mu_m*kappa*CurlCurl + gradR - kappa*M_Couple du = TrialFunction(W) (v, q, c, s) = TestFunctions(W) u, p, b, r = split(du) U = Function(W) U.vector()[:] = 1. u_k, p_k, b_k, r_k = split(U) if kappa == 0.0: m11 = params[1]*inner(curl(b), curl(c))*dx else: m11 = params[1]*params[0]*inner(curl(b), curl(c))*dx m21 = inner(c, grad(r))*dx m12 = inner(b, grad(s))*dx a11 = params[2]*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k), v)*dx + ( 1./2)*div(u_k)*inner(u, v)*dx - (1./2)*inner(u_k, n)*inner(u, v)*ds a12 = -div(v)*p*dx a21 = -div(u)*q*dx CoupleT = params[0]*(v[0]*b_k[1]-v[1]*b_k[0])*curl(b)*dx Couple = -params[0]*(u[0]*b_k[1]-u[1]*b_k[0])*curl(c)*dx Ftilde = inner((grad(u_k)*u), v)*dx + (1./2)*div(u) * \ inner(u_k, v)*dx - (1./2)*inner(u, n)*inner(u_k, v)*ds Mtilde = -params[0]*(u_k[0]*b[1]-u_k[1]*b[0])*curl(c)*dx Ctilde = params[0]*(v[0]*b[1]-v[1]*b[0])*curl(b_k)*dx a = m11 + m12 + m21 + a11 + a21 + a12 + \ Couple + CoupleT + Ftilde + Mtilde + Ctilde aa = m11 + m12 + m21 + a11 + a21 + a12 + Couple + CoupleT if kappa == 0.0: m11 = params[1]*inner(curl(b_k), curl(c))*dx else: m11 = params[1]*params[0]*inner(curl(b_k), curl(c))*dx m21 = inner(c, grad(r_k))*dx m12 = inner(b_k, grad(s))*dx a11 = params[2]*inner(grad(v), grad(u_k))*dx + inner((grad(u_k)*u_k), v)*dx + ( 1./2)*div(u_k)*inner(u_k, v)*dx - (1./2)*inner(u_k, n)*inner(u_k, v)*ds a12 = -div(v)*p_k*dx a21 = -div(u_k)*q*dx CoupleT = params[0]*(v[0]*b_k[1]-v[1]*b_k[0])*curl(b_k)*dx Couple = -params[0]*(u_k[0]*b_k[1]-u_k[1]*b_k[0])*curl(c)*dx Lns = inner(v, F_NS)*dx Lmaxwell = inner(c, F_M)*dx L = Lns + Lmaxwell - (m11 + m12 + m21 + a11 + a21 + a12 + Couple + CoupleT) J = derivative(L, U) A, b = assemble_system(a, L) A, b = CP.Assemble(A, b) J = assemble(J) J = CP.Assemble(J) savePETScMat(J, "J", "J") savePETScMat(A, "A", "A") # print J # J = assemble(J) # J = CP.Assemble(J) # x = Iter.u_prev(u_k, p_k, b_k, r_k) # KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup( # 
PressureF, MU, mesh) # kspFp, Fp = PrecondSetup.FluidNonLinearSetup(PressureF, MU, u_k, mesh) # F = Lns + Lmaxwell - aa # Hiptmairtol = 1e-4 # HiptmairMatrices = PrecondSetup.MagneticSetup( # mesh, Magnetic, Lagrange, b0, r0, Hiptmairtol, params) # IS = MO.IndexSet(W, 'Blocks') # ones = Function(PressureF) # ones.vector()[:] = (0*ones.vector().array()+1) # eps = 1.0 # error measure ||u-u_k|| # tol = 1.0E-4 # tolerance # iter = 0 # iteration counter # maxiter = 1 # max no of iterations allowed # SolutionTime = 0 # outer = 0 # # parameters['linear_algebra_backend'] = 'uBLAS' # u_is = PETSc.IS().createGeneral(W.sub(0).dofmap().dofs()) # p_is = PETSc.IS().createGeneral(W.sub(1).dofmap().dofs()) # b_is = PETSc.IS().createGeneral(W.sub(2).dofmap().dofs()) # r_is = PETSc.IS().createGeneral(W.sub(3).dofmap().dofs()) # NS_is = PETSc.IS().createGeneral(range(VelocityF.dim()+PressureF.dim())) # M_is = PETSc.IS().createGeneral(range(VelocityF.dim()+PressureF.dim(), W.dim())) # bcu = DirichletBC(W.sub(0), Expression(("0.0", "0.0"), degree=4), boundary) # bcb = DirichletBC(W.sub(2), Expression(("0.0", "0.0"), degree=4), boundary) # bcr = DirichletBC(W.sub(3), Expression(("0.0"), degree=4), boundary) # bcs = [bcu, bcb, bcr] # U = Function(W) # the most recently computed solution # F = action(F, U) # # print assemble(dolfin.Jacobian(F)) # # OuterTol = 1e-5 # # InnerTol = 1e-5 # # NSits = 0 # # Mits = 0 # # TotalStart = time.time() # # SolutionTime = 0 # # errors = np.array([]) # # bcu1 = DirichletBC(VelocityF, Expression( # # ("0.0", "0.0"), degree=4), boundary) # # U = x # # while eps > tol and iter < maxiter: # # iter += 1 # # MO.PrintStr("Iter "+str(iter), 7, "=", "\n\n", "\n\n") # # A, b = assemble_system(aa, L) # # A, b = CP.Assemble(A, b) # # savePETScMat(J, "J", "J") # # savePETScMat(A, "A", "A") # # ss J = assemble(J) J = CP.Assemble(J) savePETScMat(J, "J", "J") savePETScMat(A, "A", "A") print problem.jacobian_form() solve(problem) form = problem.jacobian_form() Fw = 
action(F,U); assemble(Fw) problem.has_jacobian() ```
github_jupyter
#### 1 - 3 summarized below: #### Lineraly Seperable Experiment - **Training data:** X training points were randomly generated (values bounded between -100 and 100). Y training labels were generated by applying a randomly generated target function to the X training points. - **Test data:** X test points were randomly generated (values bounded between -100 and 100). Y test labels were generated by applying the same target function to the X test points. #### Non-lineraly Separable Experiment - **Training data:** X training points were randomly generated (values bounded between -100 and 100). Y training labels randomly generated (-1 and 1). Then, the randomly generated target function was applied with a probaility of .75 to create 'somewhat' lineraly separable data. - **Test data:** X test points were randomly generated (values bounded between -100 and 100). Y test labels randomly generated (-1 and 1). Then, the randomly generated target function was applied with a probaility of .75 to create 'somewhat' lineraly separable data. **4.** The initial choice of the weights is random. #### Answers to questions 5 - 8 can be seen in the statistics (and graphs) on pages 3-4. #### Variation Results 1. The weights that give the lowest in-sample error rate is best. 2. The step size correlates with the amount the vector changes. i.e., A larger step size makes the vector adjustment larger. 3. It is best to consider training points that reduce the error rate the most first. 
``` %matplotlib inline import numpy as np import random from perceptron_learning import Perceptron from perceptron_learning import two_d_vector as tdv def main(): bound = 100 # the value that the x and y values are bounded by num_pts = 80 num_train_pts = 50 perceptron = Perceptron(alpha=0.005) target_fn = np.random.uniform(-10, 10, 3) x = get_random_x(num_pts, bound) x_train, x_test = x[:num_train_pts, :], x[num_train_pts:, :] y_test = np.sign(np.dot(x_test, target_fn)) print('---------- Linearly Separable Data ----------') perceptron.fit(x_train, target_fn=target_fn) predictions = perceptron.predict(x_test) print('{:28s}: y = {:.2f}x + {:.2f}'.format('Target Function', tdv.get_slope(target_fn), tdv.get_y_intercept(target_fn))) print_error(predictions, y_test) print() y = get_y(x[:, 1:], target_fn) y_train, y_test = y[:num_train_pts], y[num_train_pts:] print('-------- Non-Linearly Separable Data --------') perceptron.fit(x_train, y_train=y_train) predictions = perceptron.predict(x_test) print_error(predictions, y_test) perceptron.visualize_training() def print_error(predictions, y_test): error = np.sum(np.not_equal(predictions, y_test)) / y_test.shape[0] print('{0:28s}: {1:.2f}%'.format('Out of Sample (Test) Error', error * 100)) def get_y(training_pts, w_target): # Have y be somewhat linearly separable y = np.random.choice([-1, 1], training_pts.shape[0]) for i, pt in enumerate(training_pts): pct_chance = .75 pt_above_line = tdv.pt_above_line(pt, w_target) if pt_above_line and random.random() < pct_chance: y[i] = 1 if not pt_above_line and random.random() < pct_chance: y[i] = -1 return y def get_random_x(num_points, bound): pts = get_random_pts(num_points, bound) x = np.insert(pts, 0, 1, axis=1) # Let x0 equal 1 return x def get_random_pts(num_points, bound): return np.random.randint(-bound, bound, size=(num_points, 2)) if __name__ == '__main__': main() """ two_d_vector.py Functions that operate on 2d vectors. 
w0 (or x0) is a bias "dummy" weight, so even though the vector is 3 dimensional, we call it a 2 dimensional vector. """ import numpy as np from random import uniform def get_perpendicular_vector(w): # Two lines are perpendicular if: m1 * m2 = -1. # The two slopes must be negative reciprocals of each other. m1 = get_slope(w) m2 = -1 / m1 # m2 = - w[1] / w[2] random_num = uniform(0, 10) return np.array([uniform(0, 10), -1 * m2 * random_num, random_num]) def get_line(w, x_bound): x_range = np.array(range(-x_bound, x_bound)) # Formula for line is: w1x1 + w2x2 + w0 = 0 # we let x2 = y, and x1 = x, then solve for y = mx + b slope = get_slope(w) y_intercept = get_y_intercept(w) y_line = (slope * x_range) + y_intercept return x_range, y_line def pt_above_line(pt, w): return pt[1] > get_slope(w) * pt[0] + get_y_intercept(w) def get_y_intercept(w): return - w[0] / w[2] def get_slope(w): return - w[1] / w[2] """ DataVisualizer.py """ import numpy as np import matplotlib.pyplot as plt from . import two_d_vector as tdv class DataVisualizer: def __init__(self, title, subtitle, x_bound, y_bound): plt.style.use('seaborn-whitegrid') self.fig, self.ax = plt.subplots() self.title = title self.subtitle = subtitle self.x_bound = x_bound self.y_bound = y_bound def setup_axes(self): self.ax.cla() self.fig.canvas.set_window_title(self.subtitle) self.fig.suptitle(self.title, fontsize=18) self.ax.set_title(self.subtitle, fontsize=14) self.ax.set_xlim(-self.x_bound, self.x_bound) self.ax.set_ylim(-self.y_bound, self.y_bound) @staticmethod def red_pts_above_line(pts, w_target, true_classes): pt_above_line = tdv.pt_above_line(pts[0, :], w_target) pt_is_positive_class = true_classes[0] > 0 if pt_above_line and pt_is_positive_class: # positive pt above line return True if not pt_above_line and not pt_is_positive_class: # negative pt below line return True return False def plot_hypothesis(self, pts, true_classes, w_hypothesis, w_target=None): self.setup_axes() self.ax.scatter(x=pts[:, 0], 
y=pts[:, 1], marker='x', color=['r' if sign >= 0 else 'b' for sign in true_classes]) if w_target is not None: x, y = tdv.get_line(w_target, self.x_bound) self.ax.plot(x, y, label='target', color='m') x, y = tdv.get_line(w_hypothesis, self.x_bound) self.ax.plot(x, y, label='hypothesis', color='g') if w_target is not None: if self.red_pts_above_line(pts, w_target, true_classes): self.ax.fill_between(x, y, np.full((1,), self.y_bound), color=(1, 0, 0, 0.15)) self.ax.fill_between(x, y, np.full((1,), -self.y_bound), color=(0, 0, 1, 0.15)) else: self.ax.fill_between(x, y, np.full((1,), self.y_bound), color=(0, 0, 1, 0.15)) self.ax.fill_between(x, y, np.full((1,), -self.y_bound), color=(1, 0, 0, 0.15)) self.ax.legend(facecolor='w', fancybox=True, frameon=True, edgecolor='black', borderpad=1) # plt.pause(0.01) @staticmethod def visualize(): plt.show() """ Logger.py """ class Logger: def __init__(self): self.num_iterations = 0 self.num_vector_updates = 0 def print_statistics(self): print('{:28s}: {:}'.format('Number of iterations', self.num_iterations)) print('{:28s}: {:}'.format('Number of vector updates', self.num_vector_updates)) """ Perceptron.py """ import numpy as np from . import two_d_vector as tdv from . import DataVisualizer, Logger class Perceptron: """Uses 'pocket' algorithm to keep best hypothesis in it's 'pocket'""" def __init__(self, alpha): self.alpha = alpha self.best_hypothesis = np.random.uniform(-10, 10, 3) self.lowest_error = float('inf') self.logger = Logger() self.dv = None def fit(self, x_train, y_train=None, target_fn=None): """Fits the model to the training data (class labels) or target function. 
:param x_train: the training data :param y_train: will be passed in in the non-linearly separable case :param target_fn: will be passed in in the linearly separable case :return: None """ self.best_hypothesis = np.random.uniform(-10, 10, 3) self.lowest_error = float('inf') self.logger = Logger() self.dv = get_data_visualizer(target_fn, x_train) if target_fn is not None: y_train = np.sign(np.dot(x_train, target_fn)) self.best_hypothesis = tdv.get_perpendicular_vector(target_fn) pts = x_train[:, 1:] hypothesis = self.best_hypothesis misclassified_pts = predict_and_evaluate(hypothesis, x_train, y_train) while self.logger.num_vector_updates < 100000 and np.sum(misclassified_pts) > 0: for i, misclassified_pt in enumerate(np.nditer(misclassified_pts)): if misclassified_pt: # update rule: w(t + 1) = w(t) + y(t) * x(t) * alpha hypothesis += y_train[i] * x_train[i] * self.alpha these_misclassified_pts = predict_and_evaluate(hypothesis, x_train, y_train) this_error = calculate_error(np.sum(these_misclassified_pts), x_train.shape[0]) if this_error < self.lowest_error: self.best_hypothesis = hypothesis self.lowest_error = this_error self.logger.num_vector_updates += 1 misclassified_pts = predict_and_evaluate(hypothesis, x_train, y_train) self.logger.num_iterations += 1 self.dv.plot_hypothesis(pts, y_train, self.best_hypothesis, target_fn) self.print_fit_statistics() def print_fit_statistics(self): self.logger.print_statistics() print('{:28s}: y = {:.2f}x + {:.2f}'.format('Hypothesis', tdv.get_slope(self.best_hypothesis), tdv.get_y_intercept(self.best_hypothesis))) print('{0:28s}: {1:.2f}%'.format('In Sample (Training) Error', self.lowest_error * 100)) def visualize_training(self): self.dv.visualize() def predict(self, x): return predict(x, self.best_hypothesis) def predict_and_evaluate(hypothesis, x_train, y_train): pred_classes = predict(hypothesis, x_train) misclassified_pts = np.not_equal(pred_classes, y_train) return misclassified_pts def predict(x, hypothesis): return 
np.sign(np.dot(x, hypothesis.T)) def calculate_error(num_misclassified_pts, num_pts): return num_misclassified_pts / float(num_pts) def get_data_visualizer(target_fn, x_train): plot_title = 'Perceptron Learning' if target_fn is not None: plot_subtitle = 'Linearly Separable Training Data' else: plot_subtitle = 'Non-linearly Separable Training Data' x_bound = np.max(np.absolute(x_train[:, 1])) y_bound = np.max(np.absolute(x_train[:, 2])) return DataVisualizer(plot_title, plot_subtitle, x_bound, y_bound) ```
github_jupyter
# Authorise Notebook server to access Earth Engine This notebook is a reproduction of the workflow originally developed by **Datalab**, which describes how to setup a Google Datalab container in your local machine using Docker. You can check out the full tutorial by going to this link: https://developers.google.com/earth-engine/python_install-datalab-local ``` # Code to check the IPython Widgets library. try: import ipywidgets except ImportError: print('The IPython Widgets library is not available on this server.\n' 'Please see https://github.com/jupyter-widgets/ipywidgets ' 'for information on installing the library.') raise print('The IPython Widgets library (version {0}) is available on this server.'.format( ipywidgets.__version__ )) ``` Next, check if the Earth Engine API is available on the server. ``` # Code to check the Earth Engine API library. try: import ee except ImportError: print('The Earth Engine Python API library is not available on this server.\n' 'Please see https://developers.google.com/earth-engine/python_install ' 'for information on installing the library.') raise print('The Earth Engine Python API (version {0}) is available on this server.'.format( ee.__version__ )) ``` Finally, check if the notebook server is authorized to access the Earth Engine backend servers. ``` # Code to check if authorized to access Earth Engine. import io import os import urllib from IPython import display # Define layouts used by the form. 
row_wide_layout = ipywidgets.Layout(flex_flow="row nowrap", align_items="center", width="100%") column_wide_layout = ipywidgets.Layout(flex_flow="column nowrap", align_items="center", width="100%") column_auto_layout = ipywidgets.Layout(flex_flow="column nowrap", align_items="center", width="auto") form_definition = {'form': None} response_box = ipywidgets.HTML('') def isAuthorized(): try: ee.Initialize() test = ee.Image(0).getInfo() except: return False return True def ShowForm(auth_status_button, instructions): """Show a form to the user.""" form_definition['form'] = ipywidgets.VBox([ auth_status_button, instructions, ipywidgets.VBox([response_box], layout=row_wide_layout) ], layout=column_wide_layout) display.display(form_definition.get('form')) def ShowAuthorizedForm(): """Show a form for a server that is currently authorized to access Earth Engine.""" def revoke_credentials(sender): credentials = ee.oauth.get_credentials_path() if os.path.exists(credentials): os.remove(credentials) response_box.value = '' Init() auth_status_button = ipywidgets.Button( layout=column_wide_layout, disabled=True, description='The server is authorized to access Earth Engine', button_style='success', icon='check' ) instructions = ipywidgets.Button( layout = row_wide_layout, description = 'Click here to revoke authorization', disabled = False, ) instructions.on_click(revoke_credentials) ShowForm(auth_status_button, instructions) def ShowUnauthorizedForm(): """Show a form for a server that is not currently authorized to access Earth Engine.""" auth_status_button = ipywidgets.Button( layout=column_wide_layout, button_style='danger', description='The server is not authorized to access Earth Engine', disabled=True ) auth_link = ipywidgets.HTML( '<a href="{url}" target="auth">Open Authentication Tab</a><br/>' .format(url=ee.oauth.get_authorization_url() ) ) instructions = ipywidgets.VBox( [ ipywidgets.HTML( 'Click on the link below to start the authentication and authorization process. 
' 'Once you have received an authorization code, use it to replace the ' 'REPLACE_WITH_AUTH_CODE in the code cell below and run the cell.' ), auth_link, ], layout=column_auto_layout ) ShowForm(auth_status_button, instructions) def Init(): # If a form is currently displayed, close it. if form_definition.get('form'): form_definition['form'].close() # Display the appropriate form according to whether the server is authorized. if isAuthorized(): ShowAuthorizedForm() else: ShowUnauthorizedForm() Init() ``` If the server **is authorized**, you do not need to run the next code cell. If the server **is not authorized**: 1. Copy the authentication code generated in the previous step. 2. Replace the REPLACE_WITH_AUTH_CODE string in the cell below with the authentication code. 3. Run the code cell to save authentication credentials. ``` auth_code = 'REPLACE_WITH_AUTH_CODE' response_box = ipywidgets.HTML('') try: token = ee.oauth.request_token(auth_code.strip()) ee.oauth.write_token(token) if isAuthorized(): Init() else: response_box.value = '<font color="red">{0}</font>'.format( 'The account was authenticated, but does not have permission to access Earth Engine.' ) except Exception as e: response_box.value = '<font color="red">{0}</font>'.format(e) response_box # Code to display an Earth Engine generated image. from IPython.display import Image url = ee.Image("CGIAR/SRTM90_V4").getThumbUrl({'min':0, 'max':3000}) Image(url=url) ```
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Eager modunun ana hatlari <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/tr/r1/tutorials/eager/eager_basics.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/tr/r1/tutorials/eager/eager_basics.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> Bu kitapcikta TensorFlow kullanarak konuya giris yapacagiz. Asagidaki konulari isleyecegiz: * Gerekli paketleri iceri aktarma * Tensorlari olusturma ve kullanma * GPU hizlandirmayi kullanmak * Veri setleri ## TensorFlow'u iceri alalim 'tensorflow' modulunu iceri alalim ver eager modunu secelim. Eager modu, TensorFlow'a detaylarini daha sonra aciklayacagimiz etkilesimli bir arayuz saglar. ``` from __future__ import absolute_import, division, print_function try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow.compat.v1 as tf ``` ## Tensorlar Tensor kisaca cok boyutlu bir dizidir. NumPy'deki 'ndarray' nesneleri gibi, `Tensor` nesnesinin de bir veri turu ve sekli vardir. Ayrica Tensorlar GPU gibi hizlandirilmis hafizada bulunabilirler. 
TensorFlow, Tensorlari olusturmak ve kullanmak icin zengin islemlere sahiptir ([tf.add](https://www.tensorflow.org/api_docs/python/tf/add), [tf.matmul](https://www.tensorflow.org/api_docs/python/tf/matmul), [tf.linalg.inv](https://www.tensorflow.org/api_docs/python/tf/linalg/inv) etc.). Bu islemler Python tiplerini otomatik olarak degistirirler. Ornegin: ``` print(tf.add(1, 2)) print(tf.add([1, 2], [3, 4])) print(tf.square(5)) print(tf.reduce_sum([1, 2, 3])) print(tf.encode_base64("hello world")) # Islec asiri yuklenmesi de desteklenir print(tf.square(2) + tf.square(3)) ``` Her Tensor'un bir sekli ve veri turu vardir ``` x = tf.matmul([[1]], [[2, 3]]) print(x.shape) print(x.dtype) ``` NumPy dizileri ve TensorFlow Tensorlari arasindaki en belirgin farklar sunlardir: 1. Tensorlar hizlandirilmis hafizalar tarafindan desteklenebilr (GPU, TPU gibi). 2. Tensorlar degistirilemez. ### NumPy Uyumlulugu TensorFlow Tensorlari ile NumPy 'ndarray'leri arasindaki donusum cok basittir: * TensorFlow islemleri otomatik olarak NumPy ndarray'lerini Tensorlara donusturur. * NumPy islemleri de otomatik olarak Tensorlari NumPy ndarray'lerine donusturur. '.numpy()' metodunu kullanarak Tensorlari belirgin sekilde NumPy ndarray'lerine donusturebilirsiniz. Tensorlar ve 'ndarray'ler temelde mumkun oldugunca ayni sekilde tanimlandigi icin bu donusturmeler ucuzdur. Fakat, NumPy dizileri her zaman ana hafizada calisirken Tensorlar GPU hafizasini da kullanabildigi icin her zaman benzer sekilde tanimlanamazlar ve donusturme isleminde GPU'dan ana hafizaya kopyalama da bulunur. 
``` import numpy as np ndarray = np.ones([3, 3]) print("TensorFlow operations convert numpy arrays to Tensors automatically") tensor = tf.multiply(ndarray, 42) print(tensor) print("And NumPy operations convert Tensors to numpy arrays automatically") print(np.add(tensor, 1)) print("The .numpy() method explicitly converts a Tensor to a numpy array") print(tensor.numpy()) ``` ## GPU hizlandirmasi Hesaplamalar icin GPU kullanarak bircok TensorFlow islemleri hizlandirilabilir. TensorFlow bir islem icin, ek aciklamaya gerek duymadan, otomatik olarak GPU ya da CPU kullanimina karar verir (ve gerektiginde tensorlari GPU ve CPU hafizalarina kopyalar). Bir islem sonucu olusan tensorlar o islem hangi hafizada yurutulduyse o hafizaya kopyalanir. Ornegin: ``` x = tf.random_uniform([3, 3]) print("Is there a GPU available: "), print(tf.test.is_gpu_available()) print("Is the Tensor on GPU #0: "), print(x.device.endswith('GPU:0')) ``` ### Aygit Isimleri `Tensor.device` ozelligi tensorlarin bulundugu aygitin tam adini dizgi olarak temin eder. Bu dizgide bircok detay bulunmaktadir: programin calistigi anasistemin bulundugu agin taniticisi ve anasistemdeki aygit. Bunlar TensorFlow programlarinin dagitiminda gerekli olan bilgilerdir. Eger tensor sistemdeki 'N'inci GPU'ya yerlestirilmisse bu dizgi `GPU:<N>` ile biter. ### Belirtilmis Aygit Yerlestirilmesi TensorFlow'da "yerlestirme" terimi islemlerin uygulama sirasinda sistemde tek tek nasil atandigi (yerlestirildigi) anlaminda kullanilmistir. Yukarida da bahsettigimiz gibi, eger ozellikle belirtilmemisse TensorFlow bir islemi nerde calistiracagina otomatik olarak karar verir ve gerekirse tensorlari oraya kopyalar. Fakat, TensorFlow islemleri 'tf.device' baglam yoneticisi kullanilarak belirli aygitlara yerlestirilebilir. 
Ornegin: ``` import time def time_matmul(x): start = time.time() for loop in range(10): tf.matmul(x, x) result = time.time()-start print("10 loops: {:0.2f}ms".format(1000*result)) # CPU ustunde zorla calistirma print("On CPU:") with tf.device("CPU:0"): x = tf.random_uniform([1000, 1000]) assert x.device.endswith("CPU:0") time_matmul(x) # Eger mumkunse GPU ustunde zorla calistirma #0 if tf.test.is_gpu_available(): with tf.device("GPU:0"): # Or GPU:1 for the 2nd GPU, GPU:2 for the 3rd etc. x = tf.random_uniform([1000, 1000]) assert x.device.endswith("GPU:0") time_matmul(x) ``` ## Veri setleri Simdi modelimize veri akimini saglamak icin [`tf.data.Dataset` API](https://www.tensorflow.org/r1/guide/datasets)'sini nasil kullanacagimizi gorecegiz: * `Dataset`i olusturalim. * Eager modunda `Dataset`in yinelenmesi. Modelimizin egitim ve degerlendirme dongulerine verilen kompleks girdi hatlarini 'Dataset' API'si ile basit ve tekrar kullanilabilir parcalardan olusturmanizi tavsiye ederiz. 'Dataset' nesnesi olusturma API'si eager modunda iken TensorFlow graph'taki ile aynidir, fakat veri setindeki elemanlarin yinelenmesi islemi biraz daha basittir. 'tf.data.Dataset' nesnesi ustunde direk olarak Python yinelemesi yapabildiginiz icin `tf.data.Iterator` nesnesi olusturmaniza gerek yoktur. Sonuc olarak, eger eager modunu kullaniyorsaniz, [TensorFlow Rehberi](https://www.tensorflow.org/r1/guide/datasets)'nde anlatilan yineleme gereksizdir. ### `Dataset` kaynagi olusturalim Buradaki fonksiyonlardan birini [`Dataset.from_tensors`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensors), [`Dataset.from_tensor_slices`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices) ya da kutuklerden okunan nesneleri [`TextLineDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TextLineDataset) veya [`TFRecordDataset`](https://www.tensorflow.org/api_docs/python/tf/data/TFRecordDataset) kullanarak _source_ dataset olusturabiliriz. 
[TensorFlow Rehberi](https://www.tensorflow.org/r1/guide/datasets#reading_input_data)'nde daha detayli bilgi bulabilirsiniz. ``` ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]) # CSV kutugunu olusturalim import tempfile _, filename = tempfile.mkstemp() with open(filename, 'w') as f: f.write("""Line 1 Line 2 Line 3 """) ds_file = tf.data.TextLineDataset(filename) ``` ### Transformations (donusumler) uygulayalim Veri seti kayitlarini donusturmek icin transformations (donusumler) fonksiyonlarini kullanabiliriz: ornegin [`map`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map), [`batch`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch), [`shuffle`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle). `tf.data.Dataset` API dokumanlari hakkinda daha fazla bilgi icin [buraya bakiniz](https://www.tensorflow.org/api_docs/python/tf/data/Dataset). ``` ds_tensors = ds_tensors.map(tf.square).shuffle(2).batch(2) ds_file = ds_file.batch(2) ``` ### Yineleme Eager modunda 'Dataset' nesneleri yinelemeleri destekler. Eger TensorFlow 'graphs'taki 'Dataset' kullanimina asina iseniz, `Dataset.make_one_shot_iterator()` ya da `get_next()` kullanimina gerek olmadigina lutfen dikkat ediniz. ``` print('Elements of ds_tensors:') for x in ds_tensors: print(x) print('\nElements in ds_file:') for x in ds_file: print(x) ```
github_jupyter
``` import scipy as sp import numpy as np import time try: from localgraphclustering import * except: # when the package is not installed, import the local version instead. # the notebook must be placed in the original "notebooks/" folder sys.path.append("../") from localgraphclustering import * import time import networkx as nx import random import statistics as stat_ ``` ## Load data ``` g = GraphLocal('../datasets/sfld_brown_et_al_amidohydrolases_protein_similarities_for_beh.graphml','graphml',' ') ``` ## TEMP ``` G = nx.read_graphml('../datasets/sfld_brown_et_al_amidohydrolases_protein_similarities_for_beh.graphml') # groups = np.loadtxt('./datasets/ppi_mips.class', dtype = 'float') groups = np.loadtxt('../datasets/sfld_brown_et_al_amidohydrolases_protein_similarities_for_beh_ground_truth.csv', dtype = 'str') groups_by_id = dict() for node in groups: groups_by_id[node[0]] = node[1] ids_clusters = set() for node in groups: ids_clusters.add(node[1]) ids_clusters = list(ids_clusters) ground_truth_clusters_by_id = dict() for node in groups: ground_truth_clusters_by_id[node[1]] = [] for node in groups: ground_truth_clusters_by_id[node[1]].append(node[0]) ground_truth_clusters_by_number = dict() for node in groups: ground_truth_clusters_by_number[node[1]] = [] counter = 0 for node in G.node: if node == '1.0': counter += 1 continue what_group = groups_by_id[node] ground_truth_clusters_by_number[what_group].append(counter) counter += 1 all_clusters = [] counter = 0 for cluster_id in ground_truth_clusters_by_number: cluster = ground_truth_clusters_by_number[cluster_id] if len(cluster) == 1 or len(cluster) == 0: counter += 1 continue cond = g.compute_conductance(cluster) counter += 1 if cond <= 0.57 and len(cluster) >= 10: print("Id: ", cluster_id) print("Cluster: ", counter, " conductance: ", cond, "Size: ", len(cluster)) all_clusters.append(cluster) ``` ## Collect data for l1-reg. 
PR (with rounding) ``` nodes = {} external_best_cond_acl = {} external_best_pre_cond_acl = {} vol_best_cond_acl = {} vol_best_pre_acl = {} size_clust_best_cond_acl = {} size_clust_best_pre_acl = {} f1score_best_cond_acl = {} f1score_best_pre_acl = {} true_positives_best_cond_acl = {} true_positives_best_pre_acl = {} precision_best_cond_acl = {} precision_best_pre_acl = {} recall_best_cond_acl = {} recall_best_pre_acl = {} cuts_best_cond_acl = {} cuts_best_pre_acl = {} cuts_acl_ALL = {} ct_outer = 0 number_experiments = 0 for rr in all_clusters: how_many = int(len(rr)) print(how_many) random.seed(4) nodes[ct_outer] = np.random.choice(rr, how_many, replace=False) eigv, lambda_val = fiedler_local(g, rr) lambda_val = np.real(lambda_val) step = (2*lambda_val - lambda_val/2)/4 a_list = np.arange(lambda_val/2,2*lambda_val,step) ct = 0 start = time.time() for node in nodes[ct_outer]: ref_node = [node] max_precision = -1 min_conduct = 100 ct_inner = 0 for a in a_list: if ct_outer <= 1: rho = 0.15/np.sum(g.d[rr]) else: rho = 0.2/np.sum(g.d[rr]) output_pr_clustering = approximate_PageRank(g,ref_node,method = "l1reg-rand", epsilon=1.0e-2, rho=rho, alpha=a, cpp = True, normalize=True,normalized_objective=True) number_experiments += 1 output_pr_sc = sweep_cut(g,output_pr_clustering,cpp=True) S = output_pr_sc[0] cuts_acl_ALL[ct_outer,node,ct_inner] = S size_clust_acl_ = len(S) cond_val_l1pr = g.compute_conductance(S) vol_ = sum(g.d[S]) true_positives_acl_ = set(rr).intersection(S) if len(true_positives_acl_) == 0: true_positives_acl_ = set(ref_node) vol_ = g.d[ref_node][0,0] precision = sum(g.d[np.array(list(true_positives_acl_))])/vol_ recall = sum(g.d[np.array(list(true_positives_acl_))])/sum(g.d[rr]) f1_score_ = 2*(precision*recall)/(precision + recall) if f1_score_ >= max_precision: max_precision = f1_score_ external_best_pre_cond_acl[ct_outer,node] = cond_val_l1pr vol_best_pre_acl[ct_outer,node] = vol_ size_clust_best_pre_acl[ct_outer,node] = size_clust_acl_ 
true_positives_best_pre_acl[ct_outer,node] = true_positives_acl_ precision_best_pre_acl[ct_outer,node] = precision recall_best_pre_acl[ct_outer,node] = recall f1score_best_pre_acl[ct_outer,node] = f1_score_ cuts_best_pre_acl[ct_outer,node] = S if cond_val_l1pr <= min_conduct: min_conduct = cond_val_l1pr external_best_cond_acl[ct_outer,node] = cond_val_l1pr vol_best_cond_acl[ct_outer,node] = vol_ size_clust_best_cond_acl[ct_outer,node] = size_clust_acl_ true_positives_best_cond_acl[ct_outer,node] = true_positives_acl_ precision_best_cond_acl[ct_outer,node] = precision recall_best_cond_acl[ct_outer,node] = recall f1score_best_cond_acl[ct_outer,node] = f1_score_ cuts_best_cond_acl[ct_outer,node] = S print('outer:', ct_outer, 'number of node: ',node, ' completed: ', ct/how_many, ' degree: ', g.d[node]) print('conductance: ', external_best_cond_acl[ct_outer,node], 'f1score: ', f1score_best_cond_acl[ct_outer,node], 'precision: ', precision_best_cond_acl[ct_outer,node], 'recall: ', recall_best_cond_acl[ct_outer,node]) ct += 1 end = time.time() print(" ") print("Outer: ", ct_outer," Elapsed time l1-reg. with rounding: ", end - start) print("Outer: ", ct_outer," Number of experiments: ", number_experiments) print(" ") ct_outer += 1 ``` ## Performance of l1-reg. PR (with rounding). 
``` all_data = [] xlabels_ = [] print('Results for l1-reg with rounding') sum_precision = 0 sum_recall = 0 sum_f1 = 0 sum_conductance = 0 info_ref_nodes = all_clusters l_info_ref_nodes = len(info_ref_nodes) for i in range(l_info_ref_nodes): temp_pre = [] temp_rec = [] temp_f1 = [] temp_conductance = [] for j in all_clusters[i]: temp_pre.append(precision_best_cond_acl[i,j]) temp_rec.append(recall_best_cond_acl[i,j]) temp_f1.append(f1score_best_cond_acl[i,j]) temp_conductance.append(external_best_cond_acl[i,j]) print('Feature:', i,'Precision', stat_.mean(temp_pre), 'Recall', stat_.mean(temp_rec), 'F1', stat_.mean(temp_f1), 'Cond.', stat_.mean(temp_conductance)) ``` ## Function for seed set expansion using BFS ``` import queue def seed_grow_bfs_steps(g,seeds,steps,vol_target,target_cluster): """ grow the initial seed set through BFS until its size reaches a given ratio of the total number of nodes. """ Q = queue.Queue() visited = np.zeros(g._num_vertices) visited[seeds] = 1 for s in seeds: Q.put(s) if isinstance(seeds,np.ndarray): seeds = seeds.tolist() else: seeds = list(seeds) for step in range(steps): for k in range(Q.qsize()): node = Q.get() si,ei = g.adjacency_matrix.indptr[node],g.adjacency_matrix.indptr[node+1] neighs = g.adjacency_matrix.indices[si:ei] for i in range(len(neighs)): if visited[neighs[i]] == 0: visited[neighs[i]] = 1 seeds.append(neighs[i]) Q.put(neighs[i]) vol_seeds = np.sum(g.d[seeds]) vol_target_intersection_input = np.sum(g.d[list(set(target_cluster).intersection(set(seeds)))]) sigma = vol_target_intersection_input/vol_target if sigma > 0.75 or vol_seeds > 0.25*g.vol_G: break vol_seeds = np.sum(g.d[seeds]) vol_target_intersection_input = np.sum(g.d[list(set(target_cluster).intersection(set(seeds)))]) sigma = vol_target_intersection_input/vol_target if sigma > 0.75 or vol_seeds > 0.25*g.vol_G: break vol_seeds = np.sum(g.d[seeds]) vol_target_intersection_input = np.sum(g.d[list(set(target_cluster).intersection(set(seeds)))]) sigma = 
vol_target_intersection_input/vol_target if sigma > 0.75 or vol_seeds > 0.25*g.vol_G: break return seeds ``` ## Collect data for seed set expansion + FlowImprove, try a lot of parameters ``` nodes = {} external_best_cond_flBFS = {} external_best_pre_cond_flBFS = {} vol_best_cond_flBFS = {} vol_best_pre_flBFS = {} size_clust_best_cond_flBFS = {} size_clust_best_pre_flBFS = {} f1score_best_cond_flBFS = {} f1score_best_pre_flBFS = {} true_positives_best_cond_flBFS = {} true_positives_best_pre_flBFS = {} precision_best_cond_flBFS = {} precision_best_pre_flBFS = {} recall_best_cond_flBFS = {} recall_best_pre_flBFS = {} cuts_best_cond_flBFS = {} cuts_best_pre_flBFS = {} cuts_flBFS_ALL = {} ct_outer = 0 number_experiments = 0 for rr in all_clusters: how_many = int(len(rr)) print(how_many) random.seed(4) nodes[ct_outer] = np.random.choice(rr, how_many, replace=False) n_step = 24 vol_target = np.sum(g.d[rr]) ct = 0 start = time.time() for node in nodes[ct_outer]: ref_node = [node] max_precision = -1 min_conduct = 100 seeds = seed_grow_bfs_steps(g,[node],g._num_vertices,vol_target,rr) vol_input = np.sum(g.d[seeds]) vol_graph_minus_input = np.sum(g.d[list(set(range(g._num_vertices)) - set(seeds))]) vol_target_intersection_input = np.sum(g.d[list(set(rr).intersection(set(seeds)))]) gamma = vol_input/vol_graph_minus_input sigma = max(vol_target_intersection_input/vol_target,gamma) delta = min(max((1/3)*(1.0/(1.0/sigma - 1)) - gamma,0),1) S = flow_clustering(g,seeds,method="sl",delta=delta)[0] number_experiments += 1 cuts_flBFS_ALL[ct_outer,node] = S size_clust_flBFS_ = len(S) cond_val_l1pr = g.compute_conductance(S) vol_ = sum(g.d[S]) true_positives_flBFS_ = set(rr).intersection(S) if len(true_positives_flBFS_) == 0: true_positives_flBFS_ = set(ref_node) vol_ = g.d[ref_node][0] precision = sum(g.d[np.array(list(true_positives_flBFS_))])/vol_ recall = sum(g.d[np.array(list(true_positives_flBFS_))])/sum(g.d[rr]) f1_score_ = 2*(precision*recall)/(precision + recall) if f1_score_ 
>= max_precision: max_precision = f1_score_ external_best_pre_cond_flBFS[ct_outer,node] = cond_val_l1pr vol_best_pre_flBFS[ct_outer,node] = vol_ size_clust_best_pre_flBFS[ct_outer,node] = size_clust_flBFS_ true_positives_best_pre_flBFS[ct_outer,node] = true_positives_flBFS_ precision_best_pre_flBFS[ct_outer,node] = precision recall_best_pre_flBFS[ct_outer,node] = recall f1score_best_pre_flBFS[ct_outer,node] = f1_score_ cuts_best_pre_flBFS[ct_outer,node] = S if cond_val_l1pr <= min_conduct: min_conduct = cond_val_l1pr external_best_cond_flBFS[ct_outer,node] = cond_val_l1pr vol_best_cond_flBFS[ct_outer,node] = vol_ size_clust_best_cond_flBFS[ct_outer,node] = size_clust_flBFS_ true_positives_best_cond_flBFS[ct_outer,node] = true_positives_flBFS_ precision_best_cond_flBFS[ct_outer,node] = precision recall_best_cond_flBFS[ct_outer,node] = recall f1score_best_cond_flBFS[ct_outer,node] = f1_score_ cuts_best_cond_flBFS[ct_outer,node] = S print('outer:', ct_outer, 'number of node: ',node, ' completed: ', ct/how_many, ' degree: ', g.d[node]) print('conductance: ', external_best_cond_flBFS[ct_outer,node], 'f1score: ', f1score_best_cond_flBFS[ct_outer,node], 'precision: ', precision_best_cond_flBFS[ct_outer,node], 'recall: ', recall_best_cond_flBFS[ct_outer,node]) ct += 1 end = time.time() print(" ") print("Outer: ", ct_outer," Elapsed time BFS+SL: ", end - start) print("Outer: ", ct_outer," Number of experiments: ", number_experiments) print(" ") ct_outer += 1 ``` ## Performance of BFS+FlowImp. 
``` all_data = [] xlabels_ = [] print('Results for BFS+SL') sum_precision = 0 sum_recall = 0 sum_f1 = 0 sum_conductance = 0 info_ref_nodes = all_clusters l_info_ref_nodes = len(info_ref_nodes) for i in range(l_info_ref_nodes): temp_pre = [] temp_rec = [] temp_f1 = [] temp_conductance = [] for j in all_clusters[i]: temp_pre.append(precision_best_cond_flBFS[i,j]) temp_rec.append(recall_best_cond_flBFS[i,j]) temp_f1.append(f1score_best_cond_flBFS[i,j]) temp_conductance.append(external_best_cond_flBFS[i,j]) print('Feature:', i,'Precision', stat_.mean(temp_pre), 'Recall', stat_.mean(temp_rec), 'F1', stat_.mean(temp_f1), 'Cond.', stat_.mean(temp_conductance)) ``` ## Collect data for L1+SL ``` nodes = {} external_best_cond_l1SL = {} external_best_pre_cond_l1SL = {} vol_best_cond_l1SL = {} vol_best_pre_l1SL = {} size_clust_best_cond_l1SL = {} size_clust_best_pre_l1SL = {} f1score_best_cond_l1SL = {} f1score_best_pre_l1SL = {} true_positives_best_cond_l1SL = {} true_positives_best_pre_l1SL = {} precision_best_cond_l1SL = {} precision_best_pre_l1SL = {} recall_best_cond_l1SL = {} recall_best_pre_l1SL = {} cuts_best_cond_l1SL = {} cuts_best_pre_l1SL = {} cuts_l1SL_ALL = {} ct_outer = 0 number_experiments = 0 for rr in all_clusters: how_many = int(len(rr)) print(how_many) random.seed(4) nodes[ct_outer] = np.random.choice(rr, how_many, replace=False) eigv, lambda_val = fiedler_local(g, rr) lambda_val = np.real(lambda_val) step = (2*lambda_val - lambda_val/2)/4 a_list = np.arange(lambda_val/2,2*lambda_val,step) vol_target = np.sum(g.d[rr]) ct = 0 start = time.time() for node in nodes[ct_outer]: ref_node = [node] max_precision = -1 min_conduct = 100 ct_inner = 0 for a in a_list: if ct_outer <= 1: rho = 0.15/np.sum(g.d[rr]) else: rho = 0.2/np.sum(g.d[rr]) output_pr_clustering = approximate_PageRank(g,ref_node,method = "l1reg-rand", epsilon=1.0e-2, rho=rho, alpha=a, cpp = True, normalize=True,normalized_objective=True) number_experiments += 1 output_pr_sc = 
sweep_cut(g,output_pr_clustering,cpp=True) S = output_pr_sc[0] vol_input = np.sum(g.d[S]) vol_graph_minus_input = np.sum(g.d[list(set(range(g._num_vertices)) - set(S))]) vol_target_intersection_input = np.sum(g.d[list(set(rr).intersection(set(S)))]) gamma = vol_input/vol_graph_minus_input sigma = max(vol_target_intersection_input/vol_target,gamma) delta = min(max((1/3)*(1.0/(1.0/sigma - 1)) - gamma,0),1) S = flow_clustering(g,S,method="sl",delta=delta)[0] cuts_l1SL_ALL[ct_outer,node,ct_inner] = S size_clust_l1SL_ = len(S) cond_val_l1pr = g.compute_conductance(S) vol_ = sum(g.d[S]) true_positives_l1SL_ = set(rr).intersection(S) if len(true_positives_l1SL_) == 0: true_positives_l1SL_ = set(ref_node) vol_ = g.d[ref_node][0] precision = sum(g.d[np.array(list(true_positives_l1SL_))])/vol_ recall = sum(g.d[np.array(list(true_positives_l1SL_))])/sum(g.d[rr]) f1_score_ = 2*(precision*recall)/(precision + recall) if f1_score_ >= max_precision: max_precision = f1_score_ external_best_pre_cond_l1SL[ct_outer,node] = cond_val_l1pr vol_best_pre_l1SL[ct_outer,node] = vol_ size_clust_best_pre_l1SL[ct_outer,node] = size_clust_l1SL_ true_positives_best_pre_l1SL[ct_outer,node] = true_positives_l1SL_ precision_best_pre_l1SL[ct_outer,node] = precision recall_best_pre_l1SL[ct_outer,node] = recall f1score_best_pre_l1SL[ct_outer,node] = f1_score_ cuts_best_pre_l1SL[ct_outer,node] = S if cond_val_l1pr <= min_conduct: min_conduct = cond_val_l1pr external_best_cond_l1SL[ct_outer,node] = cond_val_l1pr vol_best_cond_l1SL[ct_outer,node] = vol_ size_clust_best_cond_l1SL[ct_outer,node] = size_clust_l1SL_ true_positives_best_cond_l1SL[ct_outer,node] = true_positives_l1SL_ precision_best_cond_l1SL[ct_outer,node] = precision recall_best_cond_l1SL[ct_outer,node] = recall f1score_best_cond_l1SL[ct_outer,node] = f1_score_ cuts_best_cond_l1SL[ct_outer,node] = S print('outer:', ct_outer, 'number of node: ',node, ' completed: ', ct/how_many, ' degree: ', g.d[node]) print('conductance: ', 
external_best_cond_l1SL[ct_outer,node], 'f1score: ', f1score_best_cond_l1SL[ct_outer,node], 'precision: ', precision_best_cond_l1SL[ct_outer,node], 'recall: ', recall_best_cond_l1SL[ct_outer,node]) ct += 1 end = time.time() print(" ") print("Outer: ", ct_outer," Elapsed time L1+SL with rounding: ", end - start) print("Outer: ", ct_outer," Number of experiments: ", number_experiments) print(" ") ct_outer += 1 ``` ## Performance of l1+SL ``` all_data = [] xlabels_ = [] print('Results for L1+SL') sum_precision = 0 sum_recall = 0 sum_f1 = 0 sum_conductance = 0 info_ref_nodes = all_clusters l_info_ref_nodes = len(info_ref_nodes) for i in range(l_info_ref_nodes): temp_pre = [] temp_rec = [] temp_f1 = [] temp_conductance = [] for j in all_clusters[i]: temp_pre.append(precision_best_cond_l1SL[i,j]) temp_rec.append(recall_best_cond_l1SL[i,j]) temp_f1.append(f1score_best_cond_l1SL[i,j]) temp_conductance.append(external_best_cond_l1SL[i,j]) print('Feature:', i,'Precision', stat_.mean(temp_pre), 'Recall', stat_.mean(temp_rec), 'F1', stat_.mean(temp_f1), 'Cond.', stat_.mean(temp_conductance)) ```
github_jupyter
``` !wget https://gitlab.com/federicozzo/electiveai/raw/master/Desktop/uni/elective_AI/electiveai/bdd100K_img.zip?inline=false from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf import numpy as np import IPython.display as display import cv2 import json import os from tqdm import tqdm import matplotlib.pyplot as plt from google.colab import drive drive.mount('/content/drive', force_remount=True) drive.flush_and_unmount() with open("/content/bdd100k/labels/bdd100k_labels_images_train.json", "r") as f: train_images_label = json.load(f) with open("/content/bdd100k/labels/bdd100k_labels_images_val.json", "r") as f: test_images_label = json.load(f) def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def load_image(addr): # read an image and resize to (128, 128) # cv2 load images as BGR, convert it to RGB img = cv2.imread(addr) img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_CUBIC) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = img.astype(np.float32) return img def find_img_in_labels(name): name = name.split("/")[-1] for image in train_images_label: if image['name'] == name: return image return None def find_img_in_test_labels(name): name = name.split("/")[-1] for image in test_images_label: if image['name'] == name: return image return None for image in train_images_label: if image['attributes']['timeofday'] not in ['daytime', 'night', 'dawn/dusk', 'undefined']: print(image['name'], image['attributes']['timeofday']) def writeTfRecord(input_path, output_path, setname): # open the TFRecords file writer = tf.python_io.TFRecordWriter(output_path) images_filenames = [input_path+fn for fn in os.listdir(path=input_path)] for image_fn in tqdm(images_filenames): # Load the image img = load_image(image_fn) if setname == 'train': attributes = find_img_in_labels(image_fn) 
else: attributes = find_img_in_test_labels(image_fn) if attributes is None: print(image_fn) continue label = attributes['attributes']['timeofday'] if label not in ['daytime', 'night']: continue # Create a feature feature = {'label': _bytes_feature(tf.compat.as_bytes(label)), 'image': _bytes_feature(tf.compat.as_bytes(img.tostring()))} # Create an example protocol buffer example = tf.train.Example(features=tf.train.Features(feature=feature)) # Serialize to string and write on the file writer.write(example.SerializeToString()) writer.close() train_path = '/content/bdd100k/images/100k/train/' test_path = '/content/bdd100k/images/100k/test/' val_path = '/content/bdd100k/images/100k/val/' train_out = 'train.tfrecords' test_out = 'test.tfrecords' writeTfRecord(train_path, train_out) writeTfRecord(val_path, test_out, 'val') ``` # ***Read from TfRecords File*** ``` sess = tf.InteractiveSession() ### IMPORTANT : First you have to unzip the tfRecord to import! ### tfrecord_path = 'test.tfrecords' dataset = tf.data.TFRecordDataset(tfrecord_path) def decode(serialized_example): """ Parses an image and label from the given `serialized_example`. It is used as a map function for `dataset.map` """ IMAGE_SHAPE = (128,128,3) # 1. define a parser features = tf.parse_single_example( serialized_example, # Defaults are not specified since both keys are required. features={ 'image': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.string), }) # 2. Convert the data image = tf.decode_raw(features['image'], tf.float32) label = features['label'] # 3. reshape image = tf.convert_to_tensor(tf.reshape(image, IMAGE_SHAPE)) return image, label def normalize(image, label): """Convert `image` from [0, 255] -> [-0.5, 0.5] floats.""" image = tf.cast(image, tf.float32) * (1. / 255) return image, label # Parse the record into tensors with map. # map takes a Python function and applies it to every sample. 
dataset = dataset.map(decode) dataset = dataset.map(normalize) batch_size = 1000 dataset = dataset.batch(batch_size) # Creating an iterator iterator = dataset.make_one_shot_iterator() image_batch, label_batch = iterator.get_next() image_batch, label_batch = sess.run([image_batch, label_batch]) print(image_batch.shape) print(label_batch.shape) plt.imshow(image_batch[0]) ```
github_jupyter
## 📍 The Data This example considers a hierarchical dataset. The world is split by continents. Continents are split by country. Each country has a value (population size). Our goal is to represent each country as a circle, its size being proportional to its population. Let's create such a dataset: ``` data = [{'id': 'World', 'datum': 6964195249, 'children' : [ {'id' : "North America", 'datum': 450448697, 'children' : [ {'id' : "United States", 'datum' : 308865000}, {'id' : "Mexico", 'datum' : 107550697}, {'id' : "Canada", 'datum' : 34033000} ]}, {'id' : "South America", 'datum' : 278095425, 'children' : [ {'id' : "Brazil", 'datum' : 192612000}, {'id' : "Colombia", 'datum' : 45349000}, {'id' : "Argentina", 'datum' : 40134425} ]}, {'id' : "Europe", 'datum' : 209246682, 'children' : [ {'id' : "Germany", 'datum' : 81757600}, {'id' : "France", 'datum' : 65447374}, {'id' : "United Kingdom", 'datum' : 62041708} ]}, {'id' : "Africa", 'datum' : 311929000, 'children' : [ {'id' : "Nigeria", 'datum' : 154729000}, {'id' : "Ethiopia", 'datum' : 79221000}, {'id' : "Egypt", 'datum' : 77979000} ]}, {'id' : "Asia", 'datum' : 2745929500, 'children' : [ {'id' : "China", 'datum' : 1336335000}, {'id' : "India", 'datum' : 1178225000}, {'id' : "Indonesia", 'datum' : 231369500} ]} ]}] ``` ## 🙇‍♂️ Compute circle position We need an algorythm that computes the position of each country and continent circles, together with their radius. Fortunately, the `circlize` library is here. It's `circlify()` function does exactly that 😍 ``` # import the circlify library import circlify # Compute circle positions thanks to the circlify() function circles = circlify.circlify( data, show_enclosure=False, target_enclosure=circlify.Circle(x=0, y=0, r=1) ) ``` Have a look to the `circles` object, it provides exactly that 🎉. ## 🔨 Build the viz Let's be honnest, that's quite a bit of code to get a decent graph 😞. 
The `circlify` library has a `bubble()` function that allows you to do a simple circle pack with one line of code, but it does not allow you to customize the chart.
github_jupyter
# Introduction This tutorial illustrates how to use *ObjTables* to revision datasets, revision schemas, and migrate datasets between revisions of their schemas. This tutorial uses an address book of CEOs as an example. # Define a schema for an address book First, as described in [Tutorial 1](1.%20Building%20and%20visualizing%20schemas.ipynb), use *ObjTables* to define a schema for an address book. ``` import enum import obj_tables import types class Address(obj_tables.Model): street = obj_tables.StringAttribute(unique=True, primary=True, verbose_name='Street') city = obj_tables.StringAttribute(verbose_name='City') state = obj_tables.StringAttribute(verbose_name='State') zip_code = obj_tables.StringAttribute(verbose_name='Zip code') country = obj_tables.StringAttribute(verbose_name='Country') class Meta(obj_tables.Model.Meta): table_format = obj_tables.TableFormat.multiple_cells attribute_order = ('street', 'city', 'state', 'zip_code', 'country',) verbose_name = 'Address' verbose_name_plural = 'Addresses' class Company(obj_tables.Model): name = obj_tables.StringAttribute(unique=True, primary=True, verbose_name='Name') url = obj_tables.UrlAttribute(verbose_name='URL') address = obj_tables.OneToOneAttribute(Address, related_name='company', verbose_name='Address') class Meta(obj_tables.Model.Meta): table_format = obj_tables.TableFormat.row attribute_order = ('name', 'url', 'address',) verbose_name = 'Company' verbose_name_plural = 'Companies' class PersonType(str, enum.Enum): family = 'family' friend = 'friend' business = 'business' class Person(obj_tables.Model): name = obj_tables.StringAttribute(unique=True, primary=True, verbose_name='Name') type = obj_tables.EnumAttribute(PersonType, verbose_name='Type') company = obj_tables.ManyToOneAttribute(Company, related_name='employees', verbose_name='Company') email_address = obj_tables.EmailAttribute(verbose_name='Email address') phone_number = obj_tables.StringAttribute(verbose_name='Phone number') address = 
obj_tables.ManyToOneAttribute(Address, related_name='people', verbose_name='Address') class Meta(obj_tables.Model.Meta): table_format = obj_tables.TableFormat.row attribute_order = ('name', 'type', 'company', 'email_address', 'phone_number', 'address',) verbose_name = 'Person' verbose_name_plural = 'People' class AddressBook(obj_tables.Model): id = obj_tables.StringAttribute(unique=True, primary=True, verbose_name='Id') companies = obj_tables.OneToManyAttribute(Company, related_name='address_book') people = obj_tables.OneToManyAttribute(Person, related_name='address_book') class Meta(obj_tables.Model.Meta): table_format = obj_tables.TableFormat.column attribute_order = ('id', 'companies', 'people') verbose_name = 'Address book' verbose_name_plural = 'Address books' ``` # Revision an address book of the CEOs of technology companies In many domains such as exploratory areas of science, datasets must often be built iteratively over time. For example, we believe that whole-cell models will be built by iteratively modeling additional biochemical species, reactions, and pathways over time as more experimental data and knowledge is generated and additional collaborators contribute to a model. Consequently, it is often helpful to track the provenence of a dataset including when the dataset was first created; when each revision was made; which objects and relationships were added, removed, or changed with each revision and why; and who contributed each revision. ##### Revisioning datasets with Git We recommend using [Git](https://git-scm.com/) to track the revision provenance of a dataset as follows: 1. Create a Git repository. 2. Optionally, host the repository on a publicly accessible server such as [GitHub](https://github.com). 3. Save each revision in CSV, TSV, MULTI.CSV, or MULTI.TSV format so that Git can difference and merge the dataset. 4. 
For each revision, create an instance of `DataRepoMetadata`, use the `obj_tables.utils.set_git_repo_metadata_from_path` method to record the revision of the dataset into this instance, and use `obj_tables.io` to save this instance of `DataRepoMetadata` into the dataset. 6. Commit the revision, noting the rationale for the revision in the Git commit message. We also recommend [configuring Git to track the author of each revision](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup). 7. Optionally, push the revision to the public server. ##### (Step 1) Create a Git repository to track the revisions of the address book ``` from obj_tables.utils import DataRepoMetadata, set_git_repo_metadata_from_path from wc_utils.util.git import RepoMetadataCollectionType import git import os import shutil repo_path = 'Address book' repo_url = 'https://github.com/KarrLab/obj_tables_revisioning_tutorial_repo.git' # create repository if os.path.isdir(repo_path): shutil.rmtree(repo_path) repo = git.Repo.clone_from(repo_url, repo_path) ``` ##### (Steps 3, 4) Create an initial address book of the CEOs of several technology companies as of 2011, and save it to multiple CSV files along with metadata about the current revision of the address book ``` import obj_tables.io # Steve Jobs of Apple apple = Company(name='Apple', url='https://www.apple.com/', address=Address(street='10600 N Tantau Ave', city='Cupertino', state='CA', zip_code='95014', country='US')) jobs = Person(name='Steve Jobs', type=PersonType.business, company=apple, email_address='sjobs@apple.com', phone_number='408-996-1010', address=apple.address) # Reed Hasting of Netflix netflix = Company(name='Netflix', url='https://www.netflix.com/', address=Address(street='100 Winchester Cir', city='Los Gatos', state='CA', zip_code='95032', country='US')) hastings = Person(name='Reed Hastings', type=PersonType.business, company=netflix, email_address='reed.hastings@netflix.com', phone_number='408-540-3700', 
address=netflix.address) # Eric Schmidt of Google google = Company(name='Google', url='https://www.google.com/', address=Address(street='1600 Amphitheatre Pkwy', city='Mountain View', state='CA', zip_code='94043', country='US')) schmidt = Person(name='Eric Schmidt', type=PersonType.business, company=google, email_address='eschmidt@google.com', phone_number='650-253-0000', address=google.address) # Mark Zuckerberg of Facebook facebook = Company(name='Facebook', url='https://www.facebook.com/', address=Address(street='1 Hacker Way #15', city='Menlo Park', state='CA', zip_code='94025', country='US')) zuckerberg = Person(name='Mark Zuckerberg', type=PersonType.business, company=facebook, email_address='zuck@fb.com', phone_number='650-543-4800', address=facebook.address) # Merge the companies and CEOs into a single address book ceos = AddressBook( id = 'tech', companies = [apple, facebook, google, netflix], people = [schmidt, zuckerberg, hastings, jobs], ) # Get the current revision of the repository revision = DataRepoMetadata() set_git_repo_metadata_from_path(revision, RepoMetadataCollectionType.DATA_REPO, path=repo_path) # Save the address book to multiple CSV files along with its revision metadata address_book_filename = os.path.join(repo_path, 'ceos-*.csv') obj_tables.io.Writer().run(address_book_filename, [revision, ceos], models=[DataRepoMetadata, AddressBook, Company, Person]) import pandas pandas.read_csv(os.path.join(repo_path, 'ceos-Data repo metadata.csv'), delimiter=',') pandas.read_csv(os.path.join(repo_path, 'ceos-Address book.csv'), delimiter=',') pandas.read_csv(os.path.join(repo_path, 'ceos-Companies.csv'), delimiter=',') pandas.read_csv(os.path.join(repo_path, 'ceos-People.csv'), delimiter=',') ``` ##### (Step 5) Commit the initial address book ``` repo.index.add([ 'ceos-Data repo metadata.csv', 'ceos-Address book.csv', 'ceos-Companies.csv', 'ceos-People.csv', ]) repo.index.commit('Initial version of address book') ``` ##### (Steps 3, 4) Revise the 
address book to reflect the current CEOs as of 2020 ``` # Tim Cook is now the CEO of Apple jobs.cut_relations() cook = Person(name='Tim Cook', type=PersonType.business, company=apple, email_address='tcook@apple.com', phone_number='408-996-1010', address=apple.address) # Sundar Pichai is now the CEO of Google ceos.people.remove(schmidt) google.employees.remove(schmidt) google.address.people.remove(schmidt) pichai = Person(name='Sundar Pichai', type=PersonType.business, company=google, email_address='sundar@google.com', phone_number='650-253-0000', address=google.address) # Get the current revision of the repository set_git_repo_metadata_from_path(revision, RepoMetadataCollectionType.DATA_REPO, path=repo_path) # Save the address book to a MULTI.CSV file along with its revision metadata obj_tables.io.Writer().run(address_book_filename, [revision, ceos], models=[DataRepoMetadata, AddressBook, Company, Person]) pandas.read_csv(os.path.join(repo_path, 'ceos-Data repo metadata.csv'), delimiter=',') pandas.read_csv(os.path.join(repo_path, 'ceos-People.csv'), delimiter=',') ``` ##### (Step 5) Commit the revised address book ``` repo.index.add([ 'ceos-Data repo metadata.csv', 'ceos-Address book.csv', 'ceos-Companies.csv', 'ceos-People.csv', ]) repo.index.commit('Initial version of address book') ``` # Revise the address book schema and migrate the address book to the revised schema Please check back soon! In the meantime, please contact us at [info@karrlab.org](mailto:info@karrlab.org) with any questions.
github_jupyter
## Scalability Experiment (Section 5.3) The experiment is designed to compare the execution time of different coarsening schemes over increasingly large graphs. * For consistency, we use a regular graph of increasing size (vertices, edges) but always the same degree * The reduction is fixed to 0.5. The execution time will only slightly increase for larger ratios (since the problem that has to be solved becomes easier at consecutive levels where the graph is smaller) * If the execution time exceeds a budget (set to 100 sec), computation is skipped. The code accompanies paper [Graph reduction with spectral and cut guarantees](http://www.jmlr.org/papers/volume20/18-680/18-680.pdf) by Andreas Loukas published at JMLR/2019 ([bibtex](http://www.jmlr.org/papers/v20/18-680.bib)). This work was kindly supported by the Swiss National Science Foundation (grant number PZ00P2 179981). 15 March 2019 [Andreas Loukas](https://andreasloukas.blog) [![DOI](https://zenodo.org/badge/175851068.svg)](https://zenodo.org/badge/latestdoi/175851068) Released under the Apache license 2.0 ``` !pip install networkx %load_ext autoreload %autoreload 2 %matplotlib inline from IPython.core.display import display, HTML display(HTML("<style>.container { width:80% !important; }</style>")) from graph_coarsening.coarsening_utils import * import graph_coarsening.graph_lib as graph_lib import graph_coarsening.graph_utils as graph_utils import numpy as np import scipy as sp from scipy import io from scipy.linalg import circulant import time import os import matplotlib import matplotlib.pylab as plt import pygsp as gsp from pygsp import graphs, filters gsp.plotting.BACKEND = 'matplotlib' # Experiment parameters N_all = np.logspace(2, 6, 30, dtype=np.int) methods = ['heavy_edge', 'variation_edges', 'variation_neighborhoods', 'algebraic_JC', 'affinity_GS', 'kron'] K_all = [10,80] #[10, 20, 40] r = 0.5 budget = 100 # don't run anything that takes longer than this (in seconds) n_iterations = 10 deg = 10 
algorithm = 'greedy' n_methods = len(methods) # print(deg*N_all/2) ``` ### The actual experiment code (this will take long) If one needs to just see the results, skip running this part. ``` rerun_all = False rewrite_results = False if rerun_all: timings = np.zeros((len(N_all), len(K_all), n_methods, n_iterations)) * np.NaN skip = np.zeros(len(methods)) for NIdx, N in enumerate(N_all): G = graph_lib.models(N, 'regular', k=deg) for KIdx, K in enumerate(K_all): for methodIdx,method in enumerate(methods): if skip[methodIdx] == 1 : timings[NIdx, KIdx, methodIdx] = np.NaN print('skipping: {}, {}, {}'.format(N, method, K)) continue timing = 0 for iteration in range(n_iterations): if method == 'kron': start = time.time() _, tmp = kron_coarsening(G, r=r, m=None) end = time.time() if tmp == None: print('kron failed... skipping') continue else: start = time.time() _, _, Call, _ = coarsen(G, K=K, r=r, max_levels=4, method=method, algorithm=algorithm) end = time.time() if len(Call) >= 4: print('warning: too many levels for {}, r:{}, K:{}'.format(method, r, K) ) timings[NIdx, KIdx, methodIdx, iteration] = end-start timing = np.mean(timings[NIdx, KIdx, methodIdx, :]) skip[methodIdx] = 1 if (timing > budget) else 0 print('N = {}, done!'.format(N)) if sum(skip) == len(methods): break if rewrite_results: filepath = os.path.join('..', 'results', 'experiment_scalability.npz') print('.. 
saving to "' + filepath + '"') np.savez(filepath, methods=methods, K_all=K_all, N_all=N_all, timings=timings, deg=deg, budget=budget) print('done!') ``` ### Load results ``` filepath = os.path.join('..', 'results', 'experiment_scalability.npz') data = np.load(filepath) methods, K_all, N_all, timings, deg, budget = data['methods'], data['K_all'], data['N_all'], data['timings'], data['deg'], data['budget'] ``` ### Visualize them The produced figures are used in the paper ``` matplotlib.rcParams.update({'font.size': 25}) from matplotlib import cm colors = [ cm.ocean(x) for x in np.linspace(0, 0.95, len(methods)+1)] colors[1] = [0.8,0,0] colors[-2] = (np.array([127, 77, 34])/255).tolist() size = 2.7*2.7; print('The figures are drawn in the following in order:') for KIdx, K in enumerate(K_all): fig, axes = plt.subplots(1, 1, figsize=(1.618*size, size)); for methodIdx,method in reversed(list(enumerate(methods))): lineWidth = 1.5; marker = 's' method = method.replace('_', ' ') if method == 'heavy edge': method = 'heavy edge' cIdx, line, marker = 0, ':', 's' elif 'variation edges' in method: method = 'local var. (edges)' cIdx, line, marker, lineWidth = 2, '-', 'o', 1.5 elif (method == 'variation neighborhoods') or (method == 'variation neighborhood'): method = 'local var. (neigh)' cIdx, line, marker, lineWidth = 1, '-', 'o', 1.5 elif 'algebraic' in method: method = 'algebraic dist.' 
cIdx, line = 3, ':' elif 'affinity' in method: method = 'affinity' cIdx, line = 4, ':' elif method == 'kron': method = 'kron' cIdx, line, marker = 5, '--', 'x' else: continue style = line + marker color = colors[cIdx] tmp = np.mean(timings[:,KIdx,methodIdx,:], 1) tmp[tmp>budget] = np.NaN axes.plot(N_all*deg/2, tmp, style, label='{}'.format(method), color=color, lineWidth=lineWidth, markersize=6) axes.plot(np.array([10, N_all[-1]])*deg/2, [budget, budget], 'k:') axes.set_xscale('log') axes.set_yscale('log') axes.set_xlabel('number of edges (M)') axes.set_ylabel('execution time (sec)') axes.set_ylim([0.02, budget+30]) axes.set_xlim([300, N_all[-1]]) legend0 = axes.legend(fontsize=22, loc='lower right', edgecolor=[1,1,1]) axes.text(500, 63, 'max execution time', fontsize=21) axes.spines['right'].set_visible(False) axes.spines['top'].set_visible(False) fig.tight_layout() print('* experiment_scalability_K='+ str(K)) # fig.savefig(os.path.join('..', 'results', 'experiment_scalability_K='+ str(K) +'.pdf')) ```
github_jupyter
# Fairness and Explainability with SageMaker Clarify 1. [Overview](#Overview) 1. [Prerequisites and Data](#Prerequisites-and-Data) 1. [Initialize SageMaker](#Initialize-SageMaker) 1. [Download data](#Download-data) 1. [Loading the data: Adult Dataset](#Loading-the-data:-Adult-Dataset) 1. [Data inspection](#Data-inspection) 1. [Data encoding and upload to S3](#Encode-and-Upload-the-Data) 1. [Train and Deploy XGBoost Model](#Train-XGBoost-Model) 1. [Train Model](#Train-Model) 1. [Deploy Model to Endpoint](#Deploy-Model) 1. [Amazon SageMaker Clarify](#Amazon-SageMaker-Clarify) 1. [Detecting Bias](#Detecting-Bias) 1. [Writing BiasConfig](#Writing-BiasConfig) 1. [Pre-training Bias](#Pre-training-Bias) 1. [Post-training Bias](#Post-training-Bias) 1. [Viewing the Bias Report](#Viewing-the-Bias-Report) 1. [Explaining Predictions](#Explaining-Predictions) 1. [Viewing the Explainability Report](#Viewing-the-Explainability-Report) 1. [Clean Up](#Clean-Up) ## Overview Amazon SageMaker Clarify helps improve your machine learning models by detecting potential bias and helping explain how these models make predictions. The fairness and explainability functionality provided by SageMaker Clarify takes a step towards enabling AWS customers to build trustworthy and understandable machine learning models. The product comes with the tools to help you with the following tasks. * Measure biases that can occur during each stage of the ML lifecycle (data collection, model training and tuning, and monitoring of ML models deployed for inference). * Generate model governance reports targeting risk and compliance teams and external regulators. * Provide explanations of the data, models, and monitoring used to assess predictions. This sample notebook walks you through: 1. Key terms and concepts needed to understand SageMaker Clarify 1. Measuring the pre-training bias of a dataset and post-training bias of a model 1. 
Explaining the importance of the various input features on the model's decision 1. Accessing the reports through SageMaker Studio if you have an instance set up. In doing so, the notebook will first train a [SageMaker XGBoost](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.html) model using training dataset, then use SageMaker Clarify to analyze a testing dataset in CSV format. SageMaker Clarify also supports analyzing dataset in [SageMaker JSONLines dense format](https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html#common-in-formats), which is illustrated in [another notebook](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker_processing/fairness_and_explainability/fairness_and_explainability_jsonlines_format.ipynb). ## Prerequisites and Data ### Initialize SageMaker ``` from sagemaker import Session session = Session() bucket = session.default_bucket() prefix = "sagemaker/DEMO-sagemaker-clarify" region = session.boto_region_name # Define IAM role from sagemaker import get_execution_role import pandas as pd import numpy as np import os import boto3 role = get_execution_role() s3_client = boto3.client("s3") ``` ### Download data Data Source: [https://archive.ics.uci.edu/ml/machine-learning-databases/adult/](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/) Let's __download__ the data and save it in the local folder with the name adult.data and adult.test from UCI repository$^{[2]}$. $^{[2]}$Dua Dheeru, and Efi Karra Taniskidou. "[UCI Machine Learning Repository](http://archive.ics.uci.edu/ml)". Irvine, CA: University of California, School of Information and Computer Science (2017). 
``` adult_columns = [ "Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Marital Status", "Occupation", "Relationship", "Ethnic group", "Sex", "Capital Gain", "Capital Loss", "Hours per week", "Country", "Target", ] if not os.path.isfile("adult.data"): s3_client.download_file( "sagemaker-sample-files", "datasets/tabular/uci_adult/adult.data", "adult.data" ) print("adult.data saved!") else: print("adult.data already on disk.") if not os.path.isfile("adult.test"): s3_client.download_file( "sagemaker-sample-files", "datasets/tabular/uci_adult/adult.test", "adult.test" ) print("adult.test saved!") else: print("adult.test already on disk.") ``` ### Loading the data: Adult Dataset From the UCI repository of machine learning datasets, this database contains 14 features concerning demographic characteristics of 45,222 rows (32,561 for training and 12,661 for testing). The task is to predict whether a person has a yearly income that is more or less than $50,000. Here are the features and their possible values: 1. **Age**: continuous. 1. **Workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked. 1. **Fnlwgt**: continuous (the number of people the census takers believe that observation represents). 1. **Education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool. 1. **Education-num**: continuous. 1. **Marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse. 1. **Occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces. 1. **Relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried. 1. 
**Ethnic group**: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black. 1. **Sex**: Female, Male. * **Note**: this data is extracted from the 1994 Census and enforces a binary option on Sex 1. **Capital-gain**: continuous. 1. **Capital-loss**: continuous. 1. **Hours-per-week**: continuous. 1. **Native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands. Next, we specify our binary prediction task: 15. **Target**: <=50,000, >$50,000. ``` training_data = pd.read_csv( "adult.data", names=adult_columns, sep=r"\s*,\s*", engine="python", na_values="?" ).dropna() testing_data = pd.read_csv( "adult.test", names=adult_columns, sep=r"\s*,\s*", engine="python", na_values="?", skiprows=1 ).dropna() training_data.head() ``` ### Data inspection Plotting histograms for the distribution of the different features is a good way to visualize the data. Let's plot a few of the features that can be considered _sensitive_. Let's take a look specifically at the Sex feature of a census respondent. In the first plot we see that there are fewer Female respondents as a whole but especially in the positive outcomes, where they form ~$\frac{1}{7}$th of respondents. ``` training_data["Sex"].value_counts().sort_values().plot(kind="bar", title="Counts of Sex", rot=0) training_data["Sex"].where(training_data["Target"] == ">50K").value_counts().sort_values().plot( kind="bar", title="Counts of Sex earning >$50K", rot=0 ) ``` ### Encode and Upload the Dataset Here we encode the training and test data. Encoding input data is not necessary for SageMaker Clarify, but is necessary for the model. 
``` from sklearn import preprocessing def number_encode_features(df): result = df.copy() encoders = {} for column in result.columns: if result.dtypes[column] == np.object: encoders[column] = preprocessing.LabelEncoder() # print('Column:', column, result[column]) result[column] = encoders[column].fit_transform(result[column].fillna("None")) return result, encoders training_data = pd.concat([training_data["Target"], training_data.drop(["Target"], axis=1)], axis=1) training_data, _ = number_encode_features(training_data) training_data.to_csv("train_data.csv", index=False, header=False) testing_data, _ = number_encode_features(testing_data) test_features = testing_data.drop(["Target"], axis=1) test_target = testing_data["Target"] test_features.to_csv("test_features.csv", index=False, header=False) ``` A quick note about our encoding: the "Female" Sex value has been encoded as 0 and "Male" as 1. ``` training_data.head() ``` Lastly, let's upload the data to S3 ``` from sagemaker.s3 import S3Uploader from sagemaker.inputs import TrainingInput train_uri = S3Uploader.upload("train_data.csv", "s3://{}/{}".format(bucket, prefix)) train_input = TrainingInput(train_uri, content_type="csv") test_uri = S3Uploader.upload("test_features.csv", "s3://{}/{}".format(bucket, prefix)) ``` ### Train XGBoost Model #### Train Model Since our focus is on understanding how to use SageMaker Clarify, we keep it simple by using a standard XGBoost model. ``` from sagemaker.image_uris import retrieve from sagemaker.estimator import Estimator container = retrieve("xgboost", region, version="1.2-1") xgb = Estimator( container, role, instance_count=1, instance_type="ml.m5.xlarge", disable_profiler=True, sagemaker_session=session, ) xgb.set_hyperparameters( max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, objective="binary:logistic", num_round=800, ) xgb.fit({"train": train_input}, logs=False) ``` #### Deploy Model Here we create the SageMaker model. 
``` model_name = "DEMO-clarify-model" model = xgb.create_model(name=model_name) container_def = model.prepare_container_def() session.create_model(model_name, role, container_def) ``` ## Amazon SageMaker Clarify Now that you have your model set up. Let's say hello to SageMaker Clarify! ``` from sagemaker import clarify clarify_processor = clarify.SageMakerClarifyProcessor( role=role, instance_count=1, instance_type="ml.m5.xlarge", sagemaker_session=session ) ``` ### Detecting Bias SageMaker Clarify helps you detect possible pre- and post-training biases using a variety of metrics. #### Writing DataConfig and ModelConfig A `DataConfig` object communicates some basic information about data I/O to SageMaker Clarify. We specify where to find the input dataset, where to store the output, the target column (`label`), the header names, and the dataset type. ``` bias_report_output_path = "s3://{}/{}/clarify-bias".format(bucket, prefix) bias_data_config = clarify.DataConfig( s3_data_input_path=train_uri, s3_output_path=bias_report_output_path, label="Target", headers=training_data.columns.to_list(), dataset_type="text/csv", ) ``` A `ModelConfig` object communicates information about your trained model. To avoid additional traffic to your production models, SageMaker Clarify sets up and tears down a dedicated endpoint when processing. * `instance_type` and `instance_count` specify your preferred instance type and instance count used to run your model on during SageMaker Clarify's processing. The testing dataset is small so a single standard instance is good enough to run this example. If your have a large complex dataset, you may want to use a better instance type to speed up, or add more instances to enable Spark parallelization. * `accept_type` denotes the endpoint response payload format, and `content_type` denotes the payload format of request to the endpoint. 
``` model_config = clarify.ModelConfig( model_name=model_name, instance_type="ml.m5.xlarge", instance_count=1, accept_type="text/csv", content_type="text/csv", ) ``` A `ModelPredictedLabelConfig` provides information on the format of your predictions. XGBoost model outputs probabilities of samples, so SageMaker Clarify invokes the endpoint then uses `probability_threshold` to convert the probability to binary labels for bias analysis. Prediction above the threshold is interpreted as label value `1` and below or equal as label value `0`. ``` predictions_config = clarify.ModelPredictedLabelConfig(probability_threshold=0.8) ``` #### Writing BiasConfig SageMaker Clarify also needs information on what the sensitive columns (`facets`) are, what the sensitive features (`facet_values_or_threshold`) may be, and what the desirable outcomes are (`label_values_or_threshold`). SageMaker Clarify can handle both categorical and continuous data for `facet_values_or_threshold` and for `label_values_or_threshold`. In this case we are using categorical data. We specify this information in the `BiasConfig` API. Here that the positive outcome is earning >$50,000, Sex is a sensitive category, and Female respondents are the sensitive group. `group_name` is used to form subgroups for the measurement of Conditional Demographic Disparity in Labels (CDDL) and Conditional Demographic Disparity in Predicted Labels (CDDPL) with regards to Simpson’s paradox. ``` bias_config = clarify.BiasConfig( label_values_or_threshold=[1], facet_name="Sex", facet_values_or_threshold=[0], group_name="Age" ) ``` #### Pre-training Bias Bias can be present in your data before any model training occurs. Inspecting your data for bias before training begins can help detect any data collection gaps, inform your feature engineering, and help you understand what societal biases the data may reflect. Computing pre-training bias metrics does not require a trained model. 
#### Post-training Bias Computing post-training bias metrics does require a trained model. Unbiased training data (as determined by concepts of fairness measured by bias metric) may still result in biased model predictions after training. Whether this occurs depends on several factors including hyperparameter choices. You can run these options separately with `run_pre_training_bias()` and `run_post_training_bias()` or at the same time with `run_bias()` as shown below. ``` clarify_processor.run_bias( data_config=bias_data_config, bias_config=bias_config, model_config=model_config, model_predicted_label_config=predictions_config, pre_training_methods="all", post_training_methods="all", ) ``` #### Viewing the Bias Report In Studio, you can view the results under the experiments tab. <img src="./recordings/bias_report.gif"> Each bias metric has detailed explanations with examples that you can explore. <img src="./recordings/bias_detail.gif"> You could also summarize the results in a handy table! <img src="./recordings/bias_report_chart.gif"> If you're not a Studio user yet, you can access the bias report in pdf, html and ipynb formats in the following S3 bucket: ``` bias_report_output_path ``` ### Explaining Predictions There are expanding business needs and legislative regulations that require explanations of _why_ a model made the decision it did. SageMaker Clarify uses SHAP to explain the contribution that each input feature makes to the final decision. Kernel SHAP algorithm requires a baseline (also known as background dataset). Baseline dataset type shall be the same as `dataset_type` of `DataConfig`, and baseline samples shall only include features. By definition, `baseline` should either be a S3 URI to the baseline dataset file, or an in-place list of samples. In this case we chose the latter, and put the first sample of the test dataset to the list. 
``` shap_config = clarify.SHAPConfig( baseline=[test_features.iloc[0].values.tolist()], num_samples=15, agg_method="mean_abs", save_local_shap_values=True, ) explainability_output_path = "s3://{}/{}/clarify-explainability".format(bucket, prefix) explainability_data_config = clarify.DataConfig( s3_data_input_path=train_uri, s3_output_path=explainability_output_path, label="Target", headers=training_data.columns.to_list(), dataset_type="text/csv", ) clarify_processor.run_explainability( data_config=explainability_data_config, model_config=model_config, explainability_config=shap_config, ) ``` #### Viewing the Explainability Report As with the bias report, you can view the explainability report in Studio under the experiments tab <img src="./recordings/explainability_detail.gif"> The Model Insights tab contains direct links to the report and model insights. If you're not a Studio user yet, as with the Bias Report, you can access this report at the following S3 bucket. ``` explainability_output_path ``` #### Analysis of local explanations It is possible to visualize the the local explanations for single examples in your dataset. You can use the obtained results from running Kernel SHAP algorithm for global explanations. You can simply load the local explanations stored in your output path, and visualize the explanation (i.e., the impact that the single features have on the prediction of your model) for any single example. 
``` local_explanations_out = pd.read_csv(explainability_output_path + "/explanations_shap/out.csv") feature_names = [str.replace(c, "_label0", "") for c in local_explanations_out.columns.to_series()] local_explanations_out.columns = feature_names selected_example = 111 print( "Example number:", selected_example, "\nwith model prediction:", sum(local_explanations_out.iloc[selected_example]) > 0, ) print("\nFeature values -- Label", training_data.iloc[selected_example]) local_explanations_out.iloc[selected_example].plot( kind="bar", title="Local explanation for the example number " + str(selected_example), rot=90 ) ``` ### Clean Up Finally, don't forget to clean up the resources we set up and used for this demo! ``` session.delete_model(model_name) ```
github_jupyter
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/data-science-ipython-notebooks). # Kaggle Machine Learning Competition: Predicting Titanic Survivors * Competition Site * Description * Evaluation * Data Set * Setup Imports and Variables * Explore the Data * Feature: Passenger Classes * Feature: Sex * Feature: Embarked * Feature: Age * Feature: Family Size * Final Data Preparation for Machine Learning * Data Wrangling Summary * Random Forest: Training * Random Forest: Predicting * Random Forest: Prepare for Kaggle Submission * Support Vector Machine: Training * Support Vector Machine: Predicting ## Competition Site Description, Evaluation, and Data Set taken from the [competition site](https://www.kaggle.com/c/titanic-gettingStarted). ## Description ![alt text](http://upload.wikimedia.org/wikipedia/commons/6/6e/St%C3%B6wer_Titanic.jpg) The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships. One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class. In this challenge, we ask you to complete the analysis of what sorts of people were likely to survive. In particular, we ask you to apply the tools of machine learning to predict which passengers survived the tragedy. ## Evaluation The historical data has been split into two groups, a 'training set' and a 'test set'. For the training set, we provide the outcome ( 'ground truth' ) for each passenger. 
You will use this set to build your model to generate predictions for the test set. For each passenger in the test set, you must predict whether or not they survived the sinking ( 0 for deceased, 1 for survived ). Your score is the percentage of passengers you correctly predict. The Kaggle leaderboard has a public and private component. 50% of your predictions for the test set have been randomly assigned to the public leaderboard ( the same 50% for all users ). Your score on this public portion is what will appear on the leaderboard. At the end of the contest, we will reveal your score on the private 50% of the data, which will determine the final winner. This method prevents users from 'overfitting' to the leaderboard. ## Data Set | File Name | Available Formats | |------------------|-------------------| | train | .csv (59.76 kb) | | gendermodel | .csv (3.18 kb) | | genderclassmodel | .csv (3.18 kb) | | test | .csv (27.96 kb) | | gendermodel | .py (3.58 kb) | | genderclassmodel | .py (5.63 kb) | | myfirstforest | .py (3.99 kb) | <pre> VARIABLE DESCRIPTIONS: survival Survival (0 = No; 1 = Yes) pclass Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd) name Name sex Sex age Age sibsp Number of Siblings/Spouses Aboard parch Number of Parents/Children Aboard ticket Ticket Number fare Passenger Fare cabin Cabin embarked Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton) SPECIAL NOTES: Pclass is a proxy for socio-economic status (SES) 1st ~ Upper; 2nd ~ Middle; 3rd ~ Lower Age is in Years; Fractional if Age less than One (1) If the Age is Estimated, it is in the form xx.5 With respect to the family relation variables (i.e. sibsp and parch) some relations were ignored. The following are the definitions used for sibsp and parch. 
Sibling: Brother, Sister, Stepbrother, or Stepsister of Passenger Aboard Titanic Spouse: Husband or Wife of Passenger Aboard Titanic (Mistresses and Fiances Ignored) Parent: Mother or Father of Passenger Aboard Titanic Child: Son, Daughter, Stepson, or Stepdaughter of Passenger Aboard Titanic Other family relatives excluded from this study include cousins, nephews/nieces, aunts/uncles, and in-laws. Some children travelled only with a nanny, therefore parch=0 for them. As well, some travelled with very close friends or neighbors in a village, however, the definitions do not support such relations. </pre> ## Setup Imports and Variables ``` import pandas as pd import numpy as np import pylab as plt # Set the global default size of matplotlib figures plt.rc('figure', figsize=(10, 5)) # Size of matplotlib figures that contain subplots fizsize_with_subplots = (10, 10) # Size of matplotlib histogram bins bin_size = 10 ``` ## Explore the Data Read the data: ``` df_train = pd.read_csv('../data/titanic/train.csv') df_train.head() df_train.tail() ``` View the data types of each column: ``` df_train.dtypes ``` Type 'object' is a string for pandas, which poses problems with machine learning algorithms. If we want to use these as features, we'll need to convert these to number representations. Get some basic information on the DataFrame: ``` df_train.info() ``` Age, Cabin, and Embarked are missing values. Cabin has too many missing values, whereas we might be able to infer values for Age and Embarked. Generate various descriptive statistics on the DataFrame: ``` df_train.describe() ``` Now that we have a general idea of the data set contents, we can dive deeper into each column. We'll be doing exploratory data analysis and cleaning data to setup 'features' we'll be using in our machine learning algorithms. 
Plot a few features to get a better idea of each: ``` # Set up a grid of plots fig = plt.figure(figsize=fizsize_with_subplots) fig_dims = (3, 2) # Plot death and survival counts plt.subplot2grid(fig_dims, (0, 0)) df_train['Survived'].value_counts().plot(kind='bar', title='Death and Survival Counts') # Plot Pclass counts plt.subplot2grid(fig_dims, (0, 1)) df_train['Pclass'].value_counts().plot(kind='bar', title='Passenger Class Counts') # Plot Sex counts plt.subplot2grid(fig_dims, (1, 0)) df_train['Sex'].value_counts().plot(kind='bar', title='Gender Counts') plt.xticks(rotation=0) # Plot Embarked counts plt.subplot2grid(fig_dims, (1, 1)) df_train['Embarked'].value_counts().plot(kind='bar', title='Ports of Embarkation Counts') # Plot the Age histogram plt.subplot2grid(fig_dims, (2, 0)) df_train['Age'].hist() plt.title('Age Histogram') ``` Next we'll explore various features to view their impact on survival rates. ## Feature: Passenger Classes From our exploratory data analysis in the previous section, we see there are three passenger classes: First, Second, and Third class. We'll determine which proportion of passengers survived based on their passenger class. Generate a cross tab of Pclass and Survived: ``` pclass_xt = pd.crosstab(df_train['Pclass'], df_train['Survived']) pclass_xt ``` Plot the cross tab: ``` # Normalize the cross tab to sum to 1: pclass_xt_pct = pclass_xt.div(pclass_xt.sum(1).astype(float), axis=0) pclass_xt_pct.plot(kind='bar', stacked=True, title='Survival Rate by Passenger Classes') plt.xlabel('Passenger Class') plt.ylabel('Survival Rate') ``` We can see that passenger class seems to have a significant impact on whether a passenger survived. Those in First Class the highest chance for survival. ## Feature: Sex Gender might have also played a role in determining a passenger's survival rate. We'll need to map Sex from a string to a number to prepare it for machine learning algorithms. 
Generate a mapping of Sex from a string to a number representation: ``` sexes = sorted(df_train['Sex'].unique()) genders_mapping = dict(zip(sexes, range(0, len(sexes) + 1))) genders_mapping ``` Transform Sex from a string to a number representation: ``` df_train['Sex_Val'] = df_train['Sex'].map(genders_mapping).astype(int) df_train.head() ``` Plot a normalized cross tab for Sex_Val and Survived: ``` sex_val_xt = pd.crosstab(df_train['Sex_Val'], df_train['Survived']) sex_val_xt_pct = sex_val_xt.div(sex_val_xt.sum(1).astype(float), axis=0) sex_val_xt_pct.plot(kind='bar', stacked=True, title='Survival Rate by Gender') ``` The majority of females survived, whereas the majority of males did not. Next we'll determine whether we can gain any insights on survival rate by looking at both Sex and Pclass. Count males and females in each Pclass: ``` # Get the unique values of Pclass: passenger_classes = sorted(df_train['Pclass'].unique()) for p_class in passenger_classes: print 'M: ', p_class, len(df_train[(df_train['Sex'] == 'male') & (df_train['Pclass'] == p_class)]) print 'F: ', p_class, len(df_train[(df_train['Sex'] == 'female') & (df_train['Pclass'] == p_class)]) ``` Plot survival rate by Sex and Pclass: ``` # Plot survival rate by Sex females_df = df_train[df_train['Sex'] == 'female'] females_xt = pd.crosstab(females_df['Pclass'], df_train['Survived']) females_xt_pct = females_xt.div(females_xt.sum(1).astype(float), axis=0) females_xt_pct.plot(kind='bar', stacked=True, title='Female Survival Rate by Passenger Class') plt.xlabel('Passenger Class') plt.ylabel('Survival Rate') # Plot survival rate by Pclass males_df = df_train[df_train['Sex'] == 'male'] males_xt = pd.crosstab(males_df['Pclass'], df_train['Survived']) males_xt_pct = males_xt.div(males_xt.sum(1).astype(float), axis=0) males_xt_pct.plot(kind='bar', stacked=True, title='Male Survival Rate by Passenger Class') plt.xlabel('Passenger Class') plt.ylabel('Survival Rate') ``` The vast majority of females in First and 
Second class survived. Males in First class had the highest chance for survival. ## Feature: Embarked The Embarked column might be an important feature but it is missing a couple data points which might pose a problem for machine learning algorithms: ``` df_train[df_train['Embarked'].isnull()] ``` Prepare to map Embarked from a string to a number representation: ``` # Get the unique values of Embarked embarked_locs = sorted(df_train['Embarked'].unique()) embarked_locs_mapping = dict(zip(embarked_locs, range(0, len(embarked_locs) + 1))) embarked_locs_mapping ``` Transform Embarked from a string to a number representation to prepare it for machine learning algorithms: ``` df_train['Embarked_Val'] = df_train['Embarked'] \ .map(embarked_locs_mapping) \ .astype(int) df_train.head() ``` Plot the histogram for Embarked_Val: ``` df_train['Embarked_Val'].hist(bins=len(embarked_locs), range=(0, 3)) plt.title('Port of Embarkation Histogram') plt.xlabel('Port of Embarkation') plt.ylabel('Count') plt.show() ``` Since the vast majority of passengers embarked in 'S': 3, we assign the missing values in Embarked to 'S': ``` if len(df_train[df_train['Embarked'].isnull()] > 0): df_train.replace({'Embarked_Val' : { embarked_locs_mapping[nan] : embarked_locs_mapping['S'] } }, inplace=True) ``` Verify we do not have any more NaNs for Embarked_Val: ``` embarked_locs = sorted(df_train['Embarked_Val'].unique()) embarked_locs ``` Plot a normalized cross tab for Embarked_Val and Survived: ``` embarked_val_xt = pd.crosstab(df_train['Embarked_Val'], df_train['Survived']) embarked_val_xt_pct = \ embarked_val_xt.div(embarked_val_xt.sum(1).astype(float), axis=0) embarked_val_xt_pct.plot(kind='bar', stacked=True) plt.title('Survival Rate by Port of Embarkation') plt.xlabel('Port of Embarkation') plt.ylabel('Survival Rate') ``` It appears those that embarked in location 'C': 1 had the highest rate of survival. We'll dig in some more to see why this might be the case. 
Below we plot graphs to determine gender and passenger class makeup for each port: ``` # Set up a grid of plots fig = plt.figure(figsize=fizsize_with_subplots) rows = 2 cols = 3 col_names = ('Sex_Val', 'Pclass') for portIdx in embarked_locs: for colIdx in range(0, len(col_names)): plt.subplot2grid((rows, cols), (colIdx, portIdx - 1)) df_train[df_train['Embarked_Val'] == portIdx][col_names[colIdx]] \ .value_counts().plot(kind='bar') ``` Leaving Embarked as integers implies ordering in the values, which does not exist. Another way to represent Embarked without ordering is to create dummy variables: ``` df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked_Val'], prefix='Embarked_Val')], axis=1) ``` ## Feature: Age The Age column seems like an important feature--unfortunately it is missing many values. We'll need to fill in the missing values like we did with Embarked. Filter to view missing Age values: ``` df_train[df_train['Age'].isnull()][['Sex', 'Pclass', 'Age']].head() ``` Determine the Age typical for each passenger class by Sex_Val. We'll use the median instead of the mean because the Age histogram seems to be right skewed. 
``` # To keep Age in tact, make a copy of it called AgeFill # that we will use to fill in the missing ages: df_train['AgeFill'] = df_train['Age'] # Populate AgeFill df_train['AgeFill'] = df_train['AgeFill'] \ .groupby([df_train['Sex_Val'], df_train['Pclass']]) \ .apply(lambda x: x.fillna(x.median())) ``` Ensure AgeFill does not contain any missing values: ``` len(df_train[df_train['AgeFill'].isnull()]) ``` Plot a normalized cross tab for AgeFill and Survived: ``` # Set up a grid of plots fig, axes = plt.subplots(2, 1, figsize=fizsize_with_subplots) # Histogram of AgeFill segmented by Survived df1 = df_train[df_train['Survived'] == 0]['Age'] df2 = df_train[df_train['Survived'] == 1]['Age'] max_age = max(df_train['AgeFill']) axes[0].hist([df1, df2], bins=max_age / bin_size, range=(1, max_age), stacked=True) axes[0].legend(('Died', 'Survived'), loc='best') axes[0].set_title('Survivors by Age Groups Histogram') axes[0].set_xlabel('Age') axes[0].set_ylabel('Count') # Scatter plot Survived and AgeFill axes[1].scatter(df_train['Survived'], df_train['AgeFill']) axes[1].set_title('Survivors by Age Plot') axes[1].set_xlabel('Survived') axes[1].set_ylabel('Age') ``` Unfortunately, the graphs above do not seem to clearly show any insights. We'll keep digging further. Plot AgeFill density by Pclass: ``` for pclass in passenger_classes: df_train.AgeFill[df_train.Pclass == pclass].plot(kind='kde') plt.title('Age Density Plot by Passenger Class') plt.xlabel('Age') plt.legend(('1st Class', '2nd Class', '3rd Class'), loc='best') ``` When looking at AgeFill density by Pclass, we see the first class passengers were generally older then second class passengers, which in turn were older than third class passengers. We've determined that first class passengers had a higher survival rate than second class passengers, which in turn had a higher survival rate than third class passengers. 
``` # Set up a grid of plots fig = plt.figure(figsize=fizsize_with_subplots) fig_dims = (3, 1) # Plot the AgeFill histogram for Survivors plt.subplot2grid(fig_dims, (0, 0)) survived_df = df_train[df_train['Survived'] == 1] survived_df['AgeFill'].hist(bins=max_age / bin_size, range=(1, max_age)) # Plot the AgeFill histogram for Females plt.subplot2grid(fig_dims, (1, 0)) females_df = df_train[(df_train['Sex_Val'] == 0) & (df_train['Survived'] == 1)] females_df['AgeFill'].hist(bins=max_age / bin_size, range=(1, max_age)) # Plot the AgeFill histogram for first class passengers plt.subplot2grid(fig_dims, (2, 0)) class1_df = df_train[(df_train['Pclass'] == 1) & (df_train['Survived'] == 1)] class1_df['AgeFill'].hist(bins=max_age / bin_size, range=(1, max_age)) ``` In the first graph, we see that most survivors come from the 20's to 30's age ranges and might be explained by the following two graphs. The second graph shows most females are within their 20's. The third graph shows most first class passengers are within their 30's. ## Feature: Family Size Feature engineering involves creating new features or modifying existing features which might be advantageous to a machine learning algorithm. 
Define a new feature FamilySize that is the sum of Parch (number of parents or children on board) and SibSp (number of siblings or spouses): ``` df_train['FamilySize'] = df_train['SibSp'] + df_train['Parch'] df_train.head() ``` Plot a histogram of FamilySize: ``` df_train['FamilySize'].hist() plt.title('Family Size Histogram') ``` Plot a histogram of AgeFill segmented by Survived: ``` # Get the unique values of Embarked and its maximum family_sizes = sorted(df_train['FamilySize'].unique()) family_size_max = max(family_sizes) df1 = df_train[df_train['Survived'] == 0]['FamilySize'] df2 = df_train[df_train['Survived'] == 1]['FamilySize'] plt.hist([df1, df2], bins=family_size_max + 1, range=(0, family_size_max), stacked=True) plt.legend(('Died', 'Survived'), loc='best') plt.title('Survivors by Family Size') ``` Based on the histograms, it is not immediately obvious what impact FamilySize has on survival. The machine learning algorithms might benefit from this feature. Additional features we might want to engineer might be related to the Name column, for example honorrary or pedestrian titles might give clues and better predictive power for a male's survival. ## Final Data Preparation for Machine Learning Many machine learning algorithms do not work on strings and they usually require the data to be in an array, not a DataFrame. Show only the columns of type 'object' (strings): ``` df_train.dtypes[df_train.dtypes.map(lambda x: x == 'object')] ``` Drop the columns we won't use: ``` df_train = df_train.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1) ``` Drop the following columns: * The Age column since we will be using the AgeFill column instead. * The SibSp and Parch columns since we will be using FamilySize instead. * The PassengerId column since it won't be used as a feature. * The Embarked_Val as we decided to use dummy variables instead. 
``` df_train = df_train.drop(['Age', 'SibSp', 'Parch', 'PassengerId', 'Embarked_Val'], axis=1) df_train.dtypes ``` Convert the DataFrame to a numpy array: ``` train_data = df_train.values train_data ``` ## Data Wrangling Summary Below is a summary of the data wrangling we performed on our training data set. We encapsulate this in a function since we'll need to do the same operations to our test set later. ``` def clean_data(df, drop_passenger_id): # Get the unique values of Sex sexes = sorted(df['Sex'].unique()) # Generate a mapping of Sex from a string to a number representation genders_mapping = dict(zip(sexes, range(0, len(sexes) + 1))) # Transform Sex from a string to a number representation df['Sex_Val'] = df['Sex'].map(genders_mapping).astype(int) # Get the unique values of Embarked embarked_locs = sorted(df['Embarked'].unique()) # Generate a mapping of Embarked from a string to a number representation embarked_locs_mapping = dict(zip(embarked_locs, range(0, len(embarked_locs) + 1))) # Transform Embarked from a string to dummy variables df = pd.concat([df, pd.get_dummies(df['Embarked'], prefix='Embarked_Val')], axis=1) # Fill in missing values of Embarked # Since the vast majority of passengers embarked in 'S': 3, # we assign the missing values in Embarked to 'S': if len(df[df['Embarked'].isnull()] > 0): df.replace({'Embarked_Val' : { embarked_locs_mapping[nan] : embarked_locs_mapping['S'] } }, inplace=True) # Fill in missing values of Fare with the average Fare if len(df[df['Fare'].isnull()] > 0): avg_fare = df['Fare'].mean() df.replace({ None: avg_fare }, inplace=True) # To keep Age in tact, make a copy of it called AgeFill # that we will use to fill in the missing ages: df['AgeFill'] = df['Age'] # Determine the Age typical for each passenger class by Sex_Val. # We'll use the median instead of the mean because the Age # histogram seems to be right skewed. 
df['AgeFill'] = df['AgeFill'] \ .groupby([df['Sex_Val'], df['Pclass']]) \ .apply(lambda x: x.fillna(x.median())) # Define a new feature FamilySize that is the sum of # Parch (number of parents or children on board) and # SibSp (number of siblings or spouses): df['FamilySize'] = df['SibSp'] + df['Parch'] # Drop the columns we won't use: df = df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1) # Drop the Age column since we will be using the AgeFill column instead. # Drop the SibSp and Parch columns since we will be using FamilySize. # Drop the PassengerId column since it won't be used as a feature. df = df.drop(['Age', 'SibSp', 'Parch'], axis=1) if drop_passenger_id: df = df.drop(['PassengerId'], axis=1) return df ``` ## Random Forest: Training Create the random forest object: ``` from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(n_estimators=100) ``` Fit the training data and create the decision trees: ``` # Training data features, skip the first column 'Survived' train_features = train_data[:, 1:] # 'Survived' column values train_target = train_data[:, 0] # Fit the model to our training data clf = clf.fit(train_features, train_target) score = clf.score(train_features, train_target) "Mean accuracy of Random Forest: {0}".format(score) ``` ## Random Forest: Predicting Read the test data: ``` df_test = pd.read_csv('../data/titanic/test.csv') df_test.head() ``` Note the test data does not contain the column 'Survived', we'll use our trained model to predict these values. 
``` # Data wrangle the test set and convert it to a numpy array df_test = clean_data(df_test, drop_passenger_id=False) test_data = df_test.values ``` Take the decision trees and run it on the test data: ``` # Get the test data features, skipping the first column 'PassengerId' test_x = test_data[:, 1:] # Predict the Survival values for the test data test_y = clf.predict(test_x) ``` ## Random Forest: Prepare for Kaggle Submission Create a DataFrame by combining the index from the test data with the output of predictions, then write the results to the output: ``` df_test['Survived'] = test_y df_test[['PassengerId', 'Survived']] \ .to_csv('../data/titanic/results-rf.csv', index=False) ``` ## Evaluate Model Accuracy Submitting to Kaggle will give you an accuracy score. It would be helpful to get an idea of accuracy without submitting to Kaggle. We'll split our training data, 80% will go to "train" and 20% will go to "test": ``` from sklearn import metrics from sklearn.cross_validation import train_test_split # Split 80-20 train vs test data train_x, test_x, train_y, test_y = train_test_split(train_features, train_target, test_size=0.20, random_state=0) print (train_features.shape, train_target.shape) print (train_x.shape, train_y.shape) print (test_x.shape, test_y.shape) ``` Use the new training data to fit the model, predict, and get the accuracy score: ``` clf = clf.fit(train_x, train_y) predict_y = clf.predict(test_x) from sklearn.metrics import accuracy_score print ("Accuracy = %.2f" % (accuracy_score(test_y, predict_y))) ``` View the Confusion Matrix: | | condition True | condition false| |------|----------------|---------------| |prediction true|True Positive|False positive| |Prediction False|False Negative|True Negative| ``` from IPython.core.display import Image Image(filename='../data/confusion_matrix.png', width=800) ``` Get the model score and confusion matrix: ``` model_score = clf.score(test_x, test_y) print ("Model Score %.2f \n" % (model_score)) 
confusion_matrix = metrics.confusion_matrix(test_y, predict_y) print ("Confusion Matrix ", confusion_matrix) print (" Predicted") print (" | 0 | 1 |") print (" |-----|-----|") print (" 0 | %3d | %3d |" % (confusion_matrix[0, 0], confusion_matrix[0, 1])) print ("Actual |-----|-----|") print (" 1 | %3d | %3d |" % (confusion_matrix[1, 0], confusion_matrix[1, 1])) print (" |-----|-----|") ``` Display the classification report: $$Precision = \frac{TP}{TP + FP}$$ $$Recall = \frac{TP}{TP + FN}$$ $$F1 = \frac{2TP}{2TP + FP + FN}$$ ``` from sklearn.metrics import classification_report print(classification_report(test_y, predict_y, target_names=['Not Survived', 'Survived'])) ```
github_jupyter
<a href="https://colab.research.google.com/github/kpe/bert-for-tf2/blob/master/examples/movie_reviews_with_bert_for_tf2_on_gpu.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> This is a modification of https://github.com/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb using the Tensorflow 2.0 Keras implementation of BERT from [kpe/bert-for-tf2](https://github.com/kpe/bert-for-tf2) with the original [google-research/bert](https://github.com/google-research/bert) weights. ``` # Copyright 2019 Google Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Predicting Movie Review Sentiment with [kpe/bert-for-tf2](https://github.com/kpe/bert-for-tf2) First install some prerequisites: ``` !pip install tqdm >> /dev/null import os import math import datetime from tqdm import tqdm import pandas as pd import numpy as np import tensorflow as tf tf.__version__ if tf.__version__.startswith("1."): tf.enable_eager_execution() ``` In addition to the standard libraries we imported above, we'll need to install the [bert-for-tf2](https://github.com/kpe/bert-for-tf2) python package, and do the imports required for loading the pre-trained weights and tokenizing the input text. 
``` !pip install bert-for-tf2 >> /dev/null import bert from bert import BertModelLayer from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights from bert.tokenization.bert_tokenization import FullTokenizer ``` #Data First, let's download the dataset, hosted by Stanford. The code below, which downloads, extracts, and imports the IMDB Large Movie Review Dataset, is borrowed from [this Tensorflow tutorial](https://www.tensorflow.org/hub/tutorials/text_classification_with_tf_hub). ``` from tensorflow import keras import os import re # Load all files from a directory in a DataFrame. def load_directory_data(directory): data = {} data["sentence"] = [] data["sentiment"] = [] for file_path in tqdm(os.listdir(directory), desc=os.path.basename(directory)): with tf.io.gfile.GFile(os.path.join(directory, file_path), "r") as f: data["sentence"].append(f.read()) data["sentiment"].append(re.match("\d+_(\d+)\.txt", file_path).group(1)) return pd.DataFrame.from_dict(data) # Merge positive and negative examples, add a polarity column and shuffle. def load_dataset(directory): pos_df = load_directory_data(os.path.join(directory, "pos")) neg_df = load_directory_data(os.path.join(directory, "neg")) pos_df["polarity"] = 1 neg_df["polarity"] = 0 return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True) # Download and process the dataset files. 
def download_and_load_datasets(force_download=False): dataset = tf.keras.utils.get_file( fname="aclImdb.tar.gz", origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", extract=True) train_df = load_dataset(os.path.join(os.path.dirname(dataset), "aclImdb", "train")) test_df = load_dataset(os.path.join(os.path.dirname(dataset), "aclImdb", "test")) return train_df, test_df ``` Let's use the `MovieReviewData` class below, to prepare/encode the data for feeding into our BERT model, by: - tokenizing the text - trim or pad it to a `max_seq_len` length - append the special tokens `[CLS]` and `[SEP]` - convert the string tokens to numerical `ID`s using the original model's token encoding from `vocab.txt` ``` import bert from bert import BertModelLayer from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights from bert.tokenization import FullTokenizer class MovieReviewData: DATA_COLUMN = "sentence" LABEL_COLUMN = "polarity" def __init__(self, tokenizer: FullTokenizer, sample_size=None, max_seq_len=1024): self.tokenizer = tokenizer self.sample_size = sample_size self.max_seq_len = 0 train, test = download_and_load_datasets() train, test = map(lambda df: df.reindex(df[MovieReviewData.DATA_COLUMN].str.len().sort_values().index), [train, test]) if sample_size is not None: assert sample_size % 128 == 0 train, test = train.head(sample_size), test.head(sample_size) # train, test = map(lambda df: df.sample(sample_size), [train, test]) ((self.train_x, self.train_y), (self.test_x, self.test_y)) = map(self._prepare, [train, test]) print("max seq_len", self.max_seq_len) self.max_seq_len = min(self.max_seq_len, max_seq_len) ((self.train_x, self.train_x_token_types), (self.test_x, self.test_x_token_types)) = map(self._pad, [self.train_x, self.test_x]) def _prepare(self, df): x, y = [], [] with tqdm(total=df.shape[0], unit_scale=True) as pbar: for ndx, row in df.iterrows(): text, label = row[MovieReviewData.DATA_COLUMN], 
row[MovieReviewData.LABEL_COLUMN] tokens = self.tokenizer.tokenize(text) tokens = ["[CLS]"] + tokens + ["[SEP]"] token_ids = self.tokenizer.convert_tokens_to_ids(tokens) self.max_seq_len = max(self.max_seq_len, len(token_ids)) x.append(token_ids) y.append(int(label)) pbar.update() return np.array(x), np.array(y) def _pad(self, ids): x, t = [], [] token_type_ids = [0] * self.max_seq_len for input_ids in ids: input_ids = input_ids[:min(len(input_ids), self.max_seq_len - 2)] input_ids = input_ids + [0] * (self.max_seq_len - len(input_ids)) x.append(np.array(input_ids)) t.append(token_type_ids) return np.array(x), np.array(t) ``` ## A tweak Because of a `tf.train.load_checkpoint` limitation requiring list permissions on the google storage bucket, we need to copy the pre-trained BERT weights locally. ``` bert_ckpt_dir="gs://bert_models/2018_10_18/uncased_L-12_H-768_A-12/" bert_ckpt_file = bert_ckpt_dir + "bert_model.ckpt" bert_config_file = bert_ckpt_dir + "bert_config.json" %%time bert_model_dir="2018_10_18" bert_model_name="uncased_L-12_H-768_A-12" !mkdir -p .model .model/$bert_model_name for fname in ["bert_config.json", "vocab.txt", "bert_model.ckpt.meta", "bert_model.ckpt.index", "bert_model.ckpt.data-00000-of-00001"]: cmd = f"gsutil cp gs://bert_models/{bert_model_dir}/{bert_model_name}/{fname} .model/{bert_model_name}" !$cmd !ls -la .model .model/$bert_model_name bert_ckpt_dir = os.path.join(".model/",bert_model_name) bert_ckpt_file = os.path.join(bert_ckpt_dir, "bert_model.ckpt") bert_config_file = os.path.join(bert_ckpt_dir, "bert_config.json") ``` # Preparing the Data Now let's fetch and prepare the data by taking the first `max_seq_len` tokens after tokenizing with the BERT tokenizer, and use `sample_size` examples for both training and testing. 
To keep training fast, we'll take a sample of about 2500 train and test examples, respectively, and use the first 128 tokens only (transformers memory and computation requirements scale quadratically with the sequence length - so with a TPU you might use `max_seq_len=512`, but on a GPU this would be too slow, and you will have to use a very small `batch_size` to fit the model into the GPU memory). ``` %%time tokenizer = FullTokenizer(vocab_file=os.path.join(bert_ckpt_dir, "vocab.txt")) data = MovieReviewData(tokenizer, sample_size=10*128*2,#5000, max_seq_len=128) print(" train_x", data.train_x.shape) print("train_x_token_types", data.train_x_token_types.shape) print(" train_y", data.train_y.shape) print(" test_x", data.test_x.shape) print(" max_seq_len", data.max_seq_len) ``` ## Adapter BERT If we decide to use [adapter-BERT](https://arxiv.org/abs/1902.00751) we need some helpers for freezing the original BERT layers. ``` def flatten_layers(root_layer): if isinstance(root_layer, keras.layers.Layer): yield root_layer for layer in root_layer._layers: for sub_layer in flatten_layers(layer): yield sub_layer def freeze_bert_layers(l_bert): """ Freezes all but LayerNorm and adapter layers - see arXiv:1902.00751. 
""" for layer in flatten_layers(l_bert): if layer.name in ["LayerNorm", "adapter-down", "adapter-up"]: layer.trainable = True elif len(layer._layers) == 0: layer.trainable = False l_bert.embeddings_layer.trainable = False def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch + 1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler ``` #Creating a model Now let's create a classification model using [adapter-BERT](https://arxiv.org/abs/1902.00751), which is a clever way of reducing the trainable parameter count, by freezing the original BERT weights, and adapting them with two FFN bottlenecks (i.e. `adapter_size` below) in every BERT layer. **N.B.** The commented out code below shows how to feed a `token_type_ids`/`segment_ids` sequence (which is not needed in our case). 
``` def create_model(max_seq_len, adapter_size=64): """Creates a classification model.""" #adapter_size = 64 # see - arXiv:1902.00751 # create the bert layer with tf.io.gfile.GFile(bert_config_file, "r") as reader: bc = StockBertConfig.from_json_string(reader.read()) bert_params = map_stock_config_to_params(bc) bert_params.adapter_size = adapter_size bert = BertModelLayer.from_params(bert_params, name="bert") input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="input_ids") # token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="token_type_ids") # output = bert([input_ids, token_type_ids]) output = bert(input_ids) print("bert shape", output.shape) cls_out = keras.layers.Lambda(lambda seq: seq[:, 0, :])(output) cls_out = keras.layers.Dropout(0.5)(cls_out) logits = keras.layers.Dense(units=768, activation="tanh")(cls_out) logits = keras.layers.Dropout(0.5)(logits) logits = keras.layers.Dense(units=2, activation="softmax")(logits) # model = keras.Model(inputs=[input_ids, token_type_ids], outputs=logits) # model.build(input_shape=[(None, max_seq_len), (None, max_seq_len)]) model = keras.Model(inputs=input_ids, outputs=logits) model.build(input_shape=(None, max_seq_len)) # load the pre-trained model weights load_stock_weights(bert, bert_ckpt_file) # freeze weights if adapter-BERT is used if adapter_size is not None: freeze_bert_layers(bert) model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")]) model.summary() return model adapter_size = None # use None to fine-tune all of BERT model = create_model(data.max_seq_len, adapter_size=adapter_size) %%time log_dir = ".log/movie_reviews/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%s") tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir) total_epoch_count = 50 # model.fit(x=(data.train_x, data.train_x_token_types), y=data.train_y, 
model.fit(x=data.train_x, y=data.train_y, validation_split=0.1, batch_size=48, shuffle=True, epochs=total_epoch_count, callbacks=[create_learning_rate_scheduler(max_learn_rate=1e-5, end_learn_rate=1e-7, warmup_epoch_count=20, total_epoch_count=total_epoch_count), keras.callbacks.EarlyStopping(patience=20, restore_best_weights=True), tensorboard_callback]) model.save_weights('./movie_reviews.h5', overwrite=True) %%time _, train_acc = model.evaluate(data.train_x, data.train_y) _, test_acc = model.evaluate(data.test_x, data.test_y) print("train acc", train_acc) print(" test acc", test_acc) ``` # Evaluation To evaluate the trained model, let's load the saved weights in a new model instance, and evaluate. ``` %%time model = create_model(data.max_seq_len, adapter_size=None) model.load_weights("movie_reviews.h5") _, train_acc = model.evaluate(data.train_x, data.train_y) _, test_acc = model.evaluate(data.test_x, data.test_y) print("train acc", train_acc) print(" test acc", test_acc) ``` # Prediction For prediction, we need to prepare the input text the same way as we did for training - tokenize, adding the special `[CLS]` and `[SEP]` token at begin and end of the token sequence, and pad to match the model input shape. ``` pred_sentences = [ "That movie was absolutely awful", "The acting was a bit lacking", "The film was creative and surprising", "Absolutely fantastic!" 
] tokenizer = FullTokenizer(vocab_file=os.path.join(bert_ckpt_dir, "vocab.txt")) pred_tokens = map(tokenizer.tokenize, pred_sentences) pred_tokens = map(lambda tok: ["[CLS]"] + tok + ["[SEP]"], pred_tokens) pred_token_ids = list(map(tokenizer.convert_tokens_to_ids, pred_tokens)) pred_token_ids = map(lambda tids: tids +[0]*(data.max_seq_len-len(tids)),pred_token_ids) pred_token_ids = np.array(list(pred_token_ids)) print('pred_token_ids', pred_token_ids.shape) res = model.predict(pred_token_ids).argmax(axis=-1) for text, sentiment in zip(pred_sentences, res): print(" text:", text) print(" res:", ["negative","positive"][sentiment]) ```
github_jupyter
# The soil production function This lesson produced by Simon M Mudd and Fiona J Clubb. Last update (13/09/2021) Back in the late 1800s, people (including G.K. Gilbert) were speculating about the rates at which soil was formed. This might depend on things like the number of burrowing animals, the rock type, the number of plants, and other factors. The soil is "produced" from somewhere, and usually it is produced from some combination of conversion of rock to sediments, addition of organic matter, and deposition of dust. But we are going to focus on the conversion of rock material to sediment that can move. Gilbert suggested that the rate soil was produced (from underlying rocks) depended on the thickness of the soil. We can make a prediction about the relationship between soil thickness and the rate soil is produced, and we call this the *soil production function*. This function has proposed to have a few different forms, which we will explore below. ## Exponential Soil Production In lectures we identified that the rate of weathering on a hillslope could be described as an exponential function that depends on soil depth, with weathering rates declining as soil gets deeper (Heimsath et al., 1997): $p = W_0 e^{-\frac{h}{\gamma}}$ where $W_0$ is the soil production rate with no soil, and $\gamma$ is a length scale that determines how quickly soil production falls off with depth. Typical values for $W_0$ are in the range 0.01-1 mm/yr [(Perron, 2017)](http://www.annualreviews.org/doi/abs/10.1146/annurev-earth-060614-105405). Note that when you're doing numerical calculations you have to be consistent with units. We will always do calculations in length units of ***metres*** (m), time units of ***years*** (y) and mass units of ***kilograms*** (kg). However we might convert to other units for the purposes of plotting sensible numbers (e.g. Weathering rates in mm/y = m/y $\times$ 1000). 
Let's take a look at what this function looks like by plotting it with python: ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np h_locs = np.arange(0,2,0.1) ``` We define the soil production function: ``` def soil_prod_function(h_locs, W_0 = 0.0001, gamma = 0.4): P = np.multiply(W_0, np.exp( - np.divide(h_locs,gamma) ) ) return P ``` Now lets plot the function and see what it looks like. In the code below there are two soil production functions that you can compare. For example if you make `W_0` twice as much as `W_02` that means the second soil production function produces soil twice as fast as the first when there is no soil. ``` plt.rcParams['figure.figsize'] = [10, 4] plt.clf() # TRY CHANGING THE FOUR PARAMETER BELOW # These two are for the first soil production function W_0 = 0.0001 gamma = 0.4 # These two are for the second soil production function W_02 = 0.0002 gamma2 = 0.4 # This bit calculates the functions P = soil_prod_function(h_locs, W_0 = W_0, gamma = gamma) P2 = soil_prod_function(h_locs, W_0 = W_02, gamma = gamma2) # The rest of this stuff makes the figure f, ax = plt.subplots(1, 1) ax.plot(h_locs, P*1000,label="P1") ax.plot(h_locs, P2*1000,label="P2") ax.set_xlabel("Soil thickness ($m$)") ax.set_ylabel("Soil production (mm/yr)") plt.title("Two soil production function. Try playing with the parameters!") ax.legend() plt.tight_layout() ``` ## The peaked soil production function We also discussed in the lecture an alternative way in which soil may be produced: where there are very slow rates of soil production where there is bare bedrock, then soil peaks at some intermediate thickness before decreasing exponentially with increasing soil thickness. This model dates back to Gilbert (1877), and makes intuitive sense: water is needed for weathering processes as we discussed today. If there is bare bedrock, water is quickly transported through overland flow and little weathering can take place. 
If there is too much soil, then it's unlikely to be fully saturated down to the bedrock--soil interface. In this section, we will make some plots of a hypothetical peaked (or humped) soil production function. We will use the theoretical formulation from [Cox (1980)](https://onlinelibrary.wiley.com/doi/abs/10.1002/esp.3760050305) to calculate the weathering rate for a range of soil depths. This is a bit more complicated than the exponential function and has a bigger range of parameters: \begin{equation} W = W_0 (\alpha e^{-kh}) + (1 - \alpha)f \\ f = \Bigg(1 + c\frac{h}{h_c} - \frac{h^2}{{h_c}^2}\Bigg) \end{equation} You should recognise some of these parameters from the exponential equation. The first part of the equation is the exponential function multiplied by a coefficient, $\alpha$. $W$ is still the weathering rate, $W_0$ is the initial rate of soil production where there is no soil, and $h$ is soil depth. There are two new parameters: $h_c$ is a critical soil depth (m), and $c$ is an empirical constant. Ahnert (1977) suggests that $c$ might vary between 1.7 - 2.3, $h_c$ might vary between 0.6 - 1.5, and $\alpha$ between 0 - 1. If $\alpha = 1$, then the relationship is simply the exponential function. 
``` # first, let's clear the original figure plt.clf() # make a new figure fig, ax = plt.subplots() k = 1 # define the critical depth for soil production h_c = 0.5 #metres # define the initial rate of soil production W_0 = 0.0005 #m/year # define the constant c c = 2 #dimensionless # define alpha alpha = 0.2 # calculate the weathering rate for the range of soil depths, h f = (1 + c*(h_locs/h_c) - h_locs**2/h_c**2) W = W_0 * (alpha * np.exp(-k*h_locs) + (1 - alpha)*f) # plot the new result with a blue dashed line ax.plot(h_locs,W*1000.,'--', color='blue', label = 'Peaked function') # add a legend plt.legend(loc = 'upper right') # set the y limit of the humped function to 0 (so we don't get negative weathering rates), and set the axis labels ax.set_ylim(0,) plt.xlabel("Soil Depth (m)") plt.ylabel("Weathering Rate (mm/y)") plt.title("The peaked soil production function") plt.tight_layout() ``` Optional Exercise 1 --- 1. Have a play around and try to change some of the parameters in the peaked function (simply modify in the code block above). How does this affect the curve? 2. Try to make a plot with the exponential and peaked functions on the same set of axes, so you can compare them (HINT - copy the line that creates the exponential soil production function into the code block above, and then give it a different colour). --- --- ## Optional Exercise 2 <p>Create a figure from the practical today that shows the difference between the peaked and exponential soil production functions for different initial soil production rates. You should write a figure caption that annotates what your soil production plot is showing. The caption should be a paragraph of text that describes each line, and the parameters that have been used to create them, and offers a brief explanation of how the parameters used influence rates of soil production. 
For an indication of the level of detail required, you could look at examples of captions to figures in journal articles, such as Figure 3 in [Heimsath et al. (2012)](https://www.nature.com/ngeo/journal/v5/n3/pdf/ngeo1380.pdf). You can use any program you like, such as Microsoft Word, to create your figure. **Note**: the exercises from the practicals in this module will not be marked, but they are all teaching you important skills that will be used in the summative assessment. I would therefore really encourage you to engage with them. I will go over the answers and discuss the exercises at the start of the next session. For your independent project, you will be expected to present 5 figures with captions, so this is a good chance to practice how to write a good figure caption!
github_jupyter
# Smart signatures with ASA #### 06.3 Writing Smart Contracts ##### Peter Gruber (peter.gruber@usi.ch) 2022-01-12 * Use Smart Signatures with ASAs * Design a contract for token burning ## Setup See notebook 04.1, the lines below will always automatically load functions in `algo_util.py`, the five accounts and the Purestake credentials ``` # Loading shared code and credentials import sys, os codepath = '..'+os.path.sep+'..'+os.path.sep+'sharedCode' sys.path.append(codepath) from algo_util import * cred = load_credentials() # Shortcuts to directly access the 3 main accounts MyAlgo = cred['MyAlgo'] Alice = cred['Alice'] Bob = cred['Bob'] Charlie = cred['Charlie'] Dina = cred['Dina'] from algosdk import account, mnemonic from algosdk.v2client import algod from algosdk.future import transaction from algosdk.future.transaction import PaymentTxn from algosdk.future.transaction import AssetConfigTxn, AssetTransferTxn, AssetFreezeTxn from algosdk.future.transaction import LogicSig, LogicSigTransaction import algosdk.error import json import base64 import hashlib from pyteal import * # Initialize the algod client (Testnet or Mainnet) algod_client = algod.AlgodClient(algod_token='', algod_address=cred['algod_test'], headers=cred['purestake_token']) print(MyAlgo['public']) print(Alice['public']) print(Bob['public']) print(Charlie['public']) ``` #### Check Purestake API ``` last_block = algod_client.status()["last-round"] print(f"Last committed block is: {last_block}") asset_holdings_df(algod_client,MyAlgo['public']) ``` ## Burning Burning seems simple: send money to a contract that always says "no".<br> **Question:** how can a contract that always says "no" opt into an ASA? #### Step 1: Define conditions in Pyteal * Want to design a Burn contract for WSC coin * Need WSC coin index * For all Txn fields, see https://pyteal.readthedocs.io/en/stable/accessing_transaction_field.html ``` # prepare the burn condition WSC_idx = 71140107 # <---------- change this to your WSC coin !!!!! 
burn_cond = And ( Txn.type_enum() == TxnType.AssetTransfer, # Must be an "asset transfer" transaction Txn.amount() == Int(0), # Do not pay out ALGOs Txn.asset_amount() == Int(0), # Do also not pay out ASAs Txn.xfer_asset() == Int(WSC_idx) # Specific asset index ) # prepare random condition import random a = Int( random.randrange(2**32-1) ) random_cond = ( a == a ) fee_cond = Txn.fee() <= Int(1000) safety_cond = And( # Txn.type_enum() == TxnType.Payment, # This standard payment condition makes no sense here Txn.close_remainder_to() == Global.zero_address(), Txn.rekey_to() == Global.zero_address(), ) burn_pyteal = And( burn_cond, random_cond, fee_cond, safety_cond ) ``` ##### Step 2: Compile PyTeal -> Teal ``` burn_teal = compileTeal(burn_pyteal, Mode.Signature, version=3) print(burn_teal) ``` #### Step 3: Compile Teal -> Bytecode for AVM ``` Burn = algod_client.compile(burn_teal) ``` #### Step 4: Fund burning contract * The Burn contract has to pay TX fees for the opt-in transaction ``` # Step 1: prepare transaction sp = algod_client.suggested_params() amt = int(0.1*1e6) + int(0.1*1e6) + int(0.001*1e6) # Min holdings + min holdings for 1 ASA + TX fee txn = transaction.PaymentTxn(sender=Alice['public'], sp=sp, receiver=Burn['hash'], amt=amt) # Step 2+3+4: sign and send and wait ... 
stxn = txn.sign(Alice['private']) txid = algod_client.send_transaction(stxn) txinfo = wait_for_confirmation(algod_client, txid) ``` #### Step 5: Burn contract opts-into WSC coin to allow burning * This is an AssetTransferTx, that is signed by the Smart Signature * Remember, opt-in is a transfer of zero units of an ASA to oneself ``` # Step 5.1: Prepare sp = algod_client.suggested_params() txn = AssetTransferTxn(Burn['hash'], sp, Burn['hash'], 0, WSC_idx) # Step 5.2: Sign encodedProg = Burn['result'].encode() program = base64.decodebytes(encodedProg) lsig = LogicSig(program) stxn = LogicSigTransaction(txn, lsig) # Step 5.3 Send txid = algod_client.send_transaction(stxn) # Step 5.4 Wait for ... txinfo = wait_for_confirmation(algod_client, txid) print('http://testnet.algoexplorer.io/tx/'+txid) ``` ## The Burn contract is now ready for use #### Manual burn transaction * MyAlgo burns 8 WSC coins * Simple AssetTransferTxn ``` # WAIT a minute ... how many decimals does the WSC have? WSC_decimals = algod_client.asset_info(WSC_idx)['params']['decimals'] print(WSC_decimals) # Step 1: prepare transaction sp = algod_client.suggested_params() amt = int( 8 * 10**WSC_decimals ) # <---------8 WSC coins in SMALL unit txn = AssetTransferTxn(sender=MyAlgo['public'], sp=sp, receiver=Burn['hash'], amt=amt, index=WSC_idx) # Step 2+3+4: sign and send and wait ... stxn = txn.sign(MyAlgo['private']) txid = algod_client.send_transaction(stxn) txinfo = wait_for_confirmation(algod_client, txid) ``` ### QR code for burning * Burning via QR code ``` # URL for burning WITH asset index url = 'algorand://' + Burn['hash'] + '?amount=1000&asset='+str(WSC_idx)+'&note=Burning' print(url) import qrcode qr = qrcode.QRCode(version=1,box_size=5,border=4) qr.add_data(url) qr.make(fit=True) qr.make_image(fill_color="black", back_color="white") ``` ## Exercise 1 * Reconsider the **burn condition** and discuss possible safety issues. 
* Especially, discuss the fact that repeated transactions with zero ALGOs or with zero WSC are possible ```python burn_cond = And ( Txn.type_enum() == TxnType.AssetTransfer, # Must be an "asset transfer" TX Txn.amount() == Int(0), # Do not pay out ALGOs Txn.asset_amount() == Int(0), # Do also not pay out ASAs Txn.xfer_asset() == Int(WSC_idx) # Specific asset index ) ``` ## Exercise 2 * Reconsider the **safety condition** * Why was one common safety condition commented out? ```python safety_cond = And( # Txn.type_enum() == TxnType.Payment, # <--- why??? Txn.close_remainder_to() == Global.zero_address(), Txn.rekey_to() == Global.zero_address(), ) ```
github_jupyter
# Homework 4 - Reinforcement Learning in a Smart Factory Optimization of the robot's route for pick-up and storage of items in a warehouse: 1. Implement a reinforcement-learning based algorithm 2. The robot is the agent and decides where to place the next part 3. Use the markov decision process toolbox for your solution 4. Choose the best performing MDP ``` #!pip install pymdptoolbox ## Imports import mdptoolbox import pandas as pd import itertools as it import numpy as np import pickle import time from scipy import sparse ``` ## Import data ``` file_path = 'Exercise 4 - Reinforcement Learning Data - warehousetraining.txt' file_path_test= 'Exercise 4 - Reinforcement Learning Data - warehouseorder.txt' # Name the data columns correctly data = pd.read_csv(file_path, sep='\t', names=["action", "color_state"]) test_data = pd.read_csv(file_path_test, sep='\t', names=["action", "color_state"]) #print(data.info()) print(data.dtypes) data.head() data.groupby(["action", "color_state"]).count() actions = list(np.unique(data.action)) #['restore', 'store'] item_colors = list(np.unique(data.color_state)) #['blue' 'red' 'white'] train_data = np.array( [[actions.index(v[0]), item_colors.index(v[1])] for v in data.values] , dtype=int) ``` ## Reinforcement-learning based algorithm: Markov Decision Process (MDP) A MDP is a discrete time stochastic control process where the markov property is satisfied 1. Create Transitions Matrix representing the probabilities to lead from state *s0* to another state *s1* within the action *a* 2. 
Generate Reward Matrix defined reward after action *a* in state *s0* to reach state *s1* Optimize the route with following constraints: - Size of warehouse is {1..3} x {1..3} - Separate start/stop position outside the 3x3 storage space where the robot have to go at the end of storage and pick-up - The first position the robot can move into is always (1,1) - Robot can move to adjacent fields - Robot cannot move diagonally - There are three types of items: (white, blue, red) ``` # Set Markov Descision Process (MDP) Constrains warehouse_size = [2,2] #2x2 grid grid_size = np.prod(warehouse_size) grids_cells = [(i+1,j+1) for j in range(warehouse_size[1]) for i in range(warehouse_size[0]) ] # The actions is equal to grid size actions_moves = grid_size.copy() items = len(item_colors) + 1 # Consider also no item moves = len(actions)*len(item_colors) #Total posibilities of item in any satate on the field items_grid = items ** grid_size total_states = items_grid * moves print("The total number of states is: ", total_states) item_states_ID = dict((k,v) for v,k in enumerate( ["noitem"] + item_colors ))# dict.fromkeys(item_colors + ["noitem"], 0) item_states_ID # Create all the posible states indexing def compute_grid_index(grid_size, item_states_ID): grids = [s for s in it.product(item_states_ID.values(), repeat=grid_size)] return np.array(grids) grids = compute_grid_index(grid_size, item_states_ID) print("Number of posible states: ", len(grids)) grid_states= pd.DataFrame(data=grids, columns=grids_cells) grid_states[20:30] def generate_warehosue_states(grid_states, item_states_ID,): warehouse_states = pd.DataFrame() for k,v in item_states_ID.items(): warehouse_states[k] = np.sum(grid_states ==v, axis =1) return warehouse_states warehouse_states = generate_warehosue_states(grid_states, item_states_ID) warehouse_states[20:30] ``` ### Transition Probability Matrix (action, state, next state) ``` def create_TPM(data, grids): # Initialize TMP with shape (action, posible states, 
posible states) P = np.zeros(( actions_moves, total_states, total_states),dtype=np.float16) # Compute Each action probability as the count of each action on the data move_action_probability = np.array([a*c for a in data["action"].value_counts() / len(data) for c in data["color_state"].value_counts() / len(data) ]) for action in range(actions_moves): idx = 0 for mov in range(moves): for s in grids: for m in range(moves): if m >= (moves//2): # restore actions i = ((idx % items_grid) - (items**(actions_moves - action - 1) * (mov+1))) + (items_grid * m) else: i = ((idx % items_grid) - (items**(actions_moves - action - 1) * (mov+1))) + (items_grid * m) P[action][idx][i] = move_action_probability[m] idx += 1 return P TMP = create_TPM(data, grids) def create_rewards(moves, total_states, grid_states): distances = [sum(np.array(c) - np.array(grids_cells[0])) for c in grids_cells] rewards = dict(keys=grids_cells, values =distances ) R = np.zeros((actions_moves, total_states, )) for action in range(actions_moves): for idx, s in grid_states.iterrows(): next_state = idx//(len(grid_states)//moves) try: if(next_state < (moves//len(actions)) and s[action] == 0): reward = rewards[str(s)] elif (next_state > (moves//len(actions) ) and (s[action] == (next_state - len(actions)))): reward = 10000*rewards[str(s)] #+=100 # Invalid movements else: reward = -10000 R[action][idx] = reward except: pass return np.asarray(R).T R = create_rewards(moves, total_states, grid_states) assert TMP.shape[:-1] == R.shape[::-1], "The actions and states should match" discount = 0.9 max_iter = 750 policy = mdptoolbox.mdp.PolicyIteration(TMP, R, 0.9, max_iter=max_iter) value = mdptoolbox.mdp.ValueIteration(TMP, R, 0.9, max_iter=max_iter) value.run() policy.run() p = policy.policy iterations = policy.iter print("Policy iterations:", iterations) print("Value iterations:", value.iter) ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import yaml import sys sys.path.insert(0,'/Users/ruhl/code/jbolo/python/') from jbolo_funcs import * !ls yamls FIXED_PSAT = True def pwv_vary(yamlfile,site,def_pwv): #expt_yaml = 'yamls/SAT_LFMF_20211210.yaml' sim = yaml.safe_load(open(yamlfile)) # overload yamls, so we can easily change where a telescope is sited. sim['sources']['atmosphere']['site']=site sim['sources']['atmosphere']['pwv']=def_pwv #used to find Psats if not specified, in initial run. #sim['optical_elements']['window']['thickness']=0.02 chlist = list(sim['channels'].keys()) if FIXED_PSAT: if (sim['bolo_config']['psat_method'] != 'specified'): run_optics(sim) run_bolos(sim) sim['bolo_config']['psat_method'] = 'specified' for ch in chlist: sim['channels'][ch]['psat']=sim['outputs'][ch]['P_sat'] out_NET = {} out_Popt = {} for ch in chlist: out_NET[ch] = np.array([]) out_Popt[ch]= np.array([]) out_pwv = np.arange(0,5000,100) for pwv in out_pwv: sim['sources']['atmosphere']['pwv'] = pwv run_optics(sim) run_bolos(sim) for ch in chlist: out_NET[ch] = np.append(out_NET[ch],sim['outputs'][ch]['NET_NC_total']) out_Popt[ch] = np.append(out_Popt[ch],sim['outputs'][ch]['P_opt']) return(out_pwv, out_Popt, out_NET,chlist) %matplotlib inline plt.rcParams.update({'font.size': 18}) plt.rcParams['figure.figsize'] = [16, 6] yamlfile = 'yamls/SAT_LFMF_20220216.yaml' fig, axs = plt.subplots(1,2) # check whether sim exists, and if it does, clear it so we don't get confused. 
if "sim" in locals(): sim.clear() pwv, Popt_chile, NET_chile, chlist = pwv_vary(yamlfile, 'Atacama',993) pwv, Popt_pole, NET_pole, chlist = pwv_vary(yamlfile, 'Pole',321) chcolor = {'LF_1':'r', 'LF_2':'m', 'MF_1_1':'g', 'MF_2_1':'g', 'MF_1_2':'k', 'MF_2_2':'k', 'HF_1':'c','HF_2':'b'} for ch in chlist: ltype = chcolor[ch] labelstr = 'Chile, '+ch axs[0].plot(pwv,Popt_chile[ch]*1e12, ltype,label=labelstr) axs[1].plot(pwv,NET_chile[ch]*1e6, ltype,label=labelstr) ltype = ltype+'--' labelstr = 'Pole, '+ch axs[0].plot(pwv,Popt_pole[ch]*1e12,ltype, label=labelstr) axs[1].plot(pwv,NET_pole[ch]*1e6,ltype, label = labelstr) axs[0].set_xlabel('pwv (mm)') axs[0].set_ylabel('Popt (pW)') #axs[0].legend(fontsize=10,loc='upper right') axs[1].set_xlabel('pwv (mm)') axs[1].set_ylabel('NET (uKrtsec)') axs[1].legend(fontsize=10,loc='upper right') #axs[1].text(500,500,'hey') ``` # pwv histograms ``` #### Sort the pwv's into a single time-ordered vector and plot histogram; # This lets you plot the timestream if desired. 
import pickle pwvs = np.array([]) d = pickle.load(open("atmos/Merra2_atmos_atacama.pck", "rb")) for ii in np.arange(0,290): for month in np.arange(4,12): # skip jan/feb/march for hour in np.arange(0,24): pwvs = np.append(pwvs,d[month]['TQV'][hour][ii]) print('Atacama median pwv: {0:5.3f} mm'.format(np.median(pwvs))) #pwv_bins = np.arange(0,10,0.1) pwv_bins = pwv/1000 A_chile = plt.hist(pwvs, bins=pwv_bins, histtype='step',density=True, label='Atacama') pwvs = np.array([]) d = pickle.load(open("atmos/Merra2_atmos_pole.pck", "rb")) for ii in np.arange(0,290): for month in np.arange(0,11): # skip december for hour in np.arange(0,24): pwvs = np.append(pwvs,d[month]['TQV'][hour][ii]) print('Pole median pwv: {0:5.3f} mm'.format(np.median(pwvs))) A_pole = plt.hist(pwvs, bins=pwv_bins, histtype='step',density=True, label='Pole') plt.xlabel('pwv') plt.ylabel('Probability') plt.legend() # Histogram has fewer points than the pwv vector; its binning vector sets the left side of each bin, and right side of last bin. # Bin centers are offset by a half a bin. 
pwv_bin_centers = (A_chile[1][0:-1]+A_chile[1][1:])/2 # Linearly interpolate NET's to those same bin centers, and calculate weights w_chile = {} w_pole = {} w_ratio = {} cuts = {'LF_1':3, 'LF_2':3, 'MF_1_1':2, 'MF_2_1':2, 'MF_1_2':1.5, 'MF_2_2':1.5, 'HF_1':1.0, 'HF_2':1.0} for ch in chlist: cut_level = cuts[ch] cut_vector = np.where(pwv_bin_centers<cut_level,1,0) NET_chile_onbins = np.interp(pwv_bin_centers,pwv, NET_chile[ch]) NET_pole_onbins = np.interp(pwv_bin_centers,pwv, NET_pole[ch]) w_chile[ch] = (A_chile[0]*cut_vector)/NET_chile_onbins**2 w_pole[ch] = (A_pole[0]*cut_vector)/NET_pole_onbins**2 w_chile_tot = np.trapz(w_chile[ch],pwv_bin_centers) w_pole_tot = np.trapz(w_pole[ch],pwv_bin_centers) w_ratio[ch]=w_chile_tot/w_pole_tot print(ch) print(' Chile weight: {0:9.0f}'.format(w_chile_tot)) print(' Pole weight: {0:9.0f}'.format(w_pole_tot)) print(' Chile/Pole weight ratio: {0:6.3f}'.format(w_ratio[ch])) #Make a plot plt.rcParams['figure.figsize'] = [16, 4] n_plots = len(chlist) n_cols = 2 n_rows = int(np.ceil(n_plots/n_cols)) for ii in np.arange(0,n_plots): plt.subplot(n_rows,n_cols,ii+1) ch = chlist[ii] label1 = 'Chile: '+ch label2 = 'Pole: '+ch plt.plot(pwv_bin_centers,w_chile[ch], label = label1) plt.plot(pwv_bin_centers,w_pole[ch], label = label2) plt.legend() for ch in chlist: if 'LF' in ch: ntubes = 5 elif 'MF' in ch: ntubes = 3 elif 'HF' in ch: ntubes = 2 myvecname = {'vname':'aa'} aa = np.array([1,2,4]) print(myvecname['vname']) ```
github_jupyter
--- _You are currently looking at **version 1.2** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-social-network-analysis/resources/yPcBs) course resource._ --- # Assignment 3 In this assignment you will explore measures of centrality on two networks, a friendship network in Part 1, and a blog network in Part 2. ## Part 1 Answer questions 1-4 using the network `G1`, a network of friendships at a university department. Each node corresponds to a person, and an edge indicates friendship. *The network has been loaded as networkx graph object `G1`.* ``` import networkx as nx G1 = nx.read_gml('friendships.gml') ``` ### Question 1 Find the degree centrality, closeness centrality, and normalized betweenness centrality (excluding endpoints) of node 100. *This function should return a tuple of floats `(degree_centrality, closeness_centrality, betweenness_centrality)`.* ``` def answer_one(): # Your Code Here degCent = nx.degree_centrality(G1) closeCent = nx.closeness_centrality(G1) btwnCent = nx.betweenness_centrality(G1, normalized = True, endpoints = False) return degCent[100], closeCent[100], btwnCent[100] ``` <br> #### For Questions 2, 3, and 4, assume that you do not know anything about the structure of the network, except for all the centrality values of the nodes. That is, use one of the covered centrality measures to rank the nodes and find the most appropriate candidate. <br> ### Question 2 Suppose you are employed by an online shopping website and are tasked with selecting one user in network G1 to send an online shopping voucher to. We expect that the user who receives the voucher will send it to their friends in the network. You want the voucher to reach as many nodes as possible. 
The voucher can be forwarded to multiple users at the same time, but the travel distance of the voucher is limited to one step, which means if the voucher travels more than one step in this network, it is no longer valid. Apply your knowledge in network centrality to select the best candidate for the voucher. *This function should return an integer, the name of the node.* ``` def answer_two(): degree = nx.degree_centrality(G1) max_node = None max_degree = -1 for key, value in degree.items(): if value > max_degree: max_degree = value max_node = key return max_node ``` ### Question 3 Now the limit of the voucher’s travel distance has been removed. Because the network is connected, regardless of who you pick, every node in the network will eventually receive the voucher. However, we now want to ensure that the voucher reaches the nodes in the lowest average number of hops. How would you change your selection strategy? Write a function to tell us who is the best candidate in the network under this condition. *This function should return an integer, the name of the node.* ``` def answer_three(): # Your Code Here degree = nx.closeness_centrality(G1) max_node = None max_degree = -1 for key, value in degree.items(): if value > max_degree: max_degree = value max_node = key return max_node ``` ### Question 4 Assume the restriction on the voucher’s travel distance is still removed, but now a competitor has developed a strategy to remove a person from the network in order to disrupt the distribution of your company’s voucher. Your competitor is specifically targeting people who are often bridges of information flow between other pairs of people. Identify the single riskiest person to be removed under your competitor’s strategy? 
*This function should return an integer, the name of the node.* ``` def answer_four(): degree = nx.betweenness_centrality(G1) max_node = None max_degree = -1 for key, value in degree.items(): if value > max_degree: max_degree = value max_node = key return max_node ``` ## Part 2 `G2` is a directed network of political blogs, where nodes correspond to a blog and edges correspond to links between blogs. Use your knowledge of PageRank and HITS to answer Questions 5-9. ``` G2 = nx.read_gml('blogs.gml') ``` ### Question 5 Apply the Scaled Page Rank Algorithm to this network. Find the Page Rank of node 'realclearpolitics.com' with damping value 0.85. *This function should return a float.* ``` def answer_five(): pr = nx.pagerank(G2,alpha=0.85) return pr['realclearpolitics.com'] ``` ### Question 6 Apply the Scaled Page Rank Algorithm to this network with damping value 0.85. Find the 5 nodes with highest Page Rank. *This function should return a list of the top 5 blogs in descending order of Page Rank.* ``` import operator def answer_six(): pr = nx.pagerank(G2,alpha=0.85) pr_list = sorted(pr.items(), key=operator.itemgetter(1), reverse=True)[0:5] ans = [] for i in pr_list: ans.append(i[0]) return ans ``` ### Question 7 Apply the HITS Algorithm to the network to find the hub and authority scores of node 'realclearpolitics.com'. *Your result should return a tuple of floats `(hub_score, authority_score)`.* ``` def answer_seven(): hits = nx.hits(G2) node = 'realclearpolitics.com' print (len(hits)) return hits[0][node], hits[1][node] ``` ### Question 8 Apply the HITS Algorithm to this network to find the 5 nodes with highest hub scores. *This function should return a list of the top 5 blogs in descending order of hub scores.* ``` def answer_eight(): hits = nx.hits(G2) hubs = hits[0] return sorted(hubs.keys(), key=lambda key:hubs[key], reverse=True)[:5] ``` ### Question 9 Apply the HITS Algorithm to this network to find the 5 nodes with highest authority scores. 
*This function should return a list of the top 5 blogs in descending order of authority scores.* ``` def answer_nine(): hits = nx.hits(G2) authorities = hits[1] return sorted(authorities.keys(), key=lambda key:authorities[key], reverse=True)[:5] ```
github_jupyter
# Practice for understanding image classification with neural network - Single layer neural network with gradient descent ## 1) Import Packages ``` from PIL import Image import numpy as np import matplotlib.pyplot as plt import random from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder import math import sklearn.metrics as metrics ``` ## 2) Make Dataset ``` x_orig = [] y_orig = np.zeros((1,100)) for i in range(1,501): if i <= 100 : folder = 0 elif i <=200 : folder = 1 elif i <=300 : folder = 2 elif i <=400 : folder = 3 else : folder = 4 img = np.array(Image.open('dataset/{0}/{1}.jpg'.format(folder,i))) img = Image.fromarray(img).convert('L') # gray data = img.resize((64,64)) data = np.array(data) x_orig.append(data) for i in range(1,5): y_orig = np.append(y_orig, np.full((1, 100),i), axis = 1) x_orig = np.array(x_orig) print(x_orig.shape) print(y_orig.shape) # Random shuffle s = np.arange(x_orig.shape[0]) np.random.shuffle(s) x_shuffle = x_orig[s,:] y_shuffle = y_orig[:,s] print(x_shuffle.shape) print(y_shuffle.shape) plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(x_shuffle[i,:]) plt.xlabel(y_shuffle[:,i]) plt.show() # Split train and test datasets x_train_orig, x_test_orig, y_train_orig, y_test_orig = train_test_split(x_shuffle,y_shuffle.T, test_size=0.2, shuffle=True, random_state=1004) print(x_train_orig.shape) print (y_train_orig.shape) # Flatten the training and test images x_train_flatten = x_train_orig.reshape(x_train_orig.shape[0], -1).T x_test_flatten = x_test_orig.reshape(x_test_orig.shape[0], -1).T # Normalize image vectors x_train = x_train_flatten/255. x_test = x_test_flatten/255. 
# Convert training and test labels to one hot matrices enc = OneHotEncoder() y1 = y_train_orig.reshape(-1,1) enc.fit(y1) y_train = enc.transform(y1).toarray() y_train = y_train.T y2 = y_test_orig.reshape(-1,1) enc.fit(y2) y_test = enc.transform(y2).toarray() y_test = y_test.T # Explore dataset print ("number of training examples = " + str(x_train.shape[1])) print ("number of test examples = " + str(x_test.shape[1])) print ("x_train shape: " + str(x_train.shape)) print ("y_train shape: " + str(y_train.shape)) print ("x_test shape: " + str(x_test.shape)) print ("y_test shape: " + str(y_test.shape)) ``` ## 3) Define required functions ``` def initialize_parameters(nx, ny): """ Argument: nx -- size of the input layer (4096) ny -- size of the output layer (3) Returns: W -- weight matrix of shape (ny, nx) b -- bias vector of shape (ny, 1) """ np.random.seed(1) W = np.random.randn(ny,nx)*0.01 b = np.zeros((ny,1)) assert(W.shape == (ny, nx)) assert(b.shape == (ny, 1)) return W, b def softmax(Z): # compute the softmax activation; SUBTRACT the max for numerical stability (softmax is shift-invariant, and adding the max would overflow np.exp for large positive logits) S = np.exp(Z - np.max(Z)) / np.sum(np.exp(Z - np.max(Z)), axis = 0) return S def classlabel(Z): # probabilities back into class labels y_hat = Z.argmax(axis=0) return y_hat def propagate(W, b, X, Y): m = X.shape[1] # Forward Propagation Z = np.dot(W, X)+ b A = softmax(Z) # compute activation cost = (-1/m) * np.sum(Y * np.log(A)) # compute cost (Cross_entropy) # Backward propagation dW = (1/m) * (np.dot(X,(A-Y).T)).T db = (1/m) * (np.sum(A-Y)) grads = {"dW": dW, "db": db} return grads, cost ``` ## 4) Single-Layer Neural Network with Gradient Descent ``` def optimize(X, Y, num_iterations, learning_rate, print_cost = False): costs = [] W, b = initialize_parameters(4096,5) for i in range(num_iterations): grads, cost = propagate(W,b,X,Y) dW = grads["dW"] db = grads["db"] W = W - (learning_rate) * dW b = b - (learning_rate) * db # Record the costs for plotting if i % 100 == 0: costs.append(cost) # Print the cost every 100 training iterations if 
print_cost and i % 200 == 0: print ("Cost after iteration %i: %f" %(i, cost)) # plot the cost plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (per 200)') plt.title("Learning rate =" + str(learning_rate)) plt.show() # Lets save the trainded parameters in a variable params = {"W": W, "b": b} grads = {"dW": dW, "db": db} return params, grads, costs params, grads, costs = optimize(x_train, y_train, num_iterations= 1000, learning_rate = 0.01, print_cost = True) print ("W = " + str(params["W"])) print ("b = " + str(params["b"])) ``` ## 5) Accuracy Analysis ``` def predict(W, b, X) : m = X.shape[1] # Compute "A" predicting the probabilities Z = np.dot(W, X)+ b A = softmax(Z) # Convert probabilities A to actual predictions y_prediction = A.argmax(axis=0) return y_prediction # Predict test/train set W1 = params['W'] b1 = params['b'] y_prediction_train = predict(W1, b1, x_train) y_prediction_test = predict(W1, b1, x_test) print(y_prediction_train) print(y_prediction_test) # Print train/test accuracy print("train accuracy : ", metrics.accuracy_score(y_prediction_train, y_train_orig)) print("test accuracy : ", metrics.accuracy_score(y_prediction_test, y_test_orig)) ```
github_jupyter
# Dataoverføring og statistikk ## Lese inn en fil `pylab.loadtxt(<filename>[, delimiter=hva man skiller dataene med (',')][, skiprows=antall linjer å hoppe over][, dtype=datatype])` ``` import pylab data = pylab.loadtxt("sunspots.csv", delimiter=",", skiprows=1) nr = data[:, 0] verdi = data[:, 1] pylab.plot(nr, verdi) pylab.show() maaned = pylab.linspace(1, 12, 12) solflekker = [0] * 12 for i in range(0, len(nr)): month = i % 12 solflekker[month] += verdi[i] pylab.plot(maaned, solflekker) pylab.show() ``` ## Statistisk analyse ### Gjennomsnitt summen av alle verdiene delt på antall verdier. $$\bar x = \frac{1}{n}\sum_{i=1}^{n}x_i$$ `pylab.mean(<liste med verdier>)` ### Standardavvik et mål på spredningen av verdiene rundt gjennomsnittet $$s = \sigma = \sqrt{\frac{1}{n} \sum_{i=1}^{n}(x_i - \bar x)^2}$$ `pylab.std(<liste med verdier>)` ``` import pylab alder = [15, 24, 16, 18, 19, 42, 15, 20, 16, 17, 19] snitt = pylab.mean(alder) print("Gjennomsnittsalder: {}".format(snitt)) avvik = pylab.std(alder) print("StandardAvvik: {}".format(avvik)) ``` ## Regresjon Finne en modell for et datasett. Dette gjøres ved å finne en funksjon (ofte en polynomfunksjon) som passer best mulig med punktene. Dette blir en modell, som kan stemme bra. Likevel kan den forutsi veldig rare verdier langt utenfor datasettet. ``` import pylab T = [0, 20, 40, 60, 80, 100] sol_NaCl = [35.7, 35.9, 36.4, 37.1, 38.0, 39.2] pylab.scatter(T, sol_NaCl) grad = 4 reg_NaCl = pylab.polyfit(T, sol_NaCl, grad) print(reg_NaCl) def polynomFunction(polynom, x): ret = 0 for i in range(0, len(polynom)): g = len(polynom) - 1 - i ret += polynom[i] * x ** g return ret x = pylab.linspace(0, 150) y1 = polynomFunction(reg_NaCl, x) pylab.plot(x, y1) pylab.show() ``` ### R2-test R2 score gir en verdi på hvor godt regresjonen stemmer med dataene. Går fra 0 til 1, der 0 er ufattelig dårlig, og 1 er akkurat riktig. 
Denne kan brukes for å finne ut om regresjonen er god, MEN den kan vise at den er veldig god fordi den passer veldig godt med dataene, men passer ikke i det hele tatt mellom datapunktene. ``` import pylab from sklearn.metrics import r2_score T = [0, 20, 40, 60, 80, 100] sol_NaCl = [35.7, 35.9, 36.4, 37.1, 38.0, 39.2] pylab.scatter(T, sol_NaCl) grad = 4 reg_NaCl = pylab.polyfit(T, sol_NaCl, grad) def polynomFunction(polynom, x): ret = 0 for i in range(0, len(polynom)): g = len(polynom) - 1 - i ret += polynom[i] * x ** g return ret x = pylab.linspace(0, 150) y = polynomFunction(reg_NaCl, x) pylab.plot(x, y) pylab.show() # R2 test R2y = polynomFunction(reg_NaCl, pylab.array(T)) R2 = r2_score(sol_NaCl, R2y) print("R2 test: {}".format(R2)) ``` ### Ekstrapolering Forutsi senere verdier. Selv om R2-verdien er veldig god, trenger den ikke være god på senere verdier. ## Diagrammer `errorbar` kan brukes for å lage et usikkerhetsplott. Det gir feilmargin-barer på hvert punkt ### kakediagram og søylediagram ``` import pylab fag = ["R2", "S2", "Kjemi 2", "Fysikk 2", "Tekforsk", "Matematikk X", "Biologi 2"] antall = [110, 25, 74, 65, 10, 3, 45] pylab.pie(antall, labels=fag) pylab.show() import pylab hoyder = [1.79, 1.80, 1.60, 1.75, 1.65, 1.76, 1.83, 1.71, 1.72] pylab.hist(hoyder) pylab.show() from pylab import * x = linspace(-3, 3, 18) y = 5 * exp(-x**2) * sin(6 * x) errorbar(x, y, yerr = exp(-x**2) + .5) show() ```
github_jupyter
# Competition coefficient ``` # Housekeeping library(car) library(ggplot2) library(MASS) library(mgcv) library(nlme) library(reshape2) library(scales) library(tidyr) source("../source.R") # Read in data species_composition = read.table("../../../data/amplicon/species_composition_relative_abundance.txt", sep = "\t", header = T, row.names = 1) metadata = read.table("../../../data/amplicon/metadata.txt", sep = "\t", header = T, row.names = 1) # Extract regime shift data without predation x = metadata$Experiment != "FiltrateExp" & # keep only regime shift data metadata$Predation != 1 & # exclude predation metadata$Immigration != "stock" # exclude stock # Subset species_composition = species_composition[x,] # keep only species with data species_composition = species_composition[,colSums(species_composition)>0] metadata = metadata[x,-c(3, 6)] # remove redundant columns # metadata$ID = paste(metadata$Streptomycin, metadata$Immigration, metadata$Replicate, sep = "_") # metadata$ID = as.factor(metadata$ID) species_composition$SAMPLE = rownames(species_composition) metadata$SAMPLE = rownames(metadata) df = merge(species_composition, metadata, all = T) dim(df) head(df) df2 = melt(df[,-1], id.vars = c("Replicate", "Streptomycin", "Immigration", "Time_point")) colnames(df2) = c("Replicate", "Streptomycin", "Immigration", "Time_point", "Species", "Abundance") head(df2) df3 = spread(df2, Time_point, Abundance) colnames(df3) = c("Replicate", "Streptomycin", "Immigration", "Species", "ABUND16", "ABUND32", "ABUND48") head(df3) ``` ## Compute selection coefficient The slope of a least square regression line (linear model) for logit-transformed allele frequency trajectories (here consisting of two time points) gives the selection coefficient $s = \frac {logit(f(t_2)) - logit(f(t_1))} {t_2 - t_1}$ where $f(t)$ is the frequency of a mutation at time $t$ (https://www.ncbi.nlm.nih.gov/pubmed/28961717). Fitness $W$ is then given by $W = s + 1$. 
These metrics are used here for changes in the relative abundance of species and denote the competitive ability of species. ``` # Convert zero abundance to 10^-6 (order of magnitude lower than lowest detected abundance) # to allow estimation of selection coefficients extinction = df3[df3$ABUND32 == 0 | df3$ABUND48 == 0,] df3 = df3[df3$ABUND16 != 0 & df3$ABUND32 != 0 & df3$ABUND48 != 0,] #df3$ABUND16 = ifelse(df3$ABUND16 == 0, 0.000001, df3$ABUND16) #df3$ABUND32 = ifelse(df3$ABUND32 == 0, 0.000001, df3$ABUND32) #df3$ABUND48 = ifelse(df3$ABUND48 == 0, 0.000001, df3$ABUND48) # Compute s df3$ABUND_S_INTERVAL1 = (logit(df3$ABUND32) - logit(df3$ABUND16))/(32-16) df3$ABUND_S_INTERVAL2 = (logit(df3$ABUND48) - logit(df3$ABUND32))/(48-32) head(df3) df3$Sample = paste(df3$Streptomycin, df3$Immigration, df3$Replicate, sep = "_") head(df3) library(lmerTest) # ABUND_S_INTERVAL1 # Model with linear regression M1 = lmer(ABUND_S_INTERVAL1 ~ Streptomycin * Immigration * Species + (1|Sample), data = df3) step(M1) # Best model M2 = lmer(ABUND_S_INTERVAL1 ~ Streptomycin * Species + (1 | Sample), data = df3) M3 = lmer(ABUND_S_INTERVAL1 ~ Streptomycin + Species + (1 | Sample), data = df3) anova(M2, M3, test = "Chi") # sig. interaction M4 = lmer(ABUND_S_INTERVAL1 ~ Streptomycin + (1 | Sample), data = df3) anova(M2, M4, test = "Chi") # sig. species effect M5 = lmer(ABUND_S_INTERVAL1 ~ Species + (1 | Sample), data = df3) anova(M2, M5, test = "Chi") # sig. 
streptomycin effect # Streptomycin affects slope depending on species # Plot ggplot(df3, aes(factor(Streptomycin), ABUND_S_INTERVAL1, colour = factor(Streptomycin))) + geom_boxplot() + facet_grid(Species~., scales = "free") + ylab("Rate of change of species") + xlab(expression(paste("Initial abundance level"))) + theme_classic() + theme(panel.spacing = unit(1, "lines"), legend.title=element_blank(), strip.background = element_blank(), strip.text = element_text(face = "italic")) # ABUND_S_INTERVAL2 M1 = lmer(ABUND_S_INTERVAL2 ~ Streptomycin * Immigration * Species + (1|Sample), data = df3) step(M1) # Best model M2 = lmer(ABUND_S_INTERVAL2 ~ Streptomycin * Immigration * Species + (1 | Sample), data = df3) M3 = lmer(ABUND_S_INTERVAL2 ~ Streptomycin + Immigration + Species + (1 | Sample), data = df3) anova(M2, M3, test = "Chi") # sig. interaction M4 = lmer(ABUND_S_INTERVAL2 ~ Streptomycin * Immigration + (1 | Sample), data = df3) anova(M2, M4, test = "Chi") # sig. species effect M5 = lmer(ABUND_S_INTERVAL2 ~ Streptomycin * Species + (1 | Sample), data = df3) anova(M2, M5, test = "Chi") # sig. immigration effect M6 = lmer(ABUND_S_INTERVAL2 ~ Species*Immigration + (1 | Sample), data = df3) anova(M2, M6, test = "Chi") # sig. 
streptomycin effect # Plot ggplot(df3, aes(factor(Streptomycin), ABUND_S_INTERVAL2)) + geom_boxplot() + facet_grid(Species~., scales = "free") + ylab("Rate of change of species") + xlab(expression(paste("Initial abundance level"))) + theme_classic() + theme(panel.spacing = unit(1, "lines"), legend.title=element_blank(), strip.background = element_blank(), strip.text = element_text(face = "italic")) # Slopes M1 = lmer(ABUND_S_INTERVAL2 ~ ABUND_S_INTERVAL1 + Species + Streptomycin + Immigration + (1|Sample), data = df3) step(M1) # Best model M2 = lmer(ABUND_S_INTERVAL2 ~ ABUND_S_INTERVAL1 + Species + Streptomycin + (1 | Sample), data = df3) # Incorporate phenotypes phenotypes = read.table("../../../data/amplicon/phenotypic_traits.txt", sep = "\t", header = T) phenotypes$Species = rownames(phenotypes) df4 = merge(df3, phenotypes, by = "Species", all = T) dim(df3) dim(df4) head(df4) df4$MIC_orig = exp(df4$Streptomycin_MIC) head(df4) # Slope 1 M1 = lmer(ABUND_S_INTERVAL1 ~ Streptomycin + Immigration + K_growth + r_growth + Aminoglycoside_resistance_gene + Carbon_sources + Streptomycin_MIC + (1|Sample), data = df4) step(M1) # Best model M2 = lmer(ABUND_S_INTERVAL1 ~ r_growth + Aminoglycoside_resistance_gene + Streptomycin_MIC + (1 | Sample), data = df4) summary(M2) # Slope 2 M1 = lmer(ABUND_S_INTERVAL2 ~ Streptomycin + Immigration + K_growth + r_growth + Aminoglycoside_resistance_gene + Carbon_sources + Streptomycin_MIC + (1|Sample), data = df4) step(M1) # Best model #M2 = lmer(ABUND_S_INTERVAL2 ~ r_growth + Aminoglycoside_resistance_gene + Streptomycin_MIC + (1 | Sample), # data = df4) #summary(M2) head(df4) df5 = melt(df4[,-c(1,2,5,6,7,10,11,14,15, 16)], id.vars = c("Streptomycin", "Immigration", "r_growth", "Streptomycin_MIC")) colnames(df5) = c("Streptomycin", "Immigration", "r_growth", "Streptomycin_MIC", "Slope_interval", "Slope") head(df5) df6 = melt(df5, id.vars = c("Streptomycin", "Immigration", "Slope_interval", "Slope")) colnames(df6) = c("Streptomycin", 
"Immigration", "Slope_interval", "Slope", "Phenotype", "Pheno_value")
head(df6)

# Edit
# Relabel the interval codes with human-readable phase names.
# Bug fix: the label was misspelled "Antibotic exposure phase", which did not
# match the level "Antibiotic exposure phase" declared in factor() below, so
# every exposure-phase row was silently coerced to NA and dropped from the plot.
df6$Slope_interval = ifelse(df6$Slope_interval == "ABUND_S_INTERVAL1",
                            "Antibiotic exposure phase", "Recovery phase")
df6$Slope_interval = factor(df6$Slope_interval,
                            levels = c("Antibiotic exposure phase", "Recovery phase"))

# Plot species competitive ability (slope) against each phenotype,
# faceted by phase and streptomycin level, with per-level linear fits.
ggplot(na.omit(df6), aes(Pheno_value, Slope, colour = factor(Streptomycin), fill = factor(Streptomycin))) +
    stat_smooth(method = "lm") +
    facet_grid(Slope_interval~Phenotype*Streptomycin, scales = "free_x") +
    ylab("Species competitive ability") +
    xlab(expression(paste("Phenotype"))) +
    scale_color_manual(values = c("#D3D3D3", "#cd6090", "#8f4364", "#522639")) +
    scale_fill_manual(values = c("#D3D3D3", "#cd6090", "#8f4364", "#522639")) +
    labs(colour="Streptomycin level", fill = "Streptomycin level") +
    theme_classic() +
    theme(panel.spacing = unit(1, "lines"),
          strip.background = element_blank())
ggsave("../../../manuscript/figures/phenotype_response.pdf", width = 12, height = 10)
```
github_jupyter
# Iterables Some steps in a neuroimaging analysis are repetitive. Running the same preprocessing on multiple subjects or doing statistical inference on multiple files. To prevent the creation of multiple individual scripts, Nipype has as execution plugin for ``Workflow``, called **``iterables``**. <img src="../static/images/iterables.png" width="240"> If you are interested in more advanced procedures, such as synchronizing multiple iterables or using conditional iterables, check out the `synchronize `and `intersource` section in the [`JoinNode`](basic_joinnodes.ipynb) notebook. ## Realistic example Let's assume we have a workflow with two nodes, node (A) does simple skull stripping, and is followed by a node (B) that does isometric smoothing. Now, let's say, that we are curious about the effect of different smoothing kernels. Therefore, we want to run the smoothing node with FWHM set to 2mm, 8mm, and 16mm. ``` from nipype import Node, Workflow from nipype.interfaces.fsl import BET, IsotropicSmooth # Initiate a skull stripping Node with BET skullstrip = Node(BET(mask=True, in_file='/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'), name="skullstrip") ``` Create a smoothing Node with IsotropicSmooth ``` isosmooth = Node(IsotropicSmooth(), name='iso_smooth') ``` Now, to use ``iterables`` and therefore smooth with different ``fwhm`` is as simple as that: ``` isosmooth.iterables = ("fwhm", [4, 8, 16]) ``` And to wrap it up. We need to create a workflow, connect the nodes and finally, can run the workflow in parallel. 
``` # Create the workflow wf = Workflow(name="smoothflow") wf.base_dir = "/output" wf.connect(skullstrip, 'out_file', isosmooth, 'in_file') # Run it in parallel (one core for each smoothing kernel) wf.run('MultiProc', plugin_args={'n_procs': 3}) ``` **Note**, that ``iterables`` is set on a specific node (``isosmooth`` in this case), but ``Workflow`` is needed to expend the graph to three subgraphs with three different versions of the ``isosmooth`` node. If we visualize the graph with ``exec``, we can see where the parallelization actually takes place. ``` # Visualize the detailed graph from IPython.display import Image wf.write_graph(graph2use='exec', format='png', simple_form=True) Image(filename='/output/smoothflow/graph_detailed.png') ``` If you look at the structure in the workflow directory, you can also see, that for each smoothing, a specific folder was created, i.e. ``_fwhm_16``. ``` !tree /output/smoothflow -I '*txt|*pklz|report*|*.json|*js|*.dot|*.html' ``` Now, let's visualize the results! 
``` from nilearn import plotting %matplotlib inline plotting.plot_anat( '/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz', title='original', display_mode='z', dim=-1, cut_coords=(-50, -35, -20, -5), annotate=False); plotting.plot_anat( '/output/smoothflow/skullstrip/sub-01_ses-test_T1w_brain.nii.gz', title='skullstripped', display_mode='z', dim=-1, cut_coords=(-50, -35, -20, -5), annotate=False); plotting.plot_anat( '/output/smoothflow/_fwhm_4/iso_smooth/sub-01_ses-test_T1w_brain_smooth.nii.gz', title='FWHM=4', display_mode='z', dim=-0.5, cut_coords=(-50, -35, -20, -5), annotate=False); plotting.plot_anat( '/output/smoothflow/_fwhm_8/iso_smooth/sub-01_ses-test_T1w_brain_smooth.nii.gz', title='FWHM=8', display_mode='z', dim=-0.5, cut_coords=(-50, -35, -20, -5), annotate=False); plotting.plot_anat( '/output/smoothflow/_fwhm_16/iso_smooth/sub-01_ses-test_T1w_brain_smooth.nii.gz', title='FWHM=16', display_mode='z', dim=-0.5, cut_coords=(-50, -35, -20, -5), annotate=False); ``` # ``IdentityInterface`` (special use case of ``iterables``) We often want to start our worflow from creating subgraphs, e.g. for running preprocessing for all subjects. We can easily do it with setting ``iterables`` on the ``IdentityInterface``. The ``IdentityInterface`` interface allows you to create ``Nodes`` that does simple identity mapping, i.e. ``Nodes`` that only work on parameters/strings. For example, you want to start your workflow by collecting anatomical files for 5 subjects. ``` # First, let's specify the list of subjects subject_list = ['01', '02', '03', '07'] ``` Now, we can create the IdentityInterface Node ``` from nipype import IdentityInterface infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource") infosource.iterables = [('subject_id', subject_list)] ``` That's it. Now, we can connect the output fields of this ``infosource`` node to ``SelectFiles`` and ``DataSink`` nodes. 
``` from os.path import join as opj from nipype.interfaces.io import SelectFiles, DataSink anat_file = opj('sub-{subject_id}', 'ses-test', 'anat', 'sub-{subject_id}_ses-test_T1w.nii.gz') templates = {'anat': anat_file} selectfiles = Node(SelectFiles(templates, base_directory='/data/ds000114'), name="selectfiles") # Datasink - creates output folder for important outputs datasink = Node(DataSink(base_directory="/output", container="datasink"), name="datasink") wf_sub = Workflow(name="choosing_subjects") wf_sub.connect(infosource, "subject_id", selectfiles, "subject_id") wf_sub.connect(selectfiles, "anat", datasink, "anat_files") wf_sub.run() ``` Now we can check that five anatomicl images are in ``anat_files`` directory: ``` ! ls -lh /output/datasink/anat_files/ ``` This was just a simple example of using ``IdentityInterface``, but a complete example of preprocessing workflow you can find in [Preprocessing Example](example_preprocessing.ipynb)). ## Exercise 1 Create a workflow to calculate various powers of ``2`` using two nodes, one for ``IdentityInterface`` with ``iterables``, and one for ``Function`` interface to calculate the power of ``2``. ``` # write your solution here # lets start from the Identity node from nipype import Function, Node, Workflow from nipype.interfaces.utility import IdentityInterface iden = Node(IdentityInterface(fields=['number']), name="identity") iden.iterables = [("number", range(8))] # the second node should use the Function interface def power_of_two(n): return 2**n # Create Node power = Node(Function(input_names=["n"], output_names=["pow"], function=power_of_two), name='power') #and now the workflow wf_ex1 = Workflow(name="exercise1") wf_ex1.connect(iden, "number", power, "n") res_ex1 = wf_ex1.run() # we can print the results for i in range(8): print(list(res_ex1.nodes())[i].result.outputs) ```
github_jupyter
# Сериализация ## Обработка конфигурационных файлов ### json JSON (JavaScript Object Notation) - простой формат обмена данными, основанный на подмножестве синтаксиса JavaScript. Модуль json позволяет кодировать и декодировать данные в удобном формате. Некоторые возможности библиотеки **json** **json.dump**`(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, default=None, sort_keys=False, **kw)` - сериализует obj как форматированный JSON поток в fp. **json.dumps**`(obj, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, default=None, sort_keys=False, **kw)` - сериализует obj в строку JSON-формата. **json.load**`(fp, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw)` - десериализует JSON из fp. **json.loads**`(s, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw)` - десериализует s (экземпляр str, содержащий документ JSON) в объект Python. ``` import json # Кодирование основных объектов Python print(json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])) print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)) # Компактное кодирование print(json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',', ':'))) # Красивый вывод print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)) # Декодирование (парсинг) JSON print(json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')) print(json.loads('"\\"foo\\bar"')) ``` ### yaml YAML (YAML Ain’t Markup Language) - еще один текстовый формат для записи данных. YAML более приятен для восприятия человеком, чем JSON, поэтому его часто используют для описания сценариев в ПО. Например, в Ansible. Для работы с YAML в Python используется модуль **pyyaml**. 
Он не входит в стандартную библиотеку модулей, поэтому его нужно установить: `pip install pyyaml` ``` # Чтение из YAML (файл info.yaml) import yaml from pprint import pprint # Модуль pprint позволяет красиво отображать объекты Python with open('info.yaml') as f: templates = yaml.safe_load(f) pprint(templates) # Использование функции модуля pprint для вывода # Запись в YAML trunk_template = [ 'switchport trunk encapsulation dot1q', 'switchport mode trunk', 'switchport trunk native vlan 999', 'switchport trunk allowed vlan' ] access_template = [ 'switchport mode access', 'switchport access vlan', 'switchport nonegotiate', 'spanning-tree portfast', 'spanning-tree bpduguard enable' ] to_yaml = {'trunk': trunk_template, 'access': access_template} with open('sw_templates.yaml', 'w') as f: yaml.dump(to_yaml, f) with open('sw_templates.yaml') as f: print(f.read()) ``` ### ini Как правило, ini-файлы используют для хранения настроек приложения или операционной системы. Библиотека в ядре Python включает в себя модуль, под названием **configparser**, который вы можете использовать для создания и работы с файлами конфигурации. ``` import configparser # Создание конфигурационного файла config = configparser.ConfigParser() config.add_section("Settings") config.set("Settings", "font", "Courier") config.set("Settings", "font_size", "10") config.set("Settings", "font_style", "Normal") config.set("Settings", "font_info", "You are using %(font)s at %(font_size)s pt") with open('my_settings.ini', 'w') as config_file: config.write(config_file) # ===Выведем содержимое файла=== with open('my_settings.ini', 'r') as config_file: print(config_file.read()) # Чтение конфигурационного файла config = configparser.ConfigParser() config.read('my_settings.ini') # Читаем некоторые значения из конфиг. файла. font = config.get("Settings", "font") font_size = config.get("Settings", "font_size") # Меняем значения из конфиг. файла. config.set("Settings", "font_size", "12") # Удаляем значение из конфиг. 
файла. config.remove_option("Settings", "font_style") # Вносим изменения в конфиг. файл. with open('my_settings.ini', "w") as config_file: config.write(config_file) # ===Выведем содержимое файла=== with open('my_settings.ini', 'r') as config_file: print(config_file.read()) ``` ## Консервация объектов Модуль `pickle` (англ. pickle - консервировать) реализует мощный алгоритм сериализации и десериализации объектов Python. "Pickling" - процесс преобразования объекта Python в поток байтов, а "unpickling" - обратная операция, в результате которой поток байтов преобразуется обратно в Python-объект. Так как поток байтов легко можно записать в файл, модуль `pickle` широко применяется для сохранения и загрузки сложных объектов в Python. Модуль pickle предоставляет следующие функции для удобства сохранения/загрузки объектов: - `pickle.dump(obj, file, protocol=None, *, fix_imports=True)`\ Записывает сериализованный объект в файл. Дополнительный аргумент protocol указывает используемый протокол. По умолчанию равен 3 и именно он рекомендован для использования в Python 3 (несмотря на то, что в Python 3.4 добавили протокол версии 4 с некоторыми оптимизациями). В любом случае, записывать и загружать надо с одним и тем же протоколом. - `pickle.dumps(obj, protocol=None, *, fix_imports=True)`\ Возвращает сериализованный объект. Впоследствии вы его можете использовать как угодно. - `pickle.load(file, *, fix_imports=True, encoding="ASCII", errors="strict")`\ Загружает объект из файла. - `pickle.loads(bytes_object, *, fix_imports=True, encoding="ASCII", errors="strict")`\ Загружает объект из потока байт. Модуль `pickle` также определяет несколько исключений: `pickle.PickleError` - `pickle.PicklingError` - случились проблемы с сериализацией объекта. - `pickle.UnpicklingError` - случились проблемы с десериализацией объекта. Этих функций вполне достаточно для сохранения и загрузки встроенных типов данных. 
``` import pickle data = { 'a': [1, 2.0, 3, 4+6j], 'b': ("character string", b"byte string"), 'c': {None, True, False} } with open('data.pickle', 'wb') as f: pickle.dump(data, f) with open('data.pickle', 'rb') as f: data_new = pickle.load(f) print(data_new) ```
github_jupyter
# JS vs PY automl: who will win? Here an instance of a rather simplistic automl implementation in Python is pitted against implementation in JS. Does the JS version of AutoML reach the quality standards of even a simple Python version? Find out in this notebook. ``` from subprocess import call, DEVNULL import numpy as np import pmlb import json from time import time import os import random ``` ## Reference implementation in Python A simple grid search is employed to do automl in Python. The code below should speak for itself. ``` from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.linear_model import SGDClassifier, SGDRegressor from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.preprocessing import StandardScaler from sklearn.impute import SimpleImputer from sklearn.pipeline import make_pipeline, Pipeline from time import time class PythonAutoML(): def __init__(self): pass def fit_predict(self, X_train, y_train, X_test): # determine type of learning problem classification = False for v in y_train: try: v = float(v) except BaseException as ex: classification = True break # its a tree tree = { 'model': [DecisionTreeClassifier() if classification else DecisionTreeRegressor()], 'model__max_leaf_nodes': [3, 5, 7, 11, 19, 31, 53, 89, 137, 179, 227, 271] } # its a dot product lsgd = { 'model': [SGDClassifier() if classification else SGDRegressor()], 'model__alpha': np.logspace(-3, 6, 11), 'model__l1_ratio': [0.0, 0.5, 1.0] } # no its a gradient boosting gbdt = { 'model': [GradientBoostingClassifier() if classification else GradientBoostingRegressor()], 'model__learning_rate': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5] } model = GridSearchCV( estimator=Pipeline([ ('imputer', SimpleImputer()), ('scaler', StandardScaler()), ('model', DummyRegressor()) ]), param_grid=[ tree, lsgd, 
gbdt ], cv=5, n_jobs=-1, verbose=0 ) start = time() model.fit(X_train, y_train) duration = time() - start y_pred = model.predict(X_test) return { 'y_pred': y_pred, 'fit_time': duration } ``` ## Implementation in JavaScript It is given below. ``` from random import choice import json from subprocess import call, DEVNULL class JSAutoML(): def __init__(self): pass def fit_predict(self, X_train, y_train, X_test): puid = ''.join([choice("abcdefghijklmnopqrstuvwxyz") for _ in range(10)]) node_code = """ const puid = "%s" const ai = require('../src/automljs') const fs = require('fs') var data = JSON.parse(fs.readFileSync(puid + '.data.json', 'utf8')); async function main(){ var X = data['X']; var y = data['y']; var X_test = data['X_test']; // read estimator from the serialization module var model = new ai.automl.AutoMLModel({'max_iter':10}) var fit_start = process.hrtime(); await model.fit(X, y) var elapsed = process.hrtime(fit_start)[1] / 1000000; // divide by a million to get nano to milli var y_pred = await model.predict(X_test) var res = { 'y_pred': y_pred } await fs.writeFile(puid + '.result.json', JSON.stringify(res), 'utf8', function(){ }) } main() """ % puid jsfile = puid + '.js' dtfile = puid + '.data.json' rsfile = puid + '.result.json' with open(jsfile, 'w') as s: s.write(node_code) with open(dtfile, 'w') as d: json.dump({ 'X': X_train.tolist(), 'y': y_train.tolist(), 'X_test': X_test.tolist() }, d) start = time() call(['node ' + jsfile], shell=True) duration = time() - start result = None with open(rsfile, 'r') as js: result = json.load(js) return { 'y_pred': result['y_pred'], 'fit_time': duration } ``` ## Benchmarks! 
``` from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.linear_model import SGDClassifier, SGDRegressor from sklearn.preprocessing import Imputer, StandardScaler from sklearn.pipeline import make_pipeline from sklearn.metrics import accuracy_score, r2_score from tqdm import tqdm from itertools import product from pprint import pprint from pmlb import classification_dataset_names, regression_dataset_names ############################################ SETTINGS ############################################ models = [PythonAutoML, JSAutoML] max_size = 1000 data_classification = True ############################################ SETTINGS ############################################ results = [] datasets = classification_dataset_names if data_classification else regression_dataset_names metric = accuracy_score if data_classification else r2_score for name in datasets: if name in {'1191_BNG_pbc', '1595_poker'}: continue print('Fetching data ...') X, y = pmlb.fetch_data(name, True, local_cache_dir='/home/iaroslav/.cache/pmlb') print(name, X.shape) # skip too many features for now if X.shape[-1] >= 500: continue # make datasets small for testing if len(y) > max_size: X = X[:max_size] y = y[:max_size] # skip datasets with constant outputs unique_outputs = len(set(y)) if(unique_outputs < 2): continue print('Number of unique outputs: %s' % unique_outputs) if data_classification: # disambiguation with e.g. 
integer class labels y = np.array(['class_' + str(v) for v in y]) # make training and testing partitions X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) result_dataset = { 'dataset_name': name, 'dataset_shape': str(X.shape), 'models': {} } for model in models: inst = model() result = inst.fit_predict(X_train, y_train, X_test) y_pred = result['y_pred'] del result['y_pred'] score = metric(y_test, y_pred) result['test_score'] = score result_dataset['models'][model.__name__] = result pprint(result_dataset) results.append(result_dataset) ``` ## Final evaluation The results are summarized here. ``` import pandas as pd I = [] # this will be the index of the df rows = [] for result in results: I.append(result['dataset_name']) row = {} for model_name, stats in result['models'].items(): for stat in stats: row[model_name + "_" + stat] = stats[stat] rows.append(row) df = pd.DataFrame(data=rows, index=I) display(df.describe().round(3)) ```
github_jupyter
# Custom generators ``` import tohu from tohu.v4.primitive_generators import * from tohu.v4.derived_generators import * from tohu.v4.dispatch_generators import * from tohu.v4.custom_generator import * from tohu.v4.utils import print_generated_sequence, make_dummy_tuples print(f'Tohu version: {tohu.__version__}') ``` ## Custom generator without `__init__` method ``` class QuuxGenerator(CustomGenerator): aa = Integer(100, 200) bb = HashDigest(length=6) cc = FakerGenerator(method='name') g = QuuxGenerator() print_generated_sequence(g, num=10, sep='\n', seed=12345) ``` ### Explicitly setting the name of generated items Let's repeat the previous example, but explicitly set the name of generated items by setting the `__tohu_items_name__` attribute inside the custom generator. ``` class SomeGeneratorWithExplicitItemsName(CustomGenerator): __tohu_items_name__ = 'Foobar' aa = Integer(100, 200) bb = HashDigest(length=6) cc = FakerGenerator(method='name') g = SomeGeneratorWithExplicitItemsName() ``` The generated sequence is the same as above, but the name of the items has changed from `Quux` to `Foobar`. ``` print_generated_sequence(g, num=10, sep='\n', seed=12345) ``` ## Custom generator with `__init__` method ``` class QuuxGenerator(CustomGenerator): aa = Integer(100, 200) def __init__(self, faker_method): self.bb = FakerGenerator(method=faker_method) # Note: the call to super().__init__() needs to be at the end, # and it needs to be passed the same arguments as the __init__() # method from which it is called (here: `faker_method`). 
super().__init__(faker_method) g1 = QuuxGenerator(faker_method='first_name') g2 = QuuxGenerator(faker_method='city') print_generated_sequence(g1, num=10, sep='\n', seed=12345); print() print_generated_sequence(g2, num=10, sep='\n', seed=12345) ``` ## Custom generator containing derived generators ``` some_tuples = make_dummy_tuples('abcdefghijklmnopqrstuvwxyz') #some_tuples[:5] ``` ### Example: extracting attributes ``` class QuuxGenerator(CustomGenerator): aa = SelectOne(some_tuples) bb = GetAttribute(aa, 'x') cc = GetAttribute(aa, 'y') g = QuuxGenerator() print_generated_sequence(g, num=10, sep='\n', seed=12345) ``` ### Example: arithmetic ``` def square(x): return x * x def add(x, y): return x + y class QuuxGenerator(CustomGenerator): aa = Integer(0, 20) bb = Integer(0, 20) cc = Apply(add, aa, Apply(square, bb)) g = QuuxGenerator() print_generated_sequence(g, num=10, sep='\n', seed=12345) df = g.generate(num=100, seed=12345).to_df() print(list(df['aa'][:20])) print(list(df['bb'][:20])) print(list(df['cc'][:20])) all(df['aa'] + df['bb']**2 == df['cc']) ``` ### Example: multi-stage dependencies ``` class QuuxGenerator(CustomGenerator): name = FakerGenerator(method="name") tag = SelectOne(['a', 'bb', 'ccc']) g = QuuxGenerator() quux_items = g.generate(num=100, seed=12345) quux_items.to_df().head(5) tag_lookup = { 'a': [1, 2, 3, 4, 5], 'bb': [10, 20, 30, 40, 50], 'ccc': [100, 200, 300, 400, 500], } class FoobarGenerator(CustomGenerator): some_quux = SelectOne(quux_items) number = SelectOneDerived(Lookup(GetAttribute(some_quux, 'tag'), tag_lookup)) h = FoobarGenerator() h_items = h.generate(10000, seed=12345) df = h_items.to_df(fields={'name': 'some_quux.name', 'tag': 'some_quux.tag', 'number': 'number'}) df.head() print(df.query('tag == "a"')['number'].isin([1, 2, 3, 4, 5]).all()) print(df.query('tag == "bb"')['number'].isin([10, 20, 30, 40, 50]).all()) print(df.query('tag == "ccc"')['number'].isin([100, 200, 300, 400, 500]).all()) df.query('tag == "a"').head(5) 
df.query('tag == "bb"').head(5) df.query('tag == "ccc"').head(5) ```
github_jupyter
# 2つのガウス分布を含む混合ガウス分布のためのEMアルゴリズム (細かいコメントはもうちょっと待ってくださーい) 千葉工業大学 上田 隆一 (c) 2017 Ryuichi Ueda This software is released under the MIT License, see LICENSE. ## はじめに このコードは、2つの2次元ガウス分布を含む混合ガウス分布から生成されたデータについて、EMアルゴリズムでパラメータを求めるためのEMアルゴリズムの実装例です。処理の流れは、次のようなものです。 * (準備)2つのガウス分布からサンプリング * 推定対象は、この2つのガウス分布のパラメータと、どちらからどれだけサンプリングされたかの比 * 適当なパラメータで2つガウス分布を準備し、収束するまで以下の繰り返し * Eステップ: 各サンプルがどちらの分布から生成されたらしいかを、2つのガウス分布の確率密度関数から得られる値の比で計算 * Mステップ: Eステップで得た比を利用して、推定対象の値を計算 * 収束したら、推定値を出力 ## アルゴリズムを適用される対象になるデータの生成 クラスタリングの対象となるデータを作ります。二つの2次元ガウス分布から、2:1の割合で標本抽出します。(GitHubだと行列が崩れて表示されますが、$\mathcal{N}$の二番目の引数は2x2行列です。) * 2つの分布 * 分布A(200個抽出): $\mathcal{N}\left( \begin{bmatrix}170 \\ 70 \end{bmatrix}, \begin{bmatrix}6^2 & -30 \\ -30 & 8^2\end{bmatrix} \right)$ * 分布B(100個抽出): $\mathcal{N}\left( \begin{bmatrix}165 \\ 50 \end{bmatrix}, \begin{bmatrix}5^2 & 20 \\ 20 & 6^2\end{bmatrix} \right)$ ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import math def make_samples(): # グループAのガウス分布 muA_ans = [170,70] # 横軸、縦軸をx,y軸とすると、x=170, y=70が中心 covA_ans = [[6**2,-30],[-30,8**2]] # x軸の標準偏差6、y軸の標準偏差-30、共分散-30 samplesA = np.random.multivariate_normal(muA_ans,covA_ans,200).T #200個の点をサンプリング # グループBのガウス分布 muB_ans = [165,50] # x=165, y=50が中心 covB_ans = [[5.**2,20],[20,6**2]] # x軸の標準偏差5、y軸の標準偏差6、共分散20 samplesB = np.random.multivariate_normal(muB_ans,covB_ans,100).T #100個の点をサンプリング # 2つのグループのリストをくっつけて返す return np.column_stack((samplesA,samplesB)) # データを作る samples = make_samples() #描画してみましょう plt.scatter(samples[0],samples[1],color='g',marker='+') # sample[0]がx値のずらっと入ったリスト、sample[1]がy値 # このデータに関する既知のパラメータ K = 2 # クラスタの数 N = len(samples[0]) # サンプルの数 ``` 以後、サンプルは$\boldsymbol{x}_n = (x_n,y_n) \quad (n=0,1,2,\dots,N)$と表現します。 ## パラメータの初期設定 2つの分布のパラメータを格納する変数を準備して、このパラメータを上記の分布の式に近づけていきます。また、混合係数の変数も準備します。混合係数というのは、どっちからどれだけサンプルが生成されたかの割合のことです。上の例だと分布1で$2/3$、分布2で$1/3$となります。 * パラメータ * 各分布(リストdistributions): $\mathcal{N}(\boldsymbol{x} | \boldsymbol{\mu}_k, \Sigma_k)\quad (k=0,1)$ * 
混合係数(リストmixing_coefs): $\pi_k \quad (k=0,1; \pi_0 + \pi_1 = 1)$ ``` from scipy.stats import multivariate_normal # これを使うと多次元ガウス分布のオブジェクトが生成できます # 2つのガウス分布のオブジェクトを作る distributions = [] distributions.append(multivariate_normal(mean=[160,80],cov= [[100,0],[0,100]]) ) # 分布1を適当な分布の中心、共分散行列で初期化 distributions.append(multivariate_normal(mean=[170,100],cov= [[100,0],[0,100]]) ) # 分布2を同様に初期化。分布1と少し値を変える必要アリ # 混合係数のリスト mixing_coefs = [1.0/K for k in range(K)] # 回りくどい書き方をしていますが、両方0.5で初期化されます。 ``` 描画の関係でサンプルの分布に重なるようにガウス分布を初期化していますが、辺鄙な値でも大丈夫です。 ## 描画用の関数 ``` def draw(ds,X): # 分布を等高線で描画 x, y = np.mgrid[(min(X[0])):(max(X[0])):1, (min(X[1])):(max(X[1])):1] # 描画範囲の指定 for d in ds: pos = np.empty(x.shape + (2,)) pos[:, :, 0] = x; pos[:, :, 1] = y plt.contour(x, y, d.pdf(pos),alpha=0.2) # サンプルの描画 plt.scatter(X[0],X[1],color='g',marker='+') draw(distributions,samples) ``` 以後、描かれた2つの楕円がサンプルの上に重なるように計算していきます。 ## Eステップの実装 分布を固定し、各サンプルがどっちの分布に属すかを表した確率である負担率$\gamma(z_{nk})$のリストを各サンプル、各分布に対して計算して返します。 $\gamma(z_{nk}) = \dfrac{\pi_k \mathcal{N}(\boldsymbol{x}_n | \boldsymbol{\mu}_k, \Sigma_k) }{\sum_j^K\pi_j \mathcal{N}(\boldsymbol{x}_n | \boldsymbol{\mu}_j, \Sigma_j)}$ ``` def expectation_step(ds,X,pis): # 負担率の計算 ans = [] # 負担率のリスト for n in range(N): # サンプルの数だけ繰り返し # サンプルの地点における各分布の値(密度)を計算 ws = [ pis[k] * ds[k].pdf([X[0][n],X[1][n]]) for k in range(K) ] # 各クラスタに対して負担率の分子を計算 ans.append([ws[k]/sum(ws) for k in range(K)]) # 各クラスタの負担率の合計が1になるように正規化しているだけ return ans # K * N 個のリスト ``` ## Mステップの実装 各分布のパラメータと混合係数を更新します。次のコードの2行目の$N_k = \sum_{n=0}^{N-1} \gamma(z_{nk}) $は、各分布に関する全サンプルの負担率を合計して、各分布に「いくつ」サンプルが所属するかを求めたものです。負担率はゼロイチではないので、$N_k$は小数になります。 * 分布の中心の更新: $\boldsymbol{\mu}_k \longleftarrow \dfrac{1}{N_k} \sum_{n=0}^{N-1} \gamma(z_{nk})\boldsymbol{x}_n$ * 分布の共分散行列の更新: $\Sigma_k \longleftarrow \dfrac{1}{N_k} \sum_{n=0}^{N-1} \gamma(z_{nk}) (\boldsymbol{x}_n - \boldsymbol{\mu}_k)(\boldsymbol{x}_n - \boldsymbol{\mu}_k)^T$(更新後の$\boldsymbol{\mu}_k$を使用します。) * 混合係数の更新: $\pi_k \longleftarrow 
\dfrac{N_k}{N}$

```
def maximization_step(k, X, gammas):
    # Args: k = index of the mixture component, X = all samples (x row, y row),
    #       gammas = responsibility of each component for each sample.
    # Returns the re-estimated Gaussian for component k and its mixing coefficient.
    N_k = sum([gammas[n][k] for n in range(N)])  # effective number of samples assigned to component k
    # Update the distribution mean
    tmp_x = sum([gammas[n][k] * X[0][n] for n in range(N)]) / N_k  # responsibility-weighted mean of the samples' x values
    tmp_y = sum([gammas[n][k] * X[1][n] for n in range(N)]) / N_k  # likewise, the weighted mean of the y values
    mu = [tmp_x, tmp_y]  # updated mean
    # Update the covariance matrix
    ds = [np.array([[X[0][n], X[1][n]]]) - np.array([mu]) for n in range(N)]  # sample offsets from the new mean
    sigma = sum([gammas[n][k] * ds[n].T.dot(ds[n]) for n in range(N)]) / N_k  # responsibility-weighted mean of the 2x2 outer products
    return multivariate_normal(mean=mu, cov=sigma), N_k / N
```

### とりあえず1回ずつEステップとMステップを実行

Eステップで負担率のリストを作り、Mステップでパラメータを更新します。

```
def log_likelihood(ds, X, pis):
    # Returns a log-likelihood-style score used to judge convergence.
    ans = 0.0
    for n in range(N):
        ws = [pis[k] * ds[k].pdf([X[0][n], X[1][n]]) for k in range(K)]
        # NOTE(review): math.log1p computes ln(1 + x), not ln(x), so this is not
        # exactly the documented log-likelihood sum of ln(sum_k pi_k N(x_n)).
        # The ratio-based stopping test in the next cell relies on these values
        # being positive, so confirm both places before "fixing" this to math.log.
        ans += math.log1p(sum(ws))
    return ans

def one_step():
    # E step: compute responsibilities under the current parameters.
    gammas = expectation_step(distributions, samples, mixing_coefs)
    # M step: re-estimate each component from those responsibilities.
    for k in range(K):
        distributions[k], mixing_coefs[k] = maximization_step(k, samples, gammas)
    return log_likelihood(distributions, samples, mixing_coefs)

one_step()
draw(distributions, samples)
```

少し二つの分布の位置がサンプルのある場所に近づいているのが分かります。

## 対数尤度が収束するまで繰り返し

対数尤度は次の式で与えられます。

$\ln p(\boldsymbol{x}_{0:N-1} | \boldsymbol{\mu}_{0:1}, \Sigma_{0:1}, \pi_{0:1}) = \sum_{n=0}^{N-1} \ln \left\{ \sum_{k=0}^{K-1} \pi_k \mathcal{N}(\boldsymbol{x}_n | \boldsymbol{\mu}_k, \Sigma_k) \right\}$
github_jupyter
# Description

This notebook documents the following for a group of seven LIFX Tilechains, each with 5 tiles, laid out horizontally as follows

T1 [0] [1] [2] [3] [4]
T2 [0] [1] [2] [3] [4]
T3 [0] [1] [2] [3] [4]
T4 [0] [1] [2] [3] [4]
T5 [0] [1] [2] [3] [4]
T6 [0] [1] [2] [3] [4]
T7 [0] [1] [2] [3] [4]

Care should be taken to ensure that the LIFX Tiles are all facing up to ensure that the 0,0 position is in the expected place.

Program will perform the following - take a jpg or png located in the same folder as the notebook and create an image to display across all 7 tilechains or 35 tiles. Image will be reduced from original size to a 40x56 matrix so resolution will not be great. You've been warned.

```
!pip install pylifxtiles

#Main Program for Convert Single Image to Tiles
# Full running function with all dependencies

#imports RGB to HSBK conversion function from LIFX LAN library
from lifxlan import LifxLAN
from lifxlan.utils import RGBtoHSBK
from pylifxtiles import tiles
from pylifxtiles import actions
from matplotlib import image
from PIL import Image

# modify this variable to point at the image file to display; the tilechain
# labels matched below ('T1'..'T7') are the names shown in the LIFX app
source_image = './images/Sunflowers.jpg'

def main():
    lan = LifxLAN()
    tilechain_lights = lan.get_tilechain_lights()
    print(len(tilechain_lights))
    if len(tilechain_lights) != 0:
        # Match each discovered tilechain to its label as shown in the LIFX app.
        for tile in tilechain_lights:
            if tile.get_label() == 'T1':
                print(tile.get_label())
                T1 = tile
            if tile.get_label() =='T2':
                print(tile.get_label())
                T2 = tile
            if tile.get_label() == 'T3':
                print(tile.get_label())
                T3 = tile
            if tile.get_label() == 'T4':
                print(tile.get_label())
                T4 = tile
            if tile.get_label() == 'T5':
                print(tile.get_label())
                T5 = tile
            if tile.get_label() == 'T6':
                print(tile.get_label())
                T6 = tile
            if tile.get_label() == 'T7':
                print(tile.get_label())
                T7 = tile
        # NOTE(review): if any of T1..T7 was not discovered, the list below
        # raises NameError -- assumes all seven tilechains are online.
        tc_list = [ T1, T2, T3, T4, T5, T6, T7]
        try:
            # (40, 56): 5 tiles * 8 px wide, 7 chains * 8 px tall.
            display_image(source_image,(40,56), tc_list)
        except KeyboardInterrupt:
            print("Done.")

#combined function
# resize image and force a new shape and save to disk
def display_image(image_to_display,image_size, tilechain_list):
    # load the image
    my_image = Image.open(image_to_display)
    # report the size of the image
    #print(my_image.size)
    # resize image and ignore original aspect ratio
    img_resized = my_image.resize(image_size)
    #changing the file extension from jpg to png changes output brightness. You might need to play with this.
    img_resized.save('./images/resized_image.jpg')
    data = image.imread('./images/resized_image.jpg')
    # Convert every RGB pixel to an HSBK color tuple.
    target_tcs = []
    for row in data:
        temp_row = []
        for pixel in row:
            temp_row.append(RGBtoHSBK(pixel))
        target_tcs.append(temp_row)
    #print ("length of target_tcs is " + str(len(target_tcs)))
    # Split the full-image matrix into one sub-matrix per tilechain.
    tcsplit = tiles.split_tilechains(target_tcs)
    #print ("length of tcsplit is " + str(len(tcsplit)))
    #print ("length tilelist is " + str(len(tilechain_list)))
    for tile in range(len(tilechain_list)):
        print (tile)
        tilechain_list[tile].set_tilechain_colors(tiles.split_combined_matrix(tcsplit[tile]),rapid=True)

if __name__ == "__main__":
    main()
```

# test write to three tiles

```
#Main Program for Convert Single Image to Tiles
# Full running function with all dependencies

#imports RGB to HSBK conversion function from LIFX LAN library
from lifxlan import LifxLAN
from lifxlan.utils import RGBtoHSBK
from pylifxtiles import tiles
from pylifxtiles import actions
from matplotlib import image
from PIL import Image

# modify this variable to point at the image file to display; the tilechain
# labels matched below ('T1'..'T4') are the names shown in the LIFX app
source_image = './images/Youtubelogo.jpg'

def main():
    lan = LifxLAN()
    tilechain_lights = lan.get_tilechain_lights()
    print(len(tilechain_lights))
    if len(tilechain_lights) != 0:
        for tile in tilechain_lights:
            if tile.get_label() == 'T1':
                print(tile.get_label())
                T1 = tile
            if tile.get_label() =='T2':
                print(tile.get_label())
                T2 = tile
            if tile.get_label() == 'T3':
                print(tile.get_label())
                T3 = tile
            if tile.get_label() == 'T4':
                print(tile.get_label())
                T4 = tile
        # Only three of the discovered chains are driven in this test.
        tc_list = [T2, T3, T4]
        try:
            # (40, 24): 5 tiles * 8 px wide, 3 chains * 8 px tall.
            display_image(source_image,(40,24), tc_list)
        except KeyboardInterrupt:
            print("Done.")

#combined function
# resize image and force a new shape and save to disk
def display_image(image_to_display,image_size, tilechain_list):
    # load the image
    my_image = Image.open(image_to_display)
    # report the size of the image
    #print(my_image.size)
    # resize image and ignore original aspect ratio
    img_resized = my_image.resize(image_size)
    #changing the file extension from jpg to png changes output brightness. You might need to play with this.
    img_resized.save('./images/resized_image.jpg')
    data = image.imread('./images/resized_image.jpg')
    target_tcs = []
    for row in data:
        temp_row = []
        for pixel in row:
            temp_row.append(RGBtoHSBK(pixel))
        target_tcs.append(temp_row)
    print ("length of target_tcs is " + str(len(target_tcs)))
    tcsplit = tiles.split_tilechains(target_tcs)
    print ("length of tcsplit is " + str(len(tcsplit)))
    print ("length tilelist is " + str(len(tilechain_list)))
    for tile in range(len(tilechain_list)):
        print (tile)
        tilechain_list[tile].set_tilechain_colors(tiles.split_combined_matrix(tcsplit[tile]),rapid=True)

if __name__ == "__main__":
    main()
```
github_jupyter
# Delicious Asian and Indian Cuisines

Install imbalanced-learn (imported as `imblearn`), which will enable SMOTE. This is a scikit-learn-compatible package that helps handle imbalanced data when performing classification. (https://imbalanced-learn.org/stable/)

```
# The PyPI distribution is named 'imbalanced-learn'; 'imblearn' is only the import name.
pip install imbalanced-learn

import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from imblearn.over_sampling import SMOTE

df = pd.read_csv('../../data/cuisines.csv')
```

This dataset includes 385 columns indicating all kinds of ingredients in various cuisines from a given set of cuisines.

```
df.head()
df.info()
df.cuisine.value_counts()
```

Show the cuisines in a bar graph

```
df.cuisine.value_counts().plot.barh()

# One sub-frame per cuisine class.
thai_df = df[(df.cuisine == "thai")]
japanese_df = df[(df.cuisine == "japanese")]
chinese_df = df[(df.cuisine == "chinese")]
indian_df = df[(df.cuisine == "indian")]
korean_df = df[(df.cuisine == "korean")]

print(f'thai df: {thai_df.shape}')
print(f'japanese df: {japanese_df.shape}')
print(f'chinese df: {chinese_df.shape}')
print(f'indian df: {indian_df.shape}')
print(f'korean df: {korean_df.shape}')
```

## What are the top ingredients by class

```
def create_ingredient_df(df):
    # transpose df, drop cuisine and unnamed rows, sum the row to get total for ingredient and add value header to new df
    ingredient_df = df.T.drop(['cuisine','Unnamed: 0']).sum(axis=1).to_frame('value')
    # drop ingredients that have a 0 sum
    ingredient_df = ingredient_df[(ingredient_df.T != 0).any()]
    # sort df descending by total count
    ingredient_df = ingredient_df.sort_values(by='value', ascending=False, inplace=False)
    return ingredient_df

thai_ingredient_df = create_ingredient_df(thai_df)
thai_ingredient_df.head(10).plot.barh()
japanese_ingredient_df = create_ingredient_df(japanese_df)
japanese_ingredient_df.head(10).plot.barh()
chinese_ingredient_df = create_ingredient_df(chinese_df)
chinese_ingredient_df.head(10).plot.barh()
indian_ingredient_df = create_ingredient_df(indian_df)
indian_ingredient_df.head(10).plot.barh()
korean_ingredient_df = create_ingredient_df(korean_df)
korean_ingredient_df.head(10).plot.barh()
```

Drop very common ingredients (common to all cuisines)

```
feature_df= df.drop(['cuisine','Unnamed: 0','rice','garlic','ginger'], axis=1)
labels_df = df.cuisine #.unique()
feature_df.head()
```

Balance data with SMOTE oversampling to the highest class. Read more here: https://imbalanced-learn.org/dev/references/generated/imblearn.over_sampling.SMOTE.html

```
oversample = SMOTE()
transformed_feature_df, transformed_label_df = oversample.fit_resample(feature_df, labels_df)

print(f'new label count: {transformed_label_df.value_counts()}')
print(f'old label count: {df.cuisine.value_counts()}')

transformed_feature_df.head()

# export transformed data to new df for classification
transformed_df = pd.concat([transformed_label_df,transformed_feature_df],axis=1, join='outer')
transformed_df
transformed_df.info()
```

Save the file for future use

```
transformed_df.to_csv("../../data/cleaned_cuisine.csv")
```
github_jupyter
## _*LiH dissociation curve using VQE with UCCSD variational form*_

This notebook demonstrates using Qiskit Chemistry to plot graphs of the ground state energy of the Lithium Hydride (LiH) molecule over a range of inter-atomic distances using VQE and UCCSD. It is compared to the same energies as computed by the ExactEigensolver.

This notebook populates a dictionary that is a programmatic representation of an input file, in order to drive the Qiskit Chemistry stack. Such a dictionary can be manipulated programmatically and this is indeed the case here where we alter the molecule supplied to the driver in each loop. This notebook has been written to use the PYSCF chemistry driver.

```
import numpy as np
import pylab
import copy
from qiskit.chemistry import QiskitChemistry

# Input dictionary to configure Qiskit Chemistry for the chemistry problem.
qiskit_chemistry_dict = {
    'driver': {'name': 'PYSCF'},
    'PYSCF': {'atom': '', 'basis': 'sto3g'},
    'operator': {'name': 'hamiltonian', 'qubit_mapping': 'parity',
                 'two_qubit_reduction': True, 'freeze_core': True,
                 'orbital_reduction': [-3, -2]},
    'algorithm': {'name': ''},
    'optimizer': {'name': 'SLSQP', 'maxiter': 1000},
    'variational_form': {'name': 'UCCSD'},
    'initial_state': {'name': 'HartreeFock'}
}
molecule = 'H .0 .0 -{0}; Li .0 .0 {0}'
algorithms = ['VQE', 'ExactEigensolver']

# Inter-atomic distances to sweep: dense near equilibrium, sparser further out.
pts = [x * 0.1 for x in range(6, 20)]
pts += [x * 0.25 for x in range(8, 16)]
pts += [4.0]
energies = np.empty([len(algorithms), len(pts)])
hf_energies = np.empty(len(pts))
distances = np.empty(len(pts))
dipoles = np.empty([len(algorithms), len(pts)])
eval_counts = np.empty(len(pts))

print('Processing step __', end='')
for i, d in enumerate(pts):
    print('\b\b{:2d}'.format(i), end='', flush=True)
    qiskit_chemistry_dict['PYSCF']['atom'] = molecule.format(d/2)
    for j in range(len(algorithms)):
        # Fresh copy per run; renamed from 'dict' to avoid shadowing the builtin.
        run_dict = copy.deepcopy(qiskit_chemistry_dict)
        run_dict['algorithm']['name'] = algorithms[j]
        if algorithms[j] == 'ExactEigensolver':
            # The exact solver takes no variational configuration.
            del run_dict['optimizer']
            del run_dict['variational_form']
            del run_dict['initial_state']
        solver = QiskitChemistry()
        result = solver.run(run_dict)
        energies[j][i] = result['energy']
        hf_energies[i] = result['hf_energy']
        dipoles[j][i] = result['total_dipole_moment'] / 0.393430307  # atomic units -> debye
        if algorithms[j] == 'VQE':
            eval_counts[i] = result['algorithm_retvals']['eval_count']
    distances[i] = d
print(' --- complete')

print('Distances: ', distances)
print('Energies:', energies)
print('Hartree-Fock energies:', hf_energies)
print('VQE num evaluations:', eval_counts)

pylab.plot(distances, hf_energies, label='Hartree-Fock')
for j in range(len(algorithms)):
    pylab.plot(distances, energies[j], label=algorithms[j])
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('LiH Ground State Energy')
pylab.legend(loc='upper right');

pylab.plot(distances, np.subtract(hf_energies, energies[1]), label='Hartree-Fock')
pylab.plot(distances, np.subtract(energies[0], energies[1]), label='VQE')
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('Energy difference from ExactEigensolver')
pylab.legend(loc='upper left');

for j in reversed(range(len(algorithms))):
    pylab.plot(distances, dipoles[j], label=algorithms[j])
pylab.xlabel('Interatomic distance')
pylab.ylabel('Moment in debye')
pylab.title('LiH Dipole Moment')
pylab.legend(loc='upper right');

pylab.plot(distances, eval_counts, '-o', color=[0.8500, 0.3250, 0.0980], label='VQE')
pylab.xlabel('Interatomic distance')
pylab.ylabel('Evaluations')
pylab.title('VQE number of evaluations')
pylab.legend(loc='upper left');
```
github_jupyter
## Manual publication DB insertion from raw text using syntax features ### Publications and conferences of Prof. Darabant Sergiu Adrian #### http://www.cs.ubbcluj.ro/~dadi/ ``` text = """ A Versatile 3D Face Reconstruction from Multiple Images for Face Shape Classification Conference Paper Sep 2019 Alexandru Ion Marinescu Tudor Ileni Adrian Sergiu Darabant View Fast In-the-Wild Hair Segmentation and Color Classification Conference Paper Jan 2019 Tudor Ileni Diana Borza Adrian Sergiu Darabant In this paper we address the problem of hair segmentation and hair color classification in facial images using a machine learning approach based on both convolutional neural networks and classical neural networks. Hair with its color shades, shape and length represents an important feature of the human face and is used in domains like biometrics, v... View A Deep Learning Approach to Hair Segmentation and Color Extraction from Facial Images: 19th International Conference, ACIVS 2018, Poitiers, France, September 24–27, 2018, Proceedings Chapter Sep 2018 Diana Borza Tudor Ileni Adrian Sergiu Darabant In this paper we tackle the problem of hair analysis in unconstrained images. We propose a fully convolutional, multi-task neural network to segment the image pixels into hair, face and background classes. The network also decides if the person is bald or not. The detected hair pixels are analyzed by a color recognition module which uses color feat... View Micro-Expressions Detection Based on Micro-Motions Dense Optical Flows Conference Paper Sep 2018 Sergiu Cosmin Nistor Adrian Sergiu Darabant Diana Borza View Automatic Skin Tone Extraction for Visagism Applications Conference Paper Jan 2018 Diana Borza Adrian Sergiu Darabant Radu Danescu View Figure 1. High-speed video acquisition and analysis process. Figure 1. High-speed video acquisition and analysis process. 
High-Speed Video System for Micro-Expression Detection and Recognition Article Full-text available Dec 2017 Diana Borza Radu Danescu Razvan Itu Adrian Sergiu Darabant Micro-expressions play an essential part in understanding non-verbal communication and deceit detection. They are involuntary, brief facial movements that are shown when a person is trying to conceal something. Automatic analysis of micro-expression is challenging due to their low amplitude and to their short duration (they occur as fast as 1/15 to... View Supplementary Material Data Dec 2017 Diana Borza Radu Danescu Razvan Itu Adrian Sergiu Darabant View Towards Automatic Skin Tone Classification in Facial Images Conference Paper Oct 2017 Diana Borza Sergiu Cosmin Nistor Adrian Sergiu Darabant In this paper, we address the problem of skin tone classification in facial images, which has applications in various domains: visagisme, soft biometry and surveillance systems. We propose four skin tone classification algorithms and analyze their performance using different color spaces. The first two methods rely directly on pixel values, while t... View A linear approach to distributed database optimization using data reallocation Conference Paper Sep 2017 Adrian Sergiu Darabant Viorica Varga Leon Tambulea View Fig. 1. Flowchart of the proposed solution for gender classification Fig. 4. Loss function of Inception-v4 trained with image distortions Automatic gender recognition for “in the wild” facial images using convolutional neural networks Conference Paper Full-text available Sep 2017 Sergiu Cosmin Nistor Alexandra-Cristina Marina Adrian Sergiu Darabant Diana Borza View Fig. 1: The evaluation tree and the values associated to an example query. Fig. 2: Fragment used by a binary operator-one operand is always a leaf... Table 2 : Costs and exec times for MFRN=1 and MFRN=5, cases (a) and (b) Fig. 
4: Cost Improvements Percents for MFRN=1 and MFRN=5 Access Patterns Optimization in Distributed Databases Using Data Reallocation Conference Paper Full-text available Aug 2017 Adrian Sergiu Darabant Leon Tambulea Viorica Varga Large distributed databases are split into fragments stored on far distant nodes that communicate through a communication network. Query execution requires data transfers between the processing sites of the system. In this paper we propose a solution for minimizing raw data transfers by re-arranging and replicating existing data within the constrai... View Fast Eye Tracking and Feature Measurement using a Multi-stage Particle Filter Conference Paper Jan 2017 Radu Danescu Adrian Sergiu Darabant Diana Borza View Table 1 . Iris center localization accuracies compared to the... Table 2 . Iris center localization results on the University of... Table 3 . Iris radius computation results on the University of Michigan... Table 4 . Performance of the eye shape segmentation algorithm the UMFD... +3Table 5 . Mean error normalized by the inter-pupillary distance. Real-Time Detection and Measurement of Eye Features from Color Images Article Full-text available Jul 2016 Diana Borza Adrian Sergiu Darabant Radu Danescu The accurate extraction and measurement of eye features is crucial to a variety of domains, including human-computer interaction, biometry, and medical research. This paper presents a fast and accurate method for extracting multiple features around the eyes: the center of the pupil, the iris radius, and the external shape of the eye. These features... View Magnetic Stimulation of the Spinal Cord: Evaluating the Characteristics of an Appropriate Stimulator Article Oct 2015 Mihaela Cretu Adrian Sergiu Darabant Radu V. Ciupa This article aims to determine the necessary characteristics of a magnetic stimulator, capable of stimulating neural tracts of the spinal cord in a healthy subject. 
Our previous preliminary tests had shown that the commercial clinical magnetic stimulator Magstim Rapid2 was unable to reach excitable structures within the spinal cord, and only adjace... View Eyeglasses contour extraction using genetic algorithms Conference Paper Sep 2015 Diana Borza Radu Danescu Adrian Sergiu Darabant This paper presents an eyeglasses contour extraction method that uses genetic algorithms to find the exact shape of the lenses. An efficient shape description, based on Fourier coefficients, is used to represent the shape of the eyeglasses, allowing a wide range of shapes to be represented with a small number of parameters. The proposed method does... View Figure 1. Eyeglasses detection algorithm outline. Figure 2. Reconstruction of the rim contour using Fourier descriptors.... Table 2 . Detection rates. Table 3 . Comparison of the proposed method with related works. +4Figure 7. Eyeglasses region of interest (ROI). The detected position of... Eyeglasses Lens Contour Extraction from Facial Images Using an Efficient Shape Description Article Full-text available Oct 2013 Diana Borza Adrian Sergiu Darabant Radu Danescu This paper presents a system that automatically extracts the position of the eyeglasses and the accurate shape and size of the frame lenses in facial images. The novelty brought by this paper consists in three key contributions. The first one is an original model for representing the shape of the eyeglasses lens, using Fourier descriptors. The seco... View Magnetic Stimulation of the Spinal Cord: Experimental Results and Simulations Article May 2013 Laura Darabant Mihaela Cretu Adrian Sergiu Darabant This paper aims in interpreting the leg muscles responses recorded by electromyography during magnetic stimulation of the spinal cord by computing the electric field induced in the spinal cord and the nearby areas during this procedure. 
A simplified model of the spine was created and a Finite Difference Method algorithm was implemented in Matlab. View Fig. 4. Comparative FPE clustering results. Fig. 5. Comparative results for small, medium and large datasets. Clustering methods in data fragmentation Article Full-text available Jan 2011 Adrian Sergiu Darabant L. Darabant This paper proposes an enhanced version for three clustering algorithms: hierarchical, k-means and fuzzy c-means applied in horizontal object oriented data fragmentation. The main application is focusing in distributed object oriented database (OODB) fragmentation, but the method applicability is not limited to this research area. The proposed algo... View Figure 1. Illuminated center of pupils Figure 2. Auxiliary object with markers Figure 3. Multiple reflections issue Figure 4. Final preprocessing step: Canny Edge Detection and Closing +2Figure 6. Center detection on binarized image of circle Computer Vision Aided Measurement of Morphological Features in Medical Optics Article Full-text available Sep 2010 Bologa Bogdana Adrian Sergiu Darabant This paper presents a computer vision aided method for non invasive interupupillary (IPD) distance measurement. IPD is a morphological feature requirement in any oftalmological frame prescription. A good frame prescription is highly dependent nowadays on accurate IPD estimation in order for the lenses to be eye strain free. The idea is to replace t... View Figure 1. Original video frame from the input video. Figure 2. Foreground objects after subtraction. Figure 3. Binary image(a), Eroded image(b). Figure 4. Dilated image-blobs are well separated and compact. +3Figure 5. Normal blobs(a), Blobs with holes(b), Fragmented blobs(c). 
A Computer Vision Approach to Object Tracking and Counting Article Full-text available Sep 2010 Mezei Sergiu Adrian Sergiu Darabant This paper, introduces a new method for counting people or more generally objects that enter or exit a certain area/building or perimeter. We propose an algorithm (method) that analyzes a video sequence, detects moving objects and their moving direction and filters them according to some criteria (ex only humans). As result one obtains in and out c... View Energy Efficient Coils for Magnetic Stimulation of Peripheral Nerves Article Apr 2009 Laura Darabant M. Plesa Dan Micu[...] Adrian Sergiu Darabant The preoccupation for improving the quality of life, for persons with different handicaps, led to extended research in the area of functional stimulation. Due to its advantages compared to electrical stimulation, magnetic stimulation of the human nervous system is now a common technique in modern medicine. A difficulty of this technique is the need... View Hierarchical clustering in large object datasets – a study on complexity, quality and scalability Article Jan 2009 Adrian Sergiu Darabant Anca Andreica Object database fragmentation (horizontal fragmentation) deals with splitting the extension of classes into subsets according to some criteria. The resulting fragments are then used either in distributed database processing or in parallel data processing in order to spread the computation power over multiple nodes or to increase data locality featu... View A medical application of electromagnetic fields: The magnetic stimulation of nerve fibers inside a cylindrical tissue Conference Paper Jun 2008 M. Plesa L. Darabant R. Ciupa Adrian Sergiu Darabant A model is presented that predicts the electric field induced in the arm during magnetic stimulation of a peripheral nerve. The arm is represented as a homogeneous, cylindrical volume conductor. 
The electric field arises from two sources: the time - varying magnetic field and the accumulation of charge on the tissue - air surface. In magnetic stimu... View Fig. 2-The MobMed System Architecture and Integration with the Hospital... Fig 3-Merge Replication Architecture . Fig. 6 MobMed's login window Fig. 7 MobMed's main and patient form Mobile Devices and Data Synchronization Assisting Medical Diagnosis Article Full-text available Jun 2008 Adrian Sergiu Darabant Horea Todoran In order to be able to establish the most accurate diagnostics as quick as possible, medical doctors need fast access not only to the current patient state and test results but also to its historical medical data. With the diversity of the malady symptoms today a correct diagnostic often requires a valuable time that is not always available due to... View Web services for e-learning and e-recruitment Article Jan 2007 George Chis Horea Grebla D. Matis[...] Adrian Sergiu Darabant Mobile phone communication can no longer be conceived as a communication mean only, but also as a way to integrate voice services together with data services which are oriented towards large consumer groups. Together with voice services, mobile Internet represents the second most important component of the service packages offered in Romania. The a... View Fig. 3 Comparative PE costs for variant M1 on all classes. Fig. 5-PE values for M1 on complex class fragmentation and primary... The similarity measures and their impact on OODB fragmentation using hierarchical clustering algorithms Article Full-text available Sep 2006 Adrian Sergiu Darabant Horea Todoran Octavian Creţ George Chis Class fragmentation is an essential phase in the design of Distributed Object Oriented Databases (DOODB). 
Due to their semantic similarity with the purpose of database fragmentation (obtaining sets of similar objects with respect to the user applications running in the system), clustering algorithms have recently begun to be investigated in the pro... View Building an efficient architecture for data synchronization on mobile wireless agents Article Aug 2006 Adrian Sergiu Darabant H. Todoran Nowadays, negotiation between a representative of a commercial enterprise and its clients is a pre-requisite for selling most of the industrial goods in large quantities. In many cases, it is the task of a mobile salesman to conduct the negotiation on behalf of the supplier. But this is not an easy task to accomplish, since the mobile agent must br... View Fig.1 Business information flow Fig. 2 – The MobSel System Architecture and Integration with the... Fig 5 Controlling the synchronized data. Implementing data synchronization on mobile wireless agents Conference Paper Full-text available Jul 2006 Adrian Sergiu Darabant Horea Todoran Mobile salesmen out in the wild and the entire commercial store with them? A while ago this would have seemed atypical. Nowadays, it has become a must have asset for any salesmen-based commercial enterprise. In the past, the Web brought the virtual store to the client's premises. While this is still enough for certain types of commerce, negotiation... View Table 1 . Results of the software (C++) implementation Table 2 . Results of the software implementation Fig. 3. The hardware architecture in the 1D case Solving the Maximum Subsequence Problem with a Hardware Agents-based System Article Full-text available Jul 2006 Octavian Creţ Zsolt Mathe Cristina Grama[...] Adrian Sergiu Darabant The maximum subsequence problem is widely encountered in various digital processing systems. Given a stream of both positive and negative integers, it consists of determining the subsequence of maximal sum inside the input stream. 
In its two-dimensional version, the input is an array of both positive and negative integers, and the problem consists... View Figure 1: The fragmentation costs for the CBAk(incremental) and... Table 1 : Comparative results for the CBAk and k-means algorithms Incremental Horizontal Fragmentation: A new Approach in the Design of Distributed Object Oriented Databases Article Full-text available Jan 2006 Adrian Sergiu Darabant Alina Campan Horea Todoran Distributed relational or more recently object-oriented databases usually employ data fragmenta-tion techniques during the design phase in order to split and allocate the database entities across the nodes of the system. Most of the design algorithms are usually static and do not take into account the system evolution: data updates and addition of... View A Hardware Implementation of the Kadane’s Algorithm for the Maximum Subsequence Problem Conference Paper Jan 2006 Octavian Creţ Zsolt Mathe Lucia Văcariu[...] Levente-Karoly Gorog View "The School in Your Pocket": Useful PoeketPC applications for students Article Jan 2006 Horea Todoran Adrian Sergiu Darabant Much smaller than laptops and still suitable for almost all kinds of applications, hand-held devices have the potential to rapidly become interesting tools for various daily activities. They can be successfully used in education by all participants (students, educators, administrative staff), if helpful applications are carefully designed and imple... View Figure 2. Macroflows composed of connections originating from different... Fine-Grained Macroflow Granularity in Congestion Control Management Article Full-text available Jun 2005 Darius Bufnea Alina Campan Adrian Sergiu Darabant A recent approach in Internet congestion control suggests collaboration between sets of streams that should share network resources and learn from each other about the state of the network. Currently such a set of collaborating streams – a macroflow – is organized on host pair basis. 
We propose in this paper a new method for grouping streams into m... View Figure 3 Fuzzy fragmentation vs k-means primary and k-means... Using Fuzzy Clustering for Advanced OODB Horizontal Fragmentation with Fine-Grained Replication. Conference Paper Full-text available Jan 2005 Adrian Sergiu Darabant Alina Campan Octavian Creţ In this paper we present a new approach for horizontal object oriented database fragmentation combined with fine-grained object level replication in one step. We build our fragmentation/replication method using AI probabilis- tic clustering (fuzzy clustering). Fragmentation quality evaluation is provided using an evaluator function. View Figure 1: The database class hierarchy Figure 2: Experimental results CLUSTERING TECHNIQUES FOR ADAPTIVE HORIZONTAL FRAGMENTATION IN OBJECT ORIENTED DATABASES Article Full-text available Jan 2005 Alina Campan Adrian Sergiu Darabant Gabriela Serban Optimal application performance in a Distributed Object Ori- ented System requires class fragmentation and the development of allocation schemes to place fragments at distributed sites so data transfer is minimal. A horizontal fragmentation approach that uses data mining clustering methods for partitioning object instances into fragments has alread... View Figure 1: The database class hierarchy Figure 2: The database aggregation/association graph Figure 3: Comparative PE values for our fragmentation method,... Figure 4: Comparative class PE values for each similarity measure Figure 5: Comparative PE values for primary only fragmentation and our... A NEW APPROACH IN FRAGMENTATION OF DISTRIBUTED OBJECT ORIENTED DATABASES USING CLUSTERING TECHNIQUES Article Full-text available Jan 2005 Adrian Sergiu Darabant Horizontal fragmentation plays an important role in the design phase of Distributed Databases. 
Complex class relationships: associations, aggregations and complex methods, require fragmentation algorithms to take into account the new problem dimensions induced by these features of the object oriented models. We propose in this paper a new method fo... View Table 1 . Table 2 . Figure 3. Parameter transmission in the SW array Figure 4. The interface of a PE and the connections between adjacent... FPGA-based Scalable Implementation of the General Smith-Waterman Algorithm Conference Paper Full-text available Nov 2004 Octavian Creţ Stefan Mathe Balint Szente[...] Adrian Sergiu Darabant The Smith-Waterman algorithm is fundamental in Bioinformatics. This paper presents an FPGA-based systolic implementation of the Smith-Waterman algorithm that addresses a general case of it. A solution that improves the scalability of the design is proposed. The architecture is optimized for both speed and space, by reusing the hardware resources fo... View TABLE 2 . Allocation of Fragments to Distributed Sites Fig. 3. Comparative quality measures for each class. Fig. 4. Comparative PE for k-means, full replication and centralized case. Fig. 5. Comparative PE values for our fragmentation methods. Semi-supervised learning techniques: k-means clustering in OODB Fragmentation Conference Paper Full-text available Feb 2004 Adrian Sergiu Darabant Alina Campan Vertical and horizontal fragmentations are central issues in the design process of distributed object based systems. A good fragmentation scheme followed by an optimal allocation could greatly enhance performance in such systems, as data transfer between distributed sites is minimized. In this paper we present a horizontal fragmentation approach th... 
View Figure 1: The database inheritance hierarchy Figure 2: The database aggregation hierarchy Figure 3: Partial RelGraph-CAN values and weights Figure 4: Comparative PE values for each class Figure 5: Comparative PE values for different fragmentation orders A new approach for optimal fragmentation order in distributed object oriented databases Article Full-text available Feb 2004 Adrian Sergiu Darabant Alina Campan Class fragmentation is an important task in the design of Distributed OODBs and there are many algorithms handling it. Almost none of them deals however with the class fragmentation order details. We claim that class fragmentation order can induce severe performance penalties if not considered in the frag- mentation phase. We propose here two varia... View Figure 1. The database inheritance hierarchy Figure 2. The database aggregation hierarchy OPTIMAL CLASS FRAGMENTATION ORDERING IN OBJECT ORIENTED DATABASES Article Full-text available Jan 2004 Adrian Sergiu Darabant Alina Campan Distributed Object Oriented Databases require class fragmenta- tion, performed either horizontally or vertically. Complex class relationships like aggregation and/or association are often represented as two-way refer- ences or object-links between classes. In order to obtain a good quality horizontal fragmentation, an optimal class processing order... View TABLE 1. Figure 3 Comparative PE values for our fragmentation method,... Figure 4 Comparative class PE values for each similarity measure. AI CLUSTERING TECHNIQUES: A NEW APPROACH IN HORIZONTAL FRAGMENTATION OF CLASSES WITH COMPLEX ATTRIBUTES AND METHODS IN OBJECT ORIENTED DATABASES Article Full-text available Jan 2004 Adrian Sergiu Darabant Alina Campan Grigor Moldovan Horea Grebla Horizontal fragmentation plays an important role in the design phase of Distributed Databases. 
Complex class relationships: associations, aggregations and complex methods, require fragmentation algorithms to take into account the new problem dimensions induced by these features of the object oriented models. We propose in this paper a new method fo... View DATA ALLOCATION IN DISTRIBUTED DATABASE SYSTEMS PERFORMED BY MOBILE INTELLIGENT AGENTS Article Full-text available Jan 2004 Horea Grebla Grigor Moldovan Adrian Sergiu Darabant Alina Campan As the European Union extends its boundaries the major companies have extended their presence on different markets resulting sales expansion and marketing specialization. Moreover, globalization brings a bigger impact on vital business's data because of the applications that have been developed on platforms having specific aspects by means of datab... View Figure 2. Comparative PE for k-means, full replication and centralized... Figure 3. Comparison quality measures for each of our fragmentation... Advanced Object Database Design Techniques Article Full-text available Jan 2004 Adrian Sergiu Darabant Alina Ampan Class fragmentation is an important task in the design of Distributed Object Oriented Databases (DOOD). However, fragmentation in DOOD is still at its beginnings and mostly adapted from the relational approaches. In this paper we propose an alternative approach for horizontal fragmentation of DOOD. Our method uses two different AI clustering techni... View Fig. 2 . CREC development system A hardware/software codesign method for general purpose reconfigurable computing Conference Paper Full-text available Jul 2003 Octavian Creţ Kalman Pusztai Cristian Cosmin Vancea[...] Adrian Sergiu Darabant CREC is an original, low-cost general-purpose Reconfigurable Computer whose architecture is generated through a Hardware / Software CoDesign process. The main idea of the CREC computer is to generate the best-suited hardware architecture for the execution of each software application. 
The CREC Parallel Compiler parses the source code and generates... View Current Technologies in Automatic Test Suites Generation and Verification of Complex Systems Article Full-text available Jan 1999 Adrian Sergiu Darabant View Multi-tiered client-server techniques for distributed database systems Article Jan 1998 Adrian Sergiu Darabant Information explosion across all areas has determined an increase in hardware requirements for application that provide data to the users. As hardware evelopment is quite susceptible to be bound after a top barrier is reached, new technologies must be developed in the software area in order to keep up with the requirements. We present here such a t... View Fig. 2. The database class hierarchy Fig. 3. The database aggregation/association graph Hierarchical clustering in object oriented data models with complex class relationships Article Full-text available Adrian Sergiu Darabant Alina Campan Octavian Creţ Class fragmentation is an essential phase in the design of Distributed Object Oriented Databases (DOODB). Horizontal and vertical fragmentation are the two commonly used fragmentation techniques. We propose here two new methods for horizontal fragmentation of objects with complex attributes. They rely on AI clustering techniques for grouping object... View Energy Efficient Coils for Transcranial Magnetic Stimulation (TMS) Article Laura DARABANT M. Plesa Radu CIUPA[...] Adrian Sergiu Darabant The preoccupation for improving the quality of life, for persons with different handicaps, led to extended research in the area of functional stimulation. Due to its advantages compared to electrical stimulation, magnetic stimulation of the human nervous system is now a common technique in modern medicine. A difficulty of this technique is the need... View Fig.2: Web Services for E-Learning E-Learning Services as a Recruitment Tool Article Full-text available George Chis Horea Grebla DUMITRU MATIS[...] 
Adrian Sergiu Darabant Networks expansion and Internet provide a good platform for e-learning in the idea of connecting learners with educational resources. The various systems that are already implemented consider the learning process as a remote task to gather knowledge in order to pass some exams. In the learning process evaluation represents a final step for a course... View A Comparative Study of Horizontal Object Clustering-based Fragmentation Techniques Article Adrian Sergiu Darabant Alina Campan Design of modern Distributed Object Oriented Databases (DOODs) requires class fragmentation techniques. Although research has been conducted in this area, most of the developed methods are inspired from the relational fragmentation algorithms. In this paper we develop a comparative approach of two new methods for horizontal class fragmentation in a... View TABLE 2 . OCM -exceptional case TABLE 3 . CVM -for OCM Fig. 4. Comparative quality measures for fragmentation variants,... TABLE 4 . OCM -with phantom object TABLE 5 . CVM -with phantom object AI Clustering Techniques: a New Approach to Object Oriented Database Fragmentation Article Full-text available Adrian Sergiu Darabant Alina Campan Cluj Napoca M Kogalniceanu Optimal application performance on a Distributed Object Based System requires class fragmentation and the development of allocation schemes to place fragments at distributed sites so data transfer is minimal. In this paper we present a horizontal fragmentation approach that uses the k-means centroid based clustering method for partitioning object i... View A Comparative Study on the Influence of Similarity Measures in Hierarchical Clustering in Complex Distributed Object-Oriented Databases Article Full-text available Adrian Sergiu Darabant Horea Todoran Octavian Creţ George Chis Class fragmentation is an essential phase in the design of Distributed Object Oriented Databases (DOODB). 
Due to their semantic similarity with the purpose of database fragmentation (obtaining sets of similar objects with respect to the user applications running in the system), clustering algorithms have recently begun to be investigated in the pro... View Figure 1. Medical information flow Figure 2. The MobMed Architecture and Integration with the Hospital... Figure 3. Merge Replication Architecture EFFICIENT DATA SYNCHRONIZATION FOR MOBILE WIRELESS MEDICAL USERS Article Full-text available Adrian Sergiu Darabant Darabant And Horea Todoran In order to take the appropriate decisions as quick as possible, medical doctors need fast access to various pieces of information on their pa-tients. The required information should be accurate, up-to-date, and avail-able on the spot. Even more, after finishing his/her investigation, the medical doctor should be able to immediately forward the rel... View Implementing Efficient Data Synchronization for Mobile Wireless Medical Users Article Full-text available Adrian Sergiu Darabant In order to take the appropriate decisions as quick as possible, medical doctors need fast access to various pieces of information on their patients. The required information should be accurate, up-to-date, and available on the spot. Even more, after finishing his/her investigation, the medical doctor should be able to immediately forward the relev... 
View """ import re class HelperMethods: @staticmethod def IsDate(text): # print("text") # print(text) val = re.match("(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) (1|2)(0|9)[0-9]{2}", text) if not val: return False return True mylines = [] ctr = 0 title = "" authors = "" affiliations = "" date = "" papers = [] titles = [] dates = [] for line in text.split('View')[1:-1]: fields = [] current_date = None print(line.split('\n')) for field in line.split('\n'): val = re.match("(\+[0-9])?(Figure|Fig[\.]?|Table|TABLE)( )?[0-9]+", field) if val: continue if field == "": continue print("field: ", field) fields.append(field) if HelperMethods.IsDate(field): current_date = field title = fields[0] papers.append((title, current_date)) print(len(papers)) print(papers) for i, paper in enumerate(papers): print(i, paper) #mylines[i][0] = mylines[i][1] ``` # DB Storage (TODO) Time to store the entries in the `papers` DB table. ![Screenshot](Images/PapersTableSpec.PNG) ``` import mariadb import json with open('../credentials.json', 'r') as crd_json_fd: json_text = crd_json_fd.read() json_obj = json.loads(json_text) credentials = json_obj["Credentials"] username = credentials["username"] password = credentials["password"] table_name = "publications_cache" db_name = "ubbcluj" mariadb_connection = mariadb.connect(user=username, password=password, database=db_name) mariadb_cursor = mariadb_connection.cursor() import datetime from datetime import datetime for paper in papers: title = "" authors = "" pub_date = "" affiliations = "" try: title = paper[0].lstrip() except: pass try: # print(paper[1]) pub_date = datetime.strptime(paper[1], "%b %Y").strftime("%Y-%m-%d") except: pass insert_string = "INSERT INTO {0} SET ".format(table_name) insert_string += "Title=\'{0}\', ".format(title) insert_string += "ProfessorId=\'{0}\', ".format(12) if pub_date != "": insert_string += "PublicationDate=\'{0}\', ".format(str(pub_date)) insert_string += "Authors=\'{0}\', ".format(authors) insert_string += 
"Affiliations=\'{0}\' ".format(affiliations) print(insert_string) # print(paper) # continue try: mariadb_cursor.execute(insert_string) except mariadb.ProgrammingError as pe: print("Error") raise pe except mariadb.IntegrityError: continue mariadb_connection.close() ``` # Conclusion ### In the end, the DB only required ~1 manual modifications with this code. This was first stored in a DB cache table which is a duplicate of the main, reviewed, then inserted in the main table.
github_jupyter
```
# General Dependencies
import os
import numpy as np

# Denoising dependencies
from trefide.pmd import batch_decompose,\
                        batch_recompose,\
                        overlapping_batch_decompose,\
                        overlapping_batch_recompose,\
                        determine_thresholds
from trefide.reformat import overlapping_component_reformat

# Plotting & Video Rendering Dependencies
import funimag
import matplotlib.pyplot as plt
from trefide.plot import pixelwise_ranks
from trefide.video import play_cv2

# Set Demo Dataset Location
ext = os.path.join("..", "example_movies")
filename = os.path.join(ext, "demoMovie.tif")

%load_ext autoreload
%autoreload 2
```

# Load Data

```
from skimage import io

# Reorder the loaded stack to (height, width, frames) and crop to a 60x60
# patch to keep the demo fast.
mov = io.imread(filename).transpose([1,2,0])[:60,:60,:]
# C-contiguous float64 layout for the native trefide routines.
mov = np.asarray(mov,order='C',dtype=np.float64)
print(mov.shape)

fov_height, fov_width, num_frames = mov.shape
```

# Set Params

```
# Maximum of rank 50 blocks (safeguard to terminate early if this is hit)
max_components = 50

# Enable Decimation
max_iters_main = 10
max_iters_init = 40
d_sub=2
t_sub=2

# Defaults
consec_failures = 3
tol = 0.0005

# Set Blocksize Parameters
block_height = 20
block_width = 20
overlapping = True
```

# Compress Video
## Simulate Critical Region with Noise

```
spatial_thresh, temporal_thresh = determine_thresholds((fov_height, fov_width, num_frames),
                                                       (block_height, block_width),
                                                       consec_failures, max_iters_main,
                                                       max_iters_init, tol,
                                                       d_sub, t_sub, 5, True)
```

## Decompose Each Block Into Spatial & Temporal Components

```
# Blockwise Parallel, Single Tiling
if not overlapping:
    # NOTE(review): unlike the overlapping branch below, this call does not pass
    # spatial_thresh/temporal_thresh -- confirm against the
    # trefide.pmd.batch_decompose signature before running with overlapping = False.
    spatial_components,\
    temporal_components,\
    block_ranks,\
    block_indices = batch_decompose(fov_height, fov_width, num_frames,
                                    mov, block_height, block_width,
                                    max_components, consec_failures,
                                    max_iters_main, max_iters_init, tol,
                                    d_sub=d_sub, t_sub=t_sub)
# Blockwise Parallel, 4x Overlapping Tiling
else:
    spatial_components,\
    temporal_components,\
    block_ranks,\
    block_indices,\
    block_weights = overlapping_batch_decompose(fov_height, fov_width, num_frames,
                                                mov, block_height, block_width,
                                                spatial_thresh, temporal_thresh,
                                                max_components, consec_failures,
                                                max_iters_main, max_iters_init, tol,
                                                d_sub=d_sub, t_sub=t_sub)
```

# Reconstruct Denoised Video

```
# Single Tiling (No need for reweighting)
if not overlapping:
    mov_denoised = np.asarray(batch_recompose(spatial_components,
                                              temporal_components,
                                              block_ranks,
                                              block_indices))
# Overlapping Tilings With Reweighting
else:
    mov_denoised = np.asarray(overlapping_batch_recompose(fov_height, fov_width, num_frames,
                                                          block_height, block_width,
                                                          spatial_components,
                                                          temporal_components,
                                                          block_ranks,
                                                          block_indices,
                                                          block_weights))
```

# Produce Diagnostics
### Single Tiling Pixel-Wise Ranks

```
if overlapping:
    # The overlapping decomposition keys its ranks per tiling;
    # 'no_skew'/'full' selects the base (unshifted) tiling.
    pixelwise_ranks(block_ranks['no_skew']['full'], fov_height, fov_width, num_frames, block_height, block_width)
else:
    pixelwise_ranks(block_ranks, fov_height, fov_width, num_frames, block_height, block_width)
```

### Correlation Images

```
from funimag.plots import util_plot
# A small amount of Gaussian noise is added to the denoised movie -- presumably to
# avoid degenerate (zero-variance) pixels in the correlation image; TODO confirm.
util_plot.comparison_plot([mov, mov_denoised + np.random.randn(np.prod(mov.shape)).reshape(mov.shape)*.01],
                          plot_orientation="vertical")
```

## Render Videos & Residual

```
# Stack raw / denoised / residual vertically into a single rendered clip.
play_cv2(np.vstack([mov, mov_denoised, mov-mov_denoised]), magnification=2)
```

# Save Results

```
U, V = overlapping_component_reformat(fov_height, fov_width, num_frames,
                                      block_height, block_width,
                                      spatial_components,
                                      temporal_components,
                                      block_ranks,
                                      block_indices,
                                      block_weights)

np.savez(os.path.join(ext, "demo_results.npz"), U, V,block_ranks,block_height,block_width)
```
github_jupyter
All data credits belong to the wonderful work done by **Rekhta foundation**. Data has been parsed into Urdu, Hindi and English translieration thanks to their excellent data organization. Consider supporting them for their great work in pushing the urdu language. ![image.png](attachment:image.png) Credits to these authors for their wonderful original creations: *'mirza-ghalib','allama-iqbal','faiz-ahmad-faiz','sahir-ludhianvi','meer-taqi-meer', 'dagh-dehlvi','kaifi-azmi','gulzar','bahadur-shah-zafar','parveen-shakir', 'jaan-nisar-akhtar','javed-akhtar','jigar-moradabadi','jaun-eliya', 'ahmad-faraz','meer-anees','mohsin-naqvi','firaq-gorakhpuri','fahmida-riaz','wali-mohammad-wali', 'waseem-barelvi','akbar-allahabadi','altaf-hussain-hali','ameer-khusrau','naji-shakir','naseer-turabi', 'nazm-tabatabai','nida-fazli','noon-meem-rashid', 'habib-jalib'* ``` from bs4 import BeautifulSoup from bs4.element import Comment import urllib.request # credits to this stackoverflow answer https://stackoverflow.com/questions/1936466/beautifulsoup-grab-visible-webpage-text allowed_sections=['style', 'script', 'head', 'title', 'meta', '[document]'] def tag_visible(element): if element.parent.name in allowed_sections: return False if isinstance(element, Comment): return False return True def text_from_html(body): soup = BeautifulSoup(body, 'html.parser') texts = soup.findAll(text=True) visible_texts = filter(tag_visible, texts) return u" ".join(t.strip() for t in visible_texts) #captures the different <p> tags as newlines def text_with_newlines(elem): text = '' for e in elem.descendants: if isinstance(e, str): text += e elif e.name == 'br' or e.name == 'p': text += '\n' return text def parse_ghazal(url): html = urllib.request.urlopen(url).read() soup= BeautifulSoup(html, 'html.parser') mydivs = soup.find("div", {"class": "pMC"}) #this section removes some of the English translations present on the webpage # mixing language data would add noise, and make it difficult for the model to 
learn #BUT in future these urdu to english translations could be a valuable resource to prepare machine translation data for div in mydivs.find_all("div", {'class':'t'}): div.decompose() mydivs= text_with_newlines(mydivs) return mydivs url_english='https://www.rekhta.org/ghazals/sitaaron-se-aage-jahaan-aur-bhii-hain-allama-iqbal-ghazals-1' url_urdu='https://www.rekhta.org/ghazals/sitaaron-se-aage-jahaan-aur-bhii-hain-allama-iqbal-ghazals-1?lang=ur' url_hindi='https://www.rekhta.org/ghazals/sitaaron-se-aage-jahaan-aur-bhii-hain-allama-iqbal-ghazals-1?lang=hi' ghazal = parse_ghazal(url_english) print(ghazal) ''' okay I have a problem the way this urdu font is rendered. It is absolutely terrible to read. There have to be better font options. ''' ``` ## Parsed text samples ### English transliteration sitāroñ se aage jahāñ aur bhī haiñ abhī ishq ke imtihāñ aur bhī haiñ tū shāhīñ hai parvāz hai kaam terā tire sāmne āsmāñ aur bhī haiñ isī roz o shab meñ ulajh kar na rah jā ki tere zamān o makāñ aur bhī haiñ ### Urdu ستاروں سے آگے جہاں اور بھی ہیں تو شاہیں ہے پرواز ہے کام تیرا ترے سامنے آسماں اور بھی ہیں اسی روز و شب میں الجھ کر نہ رہ جا کہ تیرے زمان و مکاں اور بھی ہیں ### Hindi सितारों से आगे जहाँ और भी हैं अभी इश्क़ के इम्तिहाँ और भी हैं तू शाहीं है परवाज़ है काम तेरा तिरे सामने आसमाँ और भी हैं इसी रोज़ ओ शब में उलझ कर न रह जा कि तेरे ज़मान ओ मकाँ और भी हैं ### Observing the structure of urls to make sure the same script would work across different poets on the webpage ``` #The folder structure has urls as https://www.rekhta.org/ghazals/ #https://www.rekhta.org/ghazals/tire-ishq-kii-intihaa-chaahtaa-huun-allama-iqbal-ghazals #Iqbal url='https://www.rekhta.org/ghazals/tire-ishq-kii-intihaa-chaahtaa-huun-allama-iqbal-ghazals' url= 'https://www.rekhta.org/ghazals/kabhii-ai-haqiiqat-e-muntazar-nazar-aa-libaas-e-majaaz-men-allama-iqbal-ghazals' ##Ghalib's ghazals ## there are currently 234 ghazals by this poet on the page ## which makes it a rich resource for training a text 
model url= 'https://www.rekhta.org/ghazals/hazaaron-khvaahishen-aisii-ki-har-khvaahish-pe-dam-nikle-mirza-ghalib-ghazals' url ='https://www.rekhta.org/ghazals/hazaaron-khvaahishen-aisii-ki-har-khvaahish-pe-dam-nikle-mirza-ghalib-ghazals?lang=ur' url='https://www.rekhta.org/ghazals/hazaaron-khvaahishen-aisii-ki-har-khvaahish-pe-dam-nikle-mirza-ghalib-ghazals?lang=hi' url='https://www.rekhta.org/ghazals/har-ek-baat-pe-kahte-ho-tum-ki-tuu-kyaa-hai-mirza-ghalib-ghazals' url='https://www.rekhta.org/ghazals/ishq-mujh-ko-nahiin-vahshat-hii-sahii-mirza-ghalib-ghazals' url='https://www.rekhta.org/ghazals/ishq-mujh-ko-nahiin-vahshat-hii-sahii-mirza-ghalib-ghazals?lang=ur' url='https://www.rekhta.org/ghazals/ishq-mujh-ko-nahiin-vahshat-hii-sahii-mirza-ghalib-ghazals?lang=hi' url='https://www.rekhta.org/ghazals/koii-din-gar-zindagaanii-aur-hai-mirza-ghalib-ghazals' url='https://www.rekhta.org/ghazals/koii-din-gar-zindagaanii-aur-hai-mirza-ghalib-ghazals?lang=ur' url='https://www.rekhta.org/ghazals/hai-bazm-e-butaan-men-sukhan-aazurda-labon-se-mirza-ghalib-ghazals' url='https://www.rekhta.org/ghazals/hai-bazm-e-butaan-men-sukhan-aazurda-labon-se-mirza-ghalib-ghazals?lang=ur' url='https://www.rekhta.org/ghazals/ghar-jab-banaa-liyaa-tire-dar-par-kahe-bagair-mirza-ghalib-ghazals' url='https://www.rekhta.org/ghazals/ghar-jab-banaa-liyaa-tire-dar-par-kahe-bagair-mirza-ghalib-ghazals?lang=hi' url='https://www.rekhta.org/ghazals/ghar-jab-banaa-liyaa-tire-dar-par-kahe-bagair-mirza-ghalib-ghazals?lang=ur' ## Sahir Ludhianvi url='https://www.rekhta.org/ghazals/kabhii-khud-pe-kabhii-haalaat-pe-ronaa-aayaa-sahir-ludhianvi-ghazals' url='https://www.rekhta.org/ghazals/kabhii-khud-pe-kabhii-haalaat-pe-ronaa-aayaa-sahir-ludhianvi-ghazals?lang=ur' url= 'https://www.rekhta.org/ghazals/kabhii-khud-pe-kabhii-haalaat-pe-ronaa-aayaa-sahir-ludhianvi-ghazals?lang=hi' ## Faiz en_url='https://www.rekhta.org/ghazals/gulon-men-rang-bhare-baad-e-nau-bahaar-chale-faiz-ahmad-faiz-ghazals' 
hindi_url='https://www.rekhta.org/ghazals/gulon-men-rang-bhare-baad-e-nau-bahaar-chale-faiz-ahmad-faiz-ghazals?lang=hi' urdu_url='https://www.rekhta.org/ghazals/gulon-men-rang-bhare-baad-e-nau-bahaar-chale-faiz-ahmad-faiz-ghazals?lang=ur' url='https://www.rekhta.org/ghazals/hazaaron-khvaahishen-aisii-ki-har-khvaahish-pe-dam-nikle-mirza-ghalib-ghazals'#?lang=ur' ghazal = parse_ghazal(url) print(ghazal) #testing the homepage for parsing all poet names now #for ghalib def parse_webpage_at_given_scroll(html): ctr=0 soup= BeautifulSoup(html, 'html.parser') mydivs = soup.find("div", {"class": "contentListBody"}) titles=[] for a in mydivs.find_all('a', href=True): t=a['href'] if t not in titles: if ctr%5==0: print("Found the URL:", t) titles.append(t) ctr+=1 print('=============================') print('number of titles',len(titles)) print('=============================') return titles #language argument can be ur or hi for urdu or hindi def read_and_write_web(author,language='ur'): lang=language #author='mirza-ghalib' author_lan=author+'/'+lang if not os.path.exists(author_lan): os.makedirs(author_lan) for url in titles: name_poem=url.split('https://www.rekhta.org/ghazals/')[1] path_poem= author_lan+'/'+name_poem if os.path.exists(path_poem): pass else: f= open(path_poem,"w+") if lang=='en': url_for_lang= url else: url_for_lang= url+'?lang='+lang ghazal = parse_ghazal(url_for_lang) f.write(ghazal) f.close() ##Parsing based on home page of authors url_base='https://www.rekhta.org/poets/' ## TODO : Later ## or just iterate through the list of all poets on the index, instead of hand curated list authors=['mirza-ghalib','allama-iqbal','faiz-ahmad-faiz','sahir-ludhianvi','meer-taqi-meer', 'dagh-dehlvi','kaifi-azmi','gulzar','bahadur-shah-zafar','parveen-shakir', 'jaan-nisar-akhtar','javed-akhtar','jigar-moradabadi','jaun-eliya', 'ahmad-faraz','meer-anees','mohsin-naqvi','firaq-gorakhpuri','fahmida-riaz','wali-mohammad-wali', 
'waseem-barelvi','akbar-allahabadi','altaf-hussain-hali','ameer-khusrau','naji-shakir','naseer-turabi' ,'nazm-tabatabai','nida-fazli','noon-meem-rashid','habib-jalib'] for author in authors: url_home_page= url_base +author+ '/ghazals' html = urllib.request.urlopen(url_home_page).read() titles= parse_webpage_at_given_scroll(html) read_and_write_web(author,'en') read_and_write_web(author,'ur') read_and_write_web(author,'hi') ``` ہم کو مٹا سکے یہ زمانے میں دم نہیں ہم سے زمانہ خود ہے زمانے سے ہم نہیں हम को मिटा सके ये ज़माने में दम नहीं हम से ज़माना ख़ुद है ज़माने से हम नहीं Jigar Moradabadi
github_jupyter
# Create a Pipeline You can perform the various steps required to ingest data, train a model, and register the model individually by using the Azure ML SDK to run script-based experiments. However, in an enterprise environment it is common to encapsulate the sequence of discrete steps required to build a machine learning solution into a *pipeline* that can be run on one or more compute targets; either on-demand by a user, from an automated build process, or on a schedule. In this notebook, you'll bring together all of these elements to create a simple pipeline that pre-processes data and then trains and registers a model. ## Connect to your workspace To get started, connect to your workspace. > **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure. ``` import azureml.core from azureml.core import Workspace # Load the workspace from the saved config file ws = Workspace.from_config() print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name)) ``` ## Prepare data In your pipeline, you'll use a dataset containing details of diabetes patients. 
Run the cell below to create this dataset (if you created it previously, the code will find the existing version) ``` from azureml.core import Dataset default_ds = ws.get_default_datastore() if 'diabetes dataset' not in ws.datasets: default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data target_path='diabetes-data/', # Put it in a folder path in the datastore overwrite=True, # Replace existing files of the same name show_progress=True) #Create a tabular dataset from the path on the datastore (this may take a short while) tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv')) # Register the tabular dataset try: tab_data_set = tab_data_set.register(workspace=ws, name='diabetes dataset', description='diabetes data', tags = {'format':'CSV'}, create_new_version=True) print('Dataset registered.') except Exception as ex: print(ex) else: print('Dataset already registered.') ``` ## Create scripts for pipeline steps Pipelines consist of one or more *steps*, which can be Python scripts, or specialized steps like a data transfer step that copies data from one location to another. Each step can run in its own compute context. In this exercise, you'll build a simple pipeline that contains two Python script steps: one to pre-process some training data, and another to use the pre-processed data to train and register a model. First, let's create a folder for the script files we'll use in the pipeline steps. ``` import os # Create a folder for the pipeline step files experiment_folder = 'diabetes_pipeline' os.makedirs(experiment_folder, exist_ok=True) print(experiment_folder) ``` Now let's create the first script, which will read data from the diabetes dataset and apply some simple pre-processing to remove any rows with missing data and normalize the numeric features so they're on a similar scale. 
The script includes a argument named **--prepped-data**, which references the folder where the resulting data should be saved. ``` %%writefile $experiment_folder/prep_diabetes.py # Import libraries import os import argparse import pandas as pd from azureml.core import Run from sklearn.preprocessing import MinMaxScaler # Get parameters parser = argparse.ArgumentParser() parser.add_argument("--input-data", type=str, dest='raw_dataset_id', help='raw dataset') parser.add_argument('--prepped-data', type=str, dest='prepped_data', default='prepped_data', help='Folder for results') args = parser.parse_args() save_folder = args.prepped_data # Get the experiment run context run = Run.get_context() # load the data (passed as an input dataset) print("Loading Data...") diabetes = run.input_datasets['raw_data'].to_pandas_dataframe() # Log raw row count row_count = (len(diabetes)) run.log('raw_rows', row_count) # remove nulls diabetes = diabetes.dropna() # Normalize the numeric columns scaler = MinMaxScaler() num_cols = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree'] diabetes[num_cols] = scaler.fit_transform(diabetes[num_cols]) # Log processed rows row_count = (len(diabetes)) run.log('processed_rows', row_count) # Save the prepped data print("Saving Data...") os.makedirs(save_folder, exist_ok=True) save_path = os.path.join(save_folder,'data.csv') diabetes.to_csv(save_path, index=False, header=True) # End the run run.complete() ``` Now you can create the script for the second step, which will train a model. The script includes a argument named **--training-data**, which references the location where the prepared data was saved by the previous step. 
``` %%writefile $experiment_folder/train_diabetes.py # Import libraries from azureml.core import Run, Model import argparse import pandas as pd import numpy as np import joblib import os from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve import matplotlib.pyplot as plt # Get parameters parser = argparse.ArgumentParser() parser.add_argument("--training-data", type=str, dest='training_data', help='training data') args = parser.parse_args() training_data = args.training_data # Get the experiment run context run = Run.get_context() # load the prepared data file in the training folder print("Loading Data...") file_path = os.path.join(training_data,'data.csv') diabetes = pd.read_csv(file_path) # Separate features and labels X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values # Split data into training set and test set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0) # Train adecision tree model print('Training a decision tree model...') model = DecisionTreeClassifier().fit(X_train, y_train) # calculate accuracy y_hat = model.predict(X_test) acc = np.average(y_hat == y_test) print('Accuracy:', acc) run.log('Accuracy', np.float(acc)) # calculate AUC y_scores = model.predict_proba(X_test) auc = roc_auc_score(y_test,y_scores[:,1]) print('AUC: ' + str(auc)) run.log('AUC', np.float(auc)) # plot ROC curve fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1]) fig = plt.figure(figsize=(6, 4)) # Plot the diagonal 50% line plt.plot([0, 1], [0, 1], 'k--') # Plot the FPR and TPR achieved by our model plt.plot(fpr, tpr) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC Curve') run.log_image(name = "ROC", plot = fig) plt.show() # Save the trained model in the 
outputs folder print("Saving model...") os.makedirs('outputs', exist_ok=True) model_file = os.path.join('outputs', 'diabetes_model.pkl') joblib.dump(value=model, filename=model_file) # Register the model print('Registering model...') Model.register(workspace=run.experiment.workspace, model_path = model_file, model_name = 'diabetes_model', tags={'Training context':'Pipeline'}, properties={'AUC': np.float(auc), 'Accuracy': np.float(acc)}) run.complete() ``` ## Prepare a compute environment for the pipeline steps In this exercise, you'll use the same compute for both steps, but it's important to realize that each step is run independently; so you could specify different compute contexts for each step if appropriate. First, get the compute target you created in a previous lab (if it doesn't exist, it will be created). > **Important**: Change *your-compute-cluster* to the name of your compute cluster in the code below before running it! Cluster names must be globally unique names between 2 to 16 characters in length. Valid characters are letters, digits, and the - character. ``` from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException cluster_name = "your-compute-cluster" try: # Check for existing compute target pipeline_cluster = ComputeTarget(workspace=ws, name=cluster_name) print('Found existing cluster, use it.') except ComputeTargetException: # If it doesn't already exist, create it try: compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS11_V2', max_nodes=2) pipeline_cluster = ComputeTarget.create(ws, cluster_name, compute_config) pipeline_cluster.wait_for_completion(show_output=True) except Exception as ex: print(ex) ``` > **Note**: Compute instances and clusters are based on standard Azure virtual machine images. For this exercise, the *Standard_DS11_v2* image is recommended to achieve the optimal balance of cost and performance. 
If your subscription has a quota that does not include this image, choose an alternative image; but bear in mind that a larger image may incur higher cost and a smaller image may not be sufficient to complete the tasks. Alternatively, ask your Azure administrator to extend your quota. The compute will require a Python environment with the necessary package dependencies installed. ``` %%writefile $experiment_folder/experiment_env.yml name: experiment_env dependencies: - python=3.6.2 - scikit-learn - ipykernel - matplotlib - pandas - pip - pip: - azureml-defaults - pyarrow ``` Now that you have a Conda configuration file, you can create an environment and use it in the run configuration for the pipeline. ``` from azureml.core import Environment from azureml.core.runconfig import RunConfiguration # Create a Python environment for the experiment (from a .yml file) experiment_env = Environment.from_conda_specification("experiment_env", experiment_folder + "/experiment_env.yml") # Register the environment experiment_env.register(workspace=ws) registered_env = Environment.get(ws, 'experiment_env') # Create a new runconfig object for the pipeline pipeline_run_config = RunConfiguration() # Use the compute you created above. pipeline_run_config.target = pipeline_cluster # Assign the environment to the run configuration pipeline_run_config.environment = registered_env print ("Run configuration created.") ``` ## Create and run a pipeline Now you're ready to create and run a pipeline. First you need to define the steps for the pipeline, and any data references that need to be passed between them. In this case, the first step must write the prepared data to a folder that can be read from by the second step. Since the steps will be run on remote compute (and in fact, could each be run on different compute), the folder path must be passed as a data reference to a location in a datastore within the workspace. 
The **OutputFileDatasetConfig** object is a special kind of data reference that is used for interim storage locations that can be passed between pipeline steps, so you'll create one and use at as the output for the first step and the input for the second step. Note that you need to pass it as a script argument so your code can access the datastore location referenced by the data reference. ``` from azureml.data import OutputFileDatasetConfig from azureml.pipeline.steps import PythonScriptStep # Get the training dataset diabetes_ds = ws.datasets.get("diabetes dataset") # Create an OutputFileDatasetConfig (temporary Data Reference) for data passed from step 1 to step 2 prepped_data = OutputFileDatasetConfig("prepped_data") # Step 1, Run the data prep script prep_step = PythonScriptStep(name = "Prepare Data", source_directory = experiment_folder, script_name = "prep_diabetes.py", arguments = ['--input-data', diabetes_ds.as_named_input('raw_data'), '--prepped-data', prepped_data], compute_target = pipeline_cluster, runconfig = pipeline_run_config, allow_reuse = True) # Step 2, run the training script train_step = PythonScriptStep(name = "Train and Register Model", source_directory = experiment_folder, script_name = "train_diabetes.py", arguments = ['--training-data', prepped_data.as_input()], compute_target = pipeline_cluster, runconfig = pipeline_run_config, allow_reuse = True) print("Pipeline steps defined") ``` OK, you're ready build the pipeline from the steps you've defined and run it as an experiment. 
``` from azureml.core import Experiment from azureml.pipeline.core import Pipeline from azureml.widgets import RunDetails # Construct the pipeline pipeline_steps = [prep_step, train_step] pipeline = Pipeline(workspace=ws, steps=pipeline_steps) print("Pipeline is built.") # Create an experiment and run the pipeline experiment = Experiment(workspace=ws, name = 'mslearn-diabetes-pipeline') pipeline_run = experiment.submit(pipeline, regenerate_outputs=True) print("Pipeline submitted for execution.") RunDetails(pipeline_run).show() pipeline_run.wait_for_completion(show_output=True) ``` A graphical representation of the pipeline experiment will be displayed in the widget as it runs. Keep an eye on the kernel indicator at the top right of the page, when it turns from **&#9899;** to **&#9711;**, the code has finished running. You can also monitor pipeline runs in the **Experiments** page in [Azure Machine Learning studio](https://ml.azure.com). When the pipeline has finished, you can examine the metrics recorded by it's child runs. ``` for run in pipeline_run.get_children(): print(run.name, ':') metrics = run.get_metrics() for metric_name in metrics: print('\t',metric_name, ":", metrics[metric_name]) ``` Assuming the pipeline was successful, a new model should be registered with a *Training context* tag indicating it was trained in a pipeline. Run the following code to verify this. ``` from azureml.core import Model for model in Model.list(ws): print(model.name, 'version:', model.version) for tag_name in model.tags: tag = model.tags[tag_name] print ('\t',tag_name, ':', tag) for prop_name in model.properties: prop = model.properties[prop_name] print ('\t',prop_name, ':', prop) print('\n') ``` ## Publish the pipeline After you've created and tested a pipeline, you can publish it as a REST service. 
``` # Publish the pipeline from the run published_pipeline = pipeline_run.publish_pipeline( name="diabetes-training-pipeline", description="Trains diabetes model", version="1.0") published_pipeline ``` Note that the published pipeline has an endpoint, which you can see in the **Endpoints** page (on the **Pipeline Endpoints** tab) in [Azure Machine Learning studio](https://ml.azure.com). You can also find its URI as a property of the published pipeline object: ``` rest_endpoint = published_pipeline.endpoint print(rest_endpoint) ``` ## Call the pipeline endpoint To use the endpoint, client applications need to make a REST call over HTTP. This request must be authenticated, so an authorization header is required. A real application would require a service principal with which to be authenticated, but to test this out, we'll use the authorization header from your current connection to your Azure workspace, which you can get using the following code: ``` from azureml.core.authentication import InteractiveLoginAuthentication interactive_auth = InteractiveLoginAuthentication() auth_header = interactive_auth.get_authentication_header() print("Authentication header ready.") ``` Now we're ready to call the REST interface. The pipeline runs asynchronously, so we'll get an identifier back, which we can use to track the pipeline experiment as it runs: ``` import requests experiment_name = 'mslearn-diabetes-pipeline' rest_endpoint = published_pipeline.endpoint response = requests.post(rest_endpoint, headers=auth_header, json={"ExperimentName": experiment_name}) run_id = response.json()["Id"] run_id ``` Since you have the run ID, you can use it to wait for the run to complete. > **Note**: The pipeline should complete quickly, because each step was configured to allow output reuse. This was done primarily for convenience and to save time in this course. 
In reality, you'd likely want the first step to run every time in case the data has changed, and trigger the subsequent steps only if the output from step one changes. ``` from azureml.pipeline.core.run import PipelineRun published_pipeline_run = PipelineRun(ws.experiments[experiment_name], run_id) published_pipeline_run.wait_for_completion(show_output=True) ``` ## Schedule the Pipeline Suppose the clinic for the diabetes patients collects new data each week, and adds it to the dataset. You could run the pipeline every week to retrain the model with the new data. ``` from azureml.pipeline.core import ScheduleRecurrence, Schedule # Submit the Pipeline every Monday at 00:00 UTC recurrence = ScheduleRecurrence(frequency="Week", interval=1, week_days=["Monday"], time_of_day="00:00") weekly_schedule = Schedule.create(ws, name="weekly-diabetes-training", description="Based on time", pipeline_id=published_pipeline.id, experiment_name='mslearn-diabetes-pipeline', recurrence=recurrence) print('Pipeline scheduled.') ``` You can retrieve the schedules that are defined in the workspace like this: ``` schedules = Schedule.list(ws) schedules ``` You can check the latest run like this: ``` pipeline_experiment = ws.experiments.get('mslearn-diabetes-pipeline') latest_run = list(pipeline_experiment.get_runs())[0] latest_run.get_details() ``` This is a simple example, designed to demonstrate the principle. In reality, you could build more sophisticated logic into the pipeline steps - for example, evaluating the model against some test data to calculate a performance metric like AUC or accuracy, comparing the metric to that of any previously registered versions of the model, and only registering the new model if it performs better. 
You can use the [Azure Machine Learning extension for Azure DevOps](https://marketplace.visualstudio.com/items?itemName=ms-air-aiagility.vss-services-azureml) to combine Azure ML pipelines with Azure DevOps pipelines (yes, it *is* confusing that they have the same name!) and integrate model retraining into a *continuous integration/continuous deployment (CI/CD)* process. For example you could use an Azure DevOps *build* pipeline to trigger an Azure ML pipeline that trains and registers a model, and when the model is registered it could trigger an Azure Devops *release* pipeline that deploys the model as a web service, along with the application or service that consumes the model.
github_jupyter
``` from IPython.core.display import HTML def css_styling(): styles = open("./styles/custom.css", "r").read() return HTML(styles) css_styling() ``` # Approximate solutions to the Riemann Problem ## Solutions in practice Solutions to the Riemann problem are mainly used in two contexts: 1. As reference solutions against which a numerical method is benchmarked, or 2. As part of a numerical method, such as a high resolution shock capturing method, where the flux between two numerical cells is required. In the first case, accuracy is paramount and the complete solution (all wave speeds, and all intermediate states) is required. In the second case only one thing is required: the flux ${\bf f}^*$ between the cells, which is the flux on the characteristic line $\xi = x / t = 0$. In this second case, the numerical method will have to repeatedly solve the Riemann problem. In a general problem, the solution may be needed tens of times *per cell, per timestep*, leading to millions (or more!) solutions in a simulation. The speed of the solution is then extremely important, and approximate solutions are often used. ## Roe-type solutions The most obvious simplification is to reduce the nonlinear problem \begin{equation} \partial_t {\bf q} + \partial_x {\bf f}({\bf q}) = {\bf 0} \end{equation} to the *linear* problem \begin{equation} \partial_t {\bf q} + A \partial_x {\bf q} = {\bf 0}, \end{equation} where $A$ is a *constant* matrix that approximates the Jacobian $\partial {\bf f} / \partial {\bf q}$. We can then solve the linear problem exactly (e.g. 
by diagonalising the matrix and solving the resulting uncoupled advection equations), to find \begin{align} {\bf q}(x, t) & = {\bf q}_l + \sum_{p: \lambda^{(p)} < \tfrac{x}{t}} \left\{ {\bf l}^{(p)} \cdot \left( {\bf q}_r - {\bf q}_l \right) \right\} {\bf r}^{(p)}, \\ & = {\bf q}_r - \sum_{p: \lambda^{(p)} > \tfrac{x}{t}} \left\{ {\bf l}^{(p)} \cdot \left( {\bf q}_r - {\bf q}_l \right) \right\} {\bf r}^{(p)}, \\ & = \frac{1}{2} \left( {\bf q}_l + {\bf q}_r \right) + \sum_{p: \lambda^{(p)} < \tfrac{x}{t}} \left\{ {\bf l}^{(p)} \cdot \left( {\bf q}_r - {\bf q}_l \right) \right\} {\bf r}^{(p)} - \sum_{p: \lambda^{(p)} > \tfrac{x}{t}} \left\{ {\bf l}^{(p)} \cdot \left( {\bf q}_r - {\bf q}_l \right) \right\} {\bf r}^{(p)}. \end{align} where $\lambda^{(p)}, {\bf r}^{(p)},$ and ${\bf l}^{(p)}$ are the eigenvalues and the (right and left respectively) eigenvectors of $A$, ordered such that $\lambda^{(1)} \le \dots \le \lambda^{(N)}$ as usual. All three solutions are equivalent; the last is typically used. Given this complete solution, it is easily evaluated along $x = 0$, and the flux calculated from the result. An even greater shortcut can be found by noting that we are approximating ${\bf f} = A {\bf q}$. Therefore the standard form is to write \begin{equation} {\bf f}^* = \frac{1}{2} \left( {\bf f}_l + {\bf f}_r \right) + \sum_{p} \left| \lambda^{(p)} \right| \left\{ {\bf l}^{(p)} \cdot \left( {\bf q}_r - {\bf q}_l \right) \right\} {\bf r}^{(p)}, \end{equation} where now we are summing over all eigenvalues and eigenvectors. It should be noted that ${\bf f}^* \ne {\bf f}({\bf q}^*)$ in general, as the calculation of ${\bf f}^*$ relied on an approximation to the flux. In order to complete this specification of the solver, we only need to say how $A$ is defined. Roe gave the suggestion that \begin{equation} A = A({\bf q}_{\textrm{Roe}}) = \left. 
\frac{\partial {\bf f}}{\partial {\bf q}} \right|_{{\bf q}_{\textrm{Roe}}}, \end{equation} where the *Roe average* ${\bf q}_{\textrm{Roe}}$ satisfies 1. $A({\bf q}_{\textrm{Roe}}) \left( {\bf q}_r - {\bf q}_l \right) = {\bf f}_r - {\bf f}_l$, 2. $A({\bf q}_{\textrm{Roe}})$ is diagonalizable with real eigenvalues, and 3. $A({\bf q}_{\textrm{Roe}}) \to \partial {\bf f} / \partial {\bf q}$ smoothly as ${\bf q}_{\textrm{Roe}} \to {\bf q}$. It is *possible* to construct the Roe average for many systems (such as the Euler equations, and the relativistic Euler equations). However, a simple arithmetic average is often nearly as good - in the sense that the algorithm will fail only slightly more often than the algorithm with the full Roe average! The problem with Roe type solvers is that it approximates all waves as discontinuities. This leads to inaccuracies near rarefactions, and these can be catastrophically bad when the rarefaction fan crosses $\xi = 0$ (a *sonic rarefaction*). It is possible to detect when these problems will occur (e.g. by looking at when $\lambda^{(p)}$ changes sign between the left and right states) and change the approximation at this point, often known as an *entropy fix*. More systematic and complex methods that extend the Roe approach whilst avoiding this problem include the *Marquina* solver. ## HLL-type solutions An alternative type of method simplifies the wave structure even more, by simplifying the number of waves. HLL (for Harten, Lax and van Leer) type solutions assume that 1. there are two waves, both discontinuities, separating a constant central state in the solution, and 2. the waves propagate at the (known) speeds $\xi_{(\pm)}$. From these assumptions, and the Rankine-Hugoniot conditions, we have the two equations \begin{align} \xi_{(-)} \left[ {\bf q}_m - {\bf q}_l \right] & = {\bf f}_m - {\bf f}_l, \\ \xi_{(+)} \left[ {\bf q}_r - {\bf q}_m \right] & = {\bf f}_r - {\bf f}_m. 
\end{align} These are immediately solved to give \begin{align} {\bf q}_m & = \frac{\xi_{(+)} {\bf q}_r - \xi_{(-)} {\bf q}_l - {\bf f}_r + {\bf f}_l}{\xi_{(+)} - \xi_{(-)}}, \\ {\bf f}_m & = \frac{\hat{\xi}_{(+)} {\bf f}_l - \hat{\xi}_{(-)} {\bf f}_r + \hat{\xi}_{(+)} \hat{\xi}_{(-)} \left( {\bf q}_r - {\bf q}_r \right)}{\hat{\xi}_{(+)} - \hat{\xi}_{(-)}}, \end{align} where \begin{equation} \hat{\xi}_{(-)} = \min(0, \xi_{(-)}), \qquad \hat{\xi}_{(+)} = \max(0, \xi_{(+)}). \end{equation} Again it should be noted that, in general, ${\bf f}_m \ne {\bf f}({\bf q}_m)$. We still need some way to compute the wave speeds $\xi_{(\pm)}$. The simplest method is to make them as large as possible, compatible with stability. This means (via the CFL condition) setting \begin{equation} -\xi_{(-)} = \xi_{(+)} = \frac{\Delta x}{\Delta t} \end{equation} which implies that (as the central state is now guaranteed to include the origin, as the waves have different signs) \begin{equation} {\bf f}^* = \frac{1}{2} \left( {\bf f}_l + {\bf f}_r + \frac{\Delta x}{\Delta t} \left[ {\bf q}_l - {\bf q}_r \right] \right). \end{equation} This is the *Lax-Friedrichs* flux, as [used in HyperPython](https://github.com/ketch/HyperPython). We can also easily see how the *local* Lax-Friedrichs method, [used in lesson 3 of HyperPython](http://nbviewer.ipython.org/github/ketch/HyperPython/blob/master/Lesson_03_High-resolution_methods.ipynb), comes about: simply choose \begin{equation} -\xi_{(-)} = \xi_{(+)} = \alpha = \min \left( \left| \lambda \left( {\bf q}_l \right) \right|, \left| \lambda \left( {\bf q}_r \right) \right| \right) \end{equation} to get \begin{equation} {\bf f}^* = \frac{1}{2} \left( {\bf f}_l + {\bf f}_r + \alpha \left[ {\bf q}_l - {\bf q}_r \right] \right). \end{equation} HLL type methods are straightforward to use but typically do not capture linear waves, such as the contact wave in the Euler equations, well. 
Extending the HLL method by including more waves is possible (see the *HLLC* method in Toro's book as an example), but rapidly increases the complexity of the solver.
github_jupyter
# Regression Week 5: LASSO (coordinate descent) In this notebook, you will implement your very own LASSO solver via coordinate descent. You will: * Write a function to normalize features * Implement coordinate descent for LASSO * Explore effects of L1 penalty # Fire up graphlab create Make sure you have the latest version of graphlab (>= 1.7) ``` import graphlab ``` # Load in house sales data Dataset is from house sales in King County, the region where the city of Seattle, WA is located. ``` sales = graphlab.SFrame('kc_house_data.gl/') # In the dataset, 'floors' was defined with type string, # so we'll convert them to int, before using it below sales['floors'] = sales['floors'].astype(int) ``` If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the first notebook of Week 2. For this notebook, however, we will work with the existing features. # Import useful functions from previous notebook As in Week 2, we convert the SFrame into a 2D Numpy array. Copy and paste `get_num_data()` from the second notebook of Week 2. ``` import numpy as np # note this allows us to refer to numpy as np instead ``` Also, copy and paste the `predict_output()` function to compute the predictions for an entire matrix of features given the matrix and the weights: # Normalize features In the house dataset, features vary wildly in their relative magnitude: `sqft_living` is very large overall compared to `bedrooms`, for instance. As a result, weight for `sqft_living` would be much smaller than weight for `bedrooms`. This is problematic because "small" weights are dropped first as `l1_penalty` goes up. To give equal considerations for all features, we need to **normalize features** as discussed in the lectures: we divide each feature by its 2-norm so that the transformed feature has norm 1. Let's see how we can do this normalization easily with Numpy: let us first consider a small matrix. 
``` X = np.array([[3.,5.,8.],[4.,12.,15.]]) print X ``` Numpy provides a shorthand for computing 2-norms of each column: ``` norms = np.linalg.norm(X, axis=0) # gives [norm(X[:,0]), norm(X[:,1]), norm(X[:,2])] print norms ``` To normalize, apply element-wise division: ``` print X / norms # gives [X[:,0]/norm(X[:,0]), X[:,1]/norm(X[:,1]), X[:,2]/norm(X[:,2])] ``` Using the shorthand we just covered, write a short function called `normalize_features(feature_matrix)`, which normalizes columns of a given feature matrix. The function should return a pair `(normalized_features, norms)`, where the second item contains the norms of original features. As discussed in the lectures, we will use these norms to normalize the test data in the same way as we normalized the training data. To test the function, run the following: ``` features, norms = normalize_features(np.array([[3.,6.,9.],[4.,8.,12.]])) print features # should print # [[ 0.6 0.6 0.6] # [ 0.8 0.8 0.8]] print norms # should print # [5. 10. 15.] ``` # Implementing Coordinate Descent with normalized features We seek to obtain a sparse set of weights by minimizing the LASSO cost function ``` SUM[ (prediction - output)^2 ] + lambda*( |w[1]| + ... + |w[k]|). ``` (By convention, we do not include `w[0]` in the L1 penalty term. We never want to push the intercept to zero.) The absolute value sign makes the cost function non-differentiable, so simple gradient descent is not viable (you would need to implement a method called subgradient descent). Instead, we will use **coordinate descent**: at each iteration, we will fix all weights but weight `i` and find the value of weight `i` that minimizes the objective. That is, we look for ``` argmin_{w[i]} [ SUM[ (prediction - output)^2 ] + lambda*( |w[1]| + ... + |w[k]|) ] ``` where all weights other than `w[i]` are held to be constant. We will optimize one `w[i]` at a time, circling through the weights multiple times. 1. Pick a coordinate `i` 2. 
Compute `w[i]` that minimizes the cost function `SUM[ (prediction - output)^2 ] + lambda*( |w[1]| + ... + |w[k]|)` 3. Repeat Steps 1 and 2 for all coordinates, multiple times For this notebook, we use **cyclical coordinate descent with normalized features**, where we cycle through coordinates 0 to (d-1) in order, and assume the features were normalized as discussed above. The formula for optimizing each coordinate is as follows: ``` ┌ (ro[i] + lambda/2) if ro[i] < -lambda/2 w[i] = ├ 0 if -lambda/2 <= ro[i] <= lambda/2 └ (ro[i] - lambda/2) if ro[i] > lambda/2 ``` where ``` ro[i] = SUM[ [feature_i]*(output - prediction + w[i]*[feature_i]) ]. ``` Note that we do not regularize the weight of the constant feature (intercept) `w[0]`, so, for this weight, the update is simply: ``` w[0] = ro[i] ``` ## Effect of L1 penalty Let us consider a simple model with 2 features: ``` simple_features = ['sqft_living', 'bedrooms'] my_output = 'price' (simple_feature_matrix, output) = get_numpy_data(sales, simple_features, my_output) ``` Don't forget to normalize features: ``` simple_feature_matrix, norms = normalize_features(simple_feature_matrix) ``` We assign some random set of initial weights and inspect the values of `ro[i]`: ``` weights = np.array([1., 4., 1.]) ``` Use `predict_output()` to make predictions on this data. ``` prediction = ``` Compute the values of `ro[i]` for each feature in this simple model, using the formula given above, using the formula: ``` ro[i] = SUM[ [feature_i]*(output - prediction + w[i]*[feature_i]) ] ``` *Hint: You can get a Numpy vector for feature_i using:* ``` simple_feature_matrix[:,i] ``` ***QUIZ QUESTION*** Recall that, whenever `ro[i]` falls between `-l1_penalty/2` and `l1_penalty/2`, the corresponding weight `w[i]` is sent to zero. Now suppose we were to take one step of coordinate descent on either feature 1 or feature 2. 
What range of values of `l1_penalty` **would not** set `w[1]` to zero,
As you loop over features in order and perform coordinate descent, measure how much each coordinate changes. 2. After the loop, if the maximum change across all coordinates is falls below the tolerance, stop. Otherwise, go back to step 1. Return weights ``` def lasso_cyclical_coordinate_descent(feature_matrix, output, initial_weights, l1_penalty, tolerance): ``` Using the following parameters, learn the weights on the sales dataset. ``` simple_features = ['sqft_living', 'bedrooms'] my_output = 'price' initial_weights = np.zeros(3) l1_penalty = 1e7 tolerance = 1.0 ``` First create a normalized version of the feature matrix, `normalized_simple_feature_matrix` ``` (simple_feature_matrix, output) = get_numpy_data(sales, simple_features, my_output) (normalized_simple_feature_matrix, simple_norms) = normalize_features(simple_feature_matrix) # normalize features ``` Then, run your implementation of LASSO coordinate descent: ``` weights = lasso_cyclical_coordinate_descent(normalized_simple_feature_matrix, output, initial_weights, l1_penalty, tolerance) ``` ***QUIZ QUESTIONS*** 1. What is the RSS of the learned model on the normalized dataset? 2. Which features had weight zero at convergence? # Evaluating LASSO fit with more features Let us split the sales dataset into training and test sets. ``` train_data,test_data = sales.random_split(.8,seed=0) ``` Let us consider the following set of features. ``` all_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade', 'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated'] ``` First, create a normalized feature matrix from the TRAINING data with these features. (Make you store the norms for the normalization, since we'll use them later) First, learn the weights with `l1_penalty=1e7`, on the training data. Initialize weights to all zeros, and set the `tolerance=1`. Call resulting weights `weights1e7`, you will need them later. 
***QUIZ QUESTION*** What features had non-zero weight in this case? Next, learn the weights with `l1_penalty=1e8`, on the training data. Initialize weights to all zeros, and set the `tolerance=1`. Call resulting weights `weights1e8`, you will need them later. ***QUIZ QUESTION*** What features had non-zero weight in this case? Finally, learn the weights with `l1_penalty=1e4`, on the training data. Initialize weights to all zeros, and set the `tolerance=5e5`. Call resulting weights `weights1e4`, you will need them later. (This case will take quite a bit longer to converge than the others above.) ***QUIZ QUESTION*** What features had non-zero weight in this case? ## Rescaling learned weights Recall that we normalized our feature matrix, before learning the weights. To use these weights on a test set, we must normalize the test data in the same way. Alternatively, we can rescale the learned weights to include the normalization, so we never have to worry about normalizing the test data: In this case, we must scale the resulting weights so that we can make predictions with *original* features: 1. Store the norms of the original features to a vector called `norms`: ``` features, norms = normalize_features(features) ``` 2. Run Lasso on the normalized features and obtain a `weights` vector 3. Compute the weights for the original features by performing element-wise division, i.e. ``` weights_normalized = weights / norms ``` Now, we can apply `weights_normalized` to the test data, without normalizing it! Create a normalized version of each of the weights learned above. (`weights1e4`, `weights1e7`, `weights1e8`). To check your results, if you call `normalized_weights1e7` the normalized version of `weights1e7`, then: ``` print normalized_weights1e7[3] ``` should return 161.31745624837794. 
## Evaluating each of the learned models on the test data Let's now evaluate the three models on the test data: ``` (test_feature_matrix, test_output) = get_numpy_data(test_data, all_features, 'price') ``` Compute the RSS of each of the three normalized weights on the (unnormalized) `test_feature_matrix`: ***QUIZ QUESTION*** Which model performed best on the test data?
github_jupyter
# Simple Hello World example for IBM Cloud Functions PyWren This is a simple Hello World example, showing how to take a function and run it with pywren. First we import the necessary libraries to run our functions. ``` import numpy as np import os ``` It is possible to use pywren_ibm_cloud inside IBM Watson Studio or Jupyter notebooks in order to run your workloads. You must ensure that the IBM-PyWren package is installed in the environment you are using the notebook. To do so, if you can't install the package manually, we recommend to add the next lines: ``` import sys try: import pywren_ibm_cloud as pywren except: !{sys.executable} -m pip install pywren-ibm-cloud import pywren_ibm_cloud as pywren ``` Installation supports PyWren version as an input parameter, for example: ``` # !{sys.executable} -m pip install -U pywren-ibm-cloud==1.3.0 ``` Pywren is designed to run any existing python functions you have, in parallel, at scale, on the cloud. So first, we create an example python function. ``` def my_function(x): return x + 7 ``` PyWren needs the configuration to access to IBM Cloud Object Storage and IBM Cloud Functions services. If you don't have the config file in ~/.pywren/config, provide the configuration as a dictionary: ``` config = {'ibm_cf': {'endpoint': '<IBM Cloud Functions Endpoint>', 'namespace': '<NAMESPACE>', 'api_key': '<API KEY>'}, 'ibm_cos': {'endpoint': '<IBM COS Endpoint>', 'private_endpoint': '<IBM COS Private Endpoint>', 'api_key' : '<API KEY>'}, 'pywren' : {'storage_bucket' : '<IBM COS BUCKET>'}} ``` To start using `pywren`, we first create an executor with the previous config. ``` pw = pywren.ibm_cf_executor(config=config) ``` We can call `my_function(3)` remotely via `call_async`: ``` pw.call_async(my_function, 3) ``` Future is a placeholder for the returned value from applying `my_function` to the number `3`. We can call `result` on it and get the result. Note that this will block until the remote job has completed. 
Once finished, call `clean` to remove all the unnecessary data stored in COS.
github_jupyter
``` import numpy as np import tensorflow as tf import matplotlib.pylab as plt from modules.spectral_pool import max_pool, l2_loss_images from modules.frequency_dropout import test_frequency_dropout from modules.create_images import open_image, downscale_image from modules.utils import load_cifar10 np.set_printoptions(precision=3, linewidth=200) % matplotlib inline % load_ext autoreload % autoreload 2 images, _ = load_cifar10(1, get_test_data=False) images.shape ``` ### In the cell below, we choose two random images and show how the quality progressively degradees as frequency dropout is applied. ``` batch_size=2 random_selection_indices = np.random.choice(len(images), size=batch_size) for cutoff in range(16,1,-2): minibatch_cutoff = tf.cast(tf.constant(cutoff), dtype=tf.float32) random_selection = images[random_selection_indices] downsampled_images = np.moveaxis( test_frequency_dropout( np.moveaxis(random_selection, 3, 1), minibatch_cutoff ), 1, 3 ) print('Cutoff = {0}'.format(cutoff)) for i in range(batch_size): plt.imshow(np.clip(downsampled_images[i],0,1), cmap='gray') plt.show() ``` ### The next cell demonstrates how the random cutoff is applied to all images in a minibatch, but changes from batch to batch. 
``` batch_size = 2 minibatch_cutoff = tf.random_uniform([], 2, 12) for iter_idx in range(3): random_selection_indices = np.random.choice(len(images), size=batch_size) random_selection = images[random_selection_indices] downsampled_images = np.moveaxis( test_frequency_dropout( np.moveaxis(random_selection, 3, 1), minibatch_cutoff ), 1, 3 ) print('Minibatch {0}'.format(iter_idx+1)) for i in range(batch_size): plt.imshow(random_selection[i], cmap='gray') plt.show() plt.imshow(np.clip(downsampled_images[i],0,1), cmap='gray') plt.show() ``` ### max pool test ``` images_pool = max_pool(images, 2) images_pool.shape plt.imshow(images_pool[1], cmap='gray') ``` ### spectral pool test ``` cutoff_freq = int(32 / (2 * 2)) tf_cutoff_freq = tf.cast(tf.constant(cutoff_freq), tf.float32) images_spectral_pool = np.clip(np.moveaxis( test_frequency_dropout( np.moveaxis(images, 3, 1), tf_cutoff_freq ), 1, 3 ), 0, 1) images_spectral_pool.shape plt.imshow(images_spectral_pool[1], cmap='gray') ``` ## Iterate and plot ``` images_sample = images[np.random.choice(len(images), size=256)] # calculate losses for max_pool: pool_size_mp = [2, 4, 8, 16, 32] max_pool_errors = [] for s in pool_size_mp: images_pool = max_pool(images_sample, s) loss = l2_loss_images(images_sample, images_pool) max_pool_errors.append(loss) # calculate losses for spectral_pool: filter_size_sp = np.arange(16) spec_pool_errors = [] for s in filter_size_sp: tf_cutoff_freq = tf.cast(tf.constant(s), tf.float32) images_sp = np.moveaxis( test_frequency_dropout( np.moveaxis(images_sample, 3, 1), tf_cutoff_freq ), 1, 3 ) loss = l2_loss_images(images_sample, images_sp) spec_pool_errors.append(loss) pool_frac_kept = [1/x**2 for x in pool_size_mp] sp_frac_kept = [(x/16)**2 for x in filter_size_sp] fig, ax = plt.subplots(1, 1) ax.semilogy(pool_frac_kept, max_pool_errors, basey=2, marker='o', linestyle='--', color='r', label='Max Pooling') ax.semilogy(sp_frac_kept, spec_pool_errors, basey=2, marker='o', linestyle='--', color='b', 
label='Spectral Pooling') ax.legend() ax.grid(linestyle='--', alpha=0.5) ax.set_xlabel('Fraction of Parameters Kept') ax.set_ylabel('Relative Loss') fig.savefig('../Images/Figure4_Approximation_Loss.png') ```
github_jupyter
# How to use OpenNMT-py as a Library The example notebook (available [here](https://github.com/OpenNMT/OpenNMT-py/blob/master/docs/source/examples/Library.ipynb)) should be able to run as a standalone execution, provided `onmt` is in the path (installed via `pip` for instance). Some parts may not be 100% 'library-friendly' but it's mostly workable. ### Import a few modules and functions that will be necessary ``` import yaml import torch import torch.nn as nn from argparse import Namespace from collections import defaultdict, Counter import onmt from onmt.inputters.inputter import _load_vocab, _build_fields_vocab, get_fields, IterOnDevice from onmt.inputters.corpus import ParallelCorpus from onmt.inputters.dynamic_iterator import DynamicDatasetIter from onmt.translate import GNMTGlobalScorer, Translator, TranslationBuilder from onmt.utils.misc import set_random_seed ``` ### Enable logging ``` # enable logging from onmt.utils.logging import init_logger, logger init_logger() ``` ### Set random seed ``` is_cuda = torch.cuda.is_available() set_random_seed(1111, is_cuda) ``` ### Retrieve data To make a proper example, we will need some data, as well as some vocabulary(ies). Let's take the same data as in the [quickstart](https://opennmt.net/OpenNMT-py/quickstart.html): ``` !wget https://s3.amazonaws.com/opennmt-trainingdata/toy-ende.tar.gz !tar xf toy-ende.tar.gz ls toy-ende ``` ### Prepare data and vocab As for any use case of OpenNMT-py 2.0, we can start by creating a simple YAML configuration with our datasets. This is the easiest way to build the proper `opts` `Namespace` that will be used to create the vocabulary(ies). 
``` yaml_config = """ ## Where the vocab(s) will be written save_data: toy-ende/run/example # Corpus opts: data: corpus: path_src: toy-ende/src-train.txt path_tgt: toy-ende/tgt-train.txt transforms: [] weight: 1 valid: path_src: toy-ende/src-val.txt path_tgt: toy-ende/tgt-val.txt transforms: [] """ config = yaml.safe_load(yaml_config) with open("toy-ende/config.yaml", "w") as f: f.write(yaml_config) from onmt.utils.parse import ArgumentParser parser = ArgumentParser(description='build_vocab.py') from onmt.opts import dynamic_prepare_opts dynamic_prepare_opts(parser, build_vocab_only=True) base_args = (["-config", "toy-ende/config.yaml", "-n_sample", "10000"]) opts, unknown = parser.parse_known_args(base_args) opts from onmt.bin.build_vocab import build_vocab_main build_vocab_main(opts) ls toy-ende/run ``` We just created our source and target vocabularies, respectively `toy-ende/run/example.vocab.src` and `toy-ende/run/example.vocab.tgt`. ### Build fields We can build the fields from the text files that were just created. 
``` src_vocab_path = "toy-ende/run/example.vocab.src" tgt_vocab_path = "toy-ende/run/example.vocab.tgt" # initialize the frequency counter counters = defaultdict(Counter) # load source vocab _src_vocab, _src_vocab_size = _load_vocab( src_vocab_path, 'src', counters) # load target vocab _tgt_vocab, _tgt_vocab_size = _load_vocab( tgt_vocab_path, 'tgt', counters) # initialize fields src_nfeats, tgt_nfeats = 0, 0 # do not support word features for now fields = get_fields( 'text', src_nfeats, tgt_nfeats) fields # build fields vocab share_vocab = False vocab_size_multiple = 1 src_vocab_size = 30000 tgt_vocab_size = 30000 src_words_min_frequency = 1 tgt_words_min_frequency = 1 vocab_fields = _build_fields_vocab( fields, counters, 'text', share_vocab, vocab_size_multiple, src_vocab_size, src_words_min_frequency, tgt_vocab_size, tgt_words_min_frequency) ``` An alternative way of creating these fields is to run `onmt_train` without actually training, to just output the necessary files. ### Prepare for training: model and optimizer creation Let's get a few fields/vocab related variables to simplify the model creation a bit: ``` src_text_field = vocab_fields["src"].base_field src_vocab = src_text_field.vocab src_padding = src_vocab.stoi[src_text_field.pad_token] tgt_text_field = vocab_fields['tgt'].base_field tgt_vocab = tgt_text_field.vocab tgt_padding = tgt_vocab.stoi[tgt_text_field.pad_token] ``` Next we specify the core model itself. Here we will build a small model with an encoder and an attention based input feeding decoder. Both models will be RNNs and the encoder will be bidirectional ``` emb_size = 100 rnn_size = 500 # Specify the core model. 
encoder_embeddings = onmt.modules.Embeddings(emb_size, len(src_vocab), word_padding_idx=src_padding) encoder = onmt.encoders.RNNEncoder(hidden_size=rnn_size, num_layers=1, rnn_type="LSTM", bidirectional=True, embeddings=encoder_embeddings) decoder_embeddings = onmt.modules.Embeddings(emb_size, len(tgt_vocab), word_padding_idx=tgt_padding) decoder = onmt.decoders.decoder.InputFeedRNNDecoder( hidden_size=rnn_size, num_layers=1, bidirectional_encoder=True, rnn_type="LSTM", embeddings=decoder_embeddings) device = "cuda" if torch.cuda.is_available() else "cpu" model = onmt.models.model.NMTModel(encoder, decoder) model.to(device) # Specify the tgt word generator and loss computation module model.generator = nn.Sequential( nn.Linear(rnn_size, len(tgt_vocab)), nn.LogSoftmax(dim=-1)).to(device) loss = onmt.utils.loss.NMTLossCompute( criterion=nn.NLLLoss(ignore_index=tgt_padding, reduction="sum"), generator=model.generator) ``` Now we set up the optimizer. This could be a core torch optim class, or our wrapper which handles learning rate updates and gradient normalization automatically. ``` lr = 1 torch_optimizer = torch.optim.SGD(model.parameters(), lr=lr) optim = onmt.utils.optimizers.Optimizer( torch_optimizer, learning_rate=lr, max_grad_norm=2) ``` ### Create the training and validation data iterators Now we need to create the dynamic dataset iterator. This is not very 'library-friendly' for now because of the way the `DynamicDatasetIter` constructor is defined. It may evolve in the future. 
``` src_train = "toy-ende/src-train.txt" tgt_train = "toy-ende/tgt-train.txt" src_val = "toy-ende/src-val.txt" tgt_val = "toy-ende/tgt-val.txt" # build the ParallelCorpus corpus = ParallelCorpus("corpus", src_train, tgt_train) valid = ParallelCorpus("valid", src_val, tgt_val) # build the training iterator train_iter = DynamicDatasetIter( corpora={"corpus": corpus}, corpora_info={"corpus": {"weight": 1}}, transforms={}, fields=vocab_fields, is_train=True, batch_type="tokens", batch_size=4096, batch_size_multiple=1, data_type="text") # make sure the iteration happens on GPU 0 (-1 for CPU, N for GPU N) train_iter = iter(IterOnDevice(train_iter, 0)) # build the validation iterator valid_iter = DynamicDatasetIter( corpora={"valid": valid}, corpora_info={"valid": {"weight": 1}}, transforms={}, fields=vocab_fields, is_train=False, batch_type="sents", batch_size=8, batch_size_multiple=1, data_type="text") valid_iter = IterOnDevice(valid_iter, 0) ``` ### Training Finally we train. ``` report_manager = onmt.utils.ReportMgr( report_every=50, start_time=None, tensorboard_writer=None) trainer = onmt.Trainer(model=model, train_loss=loss, valid_loss=loss, optim=optim, report_manager=report_manager, dropout=[0.1]) trainer.train(train_iter=train_iter, train_steps=1000, valid_iter=valid_iter, valid_steps=500) ``` ### Translate For translation, we can build a "traditional" (as opposed to dynamic) dataset for now. 
``` src_data = {"reader": onmt.inputters.str2reader["text"](), "data": src_val} tgt_data = {"reader": onmt.inputters.str2reader["text"](), "data": tgt_val} _readers, _data = onmt.inputters.Dataset.config( [('src', src_data), ('tgt', tgt_data)]) dataset = onmt.inputters.Dataset( vocab_fields, readers=_readers, data=_data, sort_key=onmt.inputters.str2sortkey["text"]) data_iter = onmt.inputters.OrderedIterator( dataset=dataset, device="cuda", batch_size=10, train=False, sort=False, sort_within_batch=True, shuffle=False ) src_reader = onmt.inputters.str2reader["text"] tgt_reader = onmt.inputters.str2reader["text"] scorer = GNMTGlobalScorer(alpha=0.7, beta=0., length_penalty="avg", coverage_penalty="none") gpu = 0 if torch.cuda.is_available() else -1 translator = Translator(model=model, fields=vocab_fields, src_reader=src_reader, tgt_reader=tgt_reader, global_scorer=scorer, gpu=gpu) builder = onmt.translate.TranslationBuilder(data=dataset, fields=vocab_fields) ``` **Note**: translations will be very poor, because of the very low quantity of data, the absence of proper tokenization, and the brevity of the training. ``` for batch in data_iter: trans_batch = translator.translate_batch( batch=batch, src_vocabs=[src_vocab], attn_debug=False) translations = builder.from_batch(trans_batch) for trans in translations: print(trans.log(0)) break ```
github_jupyter
# Pessimistic Neighbourhood Aggregation for States in Reinforcement Learning *Author: Maleakhi Agung Wijaya Supervisors: Marcus Hutter, Sultan Javed Majeed Date Created: 21/12/2017* ``` import random import math import sys import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from IPython.display import display, clear_output # Set grid color for seaborn sns.set(style="whitegrid") ``` ## Mountain Car Environment **Mountain Car** is a standard testing domain in Reinforcement Learning, in which an under-powered car must drive up a steep hill. Since gravity is stronger than the car's engine, even at full throttle, the car cannot simply accelerate up the steep slope. The car is situated in a valley and must learn to leverage potential energy by driving up the opposite hill before the car is able to make it to the goal at the top of the rightmost hill. **Technical Details** - *State:* feature vectors consisting of velocity and position represented by an array [velocity, position] - *Reward:* -1 for every step taken, 0 for achieving the goal - *Action:* (left, neutral, right) represented by (-1, 0, 1) - *Initial state:* velocity = 0.0, position = -0.5 represented by [0.0, -0.5] - *Terminal state:* position >= 0.6 - *Boundaries:* velocity = (-0.07, 0.07), position = (-1.2, 0.6) - *Update function:* velocity = velocity + (Action) \* 0.001 + cos(3\*Position) * (-0.0025), position = position + velocity ``` class MountainCarEnvironment: """ Description: Environment for Mountain Car problem, adapted from Sutton and Barto's Introduction to Reinforcement Learning. 
Author: Maleakhi Agung Wijaya """ VELOCITY_BOUNDARIES = (-0.07, 0.07) POSITION_BOUNDARIES = (-1.2, 0.6) INITIAL_VELOCITY = 0.0 INITIAL_POSITION = -0.5 REWARD_STEP = -1 REWARD_TERMINAL = 0 # Constructor for MountainCarEnvironment # Input: agent for the MountainCarEnvironment # Output: MountainCarEnvironment object def __init__(self, car): self.car = car self.reset() # Compute next state (feature) # Output: [new velocity, new position] def nextState(self, action): # Get current state (velocity, position) and the action chosen by the agent velocity = self.car.state[0] position = self.car.state[1] # Calculate the new velocity and new position velocity += action * 0.001 + math.cos(3*position) * (-0.0025) # Consider boundary for velocity if (velocity < MountainCarEnvironment.VELOCITY_BOUNDARIES[0]): velocity = MountainCarEnvironment.VELOCITY_BOUNDARIES[0] elif (velocity > MountainCarEnvironment.VELOCITY_BOUNDARIES[1]): velocity = MountainCarEnvironment.VELOCITY_BOUNDARIES[1] position += velocity # Consider boundary for position if (position < MountainCarEnvironment.POSITION_BOUNDARIES[0]): position = MountainCarEnvironment.POSITION_BOUNDARIES[0] velocity = 0 elif (position > MountainCarEnvironment.POSITION_BOUNDARIES[1]): position = MountainCarEnvironment.POSITION_BOUNDARIES[1] new_state = [velocity, position] return(new_state) # Reset to the initial state def reset(self): self.car.state[0] = MountainCarEnvironment.INITIAL_VELOCITY self.car.state[1] = MountainCarEnvironment.INITIAL_POSITION # Give reward for each of the chosen action, depending on what the next state that the agent end up in # Output: terminal state = 0, non-terminal state = -1 def calculateReward(self): # Get current position of the agent position = self.car.state[1] # Determine the reward given if (position >= MountainCarEnvironment.POSITION_BOUNDARIES[1]): return(MountainCarEnvironment.REWARD_TERMINAL) else: return(MountainCarEnvironment.REWARD_STEP) ``` ## KNN-TD Agent **kNN-TD** combines the concept 
of *K-Nearest Neighbours* and *TD-Learning* to learn and evaluate Q values in both continuous and discrete state space RL problems. This method is especially useful in continuous states RL problems as the number of (state, action) pairs is very large and thus impossible to store and learn this information. By choosing a particular k-values and decided some initial points over continuous states, one can estimate Q values based on calculated the weighted average of Q values of the k-nearest neighbours for the state that the agent are currently in and use that values to decide the next move using some decision methods (i.e. UCB or epsilon-greedy). As for the learning process, one can update all of the k-nearest neighbours that contribute for the Q calculation. **Algorithm:** 1. Cover the whole state space by some initial Q(s,a) pairs, possibly scatter it uniformly across the whole state space and give an initial value of 0/ -1 2. When an agent in a particular state, get the feature vectors representing the state and possible actions from the state 3. For each possible action from the state, calculate Q(s,a) pairs by taking the expected value from previous Q values based on k-nearest neighbours of a particular action. *Steps for k-nearest neighbours:* - Standardise every feature in the feature vectors to (-1, 1) or other ranges to make sure that one feature scale not dominate the distance calculation (i.e. if position ranges between (-50, 50) and velocity (-0.7, 0.7) position will dominate distance calculation). - Calculate the distance between current state and all of other points with the same action using distance formula (i.e. Euclidean distance) and store the k-nearest neighbours to knn vector, and it's distance (for weight) in weight vector - Determine the probability p(x) for the expected value by using weight calculation (i.e. weight = 1/distance). To calculate weight, one can use other formula as long as that formula gives more weight to closer point. 
To calculate p(x) just divide individual weight with sum of all weights to get probability - Estimate the Q(s,a) pairs using expectation formula from kNN previous Q values 4. Using epsilon greedy/ UCB/ other decision methods to choose the next move 5. Observe the reward and update the Q values for all of the neighbours on knn vector using SARSA or Q Learning. (on the code below, I use Q Learning) 6. Repeat step 2-5 ``` class KNNAgent: """ Description: Mountain Car problem agent based on kNN-TD(0) algorithm Author: Maleakhi Agung Wijaya """ INITIAL_VELOCITY = 0.0 INITIAL_POSITION = -0.5 INITIAL_VALUE = -1 ACTIONS = [-1, 0, 1] GAMMA = 0.995 EPSILON = 0.05 INDEX_DISTANCE = 0 INDEX_ORIGINAL = 1 INDEX_WEIGHT = 2 REWARD_STEP = -1 REWARD_TERMINAL = 0 # Constructor # Input: size of the storage for previous Q values, parameters for how many neighbours which the agent will choose def __init__(self, size, k): self.state = [KNNAgent.INITIAL_VELOCITY, KNNAgent.INITIAL_POSITION] self.q_storage = [] self.k = k # fixed number of nearest neighbours that we will used self.alpha = 1 # will be decaying and change later # Storage of the k nearest neighbour (data) and weight (inverse of distance) for a particular step self.knn = [] self.weight = [] # Initialise the storage with random point for i in range(size): initial_action = random.randint(-1, 1) initial_state = [random.uniform(-0.07, 0.07), random.uniform(-1.2, 0.6)] # Each data on the array will consist of state, action pair + value data = {"state": initial_state, "value": KNNAgent.INITIAL_VALUE, "action": initial_action} self.q_storage.append(data) # Find all index for a given value # Input: value, list to search # Output: list of all index where you find that value on the list def findAllIndex(self, value, list_value): indices = [] for i in range(len(list_value)): if (value == list_value[i]): indices.append(i) return indices # Standardise feature vector given # Input: feature vector to be standardised # Output: standardised 
feature vector def standardiseState(self, state): standardised_state = [] # The number is taken from VELOCITY_BOUNDARIES and POSITION_BOUNDARIES using normal standardisation formula standardised_velocity = 2 * ((state[0]+0.07) / (0.07+0.07)) - 1 standardised_position = 2 * ((state[1]+1.2) / (0.6+1.2)) - 1 standardised_state.append(standardised_velocity) standardised_state.append(standardised_position) return(standardised_state) # Calculate Euclidean distance between 2 vectors # Input: 2 feature vectors # Output: distance between them def calculateDistance(self, vector1, vector2): return(math.sqrt((vector1[0]-vector2[0])**2 + (vector1[1]-vector2[1])**2)) # Calculate total weight # Input: list of weights # Output: total weight def calculateTotalWeight(self, weight_list): total_weight = 0 for i in range(len(weight_list)): total_weight += weight_list[i][KNNAgent.INDEX_WEIGHT] return(total_weight) # Apply the kNN algorithm for feature vector and store the data point on the neighbours array # Input: feature vector of current state, actions array consisting of all possible actions, list that will store knn data and weights data # Output: vector containing the value of taking each action (left, neutral, right) def kNNTD(self, state, actions, knn_list, weight_list): approximate_action = [] # Get the standardised version of state standardised_state = self.standardiseState(state) # Loop through every element in the storage array and only calculate for particular action for action in actions: temp = [] # array consisting of tuple (distance, original index, weight) for each point in the q_storage for i in range(len(self.q_storage)): data = self.q_storage[i] # Only want to calculate the nearest neighbour state which has the same action if (data["action"] == action): vector_2 = data["state"] standardised_vector_2 = self.standardiseState(vector_2) distance = self.calculateDistance(standardised_state, standardised_vector_2) index = i weight = 1 / (1+distance**2) # weight formula # 
Create the tuple and append that to temp temp.append(tuple((distance, index, weight))) else: continue # After we finish looping through all of the point and calculating the standardise distance, # Sort the tuple based on the distance and only take k of it and append that to the neighbours array # We also need to calculate the total weight to make it into valid probability that we can compute it's expectation sorted_temp = sorted(temp, key=lambda x: x[0]) for i in range(self.k): try: weight_list.append(sorted_temp[i]) knn_list.append(self.q_storage[sorted_temp[i][KNNAgent.INDEX_ORIGINAL]]) except IndexError: sys.exit(0) # Calculate the expected value of the action and append it to the approximate_action array expected_value = 0 total_weight = self.calculateTotalWeight(weight_list[(action+1)*self.k:(action+1)*self.k + self.k]) for i in range((action+1)*self.k, (action+1)*self.k + self.k): weight = weight_list[i][KNNAgent.INDEX_WEIGHT] probability = weight / total_weight expected_value += probability * knn_list[i]["value"] approximate_action.append(expected_value) return(approximate_action) # Select which action to choose, whether left, neutral, or right (using epsilon greedy) # Output: -1 (left), 0 (neutral), 1 (right) def selectAction(self): # First call the knn-td algorithm to determine the value of each Q(s,a) pairs action_value = self.kNNTD(self.state, KNNAgent.ACTIONS, self.knn, self.weight) # Use the epsilon-greedy method to choose value random_number = random.uniform(0.0, 1.0) if (random_number <= KNNAgent.EPSILON): action_chosen = random.randint(-1, 1) else: # Return the action with highest Q(s,a) possible_index = self.findAllIndex(max(action_value), action_value) action_chosen = possible_index[random.randrange(len(possible_index))] - 1 # Only store chosen data in the knn and weight list # Clearance step chosen_knn = [] chosen_weight = [] for i in range(self.k*(action_chosen+1), self.k*(action_chosen+1) + self.k): chosen_knn.append(self.knn[i]) 
chosen_weight.append(self.weight[i]) self.knn = chosen_knn self.weight = chosen_weight return action_chosen # Calculate TD target based on Q Learning/ SARSAMAX # Input: Immediate reward based on what the environment gave # Output: TD target based on off policy Q learning def calculateTDTarget(self, immediate_reward): # Consider condition on the final state, return 0 immediately if (immediate_reward == KNNAgent.REWARD_TERMINAL): return(immediate_reward) knn_prime = [] weight_prime = [] action_value = self.kNNTD(self.state, KNNAgent.ACTIONS, knn_prime, weight_prime) return(immediate_reward + KNNAgent.GAMMA*max(action_value)) # Q learning TD updates on every neighbours on the kNN based on the contribution that are calculated using probability weight # Input: Immediate reward based on what the environment gave def TDUpdate(self, immediate_reward, alpha): self.alpha = alpha # First, calculate the TD target td_target = self.calculateTDTarget(immediate_reward) # Iterate every kNN and update using Q learning method based on the weighting total_weight = self.calculateTotalWeight(self.weight) for i in range(len(self.weight)): index = self.weight[i][KNNAgent.INDEX_ORIGINAL] probability = self.weight[i][KNNAgent.INDEX_WEIGHT] / total_weight # Begin updating td_error = td_target - self.q_storage[index]["value"] self.q_storage[index]["value"] = self.q_storage[index]["value"] + self.alpha*td_error*probability self.cleanList() # clean list to prepare for another step # Clear the knn list and also the weight list def cleanList(self): self.knn = [] self.weight = [] ``` ## KNN Main Function **KNN Main function** is responsible for initiating the KNN agent, environment and handling agent-environment interaction. It consists of a non-terminate inner loop that direct agent decision while also giving reward and next state from the environment. This inner loop will only break after the agent successfully get out of the environment, which in this case the mountain. 
The outer loop can also be created to control the number of episodes which the agent will perform before the main function ends. Apart from handling agent-environment interaction, main function also responsible to display three kinds of visualisation which will be explain below the appropriate graph. ``` # Generate decaying alphas # Input: minimum alpha, number of episodes # Output: list containing alpha def generateAlphas(minimum_alpha, n_episodes): return(np.linspace(1.0, MIN_ALPHA, N_EPISODES)) N_EPISODES = 1000 MIN_ALPHA = 0.02 alphas = generateAlphas(MIN_ALPHA, N_EPISODES) # Initialise the environment and the agent size = 1000 # size of the q_storage k = 6 # knn parameter (this is the best k so far that we have) agent = KNNAgent(size, k) mountain_car_environment = MountainCarEnvironment(agent) # Used for graphing purposes count_step = [] # counter for how many step in each episodes # Iterate the process, train the agent (training_iteration episodes) training_iteration = N_EPISODES for i in range(training_iteration): step = 0 alpha = alphas[i] mountain_car_environment.reset() while (True): action = agent.selectAction() next_state = mountain_car_environment.nextState(action) # Change agent current state and getting reward agent.state = next_state immediate_reward = mountain_car_environment.calculateReward() # Used for graphing step += 1 # Test for successful learning if (immediate_reward == MountainCarEnvironment.REWARD_TERMINAL): agent.TDUpdate(immediate_reward, alpha) count_step.append(step) clear_output(wait=True) # clear previous output # Create table d = {"Steps": count_step} episode_table = pd.DataFrame(data=d, index=np.arange(1, len(count_step)+1)) episode_table.index.names = ['Episodes'] display(episode_table) break # Update using Q Learning and kNN agent.TDUpdate(immediate_reward, alpha) ``` The table above displays total step data taken from 1000 episodes simulation. 
The first column represents the episode and the second column represents the total steps taken in that episode. It can be seen from the table that during the first few episodes, the agent hadn't yet learned the environment and hence chose actions suboptimally, reflected in the huge number of steps taken to reach the goal. Despite that, after experiencing hundreds of episodes the agent has learned the environment and the Q values, which enables it to reach the goal in just 200-400 steps. ``` # Create graph for step vs episodes y = count_step x = np.arange(1, len(y) + 1) plt.plot(x, y) plt.title("Steps vs Episodes (Log Scale)", fontsize=16) plt.xlabel("Episodes") plt.ylabel("Steps") plt.xscale('log') plt.yscale('log') plt.show() ``` The line plot visualises the table explained above. On the y axis, the plot displays the steps taken in each episode, while the x axis shows the episode number (1000 in the simulation). The line plot is displayed in log-log scale to make it easy to visualise small fluctuations within episodes and to make sure that the large step counts in the first few episodes don't dominate the graph. From the plot we can see that the overall trend is downward. This result implies that over many episodes the Q values get better and better, eventually converging to the true Q values. Consequently, the agent performs better and better, and the number of steps taken to get out of the mountain decreases with the number of episodes. 
``` # Create heatmap for Q values data = pd.DataFrame() data_left = [] data_neutral = [] data_right = [] position_left = [] position_neutral = [] position_right = [] velocity_left = [] velocity_neutral = [] velocity_right = [] # Sort q_storage based on position and velocity q_storage_sorted = sorted(agent.q_storage, key=lambda k: k['state'][0]) # Separate action left, neutral, and right for elem in q_storage_sorted: if (elem["action"] == -1): data_left.append(elem["value"]) position_left.append(elem["state"][1]) velocity_left.append(elem["state"][0]) elif (elem["action"] == 0): data_neutral.append(elem["value"]) position_neutral.append(elem["state"][1]) velocity_neutral.append(elem["state"][0]) else: data_right.append(elem["value"]) position_right.append(elem["state"][1]) velocity_right.append(elem["state"][0]) # Make scatter plot for 3 actions (left, neutral, right) # Left plt.scatter(x=velocity_left, y=position_left, c=data_left, cmap="YlGnBu") plt.title("Q Values (Action Left)", fontsize=16) plt.xlabel("Velocity") plt.ylabel("Position") plt.colorbar() plt.show() # Neutral plt.scatter(x=velocity_neutral, y=position_neutral, c=data_neutral, cmap="YlGnBu") plt.title("Q Values (Action Neutral)", fontsize=16) plt.xlabel("Velocity") plt.ylabel("Position") plt.colorbar() plt.show() # Right plt.scatter(x=velocity_right, y=position_right, c=data_right, cmap="YlGnBu") plt.title("Q Values (Action Right)", fontsize=16) plt.xlabel("Velocity") plt.ylabel("Position") plt.colorbar() plt.show() ``` Three scatter plots above display Q values for every action on the last episode (1000). Y axis represents position and x axis represents velocity of the 1000 points that we scattered random uniformly initially. To represent Q values for every point, these scatter plots use color indicating value that can be seen from the color bar. When the point is darker, the Q value is around -20. On the other hand, if the point is lighter the Q value is around -100. 
These Q values are later used for comparison with PNA Algorithm ## PNA Agent **PNA** may be viewed as a refinement for kNN, with k adapting to the situation. On the one hand, it is beneficial to use large k since that means large data can be learn from. On the other hand, it is beneficial to learn only from the most similar past experiences (small k), as the data they provide should be the most relevant. PNA suggests that when predicting the value of an action a in a state s, k should be chosen dynamically to minimise: ![equation](pictures/variance.jpg) where c = 1 and Var(Nsa) is the variance of observed rewards in the neighbourhood Nsa. This is a negative version of the term endorsing exploration in the UCB algorithm. Here it promotes choosing neighbourhoods that contain as much data as possible but with small variation between rewards. For example, in the ideal choice of k, all k nearest neighbours of (s, a) behave similarly, but actions farther away behave very differently. Action are chosen optimistically according to the UCB: ![equation](pictures/action_selection.jpg) with c > 0 a small constant. The upper confidence bound is composed of two terms: The first terms is the estimated value, and the second term is an exploration bonus for action whose value is uncertain. Actions can have uncertain value either because they have rarely been selected or have a high variance among previous returns. Meanwhile, the neighbourhoods are chosen "pessimistically" for each action to minimise the exploration bonus. **Algorithm:** 1. Cover the whole state space by some initial Q(s,a) pairs, possibly scatter it uniformly across the whole state space and give an initial value of 0/ -1 2. When an agent in a particular state, get the feature vectors representing the state and possible actions from the state 3. For each possible action from the state, calculate Q(s,a) pairs by taking the expected value from previous Q values based on k-nearest neighbours of a particular action. 
With PNA, we also need to consider the k value dynamically. *Steps for PNA:* - Standardise every feature in the feature vectors to (-1, 1) or another range to make sure that one feature's scale does not dominate the distance calculation (i.e. if position ranges between (-50, 50) and velocity between (-0.7, 0.7), position will dominate the distance calculation). - Calculate the distance between the current state and all other points with the same action using a distance formula (i.e. Euclidean distance) and sort by closest distance - Determine k by minimising the variance function described above - Store the k-nearest neighbours in the knn vector, and their distances (for weights) in the weight vector - Determine the probability p(x) for the expected value by using a weight calculation (i.e. weight = 1/distance). To calculate the weight, one can use another formula as long as it gives more weight to closer points. To calculate p(x), just divide each individual weight by the sum of all weights to get a probability - Estimate the Q(s,a) pairs using the expectation formula over the kNN's previous Q values 4. Use epsilon-greedy/ UCB/ other decision methods to choose the next move 5. Observe the reward and update the Q values for only the closest neighbour (1 point, or a number chosen as a hyperparameter) from the KNN array using SARSA or Q Learning. (in the code below, I use Q Learning) 6. 
Repeat step 2-5
```
class PNAAgent:
    """
    Description: Mountain Car problem agent based on PNA algorithm
    adapted from Marcus Hutter's literatures
    Author: Maleakhi Agung Wijaya

    NOTE(review): relies on `random`, `math`, `np` (numpy) and `sys` being
    imported earlier in the notebook.
    """

    INITIAL_VELOCITY = 0.0
    INITIAL_POSITION = -0.5
    INITIAL_VALUE = -1       # initial Q value for every scattered point
    ACTIONS = [-1, 0, 1]     # left, neutral, right
    GAMMA = 0.995            # discount factor
    C = 0.01 # UCB constant
    EPSILON = 0.05           # exploration rate for epsilon-greedy
    RADIUS = 1               # number of closest neighbours updated per TD step
    # Positions inside each (distance, original index, weight) tuple
    INDEX_DISTANCE = 0
    INDEX_ORIGINAL = 1
    INDEX_WEIGHT = 2
    REWARD_STEP = -1
    REWARD_TERMINAL = 0

    # Constructor
    # Input: size of the storage for previous Q values
    def __init__(self, size):
        self.state = [PNAAgent.INITIAL_VELOCITY, PNAAgent.INITIAL_POSITION]
        self.q_storage = []
        self.alpha = 1 # choose fixed alpha, but we will vary alpha later

        # Storage of the k nearest neighbour (data) and weight (inverse of distance) for a particular step
        self.knn = []
        self.weight = []
        self.k_history = [] # used to store history of k chosen for each action

        # For plotting expected PNA function graph
        # (one accumulator slot per possible k, per action)
        self.var_function_left = []
        self.var_function_neutral = []
        self.var_function_right = []
        self.converge_function_left = []
        self.converge_function_neutral = []
        self.converge_function_right = []
        self.episode = 0 # keep count of how many episodes for plotting purposes as well

        # Initialise the storage with random point
        for i in range(size):
            initial_value = PNAAgent.INITIAL_VALUE
            initial_action = random.randint(-1, 1)
            # [velocity, position] drawn uniformly over the Mountain Car ranges
            initial_state = [random.uniform(-0.07, 0.07), random.uniform(-1.2, 0.6)]

            # Fill the graph with all possible k
            if (initial_action == -1):
                self.var_function_left.append(0)
                self.converge_function_left.append(0)
            elif (initial_action == 0):
                self.var_function_neutral.append(0)
                self.converge_function_neutral.append(0)
            else:
                self.var_function_right.append(0)
                self.converge_function_right.append(0)

            # Each data on the array will consist of state, action pair + value
            data = {"state": initial_state, "value": initial_value, "action": initial_action}
            self.q_storage.append(data)

        # Since the k start at 2 that we want to calculate, just pop 1
        # (index 0 of each accumulator corresponds to k = 2)
        self.var_function_left.pop()
        self.var_function_neutral.pop()
        self.var_function_right.pop()
        self.converge_function_left.pop()
        self.converge_function_neutral.pop()
        self.converge_function_right.pop()

    # Standardise feature vector given
    # Input: feature vector to be standardised ([velocity, position])
    # Output: standardised feature vector, each component mapped to (-1, 1)
    def standardiseState(self, state):
        standardised_state = []
        standardised_velocity = 2 * ((state[0]+0.07) / (0.07+0.07)) - 1
        standardised_position = 2 * ((state[1]+1.2) / (0.6+1.2)) - 1
        standardised_state.append(standardised_velocity)
        standardised_state.append(standardised_position)

        return(standardised_state)

    # Find all index for a given value
    # Input: value, list to search
    # Output: list of all index where you find that value on the list
    def findAllIndex(self, value, list_value):
        indices = []
        for i in range(len(list_value)):
            if (value == list_value[i]):
                indices.append(i)
        return indices

    # Calculate Euclidean distance between 2 vectors
    # Input: 2 feature vectors (2-dimensional)
    # Output: distance between them
    def calculateDistance(self, vector1, vector2):
        return(math.sqrt((vector1[0]-vector2[0])**2 + (vector1[1]-vector2[1])**2))

    # Calculate total weight
    # Input: list of (distance, index, weight) tuples
    # Output: total weight
    def calculateTotalWeight(self, weight_list):
        total_weight = 0
        for i in range(len(weight_list)):
            total_weight += weight_list[i][PNAAgent.INDEX_WEIGHT]
        return(total_weight)

    # Clear the knn list, k_history, and also the weight list
    def cleanList(self):
        self.knn = []
        self.weight = []
        self.k_history = []

    # Choose the appropriate k by minimising variance and maximising the number of data to learn
    # Input: sorted neighbourhood list based on distance (distance, index, weight)
    # Output: k (numbers of nearest neighbour) that minimise neighbourhood variance function
    def chooseK(self, neighbourhood_list):
        data_list = []

        # Extract the data (Q value from the neighbourhood_list) and append it to the data_list.
        # All entries share the same action because PNA() filters by action before
        # calling us, so `action` ends up as that common action.
        # NOTE(review): assumes neighbourhood_list is non-empty — confirm callers guarantee this.
        for data in neighbourhood_list:
            data_list.append(self.q_storage[data[PNAAgent.INDEX_ORIGINAL]]["value"])
            action = self.q_storage[data[PNAAgent.INDEX_ORIGINAL]]["action"]

        # Initialise minimum variance
        minimum_k = 2 # Variable that will be return that minimise the variance of the neighbourhood
        minimum_function = self.neighbourhoodVariance(1, data_list[:2])

        # For plotting variance function graph
        list_var = []
        if (action == -1):
            list_var = self.var_function_left
        elif (action == 0):
            list_var = self.var_function_neutral
        else:
            list_var = self.var_function_right
        list_var[0] += minimum_function

        # Separate accumulators for the "after convergence" plots (episodes > 900)
        if (self.episode > 900):
            list_var_converge = []
            if (action == -1):
                list_var_converge = self.converge_function_left
            elif (action == 0):
                list_var_converge = self.converge_function_neutral
            else:
                list_var_converge = self.converge_function_right
            list_var_converge[0] += minimum_function

        # Running (incremental) statistics: sum of squared deviations and mean
        # for the first 2 neighbours, extended one neighbour at a time below.
        previous_sum_variance = np.var(data_list[:2]) * 2
        previous_mean = np.mean(data_list[:2])
        k = 2

        # Iterate to find optimal k that will minimise the neighbourhood variance function
        for i in range(2, len(neighbourhood_list)):
            target_x = data_list[i]
            # Welford-style incremental update of mean and sum of squared deviations
            mean = (previous_mean * k + target_x) / (k + 1)
            current_sum_variance = previous_sum_variance + (target_x - previous_mean) * (target_x - mean)

            # Update for next iteration
            k = k + 1
            previous_sum_variance = current_sum_variance
            previous_mean = mean
            function = self.neighbourhoodVariance(1, [], previous_sum_variance / k, k)
            list_var[k-2] += function
            if (self.episode > 900):
                list_var_converge[k-2] += function

            # Update the k value and minimum var value if find parameter which better minimise than the previous value
            # (<= keeps the largest k among ties)
            if (function <= minimum_function):
                minimum_k = k
                minimum_function = function

        return(minimum_k)

    # PNA variance function that needed to be minimise: sqrt(c * Var / k)
    # Input: constant c, list containing data points; alternatively pass a
    #        precomputed variance `var` and neighbourhood size `k`
    # Output: calculation result from the neighbourhood variance function
    def neighbourhoodVariance(self, c, data_list, var = None, k = None):
        if (var == None):
            return(math.sqrt(c * np.var(data_list) / len(data_list)))
        else:
            return(math.sqrt(c * var / k))

    # Get starting index for the weight list
    # (weight list stores the left-action block first, then neutral, then right;
    #  k_history holds the block lengths in the same order)
    # Input: action, k_history
    # Output: starting index for the weight list
    def getStartingIndex(self, action, k_history):
        count_action = action + 1
        if (count_action == 0):
            return(0)
        else:
            index = 0
            for i in range(count_action):
                index += k_history[i]
            return(index)

    # Apply the PNA algorithm for feature vector and store the data point on the neighbours array
    # Input: feature vector of current state, actions array consisting of all possible actions, list that will store knn data and weights data, k_history
    # Output: vector containing the value of taking each action (left, neutral, right)
    def PNA(self, state, actions, knn_list, weight_list, k_history):
        approximate_action = []

        # Get the standardised version of state
        standardised_state = self.standardiseState(state)

        # Loop through every element in the storage array and only calculate for particular action
        for action in actions:
            temp = [] # array consisting of tuple (distance, original index, weight) for each point in the q_storage
            for i in range(len(self.q_storage)):
                data = self.q_storage[i]

                # Only want to calculate the nearest neighbour state which has the same action
                if (data["action"] == action):
                    vector_2 = data["state"]
                    standardised_vector_2 = self.standardiseState(vector_2)
                    distance = self.calculateDistance(standardised_state, standardised_vector_2)
                    index = i
                    # closer points get larger weight; +1 avoids division by zero
                    weight = 1 / (1+distance**2)

                    # Create the tuple and append that to temp
                    temp.append(tuple((distance, index, weight)))
                else:
                    continue

            # After we finish looping through all of the point and calculating the standardise distance,
            # Sort the tuple based on the distance and only take k of it and append that to the neighbours array
            sorted_temp = sorted(temp, key=lambda x: x[0])

            # Get the value of the k dynamically
            k = self.chooseK(sorted_temp)
            k_history.append(k)

            # NOTE(review): sys.exit(0) on IndexError silently kills the whole
            # process; a raised exception would be easier to debug.
            for i in range(k):
                try:
                    weight_list.append(sorted_temp[i])
                    knn_list.append(self.q_storage[sorted_temp[i][PNAAgent.INDEX_ORIGINAL]])
                except IndexError:
                    sys.exit(0)

            # Calculate the expected value of the action and append it to the approximate_action array
            expected_value = 0

            # We also need to calculate the total weight to make it into valid probability that we can compute it's expectation
            total_weight = self.calculateTotalWeight(weight_list[self.getStartingIndex(action, k_history):self.getStartingIndex(action, k_history)+k])
            for i in range(self.getStartingIndex(action, k_history), self.getStartingIndex(action, k_history) + k):
                try:
                    weight = weight_list[i][PNAAgent.INDEX_WEIGHT]
                    probability = weight / total_weight
                    expected_value += probability * knn_list[i]["value"]
                except IndexError:
                    sys.exit(0)

            approximate_action.append(expected_value)

        return(approximate_action)

    # Calculate TD target based on Q Learning/ SARSAMAX
    # Input: Immediate reward based on what the environment gave
    # Output: TD target based on off policy Q learning
    def calculateTDTarget(self, immediate_reward):
        # Condition if final state: no bootstrapping from the next state
        if (immediate_reward == PNAAgent.REWARD_TERMINAL):
            return(immediate_reward)

        # Evaluate Q(s', a') for all actions from the (already updated) current
        # state using throwaway knn/weight buffers, then bootstrap off the max.
        k_history = []
        knn_prime = []
        weight_prime = []
        action_value = self.PNA(self.state, PNAAgent.ACTIONS, knn_prime, weight_prime, k_history)
        return(immediate_reward + PNAAgent.GAMMA * max(action_value))

    # Q learning TD updates on every neighbours on the kNN based on the contribution that are calculated using probability weight
    # Input: Immediate reward based on what the environment gave, learning rate alpha
    def TDUpdate(self, immediate_reward, alpha):
        self.alpha = alpha

        # First, calculate the TD target
        td_target = self.calculateTDTarget(immediate_reward)

        try:
            # Update only the #radius closest point
            total_weight = self.calculateTotalWeight(self.weight[0:PNAAgent.RADIUS])
            for i in range(PNAAgent.RADIUS):
                index = self.weight[i][PNAAgent.INDEX_ORIGINAL]
                probability = self.weight[i][PNAAgent.INDEX_WEIGHT] / total_weight
                td_error = td_target - self.q_storage[index]["value"]
                self.q_storage[index]["value"] += self.alpha * td_error * probability
        except IndexError:
            # Fewer than RADIUS neighbours available — fall back to updating all of them
            total_weight = self.calculateTotalWeight(self.weight)
            for i in range(len(self.weight)):
                index = self.weight[i][PNAAgent.INDEX_ORIGINAL]
                probability = self.weight[i][PNAAgent.INDEX_WEIGHT] / total_weight

                # Begin updating
                td_error = td_target - self.q_storage[index]["value"]
                self.q_storage[index]["value"] += self.alpha * td_error * probability

        self.cleanList() # clean list to prepare for another step

    # Choosing based on Epsilon Greedy method
    # Input: action_value array consisting the Q value of every action
    # Output: action chosen (-1, 0, 1)
    def epsilonGreedy(self, action_value):
        # Use the epsilon-greedy method to choose value
        random_number = random.uniform(0.0, 1.0)
        if (random_number <= PNAAgent.EPSILON):
            action_chosen = random.randint(-1, 1)
        else:
            # Return the action with highest Q(s,a), breaking ties at random;
            # index 0/1/2 maps to action -1/0/1, hence the "- 1"
            possible_index = self.findAllIndex(max(action_value), action_value)
            action_chosen = possible_index[random.randrange(len(possible_index))] - 1

        return action_chosen

    # Getting the maximum of the ucb method
    # Input: action_value list, bonus_variance list
    # Output: action which maximise
    def maximumUCB(self, action_value, bonus_variance):
        max_index = 0
        max_value = action_value[0] + bonus_variance[0]

        # Check 1, 2 (all possible action)
        for i in range(1, 3):
            value = action_value[i] + bonus_variance[i]
            if (value >= max_value):
                max_value = value
                max_index = i

        return(max_index - 1) # return the action which maximise

    # Select which action to choose, whether left, neutral, or right (using UCB)
    # NOTE(review): despite the UCB framing, the final choice below is made via
    # epsilonGreedy(); maximumUCB() is not called here.
    # Output: -1 (left), 0 (neutral), 1 (right)
    def selectAction(self):
        action_value = self.PNA(self.state, PNAAgent.ACTIONS, self.knn, self.weight, self.k_history)

        # Second term of ucb, calculate the bonus variance
        # (start/finish delimit each action's block inside self.weight/self.knn)
        start_index = [] # used to calculate start index for each action
        finish_index = [] # used to calculate end index for each action
        for action in PNAAgent.ACTIONS:
            # Prevent index out of bound
            if (action != 1):
                # Data extraction
                start_index.append(self.getStartingIndex(action, self.k_history))
                finish_index.append(self.getStartingIndex(action+1, self.k_history))
            else:
                # Data extraction
                start_index.append(self.getStartingIndex(action, self.k_history))
                finish_index.append(len(self.weight))

        # Choose the action based on ucb method
        action_chosen = self.epsilonGreedy(action_value)

        # Only store chosen data in the knn and weight list
        # Clearance step
        chosen_knn = []
        chosen_weight = []
        for i in range(start_index[action_chosen+1], finish_index[action_chosen+1]):
            chosen_knn.append(self.knn[i])
            chosen_weight.append(self.weight[i])
        self.knn = chosen_knn
        self.weight = chosen_weight

        return action_chosen
```

## PNA Main Function

**PNA Main function** is responsible for initiating the PNA agent, environment and handling agent-environment interaction. It consists of a non-terminate inner loop that direct agent decision while also giving reward and next state from the environment. This inner loop will only break after the agent successfully get out of the environment, which in this case the mountain or if it is taking too long to converge. The outer loop can also be created to control the number of episodes which the agent will perform before the main function ends.

Apart from handling agent-environment interaction, main function also responsible to display five kinds of visualisation. First, table/ DataFrame displaying episodes and step that are required by the agent to get out of the mountain on each episode. Second, scatter plot displaying steps on the y axis and episodes on the x axis to learn about algorithm convergence property. Third, expected standard error function for every actions. Fourth, heatmap of the Q value for the last episode. Lastly, as the k is dynamically changing each steps, I have created a heatmap indicating k chosen each steps for first episode and last episode.
``` # Generate decaying alphas # Input: minimum alpha, number of episodes # Output: list containing alpha def generateAlphas(minimum_alpha, n_episodes): return(np.linspace(1.0, MIN_ALPHA, N_EPISODES)) N_EPISODES = 1000 MIN_ALPHA = 0.02 alphas = generateAlphas(MIN_ALPHA, N_EPISODES) # Initialise the environment and the agent size = 1000 # size of the q_storage agent = PNAAgent(size) mountain_car_environment = MountainCarEnvironment(agent) convergence = 100 # used to extract data when agent has converges # Used for graphing purposes count_step = [] # counter for how many step in each episodes k_first_left = [] k_first_neutral = [] k_first_right = [] k_last_left = [] k_last_neutral = [] k_last_right = [] k_convergence_left = agent.var_function_left[:] k_convergence_neutral = agent.var_function_neutral[:] k_convergence_right = agent.var_function_right[:] # Iterate the process, train the agent (training_iteration episodes) total_step = 0 training_iteration = N_EPISODES for i in range(training_iteration): step = 0 alpha = alphas[i] mountain_car_environment.reset() agent.episode = i + 1 while (True): action = agent.selectAction() next_state = mountain_car_environment.nextState(action) # Change agent current state and getting reward agent.state = next_state immediate_reward = mountain_car_environment.calculateReward() # Used for graphing step += 1 total_step += 1 # Only append first and last episode (for the k) if (i == 1): k_first_left.append(agent.k_history[0]) k_first_neutral.append(agent.k_history[1]) k_first_right.append(agent.k_history[2]) if (i == (training_iteration - 1)): k_last_left.append(agent.k_history[0]) k_last_neutral.append(agent.k_history[1]) k_last_right.append(agent.k_history[2]) # Count how many k chosen after converge if (agent.episode > 900): # Increment count when a particular k is chosen, 2 is just scaling factor since the k starts from 2 in the array k_convergence_left[agent.k_history[0]-2] += 1 k_convergence_neutral[agent.k_history[1]-2] += 1 
k_convergence_right[agent.k_history[2]-2] += 1 # Test for successful learning if (immediate_reward == MountainCarEnvironment.REWARD_TERMINAL): agent.TDUpdate(immediate_reward, alpha) count_step.append(step) clear_output(wait=True) # clear previous output # Create table d = {"Steps": count_step} episode_table = pd.DataFrame(data=d, index=np.arange(1, len(count_step)+1)) episode_table.index.names = ['Episodes'] display(episode_table) break # Update using Q Learning and kNN agent.TDUpdate(immediate_reward, alpha) ``` The table above displays total step data taken from 1000 episodes simulation. The first column represents episode and the second column represents total steps taken in a particular episode. It can be seen from the table that during the first few episodes, the agent hasn't learned the environment and hence it chose action unoptimally represented by huge number of steps taken to get to goal. Despite that, after experiencing hundred of episodes the agent have learnt the environment and Q values which enable it to reach the goal in around 300-600 steps. ``` # Create graph for step vs episodes y = count_step x = np.arange(1, len(y) + 1) plt.plot(x, y) plt.title("Steps vs Episodes (Log Scale)", fontsize=16) plt.xlabel("Episodes (Log)") plt.ylabel("Steps (Log)") plt.xscale('log') plt.yscale('log') plt.show() ``` The line plot visualise the table that are explained above. On the y axis, the plot displays steps taken on each episode, while on the x axis the number of episodes (1000 in the simulation). The line plot is displayed in log-log scale to make it easy to visualise small fluctuation within episode and making sure that large steps in first few episodes don't dominate the graph. From the plot we can see that the overall trend is going downward. The result implies that over many episodes the Q values is getting better and better which eventually will converge to true Q values. 
Consequently, the agent perform better and better and the step taken to get out of the mountain will decrease with respect to number of episodes. ``` # Create plot for the average standard error function average_var_left = [] average_var_neutral = [] average_var_right = [] for elem in agent.var_function_left: average_var_left.append(elem / total_step) for elem in agent.var_function_neutral: average_var_neutral.append(elem / total_step) for elem in agent.var_function_right: average_var_right.append(elem / total_step) # Make a scatter plot # Left y = average_var_left x = np.arange(2, len(y)+2) plt.plot(x, y, color="#55A868") plt.title("Average Standard Error Function vs K (Action Left)", fontsize=16) plt.xlabel("K") plt.ylabel("Average f(K)") plt.xticks(np.arange(2, len(y) + 2, 50)) plt.show() # Make a scatter plot # Neutral y = average_var_neutral x = np.arange(2, len(y)+2) plt.plot(x, y, color="#55A868") plt.title("Average Standard Error Function vs K (Action Neutral)", fontsize=16) plt.xlabel("K") plt.ylabel("Average f(K)") plt.xticks(np.arange(2, len(y) + 2, 50)) plt.show() # Make a scatter plot # Right y = average_var_right x = np.arange(2, len(y)+2) plt.plot(x, y, color="#55A868") plt.title(" Average Standard Error Function vs K (Action Right)", fontsize=16) plt.xlabel("K") plt.ylabel("Average f(K)") plt.xticks(np.arange(2, len(y) + 2, 50)) plt.show() # Now plot the standard error function after convergence reverse_count_step = count_step[::-1] total_last_step = 0 for i in range(convergence): total_last_step += reverse_count_step[i] average_converge_left = [] average_converge_neutral = [] average_converge_right = [] for elem in agent.converge_function_left: average_converge_left.append(elem / total_last_step) for elem in agent.converge_function_neutral: average_converge_neutral.append(elem / total_last_step) for elem in agent.converge_function_right: average_converge_right.append(elem / total_last_step) # Make a scatter plot # Left y = average_converge_left x = 
np.arange(2, len(y)+2) plt.plot(x, y, color="#B14C4D") plt.title("Average Standard Error Function vs K After Convergence (Action Left)", fontsize=16) plt.xlabel("K") plt.ylabel("Average f(K)") plt.xticks(np.arange(2, len(y) + 2, 50)) plt.show() # Make a scatter plot # Neutral y = average_converge_neutral x = np.arange(2, len(y)+2) plt.plot(x, y, color="#B14C4D") plt.title("Average Standard Error Function vs K After Convergence (Action Neutral)", fontsize=16) plt.xlabel("K") plt.ylabel("Average f(K)") plt.xticks(np.arange(2, len(y) + 2, 50)) plt.show() # Make a scatter plot # Right y = average_converge_right x = np.arange(2, len(y)+2) plt.plot(x, y, color="#B14C4D") plt.title(" Average Standard Error Function vs K After Convergence (Action Right)", fontsize=16) plt.xlabel("K") plt.ylabel("Average f(K)") plt.xticks(np.arange(2, len(y) + 2, 50)) plt.show() ``` The first 3 graphs display average standard error function calculated for every steps from episode 1 - episode 1000. X axis display the possible k for every actions, while y axis display the average standard error function for each k. From both the plot above and bar plot below, it can be seen that k = 2 is chosen most of the time since it's mostly minimise the standard error function compare to other k. Even though 2 is the most frequent k chosen, if we dissect the plot for every episodes, it is not always the case. On some steps/ episodes, the graph are dominated by the number of neighbourhood which makes the graph looks like 1/sqrt(n) resulted in large amount of k (200-300) chosen. The last 3 graphs display average standard error function calculated for the last 100 episodes out of 1000 episodes (converges). These graphs have similar value with the first 3 graphs and hence the explanation is similar. 
``` # Create heatmap for Q values data = pd.DataFrame() data_left = [] data_neutral = [] data_right = [] position_left = [] position_neutral = [] position_right = [] velocity_left = [] velocity_neutral = [] velocity_right = [] # Sort q_storage based on position and velocity q_storage_sorted = sorted(agent.q_storage, key=lambda k: k['state'][0]) # Separate action left, neutral, and right for elem in q_storage_sorted: if (elem["action"] == -1): data_left.append(elem["value"]) position_left.append(elem["state"][1]) velocity_left.append(elem["state"][0]) elif (elem["action"] == 0): data_neutral.append(elem["value"]) position_neutral.append(elem["state"][1]) velocity_neutral.append(elem["state"][0]) else: data_right.append(elem["value"]) position_right.append(elem["state"][1]) velocity_right.append(elem["state"][0]) # Make scatter plot for 3 actions (left, neutral, right) # Left plt.scatter(x=velocity_left, y=position_left, c=data_left, cmap="YlGnBu") plt.title("Q Values (Action Left)", fontsize=16) plt.xlabel("Velocity") plt.ylabel("Position") plt.colorbar() plt.show() # Neutral plt.scatter(x=velocity_neutral, y=position_neutral, c=data_neutral, cmap="YlGnBu") plt.title("Q Values (Action Neutral)", fontsize=16) plt.xlabel("Velocity") plt.ylabel("Position") plt.colorbar() plt.show() # Right plt.scatter(x=velocity_right, y=position_right, c=data_right, cmap="YlGnBu") plt.title("Q Values (Action Right)", fontsize=16) plt.xlabel("Velocity") plt.ylabel("Position") plt.colorbar() plt.show() ``` Three scatter plots above display Q values for every action on the last episode (1000). Y axis represents position and x axis represents velocity of the 1000 points that we scattered random uniformly initially. To represent Q values for every point, these scatter plots use color indicating the value that can be seen from the color bar. When the point is darker, the Q value is around -20. On the other hand, if the point is lighter the Q value is around -100. 
If we observe the Q values for both KNN-TD and PNA, it can be seen that the Q values are roughly similar. This result implies that both of the algorithm converges for the Mountain Car problem and eventually after numerous episodes, the agent Q values will converge to the true Q values. ``` # Create heatmap showing the k (first episode) data = pd.DataFrame() data["Action Left"] = k_first_left data["Action Neutral"] = k_first_neutral data["Action Right"] = k_first_right data["Steps"] = np.arange(1, len(k_first_left) + 1) data.set_index("Steps", inplace=True) grid_kws = {"height_ratios": (.9, .05), "hspace": .3} f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws) ax = sns.heatmap(data, ax=ax, cbar_ax=cbar_ax, cbar_kws={"orientation": "horizontal"}, yticklabels=False) ax.set_title("Number of K Chosen Each Step (First Episode)", fontsize=16) plt.show() # Create heatmap showing the k (last episode) data = pd.DataFrame() data["Action Left"] = k_last_left data["Action Neutral"] = k_last_neutral data["Action Right"] = k_last_right data["Steps"] = np.arange(1, len(k_last_left) + 1) data.set_index("Steps", inplace=True) grid_kws = {"height_ratios": (.9, .05), "hspace": .3} f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws) ax = sns.heatmap(data, ax=ax, cbar_ax=cbar_ax, cbar_kws={"orientation": "horizontal"}, yticklabels=False) ax.set_title("Number of K Chosen Each Step (Last Episode)", fontsize=16) plt.show() ``` The heatmap displayed above represents k chosen each step for every actions. Each strip on the heatmap represents the k chosen on a step. The first heatmap displayed data from the first episode. Based on the heatmap, it can be seen that the k chosen each step during the first episode is large (around 120-180). This occurs because all points are initialise to have a uniform value of -1. 
As the Q values is uniformly/ roughly -1 across the whole space, this will make the variance approximately 0 and hence resulted in the standard error function depends largely on the number of k chosen/ neighbourhoods. As the algorithm prefer to minimise the standard error function, it will chooses as many point as possible. The second heatmap displayed data from the last episode. Based on the heatmap, it can be seen that the k chosen each step is relatively small (around 2-60). This occurs since the agent has gain large amount of experience and Q values greatly differ in different region. As a result, if the agent choose a really large k to learn, it will make the variance very high and hence really large standard error. Consequently, the agent will minimise standard error function by repeatedly choose k around 2-60. ``` # Plot bar chart displaying number of k chosen after convergence y_bar = k_convergence_left x_bar = np.arange(2, len(y_bar) + 2) plt.bar(x_bar, y_bar, color="#FFD700") plt.yscale('log') plt.title("Number of K Chosen vs K After Convergence (Action Left)", fontsize=16) plt.xlabel("K") plt.ylabel("Number of K Chosen") plt.xticks(np.arange(2, len(y_bar) + 2, 50)) plt.show() y_bar = k_convergence_neutral x_bar = np.arange(2, len(y_bar) + 2) plt.bar(x_bar, y_bar, color="#FFD700") plt.yscale('log') plt.title("Number of K Chosen vs K After Convergence (Action Neutral)", fontsize=16) plt.xlabel("K") plt.ylabel("Number of K Chosen") plt.xticks(np.arange(2, len(y_bar) + 2, 50)) plt.show() y_bar = k_convergence_right x_bar = np.arange(2, len(y_bar) + 2) plt.bar(x_bar, y_bar, color="#FFD700") plt.yscale('log') plt.title("Number of K Chosen vs K After Convergence (Action Right)", fontsize=16) plt.xlabel("K") plt.ylabel("Number of K Chosen") plt.xticks(np.arange(2, len(y_bar) + 2, 50)) plt.show() ``` These bar plots represent the number of k chosen for each k after convergence for every actions. 
The x axis represents the possible k for each action, while the y axis represents how many times a particular k was chosen after convergence. The convergence defined in the code is the last 100 episodes out of 1000 episodes. In all of the bar plots, we can see that after convergence the agent mostly chooses k equal to 2 and relatively small k, such as from 2 - 150. This happens because the agent has a lot of experience, which makes the Q values differ greatly between regions. Based on the result, we can see that the variance mostly dominates the standard error function, which forces the agent to choose a small k to minimise the standard error function.
github_jupyter
# Decisions This notebook is based on materials kindly provided by the [IN1900]( https://www.uio.no/studier/emner/matnat/ifi/IN1900/h19/) team. How can we use Python to automatically recognize different features in our data, and take a different action for each? Here, we will learn how to write code that executes only when certain conditions are true. We can tell Python to take an action depending on the value of a variable: ``` length = 42 if length > 100: print('greater') ``` We can also include an alternate path, `else`: ``` length = 42 if length > 100: print('greater') else: print('smaller') print('done') ``` This code can be illustrated with a flowchart: ![if illustration](images/if.png) ## `elif` We can chain multiple `if`-tests with `elif`, short for "else if". ``` length = 42 if length > 100: print('greater') elif length < 0: print('Oops, negative length?') else: print('smaller') print('done') ``` ### <span style="color:green"> Exercise: multiple hits </span> With `elif`, only the first test that yields `True` is executed. The code below is supposed to show a warning for temperatures above 70, but there is a bug. Find two different ways to fix the code, so that the warning is displayed. ``` temperature = 120 if temperature > 0: print("it's warm") elif temperature <= 0: print("it's freezing") elif temperature > 70: print("WARNING: dangerously hot") ``` ## `boolean` Expressions The comparisons that are part of the if statements in the examples are Boolean expressions. Boolean expressions include comparisons (`>`, `<`), equality (`==`) and inequality (`!=`). Boolean expressions evaluate to `True` or `False`. ### `boolean` Connectors We can use the `boolean` connectors or operators to build larger expressions. The boolean connectors in Python are `and`, `or` and `not`. 
``` warm = True cloudy = False print(warm and cloudy) print(warm or cloudy) if warm and not cloudy: print("Remember sunscreen!") ``` ### <span style="color:green"> Exercise: Boolean Operators </span> Again we look at the temperature test. This time, use a Boolean operator to fix this test so that the warning is displayed. ``` temperature = 120 if temperature > 0: print("it's warm") elif temperature <= 0: print("it's freezing") elif temperature > 70: print("WARNING: dangerously hot") ``` ### <span style="color:green"> Case Law Exercise: count dissenting opinions </span> In the code below, we loop through a list of cases from the Case Law Api, then loop through the opinions for each of those cases. Each `opinion` has a `"type"` field which describes if it's a majority opinion, dissenting opinion or concurring opinion. First, try to run the code below to check if you can print out the value of this field for each opinion: ``` import requests import json URL = "https://api.case.law/v1/cases/?jurisdiction=ill&full_case=true&decision_date_min=2011-01-01&page_size=20" data = requests.get(URL).json() cases = data["results"] for case in cases: opinions = case["casebody"]["data"]["opinions"] for opinion in opinions: print(opinion["type"]) ``` Now, try to modify the code below to count the number of dissenting opinions by using an `if` test with `opinion["type"]`. If you find a dissent, you will need to increase the variable `dissent_count`: ``` import requests import json URL = "https://api.case.law/v1/cases/?jurisdiction=ill&full_case=true&decision_date_min=2011-01-01&page_size=20" data = requests.get(URL).json() dissent_count = 0 cases = data["results"] for case in cases: opinions = case["casebody"]["data"]["opinions"] for opinion in opinions: 'Your code here' print("Number of dissents:", dissent_count) ``` ### <span style="color:green"> Library Data Exercise: Count Fulltext Documents </span> In the code below, we loop through a list of items from the National Library API. 
Each `item` has a dictionary `accessInfo`, containing a key `isDigital`. The corresponding value is a Boolean which is `True` if the document is available digitally in fulltext. First, try to run the code below to check if you can print out the value of `isDigital` for each item: ``` import requests import json URL = "https://api.nb.no/catalog/v1/items?size=20&filter=mediatype:b%C3%B8ker&q=Bing,Jon" data = requests.get(URL).json() embedded = data['_embedded'] items = embedded['items'] for item in items: accessInfo = item['accessInfo'] isDigital = accessInfo['isDigital'] print(isDigital) ``` Now, try to modify the code below to count the number of digital fulltext documents by using an `if` test with `isDigital`. If you find a digital document, you will need to increase the variable `fulltext_count`: ``` import requests import json URL = "https://api.nb.no/catalog/v1/items?size=20&filter=mediatype:b%C3%B8ker&q=Bing,Jon" data = requests.get(URL).json() embedded = data['_embedded'] items = embedded['items'] fulltext_count = 0 for item in items: accessInfo = item['accessInfo'] isDigital = accessInfo['isDigital'] # your code here ``` ## <span style="color:blue">Key Points</span> - We use `if`-statements to control program flow - `if`-statements can have an `else`-part - We can chain multiple `if`-statements with `elif` - `if`-statements use Boolean expressions, which can be `True` or `False`
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt ``` # **COVID-19 Twitter Sentiments** # A. **Problem**: Do Twitter-tweet sentiments have any correlations with COVID19 death counts? That is, do states with higher death counts have a particular sentiment correlated to its tweets? # **B. Datasets used** ## Tweet Source: I constructed the textual dataset by using a guide on twitter webscraping. I used the Twint library to construct a twitter webscraper that did not need to use Twitter's API. https://pypi.org/project/twint/ Twint allowed me to filter by tweet date, query (keyword being COVID19), number of tweets that are to be scraped, location of the tweet (state), and finally an output file in `.csv` of the scraped data. The code can be found on my github. The code may be run in a UNIX-based OS via terminal. If that's not possible, one could make an args data class and delete the argparse part from the code. Code: https://github.com/kwxk/twitter-textual-scraper with comments for each line. Here is the general format for the crawler within the argparse of the code: `python tweet_crawler -q [write here query] -d [write here since date] -c [write here how many tweets you want from each state] -s [provide here a list of states each state between quotation marks] -o [write here output file name]` So for example: `python tweet_crawler -q covid19 -d 2020-01-01 -c 100 -s "New Jersey" "Florida" -o output.csv` Tweets were collected from a year to date (December 01, 2021). **I treated this as if it were an ETL pipeline.** ## **Tweet Dataset** The main dataset must be split between states and english (en) tweets must be preserved in each dataset.
### **Main tweet data frame** ``` df = pd.read_csv('covid19.csv') df.head() ``` ## **Split Tweet Dataframe (split by states)** ``` flp = df[df['near'].str.contains('Florida',na=False)] fl = flp[flp['language'].str.contains('en',na=False)] fl txp = df[df['near'].str.contains('Texas',na=False)] tx = txp[txp['language'].str.contains('en',na=False)] tx njp = df[df['near'].str.contains('New Jersey',na=False)] njp = njp[njp['language'].str.contains('en',na=False)] nj nyp = df[df['near'].str.contains('New York',na=False)] ny = nyp[nyp['language'].str.contains('en',na=False)] ny ``` ## **Stopwords** ``` ### Stopwords List stop= open("stopwords.txt").read().replace("\n",' ').split(" ")[:-1] stat = pd.read_csv('us-states.csv') stat ny_stat = stat[stat['state'].str.contains('New York',na=False)] nystat2 = ny_stat.drop(['fips','cases','state'], axis = 1) nj_stat = stat[stat['state'].str.contains('New Jersey',na=False)] njstat2 = nj_stat.drop(['fips','cases','state'], axis = 1) tx_stat = stat[stat['state'].str.contains('Texas',na=False)] txstat2 = tx_stat.drop(['fips','cases','state'], axis = 1) fl_stat = stat[stat['state'].str.contains('Florida',na=False)] flstat2 = fl_stat.drop(['fips','cases','state'], axis = 1) fl_stat = stat[stat['state'].str.contains('Florida',na=False)] fl_stat ``` ## **Sentiment Analysis** ``` from nltk.stem.wordnet import WordNetLemmatizer from gensim import corpora, models from nltk.tokenize import word_tokenize import gensim #import pyLDAvis.gensim_models as gensimvis from gensim import corpora from matplotlib.patches import Rectangle import pandas as pd import numpy as np import nltk nltk.downloader.download('vader_lexicon') nltk.downloader.download('stopwords') from nltk.corpus import stopwords from nltk.tokenize import RegexpTokenizer from wordcloud import WordCloud, STOPWORDS import matplotlib.colors as mcolors import string import matplotlib.pyplot as plt import seaborn as sns from wordcloud import WordCloud import itertools import collections 
import re
import nltk
from nltk.corpus import stopwords
import networkx
from textblob import TextBlob
df1= df.copy()
df1.drop(["Unnamed: 0","username","link","language"],axis=1, inplace=True)
df1
def cleaner(text):
    """Strip @mentions, hashtags, URLs and newlines from a tweet, then lower-case it."""
    text = re.sub("@[A-Za-z0-9]+","",text)  # Remove @mentions
    # Fixed misplaced comment: this line removes hashtags, not @ signs.
    text = re.sub("#[A-Za-z0-9]+","",text)  # Remove hashtags
    text = re.sub(r"(?:\@|http?\://|https?\://|www)\S+", "", text) #Remove http links
    text = text.replace("\n", "")
    text=text.lower()
    return text
df1['tweet'] = df1['tweet'].map(lambda x: cleaner(x))
punct = "\n\r"+string.punctuation
df1['tweet'] = df1['tweet'].str.translate(str.maketrans('','',punct))
def clean_sentence(val):
    """Remove chars that are not letters or numbers, lower-case, then drop stopwords."""
    # Raw string avoids the invalid-escape warning on \s in a plain string.
    regex=re.compile(r'([^\sa-zA-Z0-9]|_)+')
    sentence=regex.sub('',val).lower()
    sentence = sentence.replace('\n'," ")
    sentence = sentence.replace(','," ")
    sentence = sentence.replace('\\~'," ")
    sentence = sentence.replace('QAQ\\~\\~'," ")
    sentence=sentence.split(" ")
    # Iterate over a copy so removal does not skip adjacent stopwords.
    for word in list(sentence):
        if word in stop:
            sentence.remove(word)
    sentence=" ".join(sentence)
    return sentence
def clean_dataframe(data):
    """Apply clean_sentence to the 'tweet' column of the passed frame and return it."""
    # Fixed: the original body used the global `df1` and ignored `data`.
    # Since the function is called with df1, operating on `data` is identical
    # in behavior but no longer depends on the global name.
    for col in ['tweet']:
        data[col]=data[col].apply(clean_sentence)
    return data
cleaned_text = clean_dataframe(df1)
# Create textblob objects of the tweets
sentiment_objects = [TextBlob(text) for text in df1['tweet']]
sentiment_objects[2].polarity, sentiment_objects[0]
sentiment_values = [[text.sentiment.polarity, str(text)] for text in sentiment_objects]
sentiment_df = pd.DataFrame(sentiment_values, columns=["polarity", "tweet"])
a=sentiment_df['polarity'].round(2)
b=list(a)
sentiment_df['Polar']=b
# Bucket the rounded polarity into a categorical sentiment label.
new_list=[]
for i in range(len(sentiment_df['Polar'])):
    a = sentiment_df['Polar'][i]
    if a == 0:
        new_list.append("Neutral")
        continue
    if a >0:
        new_list.append("Positive")
        continue
    if a <0:
        new_list.append("Negative")
        continue
sentiment_df['Sentiments']=new_list
sentiment_df
df1['Sentiments']=sentiment_df['Sentiments'] df1['Polar']=sentiment_df['Polar'] df1 ``` ## **Florida Sentiments Analysis** ``` df_fl= df1[df1['near']=="Florida"].reset_index(drop=True) df_fl ``` ## **New York Sentiments Analysis** ``` df_ny= df1[df1['near']=="New York"].reset_index(drop=True) df_ny ``` ## **New Jersey Sentiments Analysis** ``` df_nj= df1[df1['near']=="New Jersey"].reset_index(drop=True) df_nj ``` ## **Texas Sentiments Analysis** ``` df_tx= df1[df1['near']=="Texas"].reset_index(drop=True) df_tx ``` # **C. Findings:** ## **Overall Sentiments among all states** ``` ## Visualizing the Text sentiments pos=df1[df1['Sentiments']=='Positive'] neg=df1[df1['Sentiments']=='Negative'] neu=df1[df1['Sentiments']=='Neutral'] import plotly.express as px #Frist_Day = Frist_Day fig = px.pie(df1, names='Sentiments') fig.show() plt.title('Total number of tweets and sentiments') plt.xlabel('Emotions') plt.ylabel('Number of Tweets') sns.countplot(x='Sentiments', data=df1) ``` **Finding: Neutral Sentiments are the most prevalent of sentiments from the combined dataframe of NJ, NY, FL, TX tweets. There are slightly more positive sentiments than negative sentiments.** ``` df1['near'].unique() ``` ## **Barplot for the Sentiments (New Jersey)** ``` b=df_nj['Sentiments'].value_counts().reset_index() plt.title('NJ number of tweets and sentiments') plt.xlabel('Emotions') plt.ylabel('Number of Tweets') plt.bar(x=b['index'], height=b['Sentiments']) ``` **Findings: New Jersey has a majority neutral sentiment tweets from the dataframe. It has slightly more positive sentiment tweets than there are negative sentiment tweets.** ## **Barplot for the Sentiments (New York)** ``` b=df_ny['Sentiments'].value_counts().reset_index() plt.title('NY number of tweets and sentiments') plt.xlabel('Emotions') plt.ylabel('Number of Tweets') plt.bar(x=b['index'], height=b['Sentiments']) ``` **Findings: New York has a majority neutral sentiment tweets from the dataframe. 
It has more positive sentiment tweets than there are negative sentiment tweets.** ``` stat df1 import datetime lst=[] #df1['date'] = datetime.datetime.strptime(df1['date'], '%Y-%m-%d' ) for i in range(len(df1)): dat= datetime.datetime.strptime(df1['date'][i], '%Y-%m-%d %H:%M:%S') df1['date'][i]= dat.date() df1.sort_values(by='date').reset_index(drop=True) a= ['New Jersey', 'Florida', 'Texas', 'New York'] lst=[] for i in range(len(stat)): if stat['state'][i] in a: lst.append(i) df_stat= stat.iloc[lst].reset_index(drop=True) df_stat ``` ## **Barplot for the Sentiments (Texas)** ``` b=df_tx['Sentiments'].value_counts().reset_index() plt.title('TX number of tweets and sentiments') plt.xlabel('Emotions') plt.ylabel('Number of Tweets') plt.bar(x=b['index'], height=b['Sentiments']) ``` **Findings: Texas has a majority neutral sentiment tweets from the dataframe. It has slightly more negative sentiment tweets than there are positive sentiment tweets.** ## **Barplot for the Sentiments (Florida)** ``` b=df_fl['Sentiments'].value_counts().reset_index() plt.title('FL number of tweets and sentiments') plt.xlabel('Emotions') plt.ylabel('Number of Tweets') plt.bar(x=b['index'], height=b['Sentiments']) ``` **Findings: Florida has a majority neutral sentiment tweets from the dataframe. It has slightly more positive sentiment tweets than there are negative sentiment tweets.** # **Total Covid Deaths Year to Date (Decemeber 4th)** --- Source: https://github.com/nytimes/covid-19-data ``` import plotly.express as px fig = px.line(df_stat, x='date', y='deaths', color='state') fig.show() ``` The above graph shows the total covid deaths from 02/13/2020 until 12/04/2021 for the states of Texas, Florida, New York, and New Jersey. Texas and Florida have the two most deaths with Texas leading. New York and New Jersey have the least deaths with New York leading New Jersey with the most deaths out of the two. ## **Initial Questions ▶** 1. 
**Would it stand to reason that the states with more positive-neutral sentiments toward COVID-19 had lower total deaths?** There are no correlations between tweet sentiments and total deaths according to the curated dataset. Looking at the CSV dataset from the New York Times' Github on the total COVID-19 deaths for the states of Texas, Florida, New York, and New Jersey, it shows that Texas and Florida are top out of the states in terms of the total death count. Texas and Florida had different Positive to Negative sentiments as is apparent from the graph. 2. **Which state had a higher infection death count?** Texas has the higher death count out of all of the states. New Jersey has the least. 3. **Which states had more negative than positive twitter sentiment to 'COVID-19' in their dataset?** Texas was the only state that had more negative twitter sentiments in its dataset than positive. 4. **What was the most common sentiment in all datasets?** Neutral sentiment tweet was the most popular category having much of the tweets in the total dataset: 45.7% of textual data was neutral. 28% of the total textual dataset was positive in sentiment and 26.3% was negative. 5. **Are the sentiment results correlated or related to total death count?** No. There are no correlations/realtions between sentiment and total death counts. In the Texas dataset, its graph observed more negative sentiments than positive. In the Florida dataset, its graph observed more positive sentiments than negative. If we look at New Jersey and New York, both datasets have more positive than negative sentiment tweets. New York has considerably more positive tweets than negative tweets. NJ has slightly more positive tweets than negative tweets. If we wanted to make a statement that states that have more positive tweets to negative tweets have higher total death counts, Texas would have to have that same trend. 
Texas breaks this trend such that there are more negative tweets than positive tweets in its dataset despite it having the highest total death count out of all of the states. ``` ## Visualizing the Text sentiments pos=df1[df1['Sentiments']=='Positive'] neg=df1[df1['Sentiments']=='Negative'] neu=df1[df1['Sentiments']=='Neutral'] import plotly.express as px #Frist_Day = Frist_Day fig = px.pie(df1, names='Sentiments') fig.show() ``` # **D. Implications** **For the hiring firm:** According to the dataset, twitter sentiments alone cannot give any meaningful indication as to whether or not tweets and their emotions have any bearing on COVID-19's total death count. Better methodologies must be made: perhaps tweets of a certain popularity (perhaps a ratio between likes, retweets, sharing, etc) should be curated into a dataset. Simply looking at tweets at random is a good measure against bias however there is too much statistical noise within the dataset to make any meaningful correlations. **For Social Science:** Better methodologies in general should be developed when looking at social media posts. Considerable weight should be given to popular/viral content when curating a dataset as that is a category of data that inherently has the most interaction and 'social proof' due to its popularity on the website. # **E. Learning Outcomes** The more I developed my analytical skills, the more I realized that my project had a lot of statistical noise. First, I should have developed a better methodology for curating tweets. I simply used TWINT to curate 1300+ tweets randomly according to a fixed criteria. I did not add factors such as popularity of a tweet or its general social-media interaction score (primarily because I do not know how to do that yet). If I were to do this project again, I would start off by curating textual data that had a certain virality to it. I would only curate tweets with specific likes, shares, and comments. 
This would be a difficult task, as I don't know if twitter has an ELO score for tweets: If twitter had a virality ratio for a tweet I would likely curate on that factor as it would come from a class of textual data that has generated a certain amount of influence. However, this would add additional questions that would have to be considered as well: How much of the virality score would be coming from a particular state? For instance, if a score of 10 is VERY viral and that tweet comes from New York, are New York twitter users responsible for that tweet being a score of 10 or could it be users from another geographic location? This is a fair question because I would want to know how much influence the tweet has in its geographic location. It may be possible to develop a webscraper capable of achieving this goal, but it may involve many calculations that still would not guarantee the results being adequately parsed.
github_jupyter
### Integrate plot Qarpo is a library to build a jupyter notebook user interface to submit jobs to a job scheduler, display an output interface to display accomplished jobs' outputs and plot their results. This notebook provides a recipe to integrate a plot displaying the results of accomplished jobs in the jupyter notebook To start using the qarpo, run the following import line ``` import qarpo ``` The plot in the qarpo UI interface consists of 2 main parts. The first part is the backend, which writes the resulting output to a file, in our example here, we are writing the time, fps and total number of frames into the stats.json file. These code lines are integrated into the python script running. ```python import json import time t1 = time.time() //Inference execution infer_time = time.time()-t1 stats = {} stats['time'] = str(infer_time) stats['frame'] = str(num_frames) stats['fps'] = str(num_frames / infer_time) stats_file = "results/{}/stats.json".format(job_id) with open(stats_file, 'w') as f: json.dump(stats, f) ``` The second part is defined in the UI configuration, this UI configuration is an input to the class constructor Interface. 
To add the plot configuration to the UI configuration, use the following format: { "job": # Define how to launch the job and interpret results { "output_type": ".txt", # The type of input (text/video) "results_path": "app/results/", # Path to job result files "plots": #list of dictionaries, each dictionary represents a plot configuration [ { "title" : < plot title >, "type" : <plot type, "time" or "fps" or any different value specified in the json file in the backend part>, "xlabel" : <x-axis label>, "ylabel" : <y-axis label> } ] } } ``` job_interface = qarpo.Interface( { "job": # Define how to launch the job and interpret results { "output_type": ".png", # The type of input (text/video) "results_path": "app/results/", # Path to job result files "plots":[ { "title" : "", "type" : "time", "xlabel" : "Job ID", "ylabel" : "Time in seconds" } ] } } ) job_interface.displayUI() job_interface.submitJob("qsub app/example_job.sh -l nodes=1:idc001skl:i5-6500te -F 'app/results/'") ```
github_jupyter
# Наработки ``` import open3d as o3d import numpy as np def convert_from_bin_to_pcd(path_to_binary_file: str, path_to_new_pcd_file: str): bin_pcd = np.fromfile(path_to_binary_file, dtype=np.float32) points = bin_pcd.reshape((-1, 4))[:, 0:3] o3d_pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points)) o3d.io.write_point_cloud(path_to_new_pcd_file, o3d_pcd) def convert_from_pcd_to_ply(path_to_pcd_file: str, path_to_new_ply_file: str): pcd = o3d.io.read_point_cloud(path_to_pcd_file) o3d.io.write_point_cloud(path_to_new_ply_file, pcd) def scan_next_plane_from_point_cloud( point_cloud: o3d.geometry.PointCloud, distance: float = 0.1 ): try: _, inliers = point_cloud.segment_plane( distance_threshold=distance, ransac_n=3, num_iterations=5000 ) except Exception: return (point_cloud, point_cloud.clear()) inlier_cloud = point_cloud.select_by_index(inliers) inlier_cloud.paint_uniform_color([1.0, 0, 0]) outlier_cloud = point_cloud.select_by_index(inliers, invert=True) return (inlier_cloud, outlier_cloud) def select_points_by_label_id( path_to_pcd_file: str, path_to_label_file: str, label_id: int ) -> o3d.geometry.PointCloud: pcd = o3d.io.read_point_cloud(path_to_pcd_file) labels = np.fromfile(path_to_label_file, dtype=np.uint32) labels = labels.reshape((-1)) pcd_point_by_id = pcd.select_by_index(np.where(labels == label_id)[0]) return pcd_point_by_id def segment_all_planes_from_point_cloud(point_cloud: o3d.geometry.PointCloud) -> list: all_planes = [] inlier_cloud, outlier_cloud = scan_next_plane_from_point_cloud(point_cloud) while outlier_cloud.has_points(): all_planes.append(inlier_cloud) inlier_cloud, outlier_cloud = scan_next_plane_from_point_cloud(outlier_cloud) if inlier_cloud.has_points(): all_planes.append(inlier_cloud) return all_planes def segment_all_planes_by_list_of_labels( path_to_pcd_file: str, path_to_label_file: str, list_of_labels: list ) -> dict: result_dict = {} for label in list_of_labels: point_cloud = select_points_by_label_id( path_to_pcd_file, 
path_to_label_file, label ) planes = extract_all_planes_from_point_cloud(point_cloud) result_dict[label] = planes return result_dict path_to_bin_file = "/home/pavel/dataset/sequences/00/velodyne/000000.bin" path_to_pcd_file = "/home/pavel/Point-Cloud/src/test.pcd" path_to_label_file = "/home/pavel/dataset/sequences/00/labels/000000.label" BUILDING_LABEL = 50 OTHER_STRUCTURE_LABEL = 52 ROAD_LABEL = 40 list_of_planes = [BUILDING_LABEL, OTHER_STRUCTURE_LABEL, ROAD_LABEL] segmented_planes = segment_all_planes_by_list_of_labels( path_to_pcd_file, path_to_label_file, list_of_planes ) len(segmented_planes) ```
github_jupyter
![Pattern Match](https://pattern-match.com/img/new-logo.png) # **Amazon SageMaker in Practice - Workshop** ## **Click-Through Rate Prediction** This lab covers the steps for creating a click-through rate (CTR) prediction pipeline. The source code of the workshop prepared by [Pattern Match](https://pattern-match.com) is available on the [company's Github account](https://github.com/patternmatch/amazon-sagemaker-in-practice). You can reach authors us via the following emails: - [Sebastian Feduniak](mailto:sebastian.feduniak@pattern-match.com) - [Wojciech Gawroński](mailto:wojciech.gawronski@pattern-match.com) - [Paweł Pikuła](mailto:pawel.pikula@pattern-match.com) Today we use the [Criteo Labs](http://labs.criteo.com/) dataset, used for the old [Kaggle competition](https://www.kaggle.com/c/criteo-display-ad-challenge) for the same purpose. **WARNING**: First you need to update `pandas` to 0.23.4 for the `conda_python3` kernel. # Background In advertising, the most critical aspect when it comes to revenue is the final click on the ad. It is one of the ways to compensate for ad delivery for the provider. In the industry, an individual view of the specific ad is called an *impression*. To compare different algorithms and heuristics of ad serving, "clickability" of the ad is measured and presented in the form of [*click-through rate* metric (CTR)](https://en.wikipedia.org/wiki/Click-through_rate): ![CTR formula](https://wikimedia.org/api/rest_v1/media/math/render/svg/24ae7fdf648530de2083f72ab4b4ae2bc0c47d85) If you present randomly sufficient amount of ads to your user base, you get a baseline level of clicks. It is the easiest and simple solution. However, random ads have multiple problems - starting with a lack of relevance, causing distrust and annoyance. **Ad targeting** is a crucial technique for increasing the relevance of the ad presented to the user. Because resources and a customer's attention is limited, the goal is to provide an ad to most interested users. 
Predicting those potential clicks based on readily available information like device metadata, demographics, past interactions, and environmental factors is a universal machine learning problem. # Steps This notebook presents an example problem to predict if a customer clicks on a given advertisement. The steps include: - Prepare your *Amazon SageMaker* notebook. - Download data from the internet into *Amazon SageMaker*. - Investigate and transforming the data for usage inside *Amazon SageMaker* algorithms. - Estimate a model using the *Gradient Boosting* algorithm (`xgboost`). - Leverage hyperparameter optimization for training multiple models with varying hyperparameters in parallel. - Evaluate and compare the effectiveness of the models. - Host the model up to make on-going predictions. # What is *Amazon SageMaker*? *Amazon SageMaker* is a fully managed machine learning service. It enables discovery and exploration with use of *Jupyter* notebooks and then allows for very easy industrialization on a production-grade, distributed environment - that can handle and scale to extensive datasets. It provides solutions and algorithms for existing problems, but you can bring your algorithms into service without any problem. Everything mentioned above happens inside your *AWS infrastructure*. That includes secure and isolated *VPC* (*Virtual Private Cloud*), supported by the full power of the platform. [Typical workflow](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-mlconcepts.html) for creating machine learning models: ![Machine Learning with Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/images/ml-concepts-10.png) ## Note about *Amazon* vs. *AWS* prefix Why *Amazon* and not *AWS*? Some services available in *Amazon Web Services* portfolio are branded by *AWS* itself, and some by Amazon. 
Everything depends on the origin and team that maintains it - in that case, it originated from the core of the Amazon, and they maintain this service inside the core division. ## Working with *Amazon SageMaker* locally It is possible to fetch *Amazon SageMaker SDK* library via `pip` and use containers provided by *Amazon* locally, and you are free to do it. The reason why and when you should use *Notebook Instance* is when your datasets are far more significant than you want to store locally and they are residing on *S3* - for such cases it is very convenient to have the *Amazon SageMaker* notebooks available. # Preparation The primary way for interacting with *Amazon SageMaker* is to use *S3* as storage for input data and output results. For our workshops, we have prepared two buckets. One is a dedicated bucket for each user (see the credentials card you have received at the beginning of the workshop) - you should put the name of that bucket into `output_bucket` variable. That bucket is used for storing output models and transformed and split input datasets. We have also prepared a shared bucket called `amazon-sagemaker-in-practice-workshop` which contains the input dataset inside a path presented below. ``` data_bucket = 'amazon-sagemaker-in-practice-workshop' user_number = 'CHANGE_TO_YOUR_NUMBER' user_name = 'user-{}'.format(user_number) output_bucket = 'amazon-sagemaker-in-practice-bucket-{}'.format(user_name) path = 'criteo-display-ad-challenge' key = 'sample.csv' data_location = 's3://{}/{}/{}'.format(data_bucket, path, key) ``` *Amazon SageMaker* as a service runs in a specific security context applied via *IAM role*. You have created that role when creating *notebook instance* before we have uploaded this content. Each *notebook* instance provides a *Jupyter* environment with preinstalled libraries and *AWS SDKs*. One of such *SDKs* is *Amazon SageMaker SDK* available from the *Python* environment. 
With the use of that *SDK* we can check which security context we can use: ``` import boto3 from sagemaker import get_execution_role role = get_execution_role() print(role) ``` As a next, we need to import some stuff. It includes *IPython*, *Pandas*, *numpy*, commonly used libraries from *Python's* Standard Library and *Amazon SageMaker* utilities: ``` import numpy as np # For matrix operations and numerical processing import pandas as pd # For munging tabular data import matplotlib.pyplot as plt # For charts and visualizations from IPython.display import Image # For displaying images in the notebook from IPython.display import display # For displaying outputs in the notebook from time import gmtime, strftime # For labeling SageMaker models, endpoints, etc. import sys # For writing outputs to notebook import math # For ceiling function import json # For parsing hosting outputs import os # For manipulating filepath names import sagemaker # Amazon SageMaker's Python SDK provides helper functions from sagemaker.predictor import csv_serializer # Converts strings for HTTP POST requests on inference from sagemaker.tuner import IntegerParameter # Importing HPO elements. from sagemaker.tuner import CategoricalParameter from sagemaker.tuner import ContinuousParameter from sagemaker.tuner import HyperparameterTuner ``` Now we are ready to investigate the dataset. # Data The training dataset consists of a portion of Criteo's traffic over a period of 7 days. Each row corresponds to a display ad served by Criteo and the first column indicates whether this ad was clicked or not. The positive (clicked) and negative (non-clicked) examples have both been subsampled (but at different rates) to reduce the dataset size. There are 13 features taking integer values (mostly count features) and 26 categorical features. Authors hashed values of the categorical features onto 32 bits for anonymization purposes. The semantics of these features is undisclosed. 
Some features may have missing values (represented as a `-1` for integer values and empty string for categorical ones). Order of the rows is chronological. You may ask, why in the first place we are investigating such *obfuscated* dataset. In *ad tech* it is not unusual to deal with anonymized, or pseudonymized data, which are not semantical - mostly due to privacy and security reasons. The test set is similar to the training set but, it corresponds to events on the day following the training period. For that dataset author removed *label* (the first column). Unfortunately, because of that, it is hard to guess for sure which feature means what, but we can infer that based on the distribution - as we can see below. ## Format The columns are tab separeted with the following schema: ``` <label> <integer feature 1> ... <integer feature 13> <categorical feature 1> ... <categorical feature 26> ``` When a value is missing, the field is just empty. There is no label field in the test set. Sample dataset (`sample.csv`) contains *100 000* random rows which are taken from a training dataset to ease the exploration. ## How to load the dataset? Easy, if it is less than 5 GB - as the disk available on our Notebook instance is equal to 5 GB. However, there is no way to increase that. :( It is because of that EBS volume size is fixed at 5GB. As a workaround, you can use the `/tmp` directory for storing large files temporarily. The `/tmp` directory is on the root drive that has around 20GB of free space. However, data stored there cannot be persisted across stopping and restarting of the notebook instance. What if we need more? We need to preprocess the data in another way (e.g., using *AWS Glue*) and store it on *S3* available for *Amazon SageMaker* training machines. To read a *CSV* correctly we use *Pandas*. 
We need to be aware that dataset uses tabs as separators and we do not have the header: ``` data = pd.read_csv(data_location, header = None, sep = '\t') pd.set_option('display.max_columns', 500) # Make sure we can see all of the columns. pd.set_option('display.max_rows', 20) # Keep the output on one page. ``` ## Exploration Now we would like to explore our data, especially that we do not know anything about the semantics. How can we do that? We can do that by reviewing the histograms, frequency tables, correlation matrix, and scatter matrix. Based on that we can try to infer and *"sniff"* the meaning and semantics of the particular features. ### Integer features First 13 features from the dataset are represented as an integer features, let's review them: ``` # Histograms for each numeric features: display(data.describe()) %matplotlib inline hist = data.hist(bins = 30, sharey = True, figsize = (10, 10)) display(data.corr()) pd.plotting.scatter_matrix(data, figsize = (12, 12)) plt.show() ``` ### Categorical features Next 26 features from the dataset are represented as an categorical features. Now it's time to review those: ``` # Frequency tables for each categorical feature: for column in data.select_dtypes(include = ['object']).columns: display(pd.crosstab(index = data[column], columns = '% observations', normalize = 'columns')) categorical_feature = data[14] unique_values = data[14].unique() print("Number of unique values in 14th feature: {}\n".format(len(unique_values))) print(data[14]) ``` As for *integer features*, we can push them as-is to the *Amazon SageMaker* algorithms. We cannot do the same thing for *categorical* one. As you can see above, we have many unique values inside the categorical column. They hashed that into a *32-bit number* represented in a hexadecimal format - as a *string*. We need to convert that into a number, and we can leverage *one-hot encoding* for that. 
#### One-Hot Encoding It is a way of converting categorical data (e.g., type of animal - *dog*, *cat*, *bear*, and so on) into a numerical one, one-hot encoding means that for a row we create `N` additional columns and we put a `1` if that category is applicable for such row. #### Sparse Vectors It is the more efficient way to store data points which are not dense and do not contain all features. It is possible to efficiently compute various operations between those two forms - dense and sparse. ### Problem with *one-hot encoding* in this dataset Unfortunately, we cannot use *OHE* as-is for this dataset. Why? ``` for column in data.select_dtypes(include=['object']).columns: size = data.groupby([column]).size() print("Column '{}' - number of categories: {}".format(column, len(size))) for column in data.select_dtypes(include=['number']).columns: size = data.groupby([column]).size() print("Column '{}' - number of categories: {}".format(column, len(size))) ``` We have too many distinct categories per feature! In the worst case, for an individual feature, we create couple hundred thousands of new columns. Even with the sparse representation it significantly affects memory usage and execution time. What kind of features are represented by that? Examples of such features are *Device ID*, *User Agent* strings and similar. How to workaround that? We can use *indexing*. 
``` for column in data.select_dtypes(include = ['object']).columns: print("Converting '{}' column to indexed values...".format(column)) indexed_column = "{}_index".format(column) data[indexed_column] = pd.Categorical(data[column]) data[indexed_column] = data[indexed_column].cat.codes categorical_feature = data['14_index'] unique_values = data['14_index'].unique() print("Number of unique values in 14th feature: {}\n".format(len(unique_values))) print(data['14_index']) for column in data.select_dtypes(include=['object']).columns: data.drop([ column ], axis = 1, inplace = True) display(data) ``` It is another way of representing a categorical feature in *encoded* form. It is not friendly for *Linear Learner* and classical logistic regression, but we use `xgboost` library - which can leverage such a column without any problems. ## Finishing Touches Last, but not least - we need to unify the values that are pointing out a missing value `NaN` and `-1`. We use `NaN` everywhere: ``` # Replace all -1 to NaN: for column in data.columns: data[column] = data[column].replace(-1, np.nan) testing = data[2] testing_unique_values = data[2].unique() print("Number of unique values in 2nd feature: {}\n".format(len(testing_unique_values))) print(testing) ``` ## Splitting the dataset We need to split the dataset. We decided to randomize the dataset, and split into 70% for training, 20% for validation and 10% for the test. ``` # Randomly sort the data then split out first 70%, second 20%, and last 10%: data_len = len(data) sampled_data = data.sample(frac = 1) train_data, validation_data, test_data = np.split(sampled_data, [ int(0.7 * data_len), int(0.9 * data_len) ]) ``` After splitting, we need to save new training and validation dataset as *CSV* files. After saving, we upload them to the `output_bucket`. 
``` train_data.to_csv('train.sample.csv', index = False, header = False) validation_data.to_csv('validation.sample.csv', index = False, header = False) s3client = boto3.Session().resource('s3') train_csv_file = os.path.join(path, 'train/train.csv') validation_csv_file = os.path.join(path, 'validation/validation.csv') s3client.Bucket(output_bucket).Object(train_csv_file).upload_file('train.sample.csv') s3client.Bucket(output_bucket).Object(validation_csv_file).upload_file('validation.sample.csv') ``` Now we are ready to leverage *Amazon SageMaker* for training. # Training ## Preparation As a first step, we need to point which libraries we want to use. We do that by fetching the container name based on the name of the library we want to use. In our case, it is `xgboost`. ``` from sagemaker.amazon.amazon_estimator import get_image_uri container = get_image_uri(boto3.Session().region_name, 'xgboost') ``` Then, we need to point out where to look for input data. In our case, we use *CSV* files uploaded in the previous section to `output_bucket`. ``` train_csv_key = 's3://{}/{}/train/train.csv'.format(output_bucket, path) validation_csv_key = 's3://{}/{}/validation/validation.csv'.format(output_bucket, path) s3_input_train = sagemaker.s3_input(s3_data = train_csv_key, content_type = 'csv') s3_input_validation = sagemaker.s3_input(s3_data = validation_csv_key, content_type = 'csv') ``` ## Differences from usual workflow and frameworks usage Even that *Amazon SageMaker* supports *CSV* files, most of the algorithms work best when you use the optimized `protobuf` `recordIO` format for the training data. Using this format allows you to take advantage of *pipe mode* when training the algorithms that support it. File mode loads all of your data from *Amazon S3* to the training instance volumes. In *pipe mode*, your training job streams data directly from *Amazon S3*. Streaming can provide faster start times for training jobs and better throughput. 
With this mode, you also reduce the size of the *Amazon EBS* volumes for your training instances. *Pipe mode* needs only enough disk space to store your final model artifacts. File mode needs disk space to store both your final model artifacts and your full training dataset. For our use case - we leverage *CSV* files. ## Single training job ``` sess = sagemaker.Session() xgb = sagemaker.estimator.Estimator(container, role, train_instance_count = 1, train_instance_type = 'ml.m4.xlarge', base_job_name = user_name, output_path = 's3://{}/{}/output'.format(output_bucket, path), sagemaker_session = sess) xgb.set_hyperparameters(eval_metric = 'logloss', objective = 'binary:logistic', eta = 0.2, max_depth = 10, colsample_bytree = 0.7, colsample_bylevel = 0.8, min_child_weight = 4, rate_drop = 0.3, num_round = 75, gamma = 0.8) xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}) ``` Now, we are ready to create *Amazon SageMaker session* and `xgboost` framework objects. For a single training job, we need to create *Estimator*, where we point the container and *security context*. In this step, we are specifying the instance type and amount of those used for learning. Last, but not least - we need to specify `output_path` and pass the session object. For the created *Estimator* instance we need to specify the `objective`, `eval_metric` and other hyperparameters used for that training session. As the last step, we need to start the training process passing the training and validation datasets. Whole training job takes approximately 1-2 minutes at most for the following setup. ## FAQ **Q**: I see a strange error: `ClientError: Hidden file found in the data path! Remove that before training`. What is that? **A**: There is something wrong with your input files, probably you messed up the *S3* path passed into training job. ## Hyperparameter Tuning (HPO) The single job is just one way. We can automate the whole process with use of *hyperparameter tuning*. 
As in the case of a single training job, we need to create *Estimator* with the specification for an individual job and set up initial and fixed values for *hyperparameters*. However, outside those - we are setting up the ranges in which algorithm automatically tune in, inside the process of the *HPO*. Inside the *HyperparameterTuner* specification we are specifying how many jobs we want to run and how many of them we want to run in parallel. ``` hpo_sess = sagemaker.Session() hpo_xgb = sagemaker.estimator.Estimator(container, role, train_instance_count = 1, train_instance_type = 'ml.m4.xlarge', output_path = 's3://{}/{}/output_hpo'.format(output_bucket, path), sagemaker_session = hpo_sess) hpo_xgb.set_hyperparameters(eval_metric = 'logloss', objective = 'binary:logistic', colsample_bytree = 0.7, colsample_bylevel = 0.8, num_round = 75, rate_drop = 0.3, gamma = 0.8) hyperparameter_ranges = { 'eta': ContinuousParameter(0, 1), 'min_child_weight': ContinuousParameter(1, 10), 'alpha': ContinuousParameter(0, 2), 'max_depth': IntegerParameter(1, 10), } objective_metric_name = 'validation:logloss' objective_type = 'Minimize' tuner = HyperparameterTuner(hpo_xgb, objective_metric_name, hyperparameter_ranges, base_tuning_job_name = user_name, max_jobs = 20, max_parallel_jobs = 5, objective_type = objective_type) tuner.fit({'train': s3_input_train, 'validation': s3_input_validation}) ``` Another thing that is different is how we see the progress of that particular type of the job. In the previous case, logs were shipped automatically into a *notebook*. For *HPO*, we need to fetch job status via *Amazon SageMaker SDK*. Unfortunately, it allows fetching the only status - logs are available in *Amazon CloudWatch*. **Beware**, that with current setup whole *HPO* job may take 20-30 minutes. 
``` smclient = boto3.client('sagemaker') job_name = tuner.latest_tuning_job.job_name hpo_job = smclient.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName = job_name) hpo_job['HyperParameterTuningJobStatus'] ``` # Hosting the single model After finishing the training, *Amazon SageMaker* by default saves the model inside *S3* bucket we have specified. Moreover, based on that model we can either download the archive and use inside our source code and services when deploying, or we can leverage the hosting mechanism available in the *Amazon SageMaker* service. ## How it works? After you deploy a model into production using *Amazon SageMaker* hosting services, it creates the endpoint with its configuration. Your client applications use `InvokeEndpoint` API to get inferences from the model hosted at the specified endpoint. *Amazon SageMaker* strips all `POST` headers except those supported by the *API*. Service may add additional headers. Does it mean that everyone can call our model? No, calls to `InvokeEndpoint` are authenticated by using *AWS Signature Version 4*. A customer's model containers must respond to requests within 60 seconds. The model itself can have a maximum processing time of 60 seconds before responding to the /invocations. If your model is going to take 50-60 seconds of processing time, the SDK socket timeout should be set to be 70 seconds. ``` xgb_predictor = xgb.deploy(initial_instance_count = 1, instance_type = 'ml.m4.xlarge') ``` **Beware**, the '!' in the output after hosting model means that it deployed with success. # Hosting the best model from HPO Hosting *HPO* model is no different from a single job. *Amazon SageMaker SDK* in very convenient way selects the best model automatically and uses that as a back-end for the endpoint. 
``` xgb_predictor_hpo = tuner.deploy(initial_instance_count = 1, instance_type = 'ml.m4.xlarge') ``` # Evaluation After training and hosting the best possible model, we would like to evaluate its performance with `test_data` subset prepared when splitting data. As a first step, we need to prepare our hosted predictors to expect `text/csv` payload, which deserializes via *Amazon SageMaker SDK* entity `csv_serializer`. ``` xgb_predictor.content_type = 'text/csv' xgb_predictor.serializer = csv_serializer xgb_predictor_hpo.content_type = 'text/csv' xgb_predictor_hpo.serializer = csv_serializer ``` As a next step, we need to prepare a helper function that split `test_data` into smaller chunks and serialize them before passing it to predictors. ``` def predict(predictor, data, rows = 500): split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1)) predictions = '' for array in split_array: predictions = ','.join([predictions, predictor.predict(array).decode('utf-8')]) return np.fromstring(predictions[1:], sep =',') predictions = predict(xgb_predictor, test_data.drop([0], axis=1).values) hpo_predictions = predict(xgb_predictor_hpo, test_data.drop([0], axis=1).values) ``` As a final step, we would like to compare how many clicks available in `test_data` subset were predicted correctly for job trained individually and with *HPO* jobs. ``` rows = ['actuals'] cols = ['predictions'] clicks = np.round(predictions) result = pd.crosstab(index = test_data[0], columns = clicks, rownames = rows, colnames = cols) display("Single job results:") display(result) display(result.apply(lambda r: r/r.sum(), axis = 1)) hpo_clicks = np.round(hpo_predictions) result_hpo = pd.crosstab(index = test_data[0], columns = hpo_clicks, rownames = rows, colnames = cols) display("HPO job results:") display(result_hpo) display(result_hpo.apply(lambda r: r/r.sum(), axis = 1)) ``` As you may expect, the model trained with the use of *HPO* works better. 
What is interesting - without any tuning and significant improvements, we were able to be classified in the first 25-30 results of the leaderboard from the old [Kaggle competition](https://www.kaggle.com/c/criteo-display-ad-challenge/leaderboard). Impressive! # Clean-up To avoid incurring unnecessary charges, use the *AWS Management Console* to delete the resources that you created for this exercise. Open the *Amazon SageMaker* console at and delete the following resources: 1. The endpoint - that also deletes the ML compute instance or instances. 2. The endpoint configuration. 3. The model. 4. The notebook instance. You need to stop the instance before deleting it. Keep in mind that *you can not* delete the history of trained individual and hyperparameter optimization jobs, but that do not incur any charges. Open the Amazon S3 console at and delete the bucket that you created for storing model artifacts and the training dataset. Remember, that before deleting you need to empty it, by removing all objects. Open the *IAM* console at and delete the *IAM* role. If you created permission policies, you could delete them, too. Open the *Amazon CloudWatch* console at and delete all of the log groups that have names starting with `/aws/sagemaker`. When it comes to *endpoints* you can leverage the *Amazon SageMaker SDK* for that operation: ``` sagemaker.Session().delete_endpoint(xgb_predictor.endpoint) sagemaker.Session().delete_endpoint(xgb_predictor_hpo.endpoint) ```
github_jupyter
<a href="https://colab.research.google.com/github/patprem/IMDb-SentimentAnalysis/blob/main/SentimentAnalysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> **Sentiment Analysis of IMDb Movie Reviews** Importing the basic and required libraries used in this project ``` import torch from torchtext.legacy import data from torchtext.legacy import datasets import torchvision import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import torch.nn as nn import torch.nn.functional as F import random ``` Mounting personal Google Drive to load the dataset. **IMPORTANT: Change the directory and root path variable accordingly to yours.** ``` from google.colab import drive import sys #Mount your Google drive to the VM drive.mount('/content/gdrive') sys.path.append("/content/gdrive/My Drive/ECE4179 S1 2021 Prathik") #set a root path variable to use ROOT = "/content/gdrive/My Drive/ECE4179 S1 2021 Prathik/Final Project" #Follow link and give permission, copy code and paste in text box #You only have to do this once per session ``` Reading the data from the loaded dataset **IMPORTANT:** 1. Download the dataset provided under Datasets section on README.md or download from this links: [IMDB Dataset (csv)](https://www.kaggle.com/lakshmi25npathi/sentiment-analysis-of-imdb-movie-reviews/data) and [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment/). 2. Import the downloaded datasets onto your local Google Drive and **change the path variable** accordingly. ``` #from google.colab import files #uploaded = files.upload() #import io #dataset = pd.read_csv(io.BytesIO(uploaded['IMDB Dataset.csv'])) # Dataset is now stored in a Pandas Dataframe # Reading the data from the dataset. 
dataset = pd.read_csv('gdrive/My Drive/ECE4179 S1 2021 Prathik/Final Project/IMDB Dataset.csv') ``` ### If you have successfully executed all cells upto this point, then just simply click *Run all* under Runtime tab or press *Ctrl+F9* to execute the remanining cells or follow through the comments besides each cell below to get an understanding of the methodology of this project. Exploring the loaded dataset ``` pd.set_option('display.max_colwidth',2000) # set the column width to 2000 so that we can read the complete review. pd.set_option('max_rows', 200) dataset.head(10) # setting .head(10) to read just the first 10 reviews from the dataset. dataset.info() # information about the dataset; two columns: review and sentiment, # where sentiment is the target column or the column that we need to predict. # number of positive and negative reviews in the dataset. # dataset is completely balanced and has equal number of positive and negative # sentiments. dataset['sentiment'].value_counts() # reading second review from the dataset and checking how the contents of the review is # and why we need to use NLP (Natural Language Processing) tasks on this dataset. review = dataset['review'].loc[10] review ``` From the above review (output), we can see that there HTML contents, punctuations, special characters, stopwords and others which do not offer much insight into the prediction of our model. The following NLP tasks (text cleaning technqiues) are implemented. 1. Eliminating HTML tags/contents like 'br" 2. Removing punctuations and special characters like |, /, apostrophes, commas and other punctuation marks and etc. 3. Remove stopwords that do not affect the prediction of our outcome and does not offer much insight such as 'are', 'is', 'the' and etc. 4. Use Lemmatization to bring back multiple forms of the same word to their common/base root. For example, words like 'ran', 'running', 'runs' to 'run'. 5. 
Using Text Tokenization and Vectorization to encode numerical values to our data after the above text cleaning techniques. 6. Lastly, fit these data to a deep learning model like Convolutional Neural Network (CNN) and LinearSVC model and compare the discrepancies between them ``` # Removing HTML contents like "<br>" # BeautifulSoup is a Python library for extracting data out of HTML and XML files, # by omitting HTML contents such as "<br>" from bs4 import BeautifulSoup soup = BeautifulSoup(review, "html.parser") review = soup.get_text() review # notice that the HTML tags are eliminated. # Removal of other special characters or punctuations except upper or lower case # letters using Regular Expressions (Regex) import re # importing Regex review = re.sub('\[[^]]*\]', ' ', review) # removing punctuations review = re.sub('[^a-zA-Z]', ' ', review) # regex; removing strings that contains a non-letter # i.e., remove except a-z to A-Z review # set all characters to lower case for simplicity review = review.lower() review ``` Tokenization of reviews in the dataset ``` # Tokenization of reviews # Stopwords removal: Split the text into tokens since stopwords removal # works on every word in the text. review = review.split() review ``` Removal of Stopwords ``` # importing nltk library to remove stopwords # Stopwords are words (English language words) that does not add much # meaning to a sentence. Could be safely ignored without sacrificing the # meaning of the sentence or review in this case. Words like 'he', 'have', # 'the' does not provide any insights. import nltk nltk.download('stopwords') from nltk.corpus import stopwords review = [word for word in review if not word in set(stopwords.words('english'))] review ``` **Stemming technique** Stemming is a process to extract the base form of the words by removing affixes from the words. 
Both Stemming and Lemmatization techniques are implemented on a sample review here to observe the discrepancies between them and why Lemmatization is a better algorithm.
Tfidf Vectorizer (Bag of Words (BoW) Model) CountVectorizer (Bag of Words (BoW) Model) ``` # importing CountVectorizer to perform vectorization # Data becomes numeric with 1,2,3s based on the number of times # they appear in the text from sklearn.feature_extraction.text import CountVectorizer count_vect = CountVectorizer() review_count_vect = count_vect.fit_transform(corpus) # fitting this technique # onto the corpus review_count_vect.toarray() ``` Tfidf Vectorizer (Bag of Words (BoW) Model) 1. Text Frequency (TF): how many times a word appears in a review 2. Inverse Document Frequency (IDF): log(total number of reviews/# reviews with that particular word) TF-IDF score = TF*IDF ``` # importing TfidfVectorizer to perform vectorization from sklearn.feature_extraction.text import TfidfVectorizer # IDF acts as a diminishing factor and diminishes the weights of terms that # occurs frequently in the text and increases the weights of the terms # that occurs rarely. tfidf_vect = TfidfVectorizer() review_tfidf_vect = tfidf_vect.fit_transform(corpus) review_tfidf_vect.toarray() ``` So far, the techniques mentioned above have been implemented on only one sample review. Now, the above techniques will be applied on all the reviews in the dataset. As there is no test dataset, the dataset is split into 25% of the data as test dataset to test the performance of the model. 
``` # splitting the dataset into training and test data # 25% of the data as test dataset and pseudo random generator # to randomly distribute the reviews to each dataset from sklearn.model_selection import train_test_split train_dataset, test_dataset, traindata_label, testdata_label = train_test_split(dataset['review'], dataset['sentiment'], test_size=0.25, random_state=42) # Convert the sentiments (target column) to numeric forms (1s and 0s) for simplicity traindata_label = (traindata_label.replace({'positive': 1, 'negative': 0})).values testdata_label = (testdata_label.replace({'positive': 1, 'negative': 0})).values ``` Implementation of text cleaning techniques discussed above on the whole dataset and build the train and test corpus. ``` # test and training corpus train_corpus = [] test_corpus = [] # text cleaning techniques for training dataset for i in range(train_dataset.shape[0]): soup = BeautifulSoup(train_dataset.iloc[i], "html.parser") review = soup.get_text() review = re.sub('\[[^]]*\]', ' ', review) review = re.sub('[^a-zA-Z]', ' ', review) review = review.lower() review = review.split() review = [word for word in review if not word in set(stopwords.words('english'))] lemma = WordNetLemmatizer() review = [lemma.lemmatize(word) for word in review] review = ' '.join(review) train_corpus.append(review) # text cleaning techniques for test dataset for j in range(test_dataset.shape[0]): soup = BeautifulSoup(test_dataset.iloc[j], "html.parser") review = soup.get_text() review = re.sub('\[[^]]*\]', ' ', review) review = re.sub('[^a-zA-Z]', ' ', review) review = review.lower() review = review.split() review = [word for word in review if not word in set(stopwords.words('english'))] lemma = WordNetLemmatizer() review = [lemma.lemmatize(word) for word in review] review = ' '.join(review) test_corpus.append(review) ``` Validate one sample entry ``` # training corpus train_corpus[1] # test corpus test_corpus[1] ``` Vectorize the training and test corpus using TFIDF 
technique ``` # lower and upper boundary of the range of n-values for different word n-grams to be extracted. # (1,3) means unigrams and trigrams. tfidf_vect = TfidfVectorizer(ngram_range=(1, 3)) # fitting training corpus and test corpus onto TFIDF Vectorizer tfidf_vect_train = tfidf_vect.fit_transform(train_corpus) tfidf_vect_test = tfidf_vect.transform(test_corpus) ``` **First model: LinearSVC** ``` # importing LinearSVC library and fitting the data onto the model from sklearn.svm import LinearSVC # C: float; regularization parameter, must be positive. # random_state: controls pseudo random number generation for # shuffling data for dual coordinate descent. linear_SVC = LinearSVC(C = 0.5, random_state = 42) linear_SVC.fit(tfidf_vect_train, traindata_label) predict = linear_SVC.predict(tfidf_vect_test) ``` LinearSVC with TFIDF Vectorization ``` # Check the performance of the model from sklearn.metrics import classification_report, confusion_matrix, accuracy_score print("Classification Report of LinearSVC model with TFIDF: \n", classification_report(testdata_label, predict,target_names=['Negative','Positive'])) print("Confusion Matrix of LinearSVC with TFIDF: \n", confusion_matrix(testdata_label, predict)) print("Accuracy of LinearSVC with TFIDF: \n", accuracy_score(testdata_label, predict)) import seaborn as sns con_matrix = confusion_matrix(testdata_label, predict) plt.figure(figsize = (10,10)) sns.heatmap(con_matrix, cmap= "Blues", linecolor = 'black', linewidth = 1, annot = True, fmt= '', xticklabels = ['Negative Reviews','Positive Reviews'], yticklabels = ['Negative Reviews','Positive Reviews']) plt.xlabel("Predicted Sentiment") plt.ylabel("Actual Sentiment") ``` LinearSVC with CountVectorizer (binary=False) Vectorization ``` # fitting the data onto the model using CountVectorizer technique # binary = False -> If you set binary=True then CountVectorizer no longer uses the counts of terms/tokens. 
# If a token is present in a document, it is 1, if absent it is 0 regardless of its frequency of occurrence. # So you will be dealing with just binary values. By default, binary=False. # If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. count_vect = CountVectorizer(ngram_range=(1, 3), binary = False) # lower and upper boundary # of the range of n-values for different word n-grams to be extracted. # (1,3) means unigrams and trigrams. count_vect_train = count_vect.fit_transform(train_corpus) count_vect_test = count_vect.transform(test_corpus) linear_SVC_count = LinearSVC(C = 0.5, random_state = 42, max_iter = 5000) linear_SVC_count.fit(count_vect_train, traindata_label) predict_count = linear_SVC_count.predict(count_vect_test) # Check the performance of the model print("Classification Report of LinearSVC with CountVectorizer: \n", classification_report(testdata_label, predict_count,target_names=['Negative','Positive'])) print("Confusion Matrix of LinearSVC with CountVectorizer: \n", confusion_matrix(testdata_label, predict_count)) print("Accuracy of LinearSVC with CountVectorizer: \n", accuracy_score(testdata_label, predict_count)) con_matrix = confusion_matrix(testdata_label, predict_count) plt.figure(figsize = (10,10)) sns.heatmap(con_matrix,cmap= "Blues", linecolor = 'black' , linewidth = 1 , annot = True, fmt='' , xticklabels = ['Negative Reviews','Positive Reviews'] , yticklabels = ['Negative Reviews','Positive Reviews']) plt.xlabel("Predicted Sentiment") plt.ylabel("Actual Sentiment") ``` From the above results, we can observe that **LinearSVC with TFIDF vectorization** gives the maximum accuracy and the outcome on our test dataset can be observed. 
``` # prediction of data using the above model predict_dataset = test_dataset.copy() predict_dataset = pd.DataFrame(predict_dataset) # setting columns of the predicted outcomes on the dataset predict_dataset.columns = ['Review'] predict_dataset = predict_dataset.reset_index() predict_dataset = predict_dataset.drop(['index'], axis=1) # set the maximum column width to 100000 or more to view the complete review pd.set_option('display.max_colwidth',100000) pd.set_option('max_rows', 200) predict_dataset.head(10) # comparing the actual/original label with the predicted label testactual_label = testdata_label.copy() testactual_label = pd.DataFrame(testactual_label) testactual_label.columns = ['Sentiment'] # replacing back the numeric forms of the sentiments to positive and negative respectively testactual_label['Sentiment'] = testactual_label['Sentiment'].replace({1: 'positive', 0: 'negative'}) # predicted sentiments testpredicted_label = predict.copy() testpredicted_label = pd.DataFrame(testpredicted_label) testpredicted_label.columns = ['Predicted Sentiment'] testpredicted_label['Predicted Sentiment'] = testpredicted_label['Predicted Sentiment'].replace({1: 'positive', 0: 'negative'}) # concatenate the original and predicted labels along with its corresponding review test_result = pd.concat([predict_dataset, testactual_label, testpredicted_label], axis=1) pd.set_option('display.max_colwidth',100000) pd.set_option('max_rows', 200) test_result.head(10) ``` **Second model: Convolutional Neural Network (CNN)** Using CNN to conduct sentiment analysis Preparing the data using a different dataset ``` n = 1234 random.seed(n) np.random.seed(n) torch.manual_seed(n) torch.backends.cudnn.deterministic = True # for convolutional layers # batch dimension is first # 'batch_first = true' argument used to tell torchtext to return the permuted data # in CNN, batch dimension is first, so no need to permute data as 'batch_first' is set to true in TEXT field TEXT = data.Field(tokenize = 
'spacy', tokenizer_language = 'en_core_web_sm', batch_first = True) LABEL = data.LabelField(dtype = torch.float) # splitting the dataset into training and test data train_dataset, test_dataset = datasets.IMDB.splits(TEXT, LABEL) train_dataset, valid_dataset = train_dataset.split(random_state = random.seed(n)) # building the vocabulary and loading the pre-trained word embeddings MAX_VOCAB_SIZE = 25_000 TEXT.build_vocab(train_dataset, max_size = MAX_VOCAB_SIZE, vectors = "glove.6B.100d", unk_init = torch.Tensor.normal_) LABEL.build_vocab(train_dataset) # creating the iterators # batch size of 64 is used BATCH_SIZE = 64 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits((train_dataset, valid_dataset, test_dataset), batch_size = BATCH_SIZE, device = device) # checking the number of reviews in training, test and validation datasets print(f'Training reviews: {len(train_dataset)}') print(f'Validation reviews: {len(valid_dataset)}') print(f'Testing reviews : {len(test_dataset)}') ``` Building a CNN for the dataset (Text is 1 dimensional) 1. Convert words into word embeddings to visualize words in 2 dimensions, each word along one axis and other axis for the elements of vectors. 2. Use a filter size of [n*width]. 'n' is the number of sequential words (n-grams, number of tokens in the review) and width is the dimensions of the word or dimensional embeddings (depth of filter). 3. Bi-grams are filters that covers two words at a time, tri-grams covers three words and so on. And each element of the filter has a weight associated with it. 4. The output of this filter is the weighted sum of all elements covered by the filter (single real number). Similarly, the filter moves to cover the next bi-gram and another output is calculated and so on. 5. This is an example of one such filter. CNNs has a plethora of these filters. 
The main idea is that each filter will learn a different feature to extract. For example, each of the [2*width] filters looks for the occurence of different bi-grams that are relevant for analysing sentiment of movie reviews. And the same goes for different sizes of filters (n-grams) with heights of 3,4,5 etc. 6. Then, use max pooling on the output of the convolutional layers, which takes the maximum value over a dimension. 7. The maximum value is the most important feature for determining the sentiment of the review, which corresponds to the most essential n-gram within the review. Through backpropagation, the weights of the filters are updated so that whenever certain n-grams that are highly indicative of the sentiment are seen, the output of the filter is a high or the highest value amongst all. This high value is then passed through the max pooling layer if it is the maximum value in the output. 8. This model has 100 filters of 3 different sizes (n-grams), i.e., 300 different n-grams. Later, these are concatenated into a single vector and passed through a linear layer to predict the sentiment. 9. Most importantly, input review has to be atleast as long as the largest filter height used. 
``` import torch.nn as nn import torch.nn.functional as F # implementing the convolutional layers (nn.Conv2d) class CNN(nn.Module): def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim, dropout, pad_idx): # in_channels: number of channels in text/image fed into convolutional layer # in text, only one single channel # in_channels: number of filters # kernel_size: size of filters (n*emb_dim); n is the size of n-grams # and emb_dim is the dimensional embedding or width of the text super().__init__() self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx) self.conv_0 = nn.Conv2d(in_channels = 1, out_channels = n_filters, kernel_size = (filter_sizes[0], embedding_dim)) self.conv_1 = nn.Conv2d(in_channels = 1, out_channels = n_filters, kernel_size = (filter_sizes[1], embedding_dim)) self.conv_2 = nn.Conv2d(in_channels = 1, out_channels = n_filters, kernel_size = (filter_sizes[2], embedding_dim)) self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim) self.dropout = nn.Dropout(dropout) def forward(self, text): # pass review to an embedding layer to get embeddings # second dimension of the input to nn.Conv2d is the channel dimension embed_done = self.embedding(text) # text has no channel dimension, so unsqueeze to make one # and matches with in_channels (=1) dimension embed_done = embed_done.unsqueeze(1) # pass tensors through convolutional and pooling layers using ReLU # (non-linearity) activation function after the conv layers conv_layer0 = F.relu(self.conv_0(embed_done).squeeze(3)) conv_layer1 = F.relu(self.conv_1(embed_done).squeeze(3)) conv_layer2 = F.relu(self.conv_2(embed_done).squeeze(3)) # pooling layers handles reviews of different lengths # with max pooling, input to linear layer is the total no. 
of filters max_pool0 = F.max_pool1d(conv_layer0, conv_layer0.shape[2]).squeeze(2) max_pool1 = F.max_pool1d(conv_layer1, conv_layer1.shape[2]).squeeze(2) max_pool2 = F.max_pool1d(conv_layer2, conv_layer2.shape[2]).squeeze(2) # output size of conv layers depends on the input size # different batches contains reviews of different lengths # lastly, apply dropout on the concatenated filter outputs concatenation = self.dropout(torch.cat((max_pool0, max_pool1, max_pool2), dim = 1)) # pass through a linear layer (fully-connected layer) to make predictions return self.fc(concatenation) ``` The above CNN uses only 3 different sized filters. The below code is a generic CNN that takes in any number of filters. ``` # place all conv layers in a nn.ModuleList - function in PyTorch to hold a list # of PyTorch nn.Module # pass arbitrary sized list of filter sizes (generic model) # creates a conv layer for each class CNN(nn.Module): def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim, dropout, pad_idx): super().__init__() self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx) self.convs = nn.ModuleList([nn.Conv2d(in_channels = 1, out_channels = n_filters, kernel_size = (fs, embedding_dim)) for fs in filter_sizes]) self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim) self.dropout = nn.Dropout(dropout) # iterate through the list applying each conv layer to get a list of # conv outputs which is fed into max pooling layer in a list # comprehension before concatenation and passing through dropout # and linear layers def forward(self, text): embed_done = self.embedding(text) embed_done = embed_done.unsqueeze(1) conv_layer_relu = [F.relu(conv(embed_done)).squeeze(3) for conv in self.convs] max_pool_drop = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conv_layer_relu] concatenation = self.dropout(torch.cat(max_pool_drop, dim = 1)) return self.fc(concatenation) ``` Creating an instance of our CNN model ``` 
dimension_input = len(TEXT.vocab) # dimensional embeddings dimn_embedding = 100 # number of filters number_filters = 100 # size of the filters size_filter = [3,4,5] # output size dimension_output = 1 # dropout (value of 'p') p = 0.5 # padding padding = TEXT.vocab.stoi[TEXT.pad_token] # applying all these to the CNN model = CNN(dimension_input, dimn_embedding, number_filters, size_filter, dimension_output, p, padding) # check number of parameters in CNN model def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') ``` Loading the pre-trained embeddings ``` embed_pretrain = TEXT.vocab.vectors # weights model.embedding.weight.data.copy_(embed_pretrain) # zero the initial weights of the unknown and padding tokens token = TEXT.vocab.stoi[TEXT.unk_token] model.embedding.weight.data[token] = torch.zeros(dimn_embedding) model.embedding.weight.data[padding] = torch.zeros(dimn_embedding) ``` Next, now it is ready to train our model. The optimizer and loss function (criterion) are initialized. Here, I have used the ADAM optimizer and Binary Cross Entropy with Logits Loss function. ``` # importing ADAM optimizer import torch.optim as optim # set ADAM optimizer optimizer = optim.Adam(model.parameters()) # set the loss function criterion = nn.BCEWithLogitsLoss() # set model and criterion on GPU model = model.to(device) criterion = criterion.to(device) ``` Implementing a function to calculate accuracy in order to check the performance of the model ``` # returns accuracy per batch, will return, for example, 0.8 instead of 8. 
def binary_accuracy(preds, y): # rounds predictions to the closest integer predictions_rounded = torch.round(torch.sigmoid(preds)) true_prediction = (predictions_rounded == y).float() # float better for division purposes accuracy = true_prediction.sum() / len(true_prediction) return accuracy # function for training the model def train(model, iterator, optimizer, criterion): # initialise the epoch loss and accuracy epoch_accuracy = 0 epoch_loss = 0 model.train() # to ensure dropout is turned ON while training for batch in iterator: optimizer.zero_grad() predictions = model(batch.text).squeeze(1) loss = criterion(predictions, batch.label) accuracy = binary_accuracy(predictions, batch.label) loss.backward() optimizer.step() epoch_loss += loss.item() epoch_accuracy += accuracy .item() return epoch_loss / len(iterator), epoch_accuracy / len(iterator) # function for testing the model def evaluate(model, iterator, criterion): # initialise the epoch loss and accuracy epoch_loss = 0 epoch_accuracy = 0 model.eval() # to ensure dropout is turned OFF while evaluating/testing with torch.no_grad(): for batch in iterator: predictions = model(batch.text).squeeze(1) loss = criterion(predictions, batch.label) accuracy = binary_accuracy(predictions, batch.label) epoch_loss += loss.item() epoch_accuracy += accuracy.item() return epoch_loss / len(iterator), epoch_accuracy / len(iterator) # importing time library to define function to tell the time taken of our # epochs import time def epoch_time(start_time, end_time): time_taken = end_time - start_time time_taken_mins = int(time_taken / 60) time_taken_secs = int(time_taken - (time_taken_mins * 60)) return time_taken_mins, time_taken_secs ``` **Training the CNN model** ``` # 5 epochs are enough to view the values of loss and accuracy number_epochs = 5 good_validationloss = float('inf') # set to float for epoch in range(number_epochs): start_time = time.time() # calculating the training loss and accuracy and the validation loss # and 
accuracy train_loss, train_accuracy = train(model, train_iterator, optimizer, criterion) valid_loss, valid_accuracy = evaluate(model, valid_iterator, criterion) end_time = time.time() epoch_minutes, epoch_secs = epoch_time(start_time, end_time) if valid_loss < good_validationloss: good_validationloss = valid_loss torch.save(model.state_dict(), 'tut4-model.pt') # print the training loss and accuracy and validation loss and accuracy print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_minutes}m {epoch_secs}s') print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_accuracy*100:.2f}%') print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_accuracy*100:.2f}%') ``` This function will prompt the user to input their reviews. Based on the review, the model will the predict whether the sentiment of the review is positive or negative along with how accurate the model predicts the sentiment. ``` import spacy prompt = spacy.load('en_core_web_sm') # minimum_length is set to 10000 so that utmost 10000 tokens are accepted for computing # the outcome, i.e., 10000 words in a review which is more than enough def classify_predict_sentiment(model, sentence, minimum_length = 10000): model.eval() tokenization_done = [tok.text for tok in prompt.tokenizer(sentence)] # classify_predict_sentiment function accepts minimum length argument also by changing # minimum_length # If tokenization_done input sentence is less than minimum_length tokens, then we append # padding tokens ('<pad>') to make it minimum_length tokens if len(tokenization_done) < minimum_length: tokenization_done += ['<pad>'] * (minimum_length - len(tokenization_done)) indexing = [TEXT.vocab.stoi[t] for t in tokenization_done] box = torch.LongTensor(indexing).to(device) box = box.unsqueeze(0) prediction = torch.sigmoid(model(box)) # if the accuracy of the review is less than 0.5, it shall be considered # a negative review and anything above 0.5 shall be considered a positive # review if prediction.item() < 0.5: 
print(f'Negative Review') else: print(f'Positive Review') return print(f'Accuracy of this review: {prediction.item():.8f}') ``` The following positive and negative reviews are fed into the model and the outcome is displayed along with the accuracy from the model, i.e., how accurate the model predicts whether it is a positive or negative review. ``` classify_predict_sentiment(model, "I thought this was a wonderful way to spend time on a too hot summer weekend, sitting in the air conditioned theater and watching a light-hearted comedy. The plot is simplistic, but the dialogue is witty and the characters are likable (even the well bread suspected serial killer). While some may be disappointed when they realize this is not Match Point 2: Risk Addiction, I thought it was proof that Woody Allen is still fully in control of the style many of us have grown to love.<br /><br />This was the most I'd laughed at one of Woody's comedies in years (dare I say a decade?). While I've never been impressed with Scarlet Johanson, in this she managed to tone down her sexy image and jumped right into a average, but spirited young woman.<br /><br />This may not be the crown jewel of his career, but it was wittier than Devil Wears Prada and more interesting than Superman a great comedy to go see with friends.") classify_predict_sentiment(model, "This show was an amazing, fresh & innovative idea in the 70's when it first aired. The first 7 or 8 years were brilliant, but things dropped off after that. By 1990, the show was not really funny anymore, and it's continued its decline further to the complete waste of time it is today.<br /><br />It's truly disgraceful how far this show has fallen. The writing is painfully bad, the performances are almost as bad - if not for the mildly entertaining respite of the guest-hosts, this show probably wouldn't still be on the air. 
I find it so hard to believe that the same creator that hand-selected the original cast also chose the band of hacks that followed. How can one recognize such brilliance and then see fit to replace it with such mediocrity? I felt I must give 2 stars out of respect for the original cast that made this show such a huge success. As it is now, the show is just awful. I can't believe it's still on the air.") classify_predict_sentiment(model, "This a fantastic movie of three prisoners who become famous. One of the actors is george clooney and I'm not a fan but this roll is not bad. Another good thing about the movie is the soundtrack (The man of constant sorrow). I recommand this movie to everybody. Greetings Bart") classify_predict_sentiment(model,"I saw this movie when I was about 12 when it came out. I recall the scariest scene was the big bird eating men dangling helplessly from parachutes right out of the air. The horror. The horror.<br /><br />As a young kid going to these cheesy B films on Saturday afternoons, I still was tired of the formula for these monster type movies that usually included the hero, a beautiful woman who might be the daughter of a professor and a happy resolution when the monster died in the end. I didn't care much for the romantic angle as a 12 year old and the predictable plots. I love them now for the unintentional humor.<br /><br />But, about a year or so later, I saw Psycho when it came out and I loved that the star, Janet Leigh, was bumped off early in the film. I sat up and took notice at that point. Since screenwriters are making up the story, make it up to be as scary as possible and not from a well-worn formula. There are no rules.") classify_predict_sentiment(model,"The Karen Carpenter Story shows a little more about singer Karen Carpenter's complex life. Though it fails in giving accurate facts, and details.<br /><br />Cynthia Gibb (portrays Karen) was not a fine election. 
She is a good actress , but plays a very naive and sort of dumb Karen Carpenter. I think that the role needed a stronger character. Someone with a stronger personality.<br /><br />Louise Fletcher role as Agnes Carpenter is terrific, she does a great job as Karen's mother.<br /><br />It has great songs, which could have been included in a soundtrack album. Unfortunately they weren't, though this movie was on the top of the ratings in USA and other several countries.") classify_predict_sentiment(model,"I watched this film not really expecting much, I got it in a pack of 5 films, all of which were pretty terrible in their own way for under a fiver so what could I expect? and you know what I was right, they were all terrible, this movie has a few (and a few is stretching it) interesting points, the occasional camcorder view is a nice touch, the drummer is very like a drummer, i.e damned annoying and, well thats about it actually, the problem is that its just so boring, in what I can only assume was an attempt to build tension, a whole lot of nothing happens and when it does its utterly tedious (I had my thumb on the fast forward button, ready to press for most of the movie, but gave it a go) and seriously is the lead singer of the band that great looking, coz they don't half mention how beautiful he is a hell of a lot, I thought he looked a bit like a meercat, all this and I haven't even mentioned the killer, I'm not even gonna go into it, its just not worth explaining. Anyway as far as I'm concerned Star and London are just about the only reason to watch this and with the exception of London (who was actually quite funny) it wasn't because of their acting talent, I've certainly seen a lot worse, but I've also seen a lot better. Best avoid unless your bored of watching paint dry.") ```
github_jupyter
### Linear Problem ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import torch from torch.utils.data import DataLoader, Dataset import seaborn as sns from torch import nn from torch.nn import functional as F ``` ### Data Preparation ``` data = pd.read_csv('data/test.csv') data.head() sns.scatterplot(data=data, x='x', y='y',hue='color') class Data(Dataset): def __init__(self, path, transform=None, shuffle=True): self.dataFrame = pd.read_csv(path) self.xy = pd.read_csv(path).values if shuffle: np.random.shuffle(self.xy) self.len = self.xy.shape[0] self.x = self.xy[:, :-1] self.y = self.xy[:, -1] self.transform = transform print(self.x.shape) def __getitem__(self, index): sample = self.x[index], self.y[index] if self.transform: sample = self.transform(sample) return sample def __len__(self): return self.len def plot(self): sns.scatterplot(data=self.dataFrame, x='x', y='y',hue='color') plt.show() ``` ### Transformers on our data ``` class ToTensor: def __call__(self, samples): x, y = samples return torch.from_numpy(x.astype('float32')) ,torch.from_numpy(np.array(y, dtype='float32')) train = Data(path='data/train.csv', transform=ToTensor(), shuffle=True) test = Data(path='data/test.csv', transform=ToTensor(),shuffle=True ) train.plot() test.plot() train_set = DataLoader(dataset=train, batch_size =5, shuffle=True) test_set = DataLoader(dataset=test, batch_size =5, shuffle=False) ``` ### Predicting the Color ``` class Net(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(2,32) self.fc2 = nn.Linear(32,64) self.fc3 = nn.Linear(64, 1) def forward(self,x): x = torch.relu(self.fc1(x)) x = torch.relu(self.fc2(x)) x = torch.sigmoid(self.fc3(x)) return x net = Net() net criterion = torch.nn.BCELoss() optimizer = torch.optim.SGD(net.parameters(), lr=0.01) EPOCHS = 5 for epoch in range(EPOCHS): print(f'Epochs: {epoch+1}/{EPOCHS}') for data in train_set: X, y = data optimizer.zero_grad() # forward pass output = net(X.view(-1, 2)) 
#calculate loss loss = criterion(output, y.unsqueeze(1)) ## backward pass loss.backward() # update the weights optimizer.step() print("loss: ", loss.item()) total, correct = 0, 0 with torch.no_grad(): for data in test_set: X, y = data outputs = net(X.view(-1, 2)) for i in range(len(torch.round(outputs))): if y[i] == torch.round(outputs[i]): correct +=1 total +=1 print(correct/total) total, correct = 0, 0 with torch.no_grad(): for data in train_set: X, y = data outputs = net(X.view(-1, 2)) for i in range(len(torch.round(outputs))): if y[i] == torch.round(outputs[i]): correct +=1 total +=1 print(correct/total) ``` ### Making Predictions ``` test.plot() test[0] torch.Tensor([1., 0.]) torch.round(net(torch.Tensor([1., 2.]))).item() ``` > Done
github_jupyter
# NBAiLab - Finetuning and Evaluating a BERT model for NER and POS <img src="https://raw.githubusercontent.com/NBAiLab/notram/master/images/nblogo_2.png"> In this notebook we will finetune the [NB-BERTbase Model](https://github.com/NBAiLab/notram) released by the National Library of Norway. This is a model trained on a large corpus (110GB) of Norwegian texts. We will finetune this model on the [NorNE dataset](https://github.com/ltgoslo/norne). for Named Entity Recognition (NER) and Part of Speech (POS) tags using the [Transformers Library by Huggingface](https://huggingface.co/transformers/). After training the model should be able to accept any text string input (up to 512 tokens) and return POS or NER-tags for this text. This is useful for a number of NLP tasks, for instance for extracting/removing names/places from a document. After training, we will save the model, evaluate it and use it for predictions. The Notebook is intended for experimentation with the pre-release NoTram models from the National Library of Norway, and is made for educational purposes. If you just want to use the model, you can instead initiate one of our finetuned models. ## Before proceeding Create a copy of this notebook by going to "File - Save a Copy in Drive" # Install Dependencies and Define Helper Functions You need to run the code below to install some libraries and initiate some helper functions. Click "Show Code" if you later want to examine this part as well. ``` #@title #The notebook is using some functions for reporting that are only available in Transformers 4.2.0. Until that is released, we are installing from the source. 
!pip -q install https://github.com/huggingface/transformers/archive/0ecbb698064b94560f24c24fbfbd6843786f088b.zip !pip install -qU scikit-learn datasets seqeval conllu pyarrow import logging import os import sys from dataclasses import dataclass from dataclasses import field from typing import Optional import numpy as np import pandas as pd import tensorflow as tf import tensorflow_datasets as tfds import transformers from datasets import load_dataset from seqeval.metrics import accuracy_score from seqeval.metrics import f1_score from seqeval.metrics import precision_score from seqeval.metrics import recall_score from seqeval.metrics import classification_report from transformers.training_args import TrainingArguments from tqdm import tqdm from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification, PreTrainedTokenizerFast, Trainer, TrainingArguments, pipeline, set_seed ) from google.colab import output from IPython.display import Markdown from IPython.display import display # Helper Funtions - Allows us to format output by Markdown def printm(string): display(Markdown(string)) ## Preprocessing the dataset # Tokenize texts and align the labels with them. def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples[text_column_name], max_length=max_length, padding=padding, truncation=True, # We use this argument because the texts in our dataset are lists of words (with a label for each word). is_split_into_words=True, ) labels = [] for i, label in enumerate(examples[label_column_name]): word_ids = tokenized_inputs.word_ids(batch_index=i) previous_word_idx = None label_ids = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) # We set the label for the first token of each word. 
elif word_idx != previous_word_idx: label_ids.append(label_to_id[label[word_idx]]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. else: label_ids.append(label_to_id[label[word_idx]] if label_all_tokens else -100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs # Metrics def compute_metrics(pairs): predictions, labels = pairs predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] return { "accuracy_score": accuracy_score(true_labels, true_predictions), "precision": precision_score(true_labels, true_predictions), "recall": recall_score(true_labels, true_predictions), "f1": f1_score(true_labels, true_predictions), "report": classification_report(true_labels, true_predictions, digits=4) } ``` # Settings Try running this with the default settings first. The default setting should give you a pretty good result. If you want training to go even faster, reduce the number of epochs. The first variables you should consider changing are the one in the dropdown menus. Later you can also experiment with the other settings to get even better results. 
``` #Model, Dataset, and Task #@markdown Set the main model that the training should start from model_name = 'NbAiLab/nb-bert-base' #@param ["NbAiLab/nb-bert-base", "bert-base-multilingual-cased"] #@markdown --- #@markdown Set the dataset for the task we are training on dataset_name = "NbAiLab/norne" #@param ["NbAiLab/norne", "norwegian_ner"] dataset_config = "bokmaal" #@param ["bokmaal", "nynorsk"] task_name = "ner" #@param ["ner", "pos"] #General overwrite_cache = False #@#param {type:"boolean"} cache_dir = ".cache" #param {type:"string"} output_dir = "./output" #param {type:"string"} overwrite_output_dir = False #param {type:"boolean"} seed = 42 #param {type:"number"} set_seed(seed) #Tokenizer padding = False #param ["False", "'max_length'"] {type: 'raw'} max_length = 512 #param {type: "number"} label_all_tokens = False #param {type:"boolean"} # Training #@markdown --- #@markdown Set training parameters per_device_train_batch_size = 8 #param {type: "integer"} per_device_eval_batch_size = 8 #param {type: "integer"} learning_rate = 3e-05 #@param {type: "number"} weight_decay = 0.0 #param {type: "number"} adam_beta1 = 0.9 #param {type: "number"} adam_beta2 = 0.999 #param {type: "number"} adam_epsilon = 1e-08 #param {type: "number"} max_grad_norm = 1.0 #param {type: "number"} num_train_epochs = 4.0 #@param {type: "number"} num_warmup_steps = 750 #@param {type: "number"} save_total_limit = 1 #param {type: "integer"} load_best_model_at_end = True #@param {type: "boolean"} ``` # Load the Dataset used for Finetuning The default setting is to use the NorNE dataset. This is currently the largest (and best) dataset with annotated POS/NER tags that are available today. All sentences is tagged both for POS and NER. The dataset is available as a Huggingface dataset, so loading it is very easy. 
``` #Load the dataset dataset = load_dataset(dataset_name, dataset_config) #Getting some variables from the dataset column_names = dataset["train"].column_names features = dataset["train"].features text_column_name = "tokens" if "tokens" in column_names else column_names[0] label_column_name = ( f"{task_name}_tags" if f"{task_name}_tags" in column_names else column_names[1] ) label_list = features[label_column_name].feature.names label_to_id = {i: i for i in range(len(label_list))} num_labels = len(label_list) #Look at the dataset printm(f"###Quick Look at the NorNE Dataset") print(dataset["train"].data.to_pandas()[[text_column_name, label_column_name]]) printm(f"###All labels ({num_labels})") print(label_list) if task_name == "ner": mlabel_list = {label.split("-")[-1] for label in label_list} printm(f"###Main labels ({len(mlabel_list)})") print(mlabels) ``` # Initialize Training We are here using the native Trainer interface provided by Huggingface. Huggingface also has an interface for Tensorflow and PyTorch. To see an example of how to use the Tensorflow interface, please take a look at our notebook about classification. 
``` config = AutoConfig.from_pretrained( model_name, num_labels=num_labels, finetuning_task=task_name, cache_dir=cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_name, cache_dir=cache_dir, use_fast=True, ) model = AutoModelForTokenClassification.from_pretrained( model_name, from_tf=bool(".ckpt" in model_name), config=config, cache_dir=cache_dir, ) data_collator = DataCollatorForTokenClassification(tokenizer) tokenized_datasets = dataset.map( tokenize_and_align_labels, batched=True, load_from_cache_file=not overwrite_cache, num_proc=os.cpu_count(), ) training_args = TrainingArguments( output_dir=output_dir, overwrite_output_dir=overwrite_output_dir, do_train=True, do_eval=True, do_predict=True, per_device_train_batch_size=per_device_train_batch_size, per_device_eval_batch_size=per_device_eval_batch_size, learning_rate=learning_rate, weight_decay=weight_decay, adam_beta1=adam_beta1, adam_beta2=adam_beta2, adam_epsilon=adam_epsilon, max_grad_norm=max_grad_norm, num_train_epochs=num_train_epochs, warmup_steps=num_warmup_steps, load_best_model_at_end=load_best_model_at_end, seed=seed, save_total_limit=save_total_limit, ) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) ``` # Start Training Training for the default 4 epochs should take around 10-15 minutes if you have access to GPU. 
``` %%time train_result = trainer.train() trainer.save_model() # Saves the tokenizer too for easy upload # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) #Print Results output_train_file = os.path.join(output_dir, "train_results.txt") with open(output_train_file, "w") as writer: printm("**Train results**") for key, value in sorted(train_result.metrics.items()): printm(f"{key} = {value}") writer.write(f"{key} = {value}\n") ``` # Evaluate the Model The model is now saved on your Colab disk. This is a temporary disk that will disappear when the Colab is closed. You should copy it to another place if you want to keep the result. Now we can evaluate the model and play with it. Expect some UserWarnings since there might be errors in the training file. ``` printm("**Evaluate**") results = trainer.evaluate() output_eval_file = os.path.join(output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: printm("**Eval results**") for key, value in results.items(): printm(f"{key} = {value}") writer.write(f"{key} = {value}\n") ``` # Run Preditions on the Test Dataset You should be able to end up with a result not far from what we have reported for the NB-BERT-model: <table align="left"> <tr><td></td><td>Bokmål</td><td>Nynorsk</td></tr> <tr><td>POS</td><td>98.86</td><td>98.77</td></tr> <tr><td>NER</td><td>93.66</td><td>92.02</td></tr> </table> ``` printm("**Predict**") test_dataset = tokenized_datasets["test"] predictions, labels, metrics = trainer.predict(test_dataset) predictions = np.argmax(predictions, axis=2) output_test_results_file = os.path.join(output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: printm("**Predict results**") for key, value in sorted(metrics.items()): printm(f"{key} = {value}") writer.write(f"{key} = {value}\n") # Remove ignored index (special tokens) true_predictions = [ 
[label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] # Save predictions output_test_predictions_file = os.path.join(output_dir, "test_predictions.txt") with open(output_test_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") ``` # Use the model This model will assign labels to the different word/tokens. B-TAG marks the beginning of the entity, while I-TAG is a continuation of the entity. In the example below the model should be able to pick out the individual names as well as understand how many places and organisations that are mentioned. ``` text = "Svein Arne Brygfjeld, Freddy Wetjen, Javier de la Rosa og Per E Kummervold jobber alle ved AILABen til Nasjonalbiblioteket. Nasjonalbiblioteket har lokaler b\xE5de i Mo i Rana og i Oslo. " #@param {type:"string"} group_entities = True #param {type:"boolean"} #Load the saved model in the pipeline, and run some predicions model = AutoModelForTokenClassification.from_pretrained(output_dir) try: tokenizer = AutoTokenizer.from_pretrained(output_dir) except TypeError: tokenizer = AutoTokenizer.from_pretrained(model_name) ner_model = pipeline( "ner", model=model, tokenizer=tokenizer, grouped_entities=group_entities ) result = ner_model(text) output = [] for token in result: entity = int(token['entity_group'].replace("LABEL_", "")) output.append({ "word": token['word'], "entity": label_list[entity], "score": token['score'], }) pd.DataFrame(output).style.hide_index() ``` --- ##### Copyright 2020 &copy; National Library of Norway
github_jupyter
# Table of Contents <p><div class="lev1"><a href="#Introduction-to-Pandas"><span class="toc-item-num">1&nbsp;&nbsp;</span>Introduction to Pandas</a></div><div class="lev2"><a href="#Pandas-Data-Structures"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Pandas Data Structures</a></div><div class="lev3"><a href="#Series"><span class="toc-item-num">1.1.1&nbsp;&nbsp;</span>Series</a></div><div class="lev3"><a href="#DataFrame"><span class="toc-item-num">1.1.2&nbsp;&nbsp;</span>DataFrame</a></div><div class="lev3"><a href="#Exercise-1"><span class="toc-item-num">1.1.3&nbsp;&nbsp;</span>Exercise 1</a></div><div class="lev3"><a href="#Exercise-2"><span class="toc-item-num">1.1.4&nbsp;&nbsp;</span>Exercise 2</a></div><div class="lev2"><a href="#Importing-data"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Importing data</a></div><div class="lev3"><a href="#Microsoft-Excel"><span class="toc-item-num">1.2.1&nbsp;&nbsp;</span>Microsoft Excel</a></div><div class="lev2"><a href="#Pandas-Fundamentals"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Pandas Fundamentals</a></div><div class="lev3"><a href="#Manipulating-indices"><span class="toc-item-num">1.3.1&nbsp;&nbsp;</span>Manipulating indices</a></div><div class="lev2"><a href="#Indexing-and-Selection"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Indexing and Selection</a></div><div class="lev3"><a href="#Exercise-3"><span class="toc-item-num">1.4.1&nbsp;&nbsp;</span>Exercise 3</a></div><div class="lev2"><a href="#Operations"><span class="toc-item-num">1.5&nbsp;&nbsp;</span>Operations</a></div><div class="lev2"><a href="#Sorting-and-Ranking"><span class="toc-item-num">1.6&nbsp;&nbsp;</span>Sorting and Ranking</a></div><div class="lev3"><a href="#Exercise-4"><span class="toc-item-num">1.6.1&nbsp;&nbsp;</span>Exercise 4</a></div><div class="lev2"><a href="#Hierarchical-indexing"><span class="toc-item-num">1.7&nbsp;&nbsp;</span>Hierarchical indexing</a></div><div class="lev2"><a href="#Missing-data"><span 
class="toc-item-num">1.8&nbsp;&nbsp;</span>Missing data</a></div><div class="lev3"><a href="#Exercise-5"><span class="toc-item-num">1.8.1&nbsp;&nbsp;</span>Exercise 5</a></div><div class="lev2"><a href="#Data-summarization"><span class="toc-item-num">1.9&nbsp;&nbsp;</span>Data summarization</a></div><div class="lev2"><a href="#Writing-Data-to-Files"><span class="toc-item-num">1.10&nbsp;&nbsp;</span>Writing Data to Files</a></div><div class="lev3"><a href="#Advanced-Exercise:-Compiling-Ebola-Data"><span class="toc-item-num">1.10.1&nbsp;&nbsp;</span>Advanced Exercise: Compiling Ebola Data</a></div><div class="lev2"><a href="#References"><span class="toc-item-num">1.11&nbsp;&nbsp;</span>References</a></div> # Introduction to Pandas **pandas** is a Python package providing fast, flexible, and expressive data structures designed to work with *relational* or *labeled* data both. It is a fundamental high-level building block for doing practical, real world data analysis in Python. pandas is well suited for: - Tabular data with heterogeneously-typed columns, as in an SQL table or Excel spreadsheet - Ordered and unordered (not necessarily fixed-frequency) time series data. - Arbitrary matrix data (homogeneously typed or heterogeneous) with row and column labels - Any other form of observational / statistical data sets. 
The data actually need not be labeled at all to be placed into a pandas data structure Key features: - Easy handling of **missing data** - **Size mutability**: columns can be inserted and deleted from DataFrame and higher dimensional objects - Automatic and explicit **data alignment**: objects can be explicitly aligned to a set of labels, or the data can be aligned automatically - Powerful, flexible **group by functionality** to perform split-apply-combine operations on data sets - Intelligent label-based **slicing, fancy indexing, and subsetting** of large data sets - Intuitive **merging and joining** data sets - Flexible **reshaping and pivoting** of data sets - **Hierarchical labeling** of axes - Robust **IO tools** for loading data from flat files, Excel files, databases, and HDF5 - **Time series functionality**: date range generation and frequency conversion, moving window statistics, moving window linear regressions, date shifting and lagging, etc. ``` import pandas as pd import numpy as np pd.options.mode.chained_assignment = None # default='warn' ``` ## Pandas Data Structures ### Series A **Series** is a single vector of data (like a NumPy array) with an *index* that labels each element in the vector. ``` counts = pd.Series([632, 1638, 569, 115]) counts ``` If an index is not specified, a default sequence of integers is assigned as the index. A NumPy array comprises the values of the `Series`, while the index is a pandas `Index` object. ``` counts.values counts.index ``` We can assign meaningful labels to the index, if they are available: ``` bacteria = pd.Series([632, 1638, 569, 115], index=['Firmicutes', 'Proteobacteria', 'Actinobacteria', 'Bacteroidetes']) bacteria ``` These labels can be used to refer to the values in the `Series`. 
``` bacteria['Actinobacteria'] bacteria[[name.endswith('bacteria') for name in bacteria.index]] [name.endswith('bacteria') for name in bacteria.index] ``` Notice that the indexing operation preserved the association between the values and the corresponding indices. We can still use positional indexing if we wish. ``` bacteria[0] ``` We can give both the array of values and the index meaningful labels themselves: ``` bacteria.name = 'counts' bacteria.index.name = 'phylum' bacteria ``` NumPy's math functions and other operations can be applied to Series without losing the data structure. ``` # natural logarithm np.log(bacteria) # log base 10 np.log10(bacteria) ``` We can also filter according to the values in the `Series`: ``` bacteria[bacteria>1000] ``` A `Series` can be thought of as an ordered key-value store. In fact, we can create one from a `dict`: ``` bacteria_dict = {'Firmicutes': 632, 'Proteobacteria': 1638, 'Actinobacteria': 569, 'Bacteroidetes': 115} pd.Series(bacteria_dict) ``` Notice that the `Series` is created in key-sorted order. If we pass a custom index to `Series`, it will select the corresponding values from the dict, and treat indices without corresponding values as missing. Pandas uses the `NaN` (not a number) type for missing values. ``` bacteria2 = pd.Series(bacteria_dict, index=['Cyanobacteria','Firmicutes', 'Proteobacteria','Actinobacteria']) bacteria2 bacteria2.isnull() ``` Critically, the labels are used to **align data** when used in operations with other Series objects: ``` bacteria + bacteria2 ``` Contrast this with NumPy arrays, where arrays of the same length will combine values element-wise; adding Series combined values with the same label in the resulting series. Notice also that the missing values were propagated by addition. ### DataFrame Inevitably, we want to be able to store, view and manipulate data that is *multivariate*, where for every index there are multiple fields or columns of data, often of varying data type. 
A `DataFrame` is a tabular data structure, encapsulating multiple series like columns in a spreadsheet. Data are stored internally as a 2-dimensional object, but the `DataFrame` allows us to represent and manipulate higher-dimensional data. ``` data = pd.DataFrame({'value':[632, 1638, 569, 115, 433, 1130, 754, 555], 'patient':[1, 1, 1, 1, 2, 2, 2, 2], 'phylum':['Firmicutes', 'Proteobacteria', 'Actinobacteria', 'Bacteroidetes', 'Firmicutes', 'Proteobacteria', 'Actinobacteria', 'Bacteroidetes']}) data ``` Notice the `DataFrame` is sorted by column name. We can change the order by indexing them in the order we desire: ``` data[['phylum','value','patient']] ``` A `DataFrame` has a second index, representing the columns: ``` data.columns ``` The `dtypes` attribute reveals the data type for each column in our DataFrame. - `int64` is numeric integer values - `object` strings (letters and numbers) - `float64` floating-point values ``` data.dtypes ``` If we wish to access columns, we can do so either by dict-like indexing or by attribute: ``` data['patient'] data.patient type(data.value) data[['value']] ``` Notice this is different than with `Series`, where dict-like indexing retrieved a particular element (row). If we want access to a row in a `DataFrame`, we index its `loc` attribute. ``` data.loc[3] ``` ### Exercise 1 Try out these commands to see what they return: - `data.head()` - `data.tail(3)` - `data.shape` ``` data.head() # returns the first (5 by default) rows of data. 
data.tail(3) # returns the 3 last rows of data data.shape # returns the dimension of data (nbr rows, nbr cols) ``` An alternative way of initializing a `DataFrame` is with a list of dicts: ``` data = pd.DataFrame([{'patient': 1, 'phylum': 'Firmicutes', 'value': 632}, {'patient': 1, 'phylum': 'Proteobacteria', 'value': 1638}, {'patient': 1, 'phylum': 'Actinobacteria', 'value': 569}, {'patient': 1, 'phylum': 'Bacteroidetes', 'value': 115}, {'patient': 2, 'phylum': 'Firmicutes', 'value': 433}, {'patient': 2, 'phylum': 'Proteobacteria', 'value': 1130}, {'patient': 2, 'phylum': 'Actinobacteria', 'value': 754}, {'patient': 2, 'phylum': 'Bacteroidetes', 'value': 555}]) data ``` It's important to note that the Series returned when a DataFrame is indexed is merely a **view** on the DataFrame, and not a copy of the data itself. So you must be cautious when manipulating this data: ``` vals = data.value vals vals[5] = 0 vals ``` If we plan on modifying an extracted Series, it's a good idea to make a copy. ``` vals = data.value.copy() vals[5] = 1000 vals ``` We can create or modify columns by assignment: ``` data.value[[3,4,6]] = [14, 21, 5] data data['year'] = 2013 data ``` But note, we cannot use the attribute indexing method to add a new column: ``` data.treatment = 1 data data.treatment ``` ### Exercise 2 From the `data` table above, create an index to return all rows for which the phylum name ends in "bacteria" and the value is greater than 1000. ---------------------------- Find the values of 'phylum' ending in 'bacteria' ``` colwitbacteria = [col for col in data['phylum'] if col.endswith('bacteria')] colwitbacteria ``` then filter the rows having one of the 'bacteria' values ``` rowswithbacteria = data[data['phylum'].isin(colwitbacteria)] ``` then take the values bigger than 1000 ``` rowswithbacteria[rowswithbacteria.value > 1000] ``` Note that it is probably faster to first filter the values bigger than 1000 as it filters more values out. 
Specifying a `Series` as a new columns cause its values to be added according to the `DataFrame`'s index: ``` treatment = pd.Series([0]*4 + [1]*2) treatment data['treatment'] = treatment data ``` Other Python data structures (ones without an index) need to be the same length as the `DataFrame`: ``` month = ['Jan', 'Feb', 'Mar', 'Apr'] # data['month'] = month # throws error (done on puropse) data['month'] = ['Jan']*len(data) data ``` We can use the `drop` method to remove rows or columns, which by default drops rows. We can be explicit by using the `axis` argument: ``` data_nomonth = data.drop('month', axis=1) data_nomonth ``` We can extract the underlying data as a simple `ndarray` by accessing the `values` attribute: ``` data.values ``` Notice that because of the mix of string and integer (and `NaN`) values, the dtype of the array is `object`. The dtype will automatically be chosen to be as general as needed to accomodate all the columns. ``` df = pd.DataFrame({'foo': [1,2,3], 'bar':[0.4, -1.0, 4.5]}) df.values ``` Pandas uses a custom data structure to represent the indices of Series and DataFrames. ``` data.index ``` Index objects are immutable: ``` # data.index[0] = 15 # throws error ``` This is so that Index objects can be shared between data structures without fear that they will be changed. ``` bacteria2.index = bacteria.index bacteria2 ``` ## Importing data A key, but often under-appreciated, step in data analysis is importing the data that we wish to analyze. Though it is easy to load basic data structures into Python using built-in tools or those provided by packages like NumPy, it is non-trivial to import structured data well, and to easily convert this input into a robust data structure: genes = np.loadtxt("genes.csv", delimiter=",", dtype=[('gene', '|S10'), ('value', '<f4')]) Pandas provides a convenient set of functions for importing tabular data in a number of formats directly into a `DataFrame` object. 
These functions include a slew of options to perform type inference, indexing, parsing, iterating and cleaning automatically as data are imported. Let's start with some more bacteria data, stored in csv format. ``` !cat Data/microbiome.csv ``` This table can be read into a DataFrame using `read_csv`: ``` mb = pd.read_csv("Data/microbiome.csv") mb ``` Notice that `read_csv` automatically considered the first row in the file to be a header row. We can override default behavior by customizing some of the arguments, like `header`, `names` or `index_col`. ``` pd.read_csv("Data/microbiome.csv", header=None).head() ``` `read_csv` is just a convenience function for `read_table`, since csv is such a common format: ``` mb = pd.read_table("Data/microbiome.csv", sep=',') ``` The `sep` argument can be customized as needed to accommodate arbitrary separators. For example, we can use a regular expression to define a variable amount of whitespace, which is unfortunately very common in some data formats: sep='\s+' For a more useful index, we can specify the first two columns, which together provide a unique index to the data. ``` mb = pd.read_csv("Data/microbiome.csv", index_col=['Patient','Taxon']) mb.head() ``` This is called a *hierarchical* index, which we will revisit later in the section. If we have sections of data that we do not wish to import (for example, known bad data), we can populate the `skiprows` argument: ``` pd.read_csv("Data/microbiome.csv", skiprows=[3,4,6]).head() ``` If we only want to import a small number of rows from, say, a very large data file we can use `nrows`: ``` pd.read_csv("Data/microbiome.csv", nrows=4) ``` Alternately, if we want to process our data in reasonable chunks, the `chunksize` argument will return an iterable object that can be employed in a data processing loop. 
For example, our microbiome data are organized by bacterial phylum, with 14 patients represented in each: ``` pd.read_csv("Data/microbiome.csv", chunksize=14) data_chunks = pd.read_csv("Data/microbiome.csv", chunksize=14) mean_tissue = pd.Series({chunk.Taxon[0]: chunk.Tissue.mean() for chunk in data_chunks}) mean_tissue ``` Most real-world data is incomplete, with values missing due to incomplete observation, data entry or transcription error, or other reasons. Pandas will automatically recognize and parse common missing data indicators, including `NA` and `NULL`. ``` !cat Data/microbiome_missing.csv pd.read_csv("Data/microbiome_missing.csv").head(20) ``` Above, Pandas recognized `NA` and an empty field as missing data. ``` pd.isnull(pd.read_csv("Data/microbiome_missing.csv")).head(20) ``` Unfortunately, there will sometimes be inconsistency with the conventions for missing data. In this example, there is a question mark "?" and a large negative number where there should have been a positive integer. We can specify additional symbols with the `na_values` argument: ``` pd.read_csv("Data/microbiome_missing.csv", na_values=['?', -99999]).head(20) ``` These can be specified on a column-wise basis using an appropriate dict as the argument for `na_values`. ### Microsoft Excel Since so much financial and scientific data ends up in Excel spreadsheets (regrettably), Pandas' ability to directly import Excel spreadsheets is valuable. This support is contingent on having one or two dependencies (depending on what version of Excel file is being imported) installed: `xlrd` and `openpyxl` (these may be installed with either `pip` or `easy_install`). 
The read_excel convenience function in pandas imports a specific sheet from an Excel file ``` mb = pd.read_excel('Data/microbiome/MID2.xls', sheetname='Sheet 1', header=None) mb.head() ``` There are several other data formats that can be imported into Python and converted into DataFrames, with the help of built-in or third-party libraries. These include JSON, XML, HDF5, relational and non-relational databases, and various web APIs. These are beyond the scope of this tutorial, but are covered in [Python for Data Analysis](http://shop.oreilly.com/product/0636920023784.do). ## Pandas Fundamentals This section introduces the new user to the key functionality of Pandas that is required to use the software effectively. For some variety, we will leave our digestive tract bacteria behind and employ some baseball data. ``` baseball = pd.read_csv("Data/baseball.csv", index_col='id') baseball.head() ``` Notice that we specified the `id` column as the index, since it appears to be a unique identifier. We could try to create a unique index ourselves by combining `player` and `year`: ``` player_id = baseball.player + baseball.year.astype(str) baseball_newind = baseball.copy() baseball_newind.index = player_id baseball_newind.head() ``` This looks okay, but let's check: ``` baseball_newind.index.is_unique ``` So, indices need not be unique. Our choice is not unique because some players change teams within years. ``` pd.Series(baseball_newind.index).value_counts() ``` The most important consequence of a non-unique index is that indexing by label will return multiple values for some labels: ``` baseball_newind.loc['wickmbo012007'] ``` We will learn more about indexing below. 
We can create a truly unique index by combining `player`, `team` and `year`: ``` player_unique = baseball.player + baseball.team + baseball.year.astype(str) baseball_newind = baseball.copy() baseball_newind.index = player_unique baseball_newind.head() baseball_newind.index.is_unique ``` We can create meaningful indices more easily using a hierarchical index; for now, we will stick with the numeric `id` field as our index. ### Manipulating indices **Reindexing** allows users to manipulate the data labels in a DataFrame. It forces a DataFrame to conform to the new index, and optionally, fill in missing data if requested. A simple use of `reindex` is to alter the order of the rows: ``` baseball.reindex(baseball.index[::-1]).head() ``` Notice that the `id` index is not sequential. Say we wanted to populate the table with every `id` value. We could specify and index that is a sequence from the first to the last `id` numbers in the database, and Pandas would fill in the missing data with `NaN` values: ``` id_range = range(baseball.index.values.min(), baseball.index.values.max()) baseball.reindex(id_range).head() ``` Missing values can be filled as desired, either with selected values, or by rule: ``` baseball.reindex(id_range, method='ffill', columns=['player','year']).head() baseball.reindex(id_range, fill_value='charliebrown', columns=['player']).head() ``` Keep in mind that `reindex` does not work if we pass a non-unique index series. We can remove rows or columns via the `drop` method: ``` baseball.shape baseball.drop([89525, 89526]) baseball.drop(['ibb','hbp'], axis=1) ``` ## Indexing and Selection Indexing works analogously to indexing in NumPy arrays, except we can use the labels in the `Index` object to extract values in addition to arrays of integers. 
``` # Sample Series object hits = baseball_newind.h hits # Numpy-style indexing hits[:3] # Indexing by label hits[['womacto01CHN2006','schilcu01BOS2006']] ``` We can also slice with data labels, since they have an intrinsic order within the Index: ``` hits['womacto01CHN2006':'gonzalu01ARI2006'] hits['womacto01CHN2006':'gonzalu01ARI2006'] = 5 hits ``` In a `DataFrame` we can slice along either or both axes: ``` baseball_newind[['h','ab']] baseball_newind[baseball_newind.ab>500] ``` For a more concise (and readable) syntax, we can use the new `query` method to perform selection on a `DataFrame`. Instead of having to type the fully-specified column, we can simply pass a string that describes what to select. The query above is then simply: ``` baseball_newind.query('ab > 500') ``` The `DataFrame.index` and `DataFrame.columns` are placed in the query namespace by default. If you want to refer to a variable in the current namespace, you can prefix the variable with `@`: ``` min_ab = 450 baseball_newind.query('ab > @min_ab') ``` The indexing field `loc` allows us to select subsets of rows and columns in an intuitive way: ``` baseball_newind.loc['gonzalu01ARI2006', ['h','X2b', 'X3b', 'hr']] baseball_newind.loc[:'myersmi01NYA2006', 'hr'] ``` In addition to using `loc` to select rows and columns by **label**, pandas also allows indexing by **position** using the `iloc` attribute. So, we can query rows and columns by absolute position, rather than by name: ``` baseball_newind.iloc[:5, 5:8] ``` ### Exercise 3 You can use the `isin` method query a DataFrame based upon a list of values as follows: data['phylum'].isin(['Firmacutes', 'Bacteroidetes']) Use `isin` to find all players that played for the Los Angeles Dodgers (LAN) or the San Francisco Giants (SFN). How many records contain these values? 
``` baseball[baseball['team'].isin(['LAN', 'SFN'])] ``` 15 records contain those values ## Operations `DataFrame` and `Series` objects allow for several operations to take place either on a single object, or between two or more objects. For example, we can perform arithmetic on the elements of two objects, such as combining baseball statistics across years. First, let's (artificially) construct two Series, consisting of home runs hit in years 2006 and 2007, respectively: ``` hr2006 = baseball.loc[baseball.year==2006, 'hr'] hr2006.index = baseball.player[baseball.year==2006] hr2007 = baseball.loc[baseball.year==2007, 'hr'] hr2007.index = baseball.player[baseball.year==2007] hr2007 ``` Now, let's add them together, in hopes of getting 2-year home run totals: ``` hr_total = hr2006 + hr2007 hr_total ``` Pandas' data alignment places `NaN` values for labels that do not overlap in the two Series. In fact, there are only 6 players that occur in both years. ``` hr_total[hr_total.notnull()] ``` While we do want the operation to honor the data labels in this way, we probably do not want the missing values to be filled with `NaN`. We can use the `add` method to calculate player home run totals by using the `fill_value` argument to insert a zero for home runs where labels do not overlap: ``` hr2007.add(hr2006, fill_value=0) ``` Operations can also be **broadcast** between rows or columns. 
For example, if we subtract the maximum number of home runs hit from the `hr` column, we get how many fewer than the maximum were hit by each player: ``` baseball.hr - baseball.hr.max() ``` Or, looking at things row-wise, we can see how a particular player compares with the rest of the group with respect to important statistics ``` baseball.loc[89521, "player"] stats = baseball[['h','X2b', 'X3b', 'hr']] diff = stats - stats.loc[88641] diff[:10] ``` We can also apply functions to each column or row of a `DataFrame` ``` stats.apply(np.median) def range_calc(x): return x.max() - x.min() stat_range = lambda x: x.max() - x.min() stats.apply(stat_range) ``` Lets use apply to calculate a meaningful baseball statistics, [slugging percentage](https://en.wikipedia.org/wiki/Slugging_percentage): $$SLG = \frac{1B + (2 \times 2B) + (3 \times 3B) + (4 \times HR)}{AB}$$ And just for fun, we will format the resulting estimate. ``` def slugging(x): bases = x['h']-x['X2b']-x['X3b']-x['hr'] + 2*x['X2b'] + 3*x['X3b'] + 4*x['hr'] ab = x['ab']+1e-6 return bases/ab baseball.apply(slugging, axis=1).round(3) ``` ## Sorting and Ranking Pandas objects include methods for re-ordering data. ``` baseball_newind.sort_index().head() baseball_newind.sort_index(ascending=False).head() ``` Try sorting the **columns** instead of the rows, in ascending order: ``` baseball_newind.sort_index(axis=1).head() ``` We can also use `sort_values` to sort a `Series` by value, rather than by label. ``` baseball.hr.sort_values(ascending=False) ``` For a `DataFrame`, we can sort according to the values of one or more columns using the `by` argument of `sort_values`: ``` baseball[['player','sb','cs']].sort_values(ascending=[False,True], by=['sb', 'cs']).head(10) ``` **Ranking** does not re-arrange data, but instead returns an index that ranks each value relative to others in the Series. ``` baseball.hr.rank() ``` Ties are assigned the mean value of the tied ranks, which may result in decimal values. 
``` pd.Series([100,100]).rank() ``` Alternatively, you can break ties via one of several methods, such as by the order in which they occur in the dataset: ``` baseball.hr.rank(method='first') ``` Calling the `DataFrame`'s `rank` method results in the ranks of all columns: ``` baseball.rank(ascending=False).head() baseball[['r','h','hr']].rank(ascending=False).head() ``` ### Exercise 4 Calculate **on base percentage** for each player, and return the ordered series of estimates. $$OBP = \frac{H + BB + HBP}{AB + BB + HBP + SF}$$ define the function and apply it. ``` def on_base_perc(pl): nominator = pl['h'] + pl['bb'] + pl['hbp'] #H+BB+HBP denom = pl['ab'] + pl['bb'] +pl['hbp'] +pl['sf'] if(denom == 0) : # If the denom == 0 we can not compute nominator/denom, hence we return NaN return np.NaN return nominator / denom baseball.apply(on_base_perc, axis=1).round(3) ``` and again but ordered ``` baseball.apply(on_base_perc, axis=1).round(3).sort_values(ascending=False) ``` ## Hierarchical indexing In the baseball example, I was forced to combine 3 fields to obtain a unique index that was not simply an integer value. A more elegant way to have done this would be to create a hierarchical index from the three fields. ``` baseball_h = baseball.set_index(['year', 'team', 'player']) baseball_h.head(10) ``` This index is a `MultiIndex` object that consists of a sequence of tuples, the elements of which is some combination of the three columns used to create the index. Where there are multiple repeated values, Pandas does not print the repeats, making it easy to identify groups of values. ``` baseball_h.index[:10] baseball_h.index.is_unique ``` Try using this hierarchical index to retrieve Julio Franco (`francju01`), who played for the Atlanta Braves (`ATL`) in 2007: ``` baseball_h.loc[(2007, 'ATL', 'francju01')] ``` Recall earlier we imported some microbiome data using two index columns. 
This created a 2-level hierarchical index: ``` mb = pd.read_csv("Data/microbiome.csv", index_col=['Taxon','Patient']) mb.head(10) ``` With a hierarchical index, we can select subsets of the data based on a *partial* index: ``` mb.loc['Proteobacteria'] ``` Hierarchical indices can be created on either or both axes. Here is a trivial example: ``` frame = pd.DataFrame(np.arange(12).reshape(( 4, 3)), index =[['a', 'a', 'b', 'b'], [1, 2, 1, 2]], columns =[['Ohio', 'Ohio', 'Colorado'], ['Green', 'Red', 'Green']]) frame ``` If you want to get fancy, both the row and column indices themselves can be given names: ``` frame.index.names = ['key1', 'key2'] frame.columns.names = ['state', 'color'] frame ``` With this, we can do all sorts of custom indexing: ``` frame.loc['a', 'Ohio'] ``` Try retrieving the value corresponding to `b2` in `Colorado`: --------------------------- fetch b2 and then Colorado ``` frame.loc['b', 2]['Colorado'] ``` Additionally, the order of the set of indices in a hierarchical `MultiIndex` can be changed by swapping them pairwise: ``` mb.swaplevel('Patient', 'Taxon').head() ``` Data can also be sorted by any index level, using `sortlevel`: ``` mb.sortlevel('Patient', ascending=False).head() ``` ## Missing data The occurrence of missing data is so prevalent that it pays to use tools like Pandas, which seamlessly integrates missing data handling so that it can be dealt with easily, and in the manner required by the analysis at hand. Missing data are represented in `Series` and `DataFrame` objects by the `NaN` floating point value. However, `None` is also treated as missing, since it is commonly used as such in other contexts (*e.g.* NumPy). ``` foo = pd.Series([np.nan, -3, None, 'foobar']) foo foo.isnull() ``` Missing values may be dropped or indexed out: ``` bacteria2 bacteria2.dropna() bacteria2.isnull() bacteria2[bacteria2.notnull()] ``` By default, `dropna` drops entire rows in which one or more values are missing. 
``` data.dropna() ``` This can be overridden by passing the `how='all'` argument, which only drops a row when every field is a missing value. ``` data.dropna(how='all') ``` This can be customized further by specifying how many values need to be present before a row is dropped via the `thresh` argument. ``` data.loc[7, 'year'] = np.nan data data.dropna(thresh=5) ``` This is typically used in time series applications, where there are repeated measurements that are incomplete for some subjects. ### Exercise 5 Try using the `axis` argument to drop columns with missing values: ``` data.dropna(axis=1) ``` Rather than omitting missing data from an analysis, in some cases it may be suitable to fill the missing value in, either with a default value (such as zero) or a value that is either imputed or carried forward/backward from similar data points. We can do this programmatically in Pandas with the `fillna` argument. ``` bacteria2.fillna(0) data.fillna({'year': 2013, 'treatment':2}) ``` Notice that `fillna` by default returns a new object with the desired filling behavior, rather than changing the `Series` or `DataFrame` in place (**in general, we like to do this, by the way!**). We can alter values in-place using `inplace=True`. ``` data.year.fillna(2013, inplace=True) data ``` Missing values can also be interpolated, using any one of a variety of methods: ``` bacteria2.fillna(method='bfill') ``` ## Data summarization We often wish to summarize data in `Series` or `DataFrame` objects, so that they can more easily be understood or compared with similar data. The NumPy package contains several functions that are useful here, but several summarization or reduction methods are built into Pandas data structures. ``` baseball.sum() ``` Clearly, `sum` is more meaningful for some columns than others. 
For methods like `mean` for which application to string variables is not just meaningless, but impossible, these columns are automatically excluded: ``` baseball.mean() ``` The important difference between NumPy's functions and Pandas' methods is that the latter have built-in support for handling missing data. ``` bacteria2 bacteria2.mean() ``` Sometimes we may not want to ignore missing values, and allow the `nan` to propagate. ``` bacteria2.mean(skipna=False) ``` Passing `axis=1` will summarize over rows instead of columns, which only makes sense in certain situations. ``` extra_bases = baseball[['X2b','X3b','hr']].sum(axis=1) extra_bases.sort_values(ascending=False) ``` A useful summarization that gives a quick snapshot of multiple statistics for a `Series` or `DataFrame` is `describe`: ``` baseball.describe() ``` `describe` can detect non-numeric data and sometimes yield useful information about it. ``` baseball.player.describe() ``` We can also calculate summary statistics *across* multiple columns, for example, correlation and covariance. $$cov(x,y) = \sum_i (x_i - \bar{x})(y_i - \bar{y})$$ ``` baseball.hr.cov(baseball.X2b) ``` $$corr(x,y) = \frac{cov(x,y)}{(n-1)s_x s_y} = \frac{\sum_i (x_i - \bar{x})(y_i - \bar{y})}{\sqrt{\sum_i (x_i - \bar{x})^2 \sum_i (y_i - \bar{y})^2}}$$ ``` baseball.hr.corr(baseball.X2b) baseball.ab.corr(baseball.h) ``` Try running `corr` on the entire `baseball` DataFrame to see what is returned: ---------------------------- ``` baseball.corr() ``` it returns the correlation matrix for all features ---------------------------- If we have a `DataFrame` with a hierarchical index (or indices), summary statistics can be applied with respect to any of the index levels: ``` mb.head() mb.sum(level='Taxon') ``` ## Writing Data to Files As well as being able to read several data input formats, Pandas can also export data to a variety of storage formats. We will bring your attention to just a couple of these. 
``` mb.to_csv("mb.csv") ``` The `to_csv` method writes a `DataFrame` to a comma-separated values (csv) file. You can specify custom delimiters (via `sep` argument), how missing values are written (via `na_rep` argument), whether the index is written (via `index` argument), whether the header is included (via `header` argument), among other options. An efficient way of storing data to disk is in binary format. Pandas supports this using Python’s built-in pickle serialization. ``` baseball.to_pickle("baseball_pickle") ``` The complement to `to_pickle` is the `read_pickle` function, which restores the pickle to a `DataFrame` or `Series`: ``` pd.read_pickle("baseball_pickle") ``` As Wes warns in his book, it is recommended that binary storage of data via pickle only be used as a temporary storage format, in situations where speed is relevant. This is because there is no guarantee that the pickle format will not change with future versions of Python. ### Advanced Exercise: Compiling Ebola Data The `Data/ebola` folder contains summarized reports of Ebola cases from three countries during the recent outbreak of the disease in West Africa. For each country, there are daily reports that contain various information about the outbreak in several cities in each country. From these data files, use pandas to import them and create a single data frame that includes the daily totals of new cases and deaths for each country. ### Our solution is in a separate notebook
github_jupyter
# Scalar and vector > Marcos Duarte, Renato Naville Watanabe > [Laboratory of Biomechanics and Motor Control](http://pesquisa.ufabc.edu.br/bmclab) > Federal University of ABC, Brazil <h1>Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Python-setup" data-toc-modified-id="Python-setup-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Python setup</a></span></li><li><span><a href="#Scalar" data-toc-modified-id="Scalar-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Scalar</a></span><ul class="toc-item"><li><span><a href="#Scalar-operations-in-Python" data-toc-modified-id="Scalar-operations-in-Python-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Scalar operations in Python</a></span></li></ul></li><li><span><a href="#Vector" data-toc-modified-id="Vector-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Vector</a></span><ul class="toc-item"><li><span><a href="#Magnitude-(length-or-norm)-of-a-vector" data-toc-modified-id="Magnitude-(length-or-norm)-of-a-vector-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Magnitude (length or norm) of a vector</a></span></li><li><span><a href="#Vecton-addition-and-subtraction" data-toc-modified-id="Vecton-addition-and-subtraction-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Vecton addition and subtraction</a></span></li></ul></li><li><span><a href="#Dot-product" data-toc-modified-id="Dot-product-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Dot product</a></span></li><li><span><a href="#Vector-product" data-toc-modified-id="Vector-product-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Vector product</a></span><ul class="toc-item"><li><span><a href="#Gram–Schmidt-process" data-toc-modified-id="Gram–Schmidt-process-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Gram–Schmidt process</a></span></li></ul></li><li><span><a href="#Further-reading" data-toc-modified-id="Further-reading-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Further 
reading</a></span></li><li><span><a href="#Video-lectures-on-the-Internet" data-toc-modified-id="Video-lectures-on-the-Internet-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Video lectures on the Internet</a></span></li><li><span><a href="#Problems" data-toc-modified-id="Problems-8"><span class="toc-item-num">8&nbsp;&nbsp;</span>Problems</a></span></li><li><span><a href="#References" data-toc-modified-id="References-9"><span class="toc-item-num">9&nbsp;&nbsp;</span>References</a></span></li></ul></div> Python handles very well all mathematical operations with numeric scalars and vectors and you can use [Sympy](http://sympy.org) for similar stuff but with abstract symbols. Let's briefly review scalars and vectors and show how to use Python for numerical calculation. For a review about scalars and vectors, see chapter 2 of [Ruina and Rudra's book](http://ruina.tam.cornell.edu/Book/index.html). ## Python setup ``` from IPython.display import IFrame import math import numpy as np ``` ## Scalar >A **scalar** is a one-dimensional physical quantity, which can be described by a single real number. For example, time, mass, and energy are examples of scalars. ### Scalar operations in Python Simple arithmetic operations with scalars are indeed simple: ``` a = 2 b = 3 print('a =', a, ', b =', b) print('a + b =', a + b) print('a - b =', a - b) print('a * b =', a * b) print('a / b =', a / b) print('a ** b =', a ** b) print('sqrt(b) =', math.sqrt(b)) ``` If you have a set of numbers, or an array, it is probably better to use Numpy; it will be faster for large data sets, and combined with Scipy, has many more mathematical funcions. 
``` a = 2 b = [3, 4, 5, 6, 7, 8] b = np.array(b) print('a =', a, ', b =', b) print('a + b =', a + b) print('a - b =', a - b) print('a * b =', a * b) print('a / b =', a / b) print('a ** b =', a ** b) print('np.sqrt(b) =', np.sqrt(b)) # use numpy functions for numpy arrays ``` Numpy performs the arithmetic operations of the single number in `a` with all the numbers of the array `b`. This is called broadcasting in computer science. Even if you have two arrays (but they must have the same size), Numpy handles for you: ``` a = np.array([1, 2, 3]) b = np.array([4, 5, 6]) print('a =', a, ', b =', b) print('a + b =', a + b) print('a - b =', a - b) print('a * b =', a * b) print('a / b =', a / b) print('a ** b =', a ** b) ``` ## Vector >A **vector** is a quantity with magnitude (or length) and direction expressed numerically as an ordered list of values according to a coordinate reference system. For example, position, force, and torque are physical quantities defined by vectors. For instance, consider the position of a point in space represented by a vector: <br> <figure><img src="./../images/vector3D.png" width=300/><figcaption><center><i>Figure. Position of a point represented by a vector in a Cartesian coordinate system.</i></center></figcaption></figure> The position of the point (the vector) above can be represented as a tuple of values: $$ (x,\: y,\: z) \; \Rightarrow \; (1, 3, 2) $$ or in matrix form: $$ \begin{bmatrix} x \\y \\z \end{bmatrix} \;\; \Rightarrow \;\; \begin{bmatrix} 1 \\3 \\2 \end{bmatrix}$$ We can use the Numpy array to represent the components of vectors. For instance, for the vector above is expressed in Python as: ``` a = np.array([1, 3, 2]) print('a =', a) ``` Exactly like the arrays in the last example for scalars, so all operations we performed will result in the same values, of course. However, as we are now dealing with vectors, now some of the operations don't make sense. 
For example, for vectors there are no multiplication, division, power, and square root in the way we calculated. A vector can also be represented as: <span class="notranslate"> $$ \overrightarrow{\mathbf{a}} = a_x\hat{\mathbf{i}} + a_y\hat{\mathbf{j}} + a_z\hat{\mathbf{k}} $$ </span> <br> <figure><img src="./../images/vector3Dijk.png" width=300/><figcaption><center><i>Figure. A vector representation in a Cartesian coordinate system. The versors <span class="notranslate"> $\hat{\mathbf{i}},\, \hat{\mathbf{j}},\, \hat{\mathbf{k}}\,$ </span> are usually represented in the color sequence <b>rgb</b> (red, green, blue) for easier visualization.</i></center></figcaption></figure> Where <span class="notranslate"> $\hat{\mathbf{i}},\, \hat{\mathbf{j}},\, \hat{\mathbf{k}}\,$ </span> are unit vectors, each representing a direction and <span class="notranslate"> $ a_x\hat{\mathbf{i}},\: a_y\hat{\mathbf{j}},\: a_z\hat{\mathbf{k}} $ </span> are the vector components of the vector $\overrightarrow{\mathbf{a}}$. A unit vector (or versor) is a vector whose length (or norm) is 1. 
The unit vector of a non-zero vector $\overrightarrow{\mathbf{a}}$ is the unit vector codirectional with $\overrightarrow{\mathbf{a}}$: <span class="notranslate"> $$ \mathbf{\hat{u}} = \frac{\overrightarrow{\mathbf{a}}}{||\overrightarrow{\mathbf{a}}||} = \frac{a_x\,\hat{\mathbf{i}} + a_y\,\hat{\mathbf{j}} + a_z\, \hat{\mathbf{k}}}{\sqrt{a_x^2+a_y^2+a_z^2}} $$ </span> ### Magnitude (length or norm) of a vector The magnitude (length) of a vector is often represented by the symbol $||\;||$, also known as the norm (or Euclidean norm) of a vector and it is defined as: <span class="notranslate"> $$ ||\overrightarrow{\mathbf{a}}|| = \sqrt{a_x^2+a_y^2+a_z^2} $$ </span> The function `numpy.linalg.norm` calculates the norm: ``` a = np.array([1, 2, 3]) np.linalg.norm(a) ``` Or we can use the definition and compute directly: ``` np.sqrt(np.sum(a*a)) ``` Then, the versor for the vector <span class="notranslate"> $ \overrightarrow{\mathbf{a}} = (1, 2, 3) $ </span> is: ``` a = np.array([1, 2, 3]) u = a/np.linalg.norm(a) print('u =', u) ``` And we can verify its magnitude is indeed 1: ``` np.linalg.norm(u) ``` But the representation of a vector as a tuple of values is only valid for a vector with its origin coinciding with the origin $ (0, 0, 0) $ of the coordinate system we adopted. For instance, consider the following vector: <br> <figure><img src="./../images/vector2.png" width=260/><figcaption><center><i>Figure. A vector in space.</i></center></figcaption></figure> Such a vector cannot be represented by $ (b_x, b_y, b_z) $ because this would be for the vector from the origin to the point B. To represent exactly this vector we need the two vectors <span class="notranslate"> $ \mathbf{a} $ </span> and <span class="notranslate"> $ \mathbf{b} $ </span>. This fact is important when we perform some calculations in Mechanics. 
### Vector addition and subtraction
First let's check the shape of the variable `a` using the method `shape` or the function `numpy.shape`: ``` print(a.shape) print(np.shape(a)) ``` This means the variable `a` has 2 rows and 3 columns. We have to tell the function `numpy.norm` to calculate the norm for each vector, i.e., to operate through the columns of the variable `a` using the paraneter `axis`: ``` np.linalg.norm(a, axis=1) ``` ## Dot product Dot product (or scalar product or inner product) between two vectors is a mathematical operation algebraically defined as the sum of the products of the corresponding components (maginitudes in each direction) of the two vectors. The result of the dot product is a single number (a scalar). The dot product between vectors <span class="notranslate">$\overrightarrow{\mathbf{a}}$</span> and $\overrightarrow{\mathbf{b}}$ is: <span class="notranslate"> $$ \overrightarrow{\mathbf{a}} \cdot \overrightarrow{\mathbf{b}} = (a_x\,\hat{\mathbf{i}}+a_y\,\hat{\mathbf{j}}+a_z\,\hat{\mathbf{k}}) \cdot (b_x\,\hat{\mathbf{i}}+b_y\,\hat{\mathbf{j}}+b_z\,\hat{\mathbf{k}}) = a_x b_x + a_y b_y + a_z b_z $$ </span> Because by definition: <span class="notranslate"> $$ \hat{\mathbf{i}} \cdot \hat{\mathbf{i}} = \hat{\mathbf{j}} \cdot \hat{\mathbf{j}} = \hat{\mathbf{k}} \cdot \hat{\mathbf{k}}= 1 \quad \text{and} \quad \hat{\mathbf{i}} \cdot \hat{\mathbf{j}} = \hat{\mathbf{i}} \cdot \hat{\mathbf{k}} = \hat{\mathbf{j}} \cdot \hat{\mathbf{k}} = 0 $$ </span> The geometric equivalent of the dot product is the product of the magnitudes of the two vectors and the cosine of the angle between them: <span class="notranslate"> $$ \overrightarrow{\mathbf{a}} \cdot \overrightarrow{\mathbf{b}} = ||\overrightarrow{\mathbf{a}}||\:||\overrightarrow{\mathbf{b}}||\:cos(\theta) $$ </span> Which is also equivalent to state that the dot product between two vectors $\overrightarrow{\mathbf{a}}$ and $\overrightarrow{\mathbf{b}}$ is the magnitude of $\overrightarrow{\mathbf{a}}$ times the magnitude of the 
component of $\overrightarrow{\mathbf{b}}$ parallel to $\overrightarrow{\mathbf{a}}$ (or the magnitude of $\overrightarrow{\mathbf{b}}$ times the magnitude of the component of $\overrightarrow{\mathbf{a}}$ parallel to $\overrightarrow{\mathbf{b}}$). The dot product between two vectors can be visualized in this interactive animation: ``` IFrame('https://www.geogebra.org/classic/ncdf2jsw?embed', width='100%', height=500) ``` The Numpy function for the dot product is `numpy.dot`: ``` a = np.array([1, 2, 3]) b = np.array([4, 5, 6]) print('a =', a, '\nb =', b) print('np.dot(a, b) =', np.dot(a, b)) ``` Or we can use the definition and compute directly: ``` np.sum(a*b) ``` For 2D arrays, the `numpy.dot` function performs matrix multiplication rather than the dot product; so let's use the `numpy.sum` function: ``` a = np.array([[1, 2, 3], [1, 1, 1]]) b = np.array([[4, 5, 6], [7, 8, 9]]) np.sum(a*b, axis=1) ``` ## Vector product Cross product or vector product between two vectors is a mathematical operation in three-dimensional space which results in a vector perpendicular to both of the vectors being multiplied and a length (norm) equal to the product of the perpendicular components of the vectors being multiplied (which is equal to the area of the parallelogram that the vectors span). 
The cross product between vectors $\overrightarrow{\mathbf{a}}$ and $\overrightarrow{\mathbf{b}}$ is: <span class="notranslate"> $$ \overrightarrow{\mathbf{a}} \times \overrightarrow{\mathbf{b}} = (a_x\,\hat{\mathbf{i}} + a_y\,\hat{\mathbf{j}} + a_z\,\hat{\mathbf{k}}) \times (b_x\,\hat{\mathbf{i}}+b_y\,\hat{\mathbf{j}}+b_z\,\hat{\mathbf{k}}) = (a_yb_z-a_zb_y)\hat{\mathbf{i}} + (a_zb_x-a_xb_z)\hat{\mathbf{j}}+(a_xb_y-a_yb_x)\hat{\mathbf{k}} $$ </span> Because by definition: <span class="notranslate"> $$ \begin{array}{l l} \hat{\mathbf{i}} \times \hat{\mathbf{i}} = \hat{\mathbf{j}} \times \hat{\mathbf{j}} = \hat{\mathbf{k}} \times \hat{\mathbf{k}} = 0 \\ \hat{\mathbf{i}} \times \hat{\mathbf{j}} = \hat{\mathbf{k}}, \quad \hat{\mathbf{k}} \times \hat{\mathbf{k}} = \hat{\mathbf{i}}, \quad \hat{\mathbf{k}} \times \hat{\mathbf{i}} = \hat{\mathbf{j}} \\ \hat{\mathbf{j}} \times \hat{\mathbf{i}} = -\hat{\mathbf{k}}, \quad \hat{\mathbf{k}} \times \hat{\mathbf{j}}= -\hat{\mathbf{i}}, \quad \hat{\mathbf{i}} \times \hat{\mathbf{k}} = -\hat{\mathbf{j}} \end{array} $$ </span> The direction of the vector resulting from the cross product between the vectors $\overrightarrow{\mathbf{a}}$ and $\overrightarrow{\mathbf{b}}$ is given by the right-hand rule. 
The geometric equivalent of the magnitude of the cross product is the product of the magnitudes of the two vectors and the sine of the angle between them: <span class="notranslate"> $$ ||\overrightarrow{\mathbf{a}} \times \overrightarrow{\mathbf{b}}|| = ||\overrightarrow{\mathbf{a}}||\:||\overrightarrow{\mathbf{b}}||\:\sin(\theta) $$ </span>
The cross product between two vectors can be visualized in this interactive animation: ``` IFrame('https://www.geogebra.org/classic/cz6v2U99?embed', width='100%', height=500) ``` The Numpy function for the cross product is `numpy.cross`: ``` print('a =', a, '\nb =', b) print('np.cross(a, b) =', np.cross(a, b)) ``` For 2D arrays with vectors in different rows: ``` a = np.array([[1, 2, 3], [1, 1, 1]]) b = np.array([[4, 5, 6], [7, 8, 9]]) np.cross(a, b, axis=1) ``` ### Gram–Schmidt process The [Gram–Schmidt process](http://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process) is a method for orthonormalizing (orthogonal unit versors) a set of vectors using the scalar product. The Gram–Schmidt process works for any number of vectors. For example, given three vectors, $\overrightarrow{\mathbf{a}}, \overrightarrow{\mathbf{b}}, \overrightarrow{\mathbf{c}}$, in the 3D space, a basis $\{\hat{e}_a, \hat{e}_b, \hat{e}_c\}$ can be found using the Gram–Schmidt process by: The first versor is in the $\overrightarrow{\mathbf{a}}$ direction (or in the direction of any of the other vectors): $$ \hat{e}_a = \frac{\overrightarrow{\mathbf{a}}}{||\overrightarrow{\mathbf{a}}||} $$ The second versor, orthogonal to $\hat{e}_a$, can be found considering we can express vector $\overrightarrow{\mathbf{b}}$ in terms of the $\hat{e}_a$ direction as: $$ \overrightarrow{\mathbf{b}} = \overrightarrow{\mathbf{b}}^\| + \overrightarrow{\mathbf{b}}^\bot $$ Then: $$ \overrightarrow{\mathbf{b}}^\bot = \overrightarrow{\mathbf{b}} - \overrightarrow{\mathbf{b}}^\| = \overrightarrow{\mathbf{b}} - (\overrightarrow{\mathbf{b}} \cdot \hat{e}_a ) \hat{e}_a $$ Finally: $$ \hat{e}_b = \frac{\overrightarrow{\mathbf{b}}^\bot}{||\overrightarrow{\mathbf{b}}^\bot||} $$ The third versor, orthogonal to $\{\hat{e}_a, \hat{e}_b\}$, can be found expressing the vector $\overrightarrow{\mathbf{C}}$ in terms of $\hat{e}_a$ and $\hat{e}_b$ directions as: $$ \overrightarrow{\mathbf{c}} = \overrightarrow{\mathbf{c}}^\| + 
\overrightarrow{\mathbf{c}}^\bot $$ Then: $$ \overrightarrow{\mathbf{c}}^\bot = \overrightarrow{\mathbf{c}} - \overrightarrow{\mathbf{c}}^\| $$ Where: $$ \overrightarrow{\mathbf{c}}^\| = (\overrightarrow{\mathbf{c}} \cdot \hat{e}_a ) \hat{e}_a + (\overrightarrow{\mathbf{c}} \cdot \hat{e}_b ) \hat{e}_b $$ Finally: $$ \hat{e}_c = \frac{\overrightarrow{\mathbf{c}}^\bot}{||\overrightarrow{\mathbf{c}}^\bot||} $$ Let's implement the Gram–Schmidt process in Python. For example, consider the positions (vectors) $\overrightarrow{\mathbf{a}} = [1,2,0], \overrightarrow{\mathbf{b}} = [0,1,3], \overrightarrow{\mathbf{c}} = [1,0,1]$: ``` a = np.array([1, 2, 0]) b = np.array([0, 1, 3]) c = np.array([1, 0, 1]) ``` The first versor is: ``` ea = a/np.linalg.norm(a) print(ea) ``` The second versor is: ``` eb = b - np.dot(b, ea)*ea eb = eb/np.linalg.norm(eb) print(eb) ``` And the third version is: ``` ec = c - np.dot(c, ea)*ea - np.dot(c, eb)*eb ec = ec/np.linalg.norm(ec) print(ec) ``` Let's check the orthonormality between these versors: ``` print('Versors:', '\nea =', ea, '\neb =', eb, '\nec =', ec) print('\nTest of orthogonality (scalar product between versors):', '\nea x eb:', np.dot(ea, eb), '\neb x ec:', np.dot(eb, ec), '\nec x ea:', np.dot(ec, ea)) print('\nNorm of each versor:', '\n||ea|| =', np.linalg.norm(ea), '\n||eb|| =', np.linalg.norm(eb), '\n||ec|| =', np.linalg.norm(ec)) ``` Or, we can simply use the built-in QR factorization function from NumPy: ``` vectors = np.vstack((a,b,c)).T Q, R = np.linalg.qr(vectors) print(Q) ea, eb, ec = Q[:, 0], Q[:, 1], Q[:, 2] print('Versors:', '\nea =', ea, '\neb =', eb, '\nec =', ec) print('\nTest of orthogonality (scalar product between versors):') print(np.dot(Q.T, Q)) print('\nTest of orthogonality (scalar product between versors):', '\nea x eb:', np.dot(ea, eb), '\neb x ec:', np.dot(eb, ec), '\nec x ea:', np.dot(ec, ea)) print('\nNorm of each versor:', '\n||ea|| =', np.linalg.norm(ea), '\n||eb|| =', np.linalg.norm(eb), '\n||ec|| =', 
np.linalg.norm(ec)) ``` Which results in the same basis with exception of the changed signals. ## Further reading - Read pages 44-92 of the first chapter of the [Ruina and Rudra's book](http://ruina.tam.cornell.edu/Book/index.html) about scalars and vectors in Mechanics. ## Video lectures on the Internet - Khan Academy: [Vectors](https://www.khanacademy.org/math/algebra-home/alg-vectors) - [Vectors, what even are they?](https://youtu.be/fNk_zzaMoSs) ## Problems 1. Given the vectors, $\overrightarrow{\mathbf{a}}=[1, 0, 0]$ and $\overrightarrow{\mathbf{b}}=[1, 1, 1]$, calculate the dot and cross products between them. 2. Calculate the unit vectors for $[2, −2, 3]$ and $[3, −3, 2]$ and determine an orthogonal vector to these two vectors. 3. Given the vectors $\overrightarrow{\mathbf{a}}$=[1, 0, 0] and $\overrightarrow{\mathbf{b}}$=[1, 1, 1], calculate $ \overrightarrow{\mathbf{a}} \times \overrightarrow{\mathbf{b}} $ and verify that this vector is orthogonal to vectors $\overrightarrow{\mathbf{a}}$ and $\overrightarrow{\mathbf{b}}$. Also, calculate $\overrightarrow{\mathbf{b}} \times \overrightarrow{\mathbf{a}}$ and compare it with $\overrightarrow{\mathbf{a}} \times \overrightarrow{\mathbf{b}}$. 4. Given the vectors $[1, 1, 0]; [1, 0, 1]; [0, 1, 1]$, calculate a basis using the Gram–Schmidt process. 5. Write a Python function to calculate a basis using the Gram–Schmidt process (implement the algorithm!) considering that the input are three variables where each one contains the coordinates of vectors as columns and different positions of these vectors as rows. For example, sample variables can be generated with the command `np.random.randn(5, 3)`. 6. Study the sample problems **1.1** to **1.9**, **1.11** (using Python), **1.12**, **1.14**, **1.17**, **1.18** to **1.24** of Ruina and Rudra's book 7. From Ruina and Rudra's book, solve the problems **1.1.1** to **1.3.16**. 
If you are new to scalars and vectors, you should solve these problems first by hand and then use Python to check the answers. ## References - Ruina A, Rudra P (2019) [Introduction to Statics and Dynamics](http://ruina.tam.cornell.edu/Book/index.html). Oxford University Press.
github_jupyter
<img src="images/logo.jpg" style="display: block; margin-left: auto; margin-right: auto;" alt="לוגו של מיזם לימוד הפייתון. נחש מצויר בצבעי צהוב וכחול, הנע בין האותיות של שם הקורס: לומדים פייתון. הסלוגן המופיע מעל לשם הקורס הוא מיזם חינמי ללימוד תכנות בעברית."> # <span style="text-align: right; direction: rtl; float: right;">לולאת for</span> ## <span style="text-align: right; direction: rtl; float: right; clear: both;">הקדמה</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> פעמים רבות אנחנו מנסים לפתור בעיה, שכדי להגיע לפתרונה נידרש לעבור על כל הערכים במבנה מסוים: </p> <ul style="text-align: right; direction: rtl; float: right; clear: both;"> <li>קבל את רשימת גובהי התלמידים בכיתה, והחזר את גובהו של התלמיד הגבוה ביותר.</li> <li>קבל את רשימת הקלפים שבידי, והחזר את הקלף המתאים ביותר לשליפה עכשיו.</li> <li>קבל רשימת השמעה (Playlist), והחזר את כל השירים של הלהקה Led Zeppelin.</li> <li>קבל את רשימת המסעדות בצרפת ואת הדירוגים שלהן, והחזר את 3 המסעדות בעלות הדירוג הגבוה ביותר.</li> </ul> <p style="text-align: right; direction: rtl; float: right; clear: both;"> מה משותף לכל הבעיות שהוצגו למעלה?<br> דרך ראויה לפתור אותן היא בעזרת לולאה שתעבור על כל האיברים שהוצגו בבעיה, ותבצע על כל איבר סדרת פעולות.<br> נכתוב בפסאודו־קוד דוגמה לפתרון הבעיה הראשונה – מציאת הגובה של התלמיד הגבוה ביותר בכיתה: </p> <ol style="text-align: right; direction: rtl; float: right; clear: both;"> <li><strong>קלוט</strong> <em>רשימת גבהים</em> לתוך המשתנה <var>גבהים</var>.</li> <li><strong>אפס</strong> את המשתנה ששמו <var>הכי_גבוה_שמצאנו</var> כך שיכיל את הערך <em>0</em>.</li> <li> <strong>עבור כל</strong> <var>גובה</var> שנמצא בתוך <var>גבהים</var>: <ol> <li style="list-style: numbers;"> <strong>אם</strong> הערך של <var>גובה</var> גדול יותר מ<var>הכי_גבוה_שמצאנו</var>: <ol> <li> <strong>שים</strong> בתוך <var>הכי_גבוה_שמצאנו</var> את הערך של <var>גובה</var>. 
</li> </ol> </li> </ol> </li> <li><strong>החזר</strong> את <var>הכי_גבוה_שמצאנו</var>.</li> </ol> <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;"> <div style="display: flex; width: 10%; float: right; clear: both;"> <img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול"> </div> <div style="width: 70%"> <p style="text-align: right; direction: rtl; float: right; clear: both;"> ממשו פונקציה שמקבלת רשימת גבהים של האנשים בכיתה, ומחזירה את הגובה של התלמיד הגבוה ביותר.<br> לדוגמה, עבור הרשימה <code dir="ltr" style="direction: ltr;">[1.50, 1.84, 1.73, 1.51]</code> החזירו <samp>1.84</samp>. </p> </div> <div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;"> <p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;"> <strong>חשוב!</strong><br> פתרו לפני שתמשיכו! </p> </div> </div> ### <span style="text-align: right; direction: rtl; float: right; clear: both;">לולאת while</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> עד כה, אחד מהשימושים הנפוצים שעשינו בלולאת <code>while</code> היה לעבור על איברים ברשימה באמצעות מיקום.<br> הקוד שכתבתם בתרגול למעלה, שבו הייתם צריכים לעבור על גובהי התלמידים, זה אחר זה, הוא דוגמה טובה לכך.<br> ייתכן שהקוד שלכם דומה לקוד שאני כתבתי לפתרון התרגיל הזה: </p> ``` def get_tallest(student_heights): max_height_so_far = 0 current_student_index = 0 while current_student_index < len(student_heights): current_student_height = student_heights[current_student_index] if current_student_height > max_height_so_far: max_height_so_far = current_student_height current_student_index = current_student_index + 1 return max_height_so_far heights = [1.50, 1.84, 1.73, 1.51] print(get_tallest(heights)) ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> הקוד בתא האחרון עושה את הפעולות הבאות: </p> <ul style="text-align: right; direction: rtl; float: right; 
clear: both;"> <li>איפוס המשתנה <var>max_height_so_far</var>, ששומר את הגובה המרבי שמצאנו עד כה ברשימה.</li> <li>איפוס המשתנה <var>current_student_index</var>, שמצביע על מיקום התלמיד שאנחנו בודקים באיטרציה הנוכחית של הלולאה.</li> <li>בכל איטרציה, הביטוי <code>student_heights[current_student_index]</code> ישיג את גובהו של אחד התלמידים, לפי הסדר.</li> <li>אם התלמיד הנבדק גבוה יותר מהתלמיד הכי גבוה שמצאנו עד עכשיו, שמור את הגובה המרבי החדש בתוך <var>max_height_so_far</var>.</li> <li>קדם את <var>current_student_index</var> כך שיצביע לתא שבו מופיע התלמיד הבא.</li> <li>בסיום המעבר על כל הערכים, החזר את <var>max_height_so_far</var>.</li> </ul> <p style="text-align: right; direction: rtl; float: right; clear: both;"> הלולאה שמופיעה תעבור על מספרי התאים ברשימה ותבדוק את התוכן שלהם.<br> עד כה, פעמים רבות השימוש שלנו בלולאות היה לצורך <mark>מעבר על כל האיברים של iterable כלשהו</mark>. </p> <p style="text-align: right; direction: rtl; float: right; clear: both;"> למעשה, בדוגמה שלמעלה <mark>אנחנו מבצעים פעולה עבור כל איבר בתוך <var>student_heights</var></mark>. 
</p> ## <span style="text-align: right; direction: rtl; float: right; clear: both;">שימוש ב־for</span> ### <span style="text-align: right; direction: rtl; float: right; clear: both;">שימוש בסיסי</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> בפעמים שבהן אנחנו רוצים לבצע דבר מה <em>עבור</em> כל אחד מהאיברים במבנה כלשהו, נשתמש ב<dfn>לולאת <code>for</code></dfn>.<br> נראה, לדוגמה, איך נשתמש בלולאת <code>while</code> כדי להדפיס את שמותיהן של כל התלמידות בכיתה: </p> ``` names_of_students_in_class = ['Galia', 'Hadas', 'Hen', 'Ilana', 'Ivria', 'Karin', 'Maya', 'Noa'] student_index = 0 while student_index < len(names_of_students_in_class): student_name = names_of_students_in_class[student_index] print(student_name) student_index = student_index + 1 ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> בעברית, היינו משתמשים בנוסח דומה לזה: <mark>עבור כל שם של תלמידה בתוך רשימת שמות התלמידות שבכיתה, הדפס את שם התלמידה</mark>.</p> <p style="text-align: right; direction: rtl; float: right; clear: both;"> נכתוב בפייתון, הפעם בעזרת לולאת <code>for</code>: </p> ``` for student_name in names_of_students_in_class: print(student_name) ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> השוו את האלגנטיות של הקוד הזה לאלגנטיות של הקוד שמשתמש בלולאת <code>while</code>, ואת הדמיון בין כל אחת מהן לבין הנוסח המילולי שכתבנו. </p> ### <span style="text-align: right; direction: rtl; float: right; clear: both;">איך זה עובד?</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> לולאת ה־<code>for</code> שראיתם מתחלקת ל־3 חלקים: </p> <ol style="text-align: right; direction: rtl; float: right; clear: both;"> <li>iterable כלשהו – נסתכל על כל האיברים שבו לפי הסדר שלהם.</li> <li>שם משתנה חדש שנמציא – פייתון תיצור אותו במיוחד עבור הלולאה. 
המשתנה הזה יצביע בכל פעם על איבר אחד, לפי הסדר, מתוך ה־iterable.</li> <li>הפעולה או הפעולות שאנחנו רוצים לבצע על כל אחד מהאיברים האלו.</li> </ol> ``` names_of_students_in_class = ['Galia', 'Hadas', 'Hen', 'Ilana', 'Ivria', 'Karin', 'Maya', 'Noa'] # השם שנמציא # V Iterable, ערך שאפשר לפרק לכלל איבריו for student_name in names_of_students_in_class: print(student_name) # <---- הפעולות לביצוע ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> נבחן את הלולאה שמדפיסה את שמות התלמידות בכיתה, שלב אחר שלב, כדי להבין איך לולאת <code>for</code> פועלת. </p> <div class="align-center" style="display: flex; text-align: right; direction: rtl;"> <div style="display: flex; width: 10%; float: right; "> <img src="images/warning.png" style="height: 50px !important;" alt="אזהרה!"> </div> <div style="width: 90%"> <p style="text-align: right; direction: rtl;"> כמה מקטעי הקוד הבאים לא ירוצו, כיוון שחלק מהקוד מסומן בהערה.<br> המטרה של קטעי הקוד בחלק הזה של המחברת היא להדגיש איזה קוד רץ באותו רגע. 
</p> </div> </div> ``` names_of_students_in_class = ['Galia', 'Hadas', 'Hen', 'Ilana', 'Ivria', 'Karin', 'Maya', 'Noa'] ``` <table style="font-size: 1.5rem; border: 0px solid black; border-spacing: 0px;"> <caption style="direction: rtl; text-align: center;">תוכן המשתנה <var>names_of_students_in_class</var></caption> <tr> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-bottom: 1px solid;">0</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">1</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">3</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">4</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">5</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">6</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">7</td> </tr> <tbody> <tr> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Galia"</td> <td style="padding-top: 8px; padding-bottom: 8px; 
padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Hadas"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Hen"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Ilana"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Ivria"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Karin"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Maya"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Noa"</td> </tr> <tr style="background: #f5f5f5;"> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right;">-8</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-7</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-6</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-5</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-4</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-3</td> <td style="padding-left: 4px; padding-top: 
2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-1</td> </tr> </tbody> </table> <p style="text-align: right; direction: rtl; float: right; clear: both;"> בחזרור הראשון, המשתנה החדש שיצרנו, בשם <var>student_name</var>, יצביע על הערך הראשון ברשימה, <samp>Galia</samp>.<br> נשים לב שהמשתנה <var>student_name</var> לא היה קיים לפני הלולאה, והלולאה היא המבנה הקסום שיוצר את המשתנה וגורם לו להצביע לערכים: </p> ``` for student_name in names_of_students_in_class: # <--- אנחנו פה # print(student_name) ``` <table style="font-size: 1.5rem; border: 0px solid black; border-spacing: 0px;"> <caption style="direction: rtl; text-align: center;">חזרור ראשון, <var>student_name</var> מצביע על "Galia"</caption> <tr> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-bottom: 1px solid;">0</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">1</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">3</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">4</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px 
solid;">5</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">6</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">7</td> </tr> <tbody> <tr> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: yellow">"Galia"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Hadas"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Hen"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Ilana"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Ivria"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Karin"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Maya"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Noa"</td> </tr> <tr style="background: #f5f5f5;"> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: center;"><var>student_name</var> <span style="font-size: 1.8rem;">↑</span></td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid 
#555555;">-7</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-6</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-5</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-4</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-3</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-1</td> </tr> </tbody> </table> <div> </div> <p style="text-align: right; direction: rtl; float: right; clear: both;"> מייד לאחר מכן יודפס התוכן שאליו <var>student_name</var> מצביע:</p> ``` #for student_name in names_of_students_in_class: print(student_name) # <--- אנחנו פה ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> סיימנו את האיטרציה! 
מה עכשיו?<br> עולים חזרה לראש הלולאה כדי לבדוק אם נשארו עוד איברים לעבור עליהם:</p> ``` for student_name in names_of_students_in_class: # <--- אנחנו פה # print(student_name) ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> המשתנה <var>student_name</var> יעבור להצביע על האיבר הבא, <samp>Hadas</samp>: </p> <table style="font-size: 1.5rem; border: 0px solid black; border-spacing: 0px;"> <caption style="direction: rtl; text-align: center;">חזרור שני, <var>student_name</var> מצביע על "Hadas"</caption> <tr> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-bottom: 1px solid;">0</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">1</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">3</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">4</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">5</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">6</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">7</td> </tr> <tbody> 
<tr> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Galia"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: yellow">"Hadas"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Hen"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Ilana"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Ivria"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Karin"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Maya"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Noa"</td> </tr> <tr style="background: #f5f5f5;"> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right;">-8</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;"><var>student_name</var> <span style="font-size: 1.8rem;">↑</span></td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-6</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-5</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; 
font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-4</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-3</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-1</td> </tr> </tbody> </table> <div> </div> <p style="text-align: right; direction: rtl; float: right; clear: both;"> ושוב, נדפיס את המחרוזת ש־<var>student_name</var> מצביע עליה: </p> ``` #for student_name in names_of_students_in_class: print(student_name) # <---- עכשיו אנחנו פה ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> כך נמשיך לבצע את הלולאה, עד שנגיע לאיבר האחרון ברשימה, התלמידה <samp>Noa</samp>: </p> ``` for student_name in names_of_students_in_class: # אנחנו פה, אחרי שעברנו על שמות כל התלמידות, פרט לנועה # print(student_name) ``` <table style="font-size: 1.5rem; border: 0px solid black; border-spacing: 0px;"> <caption style="direction: rtl; text-align: center;">חזרור אחרון, <var>student_name</var> מצביע על "Noa"</caption> <tr> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-bottom: 1px solid;">0</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">1</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; 
border-left: 1px solid #555555; border-bottom: 1px solid;">3</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">4</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">5</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">6</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">7</td> </tr> <tbody> <tr> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Galia"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Hadas"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Hen"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Ilana"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Ivria"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Karin"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;">"Maya"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; 
background-color: yellow;">"Noa"</td> </tr> <tr style="background: #f5f5f5;"> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right;">-8</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-7</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-6</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-5</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-4</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-3</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;"><var>student_name</var> <span style="font-size: 1.8rem;">↑</span></td> </tr> </tbody> </table> <div> </div> <p style="text-align: right; direction: rtl; float: right; clear: both;"> נדפיס בפעם האחרונה את המחרוזת שעליה אנחנו מצביעים: </p> ``` #for student_name in names_of_students_in_class: print(student_name) # <---- עכשיו אנחנו פה ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> כשנחזור לראש הלולאה, נראה שעברנו על כל האיברים.<br> הלולאה תיפסק, וירוץ הקוד שנמצא אחרי הלולאה. 
</p> <table style="text-align: right; direction: rtl; clear: both; font-size: 1.3rem"> <caption style="text-align: center; direction: rtl; clear: both; font-size: 2rem; padding-bottom: 2rem;">החלקים בלולאת for</caption> <thead> <tr> <th>החלק בלולאה</th> <th>איפה בקוד</th> <th>דוגמה</th> </tr> </thead> <tbody> <tr> <td>המבנה שאנחנו רוצים לעבור על כלל איבריו – חייב להיות iterable</td> <td>אחרי המילה <em>in</em></td> <td><var>names_of_students_in_class</var></td> </tr> <tr> <td>שם שנמציא למשתנה – בכל חזרור יכיל איבר מתוך ה־iterable</td> <td>אחרי המילה <em>for</em>, לפני המילה <em>in</em></td> <td><var>student_name</var></td> </tr> <tr> <td>תוכן הלולאה – הפעולות שנרצה לבצע על כל איבר</td> <td>אחרי הנקודתיים, בשורה חדשה (אחת או יותר), בהזחה</td> <td><code>print(student_name)</code></td> </tr> </tbody> </table> <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;"> <div style="display: flex; width: 10%; float: right; clear: both;"> <img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול"> </div> <div style="width: 70%"> <p style="text-align: right; direction: rtl; float: right; clear: both;"> שנו את הקוד שכתבתם למציאת האדם הגבוה ביותר בכיתה, כך שישתמש ב־<code>for</code> ולא ב־<code>while</code>.<br> טיפ: השתמשו בפסאודו־קוד שהוצג לפני כן, והשוו בין ה־<code>for</code> לבין ה־<code>while</code> בדוגמה של הדפסת שמות התלמידים.<br> </p> </div> <div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;"> <p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;"> <strong>חשוב!</strong><br> פתרו לפני שתמשיכו! 
</p> </div> </div> ### <span style="text-align: right; direction: rtl; float: right;">דוגמאות מילוליות נוספות</span> <ul style="text-align: right; direction: rtl; float: right; clear: both;"> <li>עבור כל עמוד בספר – קרא את העמוד.</li> <li>עבור כל צלחת – שטוף אותה במים חמים, קרצף אותה היטב בסקוץ' ספוג בסבון, נגב אותה במגבת יבשה ואחסן אותה בארון.</li> <li>בהינתן רשימה של 1,000 תלמידים, חשב את הגובה הממוצע של תלמיד.</li> <li>בליל כל הקדושים, התחפש, צא החוצה, ועבור כל בית ברחוב: גש לדלת, צלצל בפעמון, אמור "ממתק או תעלול", קח ממתק ואמור תודה.</li> </ul> ### <span style="text-align: right; direction: rtl; float: right;">מתי להשתמש?</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> לולאת <code>for</code> יוצרת מבנה אלגנטי וקריא, ומתכנתים רבים מעדיפים אותה על פני לולאת <code>while</code>.<br> ננסה לעמוד על ההבדלים בין הלולאות:<br> </p> <table style="text-align: right; direction: rtl; clear: both; font-size: 1.3rem"> <caption style="text-align: center; direction: rtl; clear: both; font-size: 2rem; padding-bottom: 2rem;">הבדלים בין while ל־for</caption> <thead> <tr> <th>נתון להשוואה</th> <th>לולאת <code>for</code></th> <th>לולאת <code>while</code></th> </tr> </thead> <tbody> <tr> <td>מה "מניע" את הלולאה?</td> <td>iterable שהלולאה תעבור על כל האיברים שלו</td> <td>ביטוי שערכו הבוליאני שקול ל־<code>True</code> או ל־<code>False</code></td> </tr> <tr> <td>מתי הלולאה מפסיקה</td> <td>כשהלולאה עברה על כל האיברים של ה־iterable</td> <td>כשמגיעים לתנאי של הלולאה וערכו שקול ל־<code>False</code></td> </tr> <tr> <td>שימושים עיקריים</td> <td>ביצוע פעולה עבור כל ערך בסדרת ערכים, כמו איברי רשימה או תווים במחרוזת</td> <td>חזרה על פעולה כל עוד מצאנו שהמשימה לא הושלמה</td> </tr> </tbody> </table> <p style="text-align: right; direction: rtl; float: right; clear: both;"> בשלב הנוכחי בקורס, תמיד נוכל להשתמש בלולאת <code>while</code> במקום בלולאת <code>for</code>, אך לא תמיד נוכל להחליף לולאות <code>while</code> בלולאות <code>for</code>.<br> באופן כללי, לולאות 
<code>while</code> יכולות להוכיח את עצמן כשימושיות מאוד מפעם לפעם.<br> חשבו, לדוגמה, על מצב שבו אתם צריכים לקבל מהמשתמש קלט חדש כל עוד הקלט שלו לא תקין. </p> <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;"> <div style="display: flex; width: 10%; float: right; clear: both;"> <img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול"> </div> <div style="width: 70%"> <p style="text-align: right; direction: rtl; float: right; clear: both;"> כתבו 3 דוגמאות מילוליות נוספות ללולאות <code>for</code>. </p> </div> <div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;"> <p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;"> <strong>חשוב!</strong><br> פתרו לפני שתמשיכו! </p> </div> </div> ### <span style="text-align: right; direction: rtl; float: right;">תרגיל ביניים: פורלולה 1</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> בתחרות המרוצים "פורלולה 1", שבה משתתפות בקביעות 6 מכוניות מרוץ, אפשר להמר על הסדר שבו יגיעו המכוניות לקו הסיום.<br> משתתף זוכה הוא משתתף שהצליח לנחש נכונה את סדר ההגעה של המכוניות לקו הסיום, עם לא יותר מ־2 טעויות.<br> כתבו פונקציה שמקבלת הימור בודד ואת סדר ההגעה של המכוניות במרוץ, והחזירו אם ההימור זכה או הפסיד. 
</p> <p style="text-align: right; direction: rtl; float: right; clear: both;"> לדוגמה, במרוץ האחרון סדר ההגעה לקו הסיום היה:<br> <samp dir="ltr">[1, 2, 3, 4, 5, 6]</samp> </p> <p style="text-align: right; direction: rtl; float: right; clear: both;"> הנה דוגמאות להימורים של משתתפים ולתוצאתם:<br> </p> <ul style="text-align: right; direction: rtl; float: right; clear: both;"> <li><samp dir="ltr">[1, 2, 3, 4, 5, 6]</samp> – הימור זוכה (0 טעויות)</li> <li><samp dir="ltr">[2, 1, 3, 4, 5, 6]</samp> – הימור זוכה (2 טעויות)</li> <li><samp dir="ltr">[1, 2, 6, 4, 5, 3]</samp> – הימור זוכה (2 טעויות)</li> <li><samp dir="ltr">[1, 2, 4, 4, 5, 6]</samp> – הימור זוכה (טעות אחת)</li> <li><samp dir="ltr">[1, 6, 2, 4, 5, 3]</samp> – הימור מפסיד (3 טעויות)</li> <li><samp dir="ltr">[5, 3, 2, 4, 6, 1]</samp> – הימור מפסיד (5 טעויות)</li> <li><samp dir="ltr">[6, 5, 4, 3, 2, 1]</samp> – הימור מפסיד (6 טעויות)</li> </ul> ### <span style="text-align: right; direction: rtl; float: right;">מבנים מורכבים</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> הרשימה הבאה מכילה tuple־ים בגודל 2 איברים: </p> ``` words = [('star', 'rats'), ('wolf', 'flow'), ('racecar', 'racecar'), ('ekans', 'snake')] ``` <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;"> <div style="display: flex; width: 10%; float: right; clear: both;"> <img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול"> </div> <div style="width: 70%"> <p style="text-align: right; direction: rtl; float: right; clear: both;"> כתבו לולאת <code>for</code> שתדפיס עבור כל זוג מחרוזות ברשימה: <samp dir="ltr" style="direction: ltr">Flip "X" to get "Y"</samp>.<br> לדוגמה, עבור הזוג האחרון מתוך 4 הזוגות, היא תדפיס: <samp dir="ltr" style="direction: ltr">Flip "ekans" to get "snake"</samp>. 
</p> </div> <div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;"> <p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;"> <strong>חשוב!</strong><br> פתרו לפני שתמשיכו! </p> </div> </div> <p style="text-align: right; direction: rtl; float: right; clear: both;"> כדי לפתור את התרגיל, כתבתם קוד שהשתמש במיקום של הערך הראשון (0) ושל הערך השני (1).<br> כך אני פתרתי אותו: </p> ``` for word_and_reversed_word in words: word = word_and_reversed_word[0] reversed_word = word_and_reversed_word[1] print(f'Flip "{word}" to get "{reversed_word}".') ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> נבחן דרך נוספת לפתור את התרגיל, רק שהפעם נשתמש בטריק שנקרא <dfn>unpacking</dfn> (או "<dfn>פירוק</dfn>").<br> כיוון שכל tuple ברשימת <var>words</var> מכיל בדיוק 2 איברים, נוכל לתת להם שמות כבר בראש הלולאה ולחלץ אותם מה־tuple: </p> ``` for word, reversed_word in words: print(f'Flip "{word}" to get "{reversed_word}".') ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> בכל חזרור של הלולאה שכתובה למעלה, ה־<code>for</code> יעבור על tuple בודד מתוך <var>words</var>, בדיוק כמו שקרה עד עכשיו.<br> השינוי הוא שבמקום שה־tuple יישמר כמו שהוא במשתנה בודד שהוגדר בראש הלולאה, הערכים שבו "יחולצו" למשתנים בראש הלולאה.<br> הטריק הזה עובד כיוון שבראש הלולאה כתבנו מספר משתנים שזהה למספר הערכים שנמצאים בכל tuple. </p> <p style="text-align: right; direction: rtl; float: right; clear: both;"> לפניכם תרשים המתאר את תוכן המשתנה <var>words</var>.<br> הטבלה הגדולה מייצגת את הרשימה <var>words</var>, וכל אחד מהתאים שבה מייצג tuple בתוך הרשימה.<br> בכל חזרור של ה־<code>for</code> המופיע למעלה, המשתנה <var>word</var> יקבל ערך שמסומן באדום, והמשתנה <var>reversed_word</var> יקבל את הערך הירוק התואם לו. 
</p> <table style="font-size: 1.5rem; border: 0px solid black; border-spacing: 0px;"> <caption style="direction: rtl; text-align: center;">תצוגה של המשתנה <var>words</var> ושל צורת הפירוק שלו</caption> <tr> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-bottom: 1px solid;">0</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">1</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">3</td> </tr> <tbody> <tr> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;"> <table style="font-size: 1.5rem; border: 0px solid black; border-spacing: 0px;"> <tr> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-bottom: 1px solid;">0</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">1</td> </tr> <tbody> <tr> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: #FF8578;">"star"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: #98FB98;">"rats"</td> </tr> <tr style="background: #f5f5f5;"> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: 
#777; text-align: right;">-2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-1</td> </tr> </tbody> </table> </td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;"> <table style="font-size: 1.5rem; border: 0px solid black; border-spacing: 0px;"> <tr> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-bottom: 1px solid;">0</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">1</td> </tr> <tbody> <tr> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: #FF8578;">"wolf"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: #98FB98;">"flow"</td> </tr> <tr style="background: #f5f5f5;"> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right;">-2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-1</td> </tr> </tbody> </table> </td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;"> <table style="font-size: 1.5rem; border: 0px solid black; border-spacing: 0px;"> <tr> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-bottom: 1px solid;">0</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: 
left; border-left: 1px solid #555555; border-bottom: 1px solid;">1</td> </tr> <tbody> <tr> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: #FF8578;">"racecar"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: #98FB98;">"racecar"</td> </tr> <tr style="background: #f5f5f5;"> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right;">-2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-1</td> </tr> </tbody> </table> </td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid;"> <table style="font-size: 1.5rem; border: 0px solid black; border-spacing: 0px;"> <tr> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-bottom: 1px solid;">0</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: left; border-left: 1px solid #555555; border-bottom: 1px solid;">1</td> </tr> <tbody> <tr> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: #FF8578;">"ekans"</td> <td style="padding-top: 8px; padding-bottom: 8px; padding-left: 10px; padding-right: 10px; vertical-align: bottom; border: 2px solid; background-color: #98FB98;">"snake"</td> </tr> <tr style="background: #f5f5f5;"> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right;">-2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: 
#777; text-align: right; border-left: 1px solid #555555;">-1</td> </tr> </tbody> </table> </td> </tr> <tr style="background: #f5f5f5;"> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right;">-4</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-3</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-2</td> <td style="padding-left: 4px; padding-top: 2px; padding-bottom: 3px; font-size: 1.3rem; color: #777; text-align: right; border-left: 1px solid #555555;">-1</td> </tr> </tbody> </table> ### <span style="text-align: right; direction: rtl; float: right;">שינויים בתוך הלולאה</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> בדרך כלל נעדיף להימנע משינוי רשימה בזמן ביצוע לולאת <code>for</code>.<br> שינויים כאלו עלולים לגרום להתנהגות בלתי צפויה, ללולאות אין־סופיות ולקוד קשה במיוחד לקריאה. </p> #### <span style="text-align: right; direction: rtl; float: right;">שינוי של מספר האיברים ברשימה בזמן ריצת הלולאה</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> נבחן את קטע הקוד הבא, שעבור כל איבר ברשימה, מוציא איבר מסוף הרשימה: </p> ``` numbers = ['a', 'b', 'c', 'd', 'e'] print(f"The reader expects {len(numbers)} iterations.") for i in numbers: j = numbers.pop() print(i, j) ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> הלולאה הסתיימה מוקדם מהרגיל, כיוון שכשניסתה להגיע לתא שערכו <code>'d'</code> הוא כבר לא היה שם.<br> קוד שכזה אינו צפוי, קשה לקריאה ויוצר תקלים. מומלץ מאוד לא לשנות את מספר האיברים ב־iterable בזמן הריצה. 
</p> <p style="text-align: right; direction: rtl; float: right; clear: both;"> פתרון אפשרי הוא ליצור עותק של הרשימה באמצעות הפעולה <code dir="ltr" style="direction: ltr;">list.copy()</code> ולהשתמש בו במקום: </p> ``` numbers = ['a', 'b', 'c', 'd', 'e'] numbers_to_pop = numbers.copy() print(f"The reader expects {len(numbers)} iterations.") for i in numbers: j = numbers_to_pop.pop() print(i, j) ``` #### <span style="text-align: right; direction: rtl; float: right;">עריכת הערכים שנמצאים ב־iterable</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> ננסה להכפיל כל תא ברשימה שלנו פי 2: </p> ``` numbers = [1, 3, 5] print(f'This code will multiply every item in {numbers} by 2.') print(f'The user expects:') print(f'[{numbers[0] * 2}, {numbers[1] * 2}, {numbers[2] * 2}]') for num in numbers: num = num * 2 print("The final result:") print(numbers) ``` <p style="text-align: right; direction: rtl; float: right; clear: both;"> נוכל לראות שהרשימה נותרה ללא שינוי, למרות הלולאה שתכליתה היה להכפיל את איברי הרשימה פי 2. </p> <p style="text-align: right; direction: rtl; float: right; clear: both;"> כדי להבין למה זה קרה, ננסה להיזכר בשיעור על mutability.<br> במהלך כל חזרור, המשתנה <var>num</var> מקבל ערך כלשהו <strong>להצביע</strong> עליו.<br> לדוגמה, בחזרור הראשון <var>num</var> <strong>מצביע</strong> על <code>numbers[0]</code>, המקום הראשון ברשימה: </p> <img src="images/mutability1.svg" style="max-width:100%; margin-right: auto; margin-left: auto; text-align: center;" alt="משתנה בשם numbers מצביע על רשימת מספרים שבה יש את האיברים 1, 3 ו־5. מתחתיו יש עוד משתנה שמצביע לאיבר הראשון בתוך הרשימה, 1."/> <p style="text-align: right; direction: rtl; float: right; clear: both;"> בעצם הפעולה <code>num * 2</code> אנחנו יוצרים ערך חדש שמאוחסן בכתובת שונה.<br> </p> <img src="images/mutability2.svg" style="max-width:100%; margin-right: auto; margin-left: auto; text-align: center;" alt="משתנה בשם numbers מצביע על רשימת מספרים שבה יש את האיברים 1, 3 ו־5. 
מתחתיו יש עוד משתנה שמצביע לאיבר הראשון בתוך הרשימה, 1. נוסף ערך חדש על המסך, 2, שעליו לא מצביע אף משתנה. לידו כתוב: 'התוצאה של num * 2:'"/> <p style="text-align: right; direction: rtl; float: right; clear: both;"> הפעולה <code>num = num * 2</code> לא באמת "תשנה ערך בתוך <var>num</var>", אלא תגרום לו להצביע על ערך אחר. </p> <img src="images/mutability3.svg" style="max-width:100%; margin-right: auto; margin-left: auto; text-align: center;" alt="משתנה בשם numbers מצביע על רשימת מספרים שבה יש את האיברים 1, 3 ו־5. מתחתיו יש עוד משתנה שמצביע לאיבר הראשון בתוך הרשימה, 1. הפעם num מצביע על הערך 2 שנוסף למסך בתמונה הקודמת. הוא כבר לא מצביע על 1."/> <p style="text-align: right; direction: rtl; float: right; clear: both;"> נסכם כך: <mark>המשתנה <var>num</var> הצביע תחילה על מספר שנמצא בתוך הרשימה, ועכשיו הוא מצביע על מספר אחר.</mark><br> ההשמה, כרגיל, שינתה את המקום שעליו המשתנה מצביע, ולא את הערך שהמשתנה מצביע עליו.<br> בקוד שמתואר למעלה, לא שינינו את האיבר בתוך הרשימה. </p> ### <span style="text-align: right; direction: rtl; float: right;">דוגמאות נוספות</span> #### <span style="text-align: right; direction: rtl; float: right;">סכום רשימה</span> ``` def total(numbers): total = 0 for number in numbers: total = total + number return total print(total([1, 2, 3])) ``` #### <span style="text-align: right; direction: rtl; float: right;">ראשי תיבות</span> ``` def acronym(sentence): acronym_word = '' for word in sentence.split(): if len(word) >= 1: acronym_word = acronym_word + word[0] return acronym_word print(acronym('')) ``` #### <span style="text-align: right; direction: rtl; float: right;">סכום איברים חיוביים</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> קלוט מהמשתמש מספרים. אם צריך, המר את הקלט כך שיהיה מסוג רשימה, ובה יהיו מספרים שלמים. סכום את האיברים הגדולים מ־0. 
</p> ``` def to_numbers(strings): numbers = [] for semi_number in strings: if semi_number.isdecimal(): numbers.append(int(semi_number)) return numbers def sum_positives(numbers): total = 0 for number in numbers: if number > 0: total = total + number return total user_numbers = input("Enter numbers seperated by comma: ") stringy_numbers = user_numbers.replace(' ', '').split(',') numbers = to_numbers(stringy_numbers) print(sum_positives(numbers)) ``` #### <span style="text-align: right; direction: rtl; float: right;">7 בום</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> סכום את האיברים שמופיעים בכל מקום שביעי ברשימה. </p> ``` def sum_only_7th_places(numbers): total = 0 for i in numbers[6::7]: total = total + i return total print(sum_only_7th_places([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])) ``` ## <span style="align: right; direction: rtl; float: right; clear: both;">תרגילים</span> ### <span style="align: right; direction: ltr; float: right; clear: both;">אקרוסטיכון</span> <p style="text-align: right; direction: rtl; float: right; clear: both;"> <dfn>אקרוסטיכון</dfn> הוא אמצעי ספרותי שבו משתמשים בכתיבת שירה.<br> בשיר שבו יש אקרוסטיכון, כשנחבר את האות הראשונה בכל השורות, נקבל מסר מסוים.<br> ניקח לדוגמה את שירו של אדגר אלן פו, "אקרוסטיכון", שאותו הוא הקדיש ללטישיה אליזבת' לאנדון ובו מופיע האקרוסטיכון <strong>ELIZABETH</strong>: </p> <blockquote style="border-right: none !important; position: relative; padding-left: 0.5em; padding: 1.5em; line-height: 1.5em; min-height: 2em; border-left: 3px solid #a93226; background-color: #fbe7e6; font-size: 1.4rem; direction: ltr; text-align: left; clear: both;"> <strong>E</strong>lizabeth it is in vain you say<br> <strong>L</strong>ove not — thou sayest it in so sweet a way:<br> <strong>I</strong>n vain those words from thee or L.E.L.<br> <strong>Z</strong>antippe's talents had enforced so well:<br> <strong>A</strong>h! 
if that language from thy heart arise,<br> <strong>B</strong>reath it less gently forth — and veil thine eyes.<br> <strong>E</strong>ndymion, recollect, when Luna tried<br> <strong>T</strong>o cure his love — was cured of all beside —<br> <strong>H</strong>is follie — pride — and passion — for he died.<br> </blockquote> <p style="text-align: right; direction: rtl; float: right; clear: both;"> כתבו פונקציה שמקבלת שיר כמחרוזת.<br> החזירו את האקרוסטיכון שנוצר אם ניקח את האות הראשונה מכל שורה בשיר.<br> </p> ### <span style="align: right; direction: ltr; float: right; clear: both;">שעורה תרבותית</span> <p style="text-align: right; direction: rtl; clear: both;"> שנים רבות חלפו מאז אותם ימי הקיץ בשדות השעורה, והזמרת סיגנט שכחה היכן החביאה את הזהב שלה.<br> בידי סיגנט יש מפה, לפיה היא נמצאת כרגע בנקודה <span dir="ltr">(0, 0)</span> בשדה החיטה.<br> המפה מתארת אילו צעדים היא צריכה לעשות כדי להגיע למטמון.<br> עזרו לסיגנט לחשב: בהינתן שהיא תלך לפי כל הצעדים שמופיעים במפה – באיזו נקודה יימצא המטמון? </p> <p style="text-align: right; direction: rtl; clear: both;"> לדוגמה: עבור הצעדים <code dir="ltr" style="direction: ltr;">[(1, 5), (6, -2), (4, 3)]</code> יוחזר שהמטמון נמצא בנקודה <code dir="ltr" style="direction: ltr;">(11, 6)</code>. </p> <p style="text-align: right; direction: rtl; clear: both;"> <strong>הסבר מורחב</strong>: קבלו רשימה של צעדים המורים לסיגנט כיצד ללכת.<br> כל "צעד" מורכב מזוג מספרים שלמים, שיכולים להיות שליליים – הראשון מסמל כמה צעדים ללכת ימינה, והשני מסמל כמה צעדים ללכת למעלה.<br> אם המספר הראשון שלילי, עליה ללכת את מספר הצעדים הזה שמאלה. אם המספר השני שלילי, עליה ללכת את מספר הצעדים הזה למטה.<br> כתבו פונקציה שמקבלת רשימה של צעדים ומחזירה את מיקום המטמון. </p> ### <span style="align: right; direction: ltr; float: right; clear: both;">גבעת ווטרשיפ</span> <p style="text-align: right; direction: rtl; clear: both;"> בגבעת ווטרשיפ קצב ההתרבות גבוה. 
בכל שנה נוספים עוד ועוד ארנבים לארנבייה.<br> חומש הארנב החליט לנהל מעקב דמוגרפי אחרי הגידול.<br> הוא מעוניין שתבנו לו פונקציה שמקבלת כפרמטר רשימה של מספר הארנבים שנולדו בכל שנה.<br> הפונקציה תחזיר רשימה שבה כל תא מייצג את הכמות הנצברת של הארנבים בארנבייה עד כה.<br> לדוגמה: עבור הרשימה <code dir="ltr" style="direction: ltr;">[1, 2, 3, 4]</code>, הפונקציה תחזיר <code dir="ltr" style="direction: ltr;">[1, 3, 6, 10]</code>. </p>
github_jupyter
``` from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import tensorflow as tf import tflearn import numpy as np from sklearn.model_selection import train_test_split import drqn import student as st import data_generator as dg import concept_dependency_graph as cdg from experience_buffer import ExperienceBuffer import dataset_utils as d_utils import utils import models_dict_utils from drqn_tests import * %load_ext autoreload %autoreload 2 %reload_ext autoreload ``` # General Workflow ## 1. Create Data Set ``` n_concepts = 4 use_student2 = True student2_str = '2' if use_student2 else '' learn_prob = 0.15 lp_str = '-lp{}'.format(int(learn_prob*100)) if not use_student2 else '' n_students = 100000 seqlen = 7 filter_mastery = False filter_str = '' if not filter_mastery else '-filtered' policy = 'expert' filename = 'test{}-n{}-l{}{}-{}{}.pickle'.format(student2_str, n_students, seqlen, lp_str, policy, filter_str) ``` #### only run the next two cells if dataset hasn't been created yet ``` #concept_tree = sm.create_custom_dependency() concept_tree = cdg.ConceptDependencyGraph() concept_tree.init_default_tree(n_concepts) if not use_student2: test_student = st.Student(n=n_concepts,p_trans_satisfied=learn_prob, p_trans_not_satisfied=0.0, p_get_ex_correct_if_concepts_learned=1.0) else: test_student = st.Student2(n_concepts) print(filename) print ("Initializing synthetic data sets...") dg.generate_data(concept_tree, student=test_student, n_students=n_students, filter_mastery=filter_mastery, seqlen=seqlen, policy=policy, filename="{}{}".format(dg.SYN_DATA_DIR, filename)) print ("Data generation completed. 
") data = d_utils.load_data(filename="../synthetic_data/{}".format(filename)) dqn_data = d_utils.preprocess_data_for_dqn(data, reward_model="semisparse") dqn_data_train, dqn_data_test = train_test_split(dqn_data, test_size=0.2) # Creating training and validation data train_buffer = ExperienceBuffer() train_buffer.buffer = dqn_data_train train_buffer.buffer_sz = len(train_buffer.buffer) val_buffer = ExperienceBuffer() val_buffer.buffer = dqn_data_test val_buffer.buffer_sz = len(val_buffer.buffer) print (train_buffer.sample(1)) ``` ## 2. Create Model and Train ``` model_id = "test2_model_drqn_mid_expert" model = drqn.DRQNModel(model_id, timesteps=seqlen-1) model.init_trainer() # train the model (uses the previously initialized trainer object) date_time_string = datetime.datetime.now().strftime("%m-%d-%Y_%H-%M-%S") run_id = "{}".format(date_time_string) model.train(train_buffer, val_buffer, n_epoch=60, run_id=run_id, load_checkpoint=True) test_drqn(model_id=model_id, DEBUG=True) model_id = "test2_model_drqn_mid" model = drqn.DRQNModel(model_id, timesteps=seqlen-1) model.init_trainer() # train the model (uses the previously initialized trainer object) date_time_string = datetime.datetime.now().strftime("%m-%d-%Y_%H-%M-%S") run_id = "{}".format(date_time_string) model.train(train_buffer, val_buffer, n_epoch=1, run_id=run_id, load_checkpoint=True) a = np.array([[1,2,3], [0,5,6]]) np.argmax(a, axis=1) ```
github_jupyter
# Image Compression and Decompression ## Downloading the data and preprocessing it ``` from keras.datasets import mnist import numpy as np (x_train, _), (x_test, _) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. print(x_train.shape,x_test.shape) x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) x_train.shape,x_test.shape ``` ## Visualising training data image ``` from matplotlib import pyplot as plt import numpy as np first_image = x_train[0] first_image = np.array(first_image, dtype='float') pixels = first_image.reshape((28, 28)) plt.imshow(pixels, cmap='gray') plt.show() ``` ## Creating the Autoencoder ``` import keras from keras import layers input_img = keras.Input(shape=(28, 28, 1)) x = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(input_img) x = layers.MaxPooling2D((2, 2), padding='same')(x) x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x) x = layers.MaxPooling2D((2, 2), padding='same')(x) x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x) encoded = layers.MaxPooling2D((2, 2), padding='same')(x) # at this point the representation is (4, 4, 8) i.e. 
128-dimensional x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(encoded) x = layers.UpSampling2D((2, 2))(x) x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x) x = layers.UpSampling2D((2, 2))(x) x = layers.Conv2D(16, (3, 3), activation='relu')(x) x = layers.UpSampling2D((2, 2))(x) decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x) autoencoder = keras.Model(input_img, decoded) autoencoder.compile(optimizer='adam', loss='binary_crossentropy') autoencoder.summary() from keras.utils import plot_model plot_model(autoencoder, to_file='model_plot.png', show_shapes=True, show_layer_names=True) ``` ## Training the autoencoder ``` history = autoencoder.fit(x_train, x_train, epochs=5, batch_size=128, shuffle=True, validation_data=(x_test, x_test)) autoencoder.save("autoencoder.h5") from keras.models import load_model autoencoder=load_model("autoencoder.h5") ``` ## Testing the trained model and comparing it with the original data ``` decoded_imgs = autoencoder.predict(x_test) n = 10 plt.figure(figsize=(20, 4)) for i in range(1, n + 1): # Display original ax = plt.subplot(2, n, i) plt.imshow(x_test[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # Display reconstruction ax = plt.subplot(2, n, i + n) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ``` ## Visualising the states of a image through the autoencoder ``` from tensorflow.keras import Sequential import tensorflow as tf #encoder model model=tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16,(3,3),activation ='relu', input_shape=(28,28,1)), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(8,(3,3),activation ='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(8,(3,3),activation ='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Flatten(), ]) def visualize(img,encoder): code = 
encoder.predict(img[None])[0] # Display original plt.title("Original Image") plt.imshow(x_test[0].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() #Display compressed plt.subplot(1,3,2) plt.title("Compressed Image") plt.imshow(code.reshape([code.shape[-1]//2,-1])) plt.show() # Display reconstruction plt.title("Decompressed Image") plt.imshow(decoded_imgs[0].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() visualize(x_test[0],model) ``` ## Analysing the loss wrt epoch ``` plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() ``` # Denoising model for the Decompressed Image ## Adding noise to the train and test data ``` # Adding random noise to the images noise_factor = 0.5 x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape) x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape) x_train_noisy = np.clip(x_train_noisy, 0., 1.) x_test_noisy = np.clip(x_test_noisy, 0., 1.) 
``` ## Visualising the training data ``` n = 10 plt.figure(figsize=(20, 2)) for i in range(1, n + 1): ax = plt.subplot(1, n, i) plt.imshow(x_test_noisy[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) print("Training Data:") plt.show() ``` ## Creating the encoder model ``` input_img = keras.Input(shape=(28, 28, 1)) x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(input_img) x = layers.MaxPooling2D((2, 2), padding='same')(x) x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x) encoded = layers.MaxPooling2D((2, 2), padding='same')(x) # At this point the representation is (7, 7, 32) x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(encoded) x = layers.UpSampling2D((2, 2))(x) x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x) x = layers.UpSampling2D((2, 2))(x) decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x) autoencoder = keras.Model(input_img, decoded) autoencoder.compile(optimizer='adam', loss='binary_crossentropy') ``` ## Training the model ``` history2 = autoencoder.fit(x_train_noisy, x_train, epochs=50, batch_size=128, shuffle=True, validation_data=(x_test_noisy, x_test)) from keras import models autoencoder = models.load_model('denoising_model.h5') ``` ## Visualising the results of denoising the decompressed data ``` denoised_imgs = autoencoder.predict(decoded_imgs) n = 10 plt.figure(figsize=(20, 4)) for i in range(1, n + 1): # Display original ax = plt.subplot(2, n, i) plt.imshow(decoded_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # Display reconstruction ax = plt.subplot(2, n, i + n) plt.imshow(denoised_imgs[i].reshape(28, 28)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ``` ## Analysing the loss wrt epoch ``` plt.plot(history2.history['loss']) plt.plot(history2.history['val_loss']) plt.title('model loss') 
plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() ``` ## Quality Metrics - PSNR ``` from math import log10, sqrt import cv2 import numpy as np def PSNR(original, compressed): mse = np.mean((original - decompressed) ** 2) if(mse == 0): # MSE is zero means no noise is present in the signal . # Therefore PSNR have no importance. return 100 max_pixel = 255.0 psnr = 20 * log10(max_pixel / sqrt(mse)) return psnr psnr=0 for i in range(0,50): original = x_test[i].reshape(28, 28) decompressed =denoised_imgs[i].reshape(28,28) value = PSNR(original, decompressed) psnr+=value psnr=psnr/50 print(f"PSNR value is {psnr} dB") ```
github_jupyter
# Publications markdown generator for academicpages Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `publications.py`. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one containing your data. TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style. ## Data format The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top. - `excerpt` and `paper_url` can be blank, but the others must have values. - `pub_date` must be formatted as YYYY-MM-DD. - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]` This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create). ``` !cat publications.csv ``` ## Import pandas We are using the very handy pandas library for dataframes. ``` import pandas as pd ``` ## Import TSV Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`. I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others. 
``` publications = pd.read_csv("publications.csv", sep=",", header=0) publications ``` ## Escape special characters YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely. ``` html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;" } def html_escape(text): """Produce entities within text.""" return "".join(html_escape_table.get(c,c) for c in text) ``` ## Creating the markdown files This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. ``` import os for row, item in publications.iterrows(): print(item.pub_date) md_filename = str(item.pub_date) + "-" + item.url_slug + ".md" html_filename = str(item.pub_date) + "-" + item.url_slug year = str(item.pub_date)[:4] ## YAML variables md = "---\ntitle: \"" + item.title + '"\n' md += """collection: publications""" md += """\npermalink: /publication/""" + html_filename # if len(str(item.excerpt)) > 5: # md += "\nexcerpt: '" + html_escape(item.excerpt) + "'" md += "\ndate: " + str(item.pub_date) md += "\nvenue: '" + html_escape(item.venue) + "'" if len(str(item.paper_url)) > 5: md += "\npaperurl: '" + item.paper_url + "'" # md += "\ncitation: '" + html_escape(item.citation) + "'" md += "\nauthors: '" + item.authors + "'" md += "\n---" ## Markdown description for individual page # if len(str(item.excerpt)) > 5: # md += "\n" + html_escape(item.excerpt) + "\n" # if len(str(item.paper_url)) > 5: # md += "\n[Download paper here](" + item.paper_url + ")\n" # md += "\nRecommended citation: " + item.citation md_filename = os.path.basename(md_filename) with open("../_publications/" + md_filename, 'w') as f: f.write(md) ``` 
These files are written to the `_publications` directory, which sits one level up from the folder we are running in (hence the `../_publications/` paths below).

```
!ls ../_publications/
!cat ../_publications/2018-09-01-icde.md
```
github_jupyter
# Causal Discovery using a Perfect Oracle

A perfect oracle is a conditional independence (CI) test that always yields the true answer. For that, the oracle requires access to the true underlying graph from which it can read off the true conditional independence relation. Although this is impractical, access to the true graph is granted in a simulated environment for evaluating the number of CI tests required by an algorithm in the large sample limit.

To demonstrate how to use a perfect oracle, we follow these steps:

1. Create a true underlying DAG
2. Learn a PAG using ICD and a perfect oracle
3. Plot the results

Let's start by importing the required classes and methods.

```
import sys
sys.path.append('..')

from causal_discovery_algs import LearnStructICD  # import ICD algorithm
from causal_discovery_utils.cond_indep_tests import DSep  # import the perfect oracle
from graphical_models import DAG  # import a DAG class
from plot_utils import draw_graph  # for plotting the graphs
```

Next, let's create the true underlying DAG, a graph with 7 nodes, and define which nodes are observed.

```
nodes_of_dag = set(range(7))
dag = DAG(nodes_of_dag)
dag.add_edges(parents_set={5, 0}, target_node=1)
dag.add_edges(parents_set={6, 0}, target_node=2)
dag.add_edges(parents_set={5, 2}, target_node=3)
dag.add_edges(parents_set={6, 1}, target_node=4)

observed_set = {0, 1, 2, 3, 4}  # latents set is {5, 6}
fig_dag = draw_graph(dag, latent_nodes=nodes_of_dag - observed_set)  # plot the DAG
```

Now, instantiate a perfect oracle, set it to count the performed CI tests, and turn on caching.

```
ci_test_dsep = DSep(true_dag=dag,  # the DAG from which to read-off the true conditional independence relations
                    count_tests=True,  # count CI tests per conditioning set size
                    use_cache=True)  # make sure the same CI test is not performed and counted more than once
```

Then, learn the causal structure using the perfect oracle.
``` icd = LearnStructICD(nodes_set=observed_set, ci_test=ci_test_dsep) # instantiate ICD icd.learn_structure() # learn the structure fig_pag = draw_graph(icd.graph) # plot the PAG ``` Finally, let's plot the number of CI tests per conditioning set size. ``` num_ci_tests = ci_test_dsep.test_counter str_cs_size = 'Condition set size: ' str_num_cit = 'Number of CI tests: ' max_str_len = len(str(max(num_ci_tests))) + 3 for cs_size, num_cit in enumerate(num_ci_tests): str_cs_size += str(cs_size).ljust(max_str_len) str_num_cit += str(num_cit).ljust(max_str_len) print('Number of estimated CI tests') print('----------------------------') print(str_cs_size) print(str_num_cit) print('Total number of CI tests: ', sum(num_ci_tests)) ```
github_jupyter
``` import pandas as pd import pyspark.sql.functions as F from datetime import datetime from pyspark.sql.types import * from pyspark import StorageLevel import numpy as np pd.set_option("display.max_rows", 1000) pd.set_option("display.max_columns", 1000) pd.set_option("mode.chained_assignment", None) from pyspark.ml import Pipeline from pyspark.ml.classification import GBTClassifier from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer # from pyspark.ml.evaluation import MulticlassClassificationEvaluator from pyspark.ml.feature import OneHotEncoderEstimator, StringIndexer, VectorAssembler from pyspark.ml.evaluation import BinaryClassificationEvaluator from pyspark.ml.tuning import CrossValidator, ParamGridBuilder from pyspark.sql import Row from pyspark.ml.linalg import Vectors # !pip install scikit-plot import sklearn import scikitplot as skplt from sklearn.metrics import classification_report, confusion_matrix, precision_score ``` <hr /> <hr /> <hr /> ``` result_schema = StructType([ StructField('experiment_filter', StringType(), True), StructField('undersampling_method', StringType(), True), StructField('undersampling_column', StringType(), True), StructField('filename', StringType(), True), StructField('experiment_id', StringType(), True), StructField('n_covid', IntegerType(), True), StructField('n_not_covid', IntegerType(), True), StructField('model_name', StringType(), True), StructField('model_seed', StringType(), True), StructField('model_maxIter', IntegerType(), True), StructField('model_maxDepth', IntegerType(), True), StructField('model_maxBins', IntegerType(), True), StructField('model_minInstancesPerNode', IntegerType(), True), StructField('model_minInfoGain', FloatType(), True), StructField('model_featureSubsetStrategy', StringType(), True), StructField('model_n_estimators', IntegerType(), True), StructField('model_learning_rate', FloatType(), True), StructField('model_impurity', StringType(), True), StructField('model_AUC_ROC', 
StringType(), True), StructField('model_AUC_PR', StringType(), True), StructField('model_covid_precision', StringType(), True), StructField('model_covid_recall', StringType(), True), StructField('model_covid_f1', StringType(), True), StructField('model_not_covid_precision', StringType(), True), StructField('model_not_covid_recall', StringType(), True), StructField('model_not_covid_f1', StringType(), True), StructField('model_avg_precision', StringType(), True), StructField('model_avg_recall', StringType(), True), StructField('model_avg_f1', StringType(), True), StructField('model_avg_acc', StringType(), True), StructField('model_TP', StringType(), True), StructField('model_TN', StringType(), True), StructField('model_FN', StringType(), True), StructField('model_FP', StringType(), True), StructField('model_time_exec', StringType(), True), StructField('model_col_set', StringType(), True) ]) ``` <hr /> <hr /> <hr /> ``` # undersamp_col = ['03-STRSAMP-AG', '04-STRSAMP-EW'] # dfs = ['ds-1', 'ds-2', 'ds-3'] # cols_sets = ['cols_set_1', 'cols_set_2', 'cols_set_3'] undersamp_col = ['02-KMODES'] dfs = ['ds-1'] cols_sets = ['cols_set_1'] # lists of params model_maxIter = [20, 50, 100] model_maxDepth = [3, 5, 7] model_maxBins = [32, 64] # model_learningRate = [0.01, 0.1, 0.5] # model_loss = ['logLoss', 'leastSquaresError', 'leastAbsoluteError'] list_of_param_dicts = [] for maxIter in model_maxIter: for maxDepth in model_maxDepth: for maxBins in model_maxBins: params_dict = {} params_dict['maxIter'] = maxIter params_dict['maxDepth'] = maxDepth params_dict['maxBins'] = maxBins list_of_param_dicts.append(params_dict) print("There is {} set of params.".format(len(list_of_param_dicts))) # list_of_param_dicts prefix = 'gs://ai-covid19-datalake/trusted/experiment_map/' ``` <hr /> <hr /> <hr /> ``` # filename = 'gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-1/cols_set_1/experiment0.parquet' # df = spark.read.parquet(filename) # df.limit(2).toPandas() # params_dict 
= {'maxIter': 100, # 'maxDepth': 3, # 'maxBins': 32, # 'learningRate': 0.5, # 'loss': 'leastAbsoluteError'} # cols = 'cols_set_1' # experiment_filter = 'ds-1' # undersampling_method = '03-STRSAMP-AG', # experiment_id = 0 # run_gbt(df, params_dict, cols, filename, experiment_filter, undersampling_method, experiment_id) ``` <hr /> <hr /> <hr /> ``` def run_gbt(exp_df, params_dict, cols, filename, experiment_filter, undersampling_method, experiment_id): import time start_time = time.time() n_covid = exp_df.filter(F.col('CLASSI_FIN') == 1.0).count() n_not_covid = exp_df.filter(F.col('CLASSI_FIN') == 0.0).count() id_cols = ['NU_NOTIFIC', 'CLASSI_FIN'] labelIndexer = StringIndexer(inputCol="CLASSI_FIN", outputCol="indexedLabel").fit(exp_df) input_cols = [x for x in exp_df.columns if x not in id_cols] assembler = VectorAssembler(inputCols = input_cols, outputCol= 'features') exp_df = assembler.transform(exp_df) # Automatically identify categorical features, and index them. # Set maxCategories so features with > 4 distinct values are treated as continuous. featureIndexer = VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=30).fit(exp_df) # Split the data into training and test sets (30% held out for testing) (trainingData, testData) = exp_df.randomSplit([0.7, 0.3]) trainingData = trainingData.persist(StorageLevel.MEMORY_ONLY) testData = testData.persist(StorageLevel.MEMORY_ONLY) # Train a RandomForest model. gbt = GBTClassifier(labelCol = "indexedLabel", featuresCol = "indexedFeatures", maxIter = params_dict['maxIter'], maxDepth = params_dict['maxDepth'], maxBins = params_dict['maxBins']) # Convert indexed labels back to original labels. labelConverter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=labelIndexer.labels) # Chain indexers and forest in a Pipeline pipeline = Pipeline(stages=[labelIndexer, featureIndexer, gbt, labelConverter]) # Train model. This also runs the indexers. 
model = pipeline.fit(trainingData) # Make predictions. predictions = model.transform(testData) pred = predictions.select(['CLASSI_FIN', 'predictedLabel'])\ .withColumn('predictedLabel', F.col('predictedLabel').cast('double'))\ .withColumn('predictedLabel', F.when(F.col('predictedLabel') == 1.0, 'covid').otherwise('n-covid'))\ .withColumn('CLASSI_FIN', F.when(F.col('CLASSI_FIN') == 1.0, 'covid').otherwise('n-covid'))\ .toPandas() y_true = pred['CLASSI_FIN'].tolist() y_pred = pred['predictedLabel'].tolist() report = classification_report(y_true, y_pred, output_dict=True) evaluator_ROC = BinaryClassificationEvaluator(labelCol="indexedLabel", rawPredictionCol="prediction", metricName="areaUnderROC") accuracy_ROC = evaluator_ROC.evaluate(predictions) evaluator_PR = BinaryClassificationEvaluator(labelCol="indexedLabel", rawPredictionCol="prediction", metricName="areaUnderPR") accuracy_PR = evaluator_PR.evaluate(predictions) conf_matrix = confusion_matrix(y_true, y_pred) result_dict = {} result_dict['experiment_filter'] = experiment_filter result_dict['undersampling_method'] = undersampling_method result_dict['filename'] = filename result_dict['experiment_id'] = experiment_id result_dict['n_covid'] = n_covid result_dict['n_not_covid'] = n_not_covid result_dict['model_name'] = 'GBT' result_dict['params'] = params_dict result_dict['model_AUC_ROC'] = accuracy_ROC result_dict['model_AUC_PR'] = accuracy_PR result_dict['model_covid_precision'] = report['covid']['precision'] result_dict['model_covid_recall'] = report['covid']['recall'] result_dict['model_covid_f1'] = report['covid']['f1-score'] result_dict['model_not_covid_precision'] = report['n-covid']['precision'] result_dict['model_not_covid_recall'] = report['n-covid']['recall'] result_dict['model_not_covid_f1'] = report['n-covid']['f1-score'] result_dict['model_avg_precision'] = report['macro avg']['precision'] result_dict['model_avg_recall'] = report['macro avg']['recall'] result_dict['model_avg_f1'] = report['macro 
avg']['f1-score'] result_dict['model_avg_acc'] = report['accuracy'] result_dict['model_TP'] = conf_matrix[0][0] result_dict['model_TN'] = conf_matrix[1][1] result_dict['model_FN'] = conf_matrix[0][1] result_dict['model_FP'] = conf_matrix[1][0] result_dict['model_time_exec'] = time.time() - start_time result_dict['model_col_set'] = cols return result_dict ``` <hr /> <hr /> <hr /> # Running GBT on 10 samples for each experiment ### 3x col sets -> ['cols_set_1', 'cols_set_2', 'cols_set_3'] ### 3x model_maxIter -> [100, 200, 300] ### 3x model_maxDepth -> [5, 10, 15] ### 3x model_maxBins -> [16, 32, 64] Total: 10 * 3 * 3 * 3 * 3 = 810 ``` experiments = [] ``` ### Datasets: strat_samp_lab_agegrp ``` for uc in undersamp_col: for ds in dfs: for col_set in cols_sets: for params_dict in list_of_param_dicts: for id_exp in range(50): filename = prefix + uc + '/' + ds + '/' + col_set + '/' + 'experiment' + str(id_exp) + '.parquet' exp_dataframe = spark.read.parquet(filename) print('read {}'.format(filename)) undersampling_method = uc experiment_filter = ds experiment_id = id_exp try: model = run_gbt(exp_dataframe, params_dict, col_set, filename, experiment_filter, undersampling_method, experiment_id) experiments.append(model) print("Parameters ==> {}\n Results: \n AUC_PR: {} \n Precision: {} \n Time: {}".format(str(params_dict), str(model['model_AUC_PR']), str(model['model_avg_precision']), str(model['model_time_exec']))) print('=========================== \n') except: print('=========== W A R N I N G =========== \n') print('Something wrong with the exp: {}, {}, {}'.format(filename, params_dict, col_set)) ``` <hr /> <hr /> <hr /> ``` for i in range(len(experiments)): for d in list(experiments[i].keys()): experiments[i][d] = str(experiments[i][d]) # experiments cols = ['experiment_filter', 'undersampling_method', 'filename', 'experiment_id', 'n_covid', 'n_not_covid', 'model_name', 'params', 'model_AUC_ROC', 'model_AUC_PR', 'model_covid_precision', 'model_covid_recall', 
'model_covid_f1', 'model_not_covid_precision', 'model_not_covid_recall', 'model_not_covid_f1', 'model_avg_precision', 'model_avg_recall', 'model_avg_f1', 'model_avg_acc', 'model_TP', 'model_TN', 'model_FN', 'model_FP', 'model_time_exec', 'model_col_set'] intermed_results = spark.createDataFrame(data=experiments).select(cols) intermed_results.toPandas() intermed_results.write.parquet('gs://ai-covid19-datalake/trusted/intermed_results/KMODES/GBT_experiments-kmodes-ds1-cs1.parquet', mode='overwrite') df = spark.read.parquet('gs://ai-covid19-datalake/trusted/intermed_results/KMODES/GBT_experiments-kmodes-ds1-cs1.parquet') df.limit(2).toPandas() print('finished') intermed_results.show() ```
github_jupyter
This application demonstrates how to build a simple neural network using the Graph mark. Interactions can be enabled by adding event handlers (click, hover etc) on the nodes of the network. See the [Mark Interactions notebook](../Interactions/Mark Interactions.ipynb) and the [Scatter Notebook](../Marks/Scatter.ipynb) for details.

```
from itertools import chain, product

import numpy as np

from bqplot import *

class NeuralNet(Figure):
    """bqplot Figure that renders a feed-forward network as a Graph mark.

    Required kwargs: num_inputs (int), num_hidden_layers (list of ints,
    one entry per hidden layer), num_outputs (int).
    Optional kwargs: height, width, directed_links, layer_colors.
    """
    def __init__(self, **kwargs):
        self.height = kwargs.get('height', 600)
        self.width = kwargs.get('width', 960)
        self.directed_links = kwargs.get('directed_links', False)

        self.num_inputs = kwargs['num_inputs']
        # A list: one element per hidden layer giving that layer's node count.
        self.num_hidden_layers = kwargs['num_hidden_layers']
        self.nodes_output_layer = kwargs['num_outputs']
        # Default: one color per layer (input + hidden layers + output).
        self.layer_colors = kwargs.get('layer_colors', ['Orange'] * (len(self.num_hidden_layers) + 2))

        # Build the graph mark before Figure.__init__ wires up the traits.
        self.build_net()
        super(NeuralNet, self).__init__(**kwargs)

    def build_net(self):
        """Create node labels, the link matrix, positions, colors and the Graph mark."""
        # create nodes: 'x1..', 'h<layer>,<node>..', 'y1..'
        self.layer_nodes = []
        self.layer_nodes.append(['x' + str(i+1) for i in range(self.num_inputs)])

        for i, h in enumerate(self.num_hidden_layers):
            self.layer_nodes.append(['h' + str(i+1) + ',' + str(j+1) for j in range(h)])
        # NOTE: reuses `i` left over from the loop above for the 'y' labels'
        # layer index; the comprehension's own `i` shadows it inside the brackets.
        self.layer_nodes.append(['y' + str(i+1) for i in range(self.nodes_output_layer)])

        self.flattened_layer_nodes = list(chain(*self.layer_nodes))

        # build link matrix: NaN = no edge, 1 = edge between consecutive layers
        i = 0
        node_indices = {}
        for layer in self.layer_nodes:
            for node in layer:
                node_indices[node] = i
                i += 1

        n = len(self.flattened_layer_nodes)
        self.link_matrix = np.empty((n,n))
        self.link_matrix[:] = np.nan

        # Fully connect each layer to the next one.
        for i in range(len(self.layer_nodes) - 1):
            curr_layer_nodes_indices = [node_indices[d] for d in self.layer_nodes[i]]
            next_layer_nodes = [node_indices[d] for d in self.layer_nodes[i+1]]
            for s, t in product(curr_layer_nodes_indices, next_layer_nodes):
                self.link_matrix[s, t] = 1

        # set node x locations: one evenly spaced column per layer (0-100 scale)
        self.nodes_x = np.repeat(np.linspace(0, 100, len(self.layer_nodes) + 1, endpoint=False)[1:],
                                 [len(n) for n in self.layer_nodes])

        # set node y locations: nodes of each layer evenly spaced top-to-bottom
        self.nodes_y = np.array([])
        for layer in self.layer_nodes:
            n = len(layer)
            ys = np.linspace(0, 100, n+1, endpoint=False)[1:]
            self.nodes_y = np.append(self.nodes_y, ys[::-1])

        # set node colors: one layer color repeated across that layer's nodes
        n_layers = len(self.layer_nodes)
        self.node_colors = np.repeat(np.array(self.layer_colors[:n_layers]),
                                     [len(layer) for layer in self.layer_nodes]).tolist()

        xs = LinearScale(min=0, max=100)
        ys = LinearScale(min=0, max=100)

        self.graph = Graph(node_data=[{'label': d, 'label_display': 'none'} for d in self.flattened_layer_nodes],
                           link_matrix=self.link_matrix, link_type='line',
                           colors=self.node_colors, directed=self.directed_links,
                           scales={'x': xs, 'y': ys}, x=self.nodes_x, y=self.nodes_y,
                           # color=2 * np.random.rand(len(self.flattened_layer_nodes)) - 1
                           )
        # Hover/selection styling for interactive exploration.
        self.graph.hovered_style = {'stroke': '1.5'}
        self.graph.unhovered_style = {'opacity': '0.4'}

        self.graph.selected_style = {'opacity': '1', 'stroke': 'red', 'stroke-width': '2.5'}
        self.marks = [self.graph]
        self.title = 'Neural Network'
        self.layout.width = str(self.width) + 'px'
        self.layout.height = str(self.height) + 'px'

NeuralNet(num_inputs=3, num_hidden_layers=[10, 10, 8, 5], num_outputs=1)
```
github_jupyter
# TF-Slim Walkthrough

This notebook will walk you through the basics of using TF-Slim to define, train and evaluate neural networks on various tasks. It assumes a basic knowledge of neural networks.

## Table of contents

<a href="#Install">Installation and setup</a><br>
<a href='#MLP'>Creating your first neural network with TF-Slim</a><br>
<a href='#ReadingTFSlimDatasets'>Reading Data with TF-Slim</a><br>
<a href='#CNN'>Training a convolutional neural network (CNN)</a><br>
<a href='#Pretrained'>Using pre-trained models</a><br>

## Installation and setup
<a id='Install'></a>

Since the stable release of TF 1.0, the latest version of slim has been available as `tf.contrib.slim`. To test that your installation is working, execute the following command; it should run without raising any errors.

```
python -c "import tensorflow.contrib.slim as slim; eval = slim.evaluation.evaluate_once"
```

Although, to use TF-Slim for image classification (as we do in this notebook), you also have to install the TF-Slim image models library from [here](https://github.com/tensorflow/models/tree/master/research/slim). Let's suppose you install this into a directory called TF_MODELS. Then you should change directory to TF_MODELS/research/slim **before** running this notebook, so that these files are in your python path.

To check that these two steps worked, just execute the cell below. If it complains about unknown modules, restart the notebook after moving to the TF-Slim models directory.
``` from __future__ import absolute_import from __future__ import division from __future__ import print_function import matplotlib %matplotlib inline import matplotlib.pyplot as plt import math import numpy as np import tensorflow.compat.v1 as tf import time from datasets import dataset_utils # Main slim library import tf_slim as slim ``` ## Creating your first neural network with TF-Slim <a id='MLP'></a> Below we give some code to create a simple multilayer perceptron (MLP) which can be used for regression problems. The model has 2 hidden layers. The output is a single node. When this function is called, it will create various nodes, and silently add them to whichever global TF graph is currently in scope. When a node which corresponds to a layer with adjustable parameters (eg., a fully connected layer) is created, additional parameter variable nodes are silently created, and added to the graph. (We will discuss how to train the parameters later.) We use variable scope to put all the nodes under a common name, so that the graph has some hierarchical structure. This is useful when we want to visualize the TF graph in tensorboard, or if we want to query related variables. The fully connected layers all use the same L2 weight decay and ReLu activations, as specified by **arg_scope**. (However, the final layer overrides these defaults, and uses an identity activation function.) We also illustrate how to add a dropout layer after the first fully connected layer (FC1). Note that at test time, we do not drop out nodes, but instead use the average activations; hence we need to know whether the model is being constructed for training or testing, since the computational graph will be different in the two cases (although the variables, storing the model parameters, will be shared, since they have the same name/scope). ``` def regression_model(inputs, is_training=True, scope="deep_regression"): """Creates the regression model. 
Args: inputs: A node that yields a `Tensor` of size [batch_size, dimensions]. is_training: Whether or not we're currently training the model. scope: An optional variable_op scope for the model. Returns: predictions: 1-D `Tensor` of shape [batch_size] of responses. end_points: A dict of end points representing the hidden layers. """ with tf.variable_scope(scope, 'deep_regression', [inputs]): end_points = {} # Set the default weight _regularizer and acvitation for each fully_connected layer. with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(0.01)): # Creates a fully connected layer from the inputs with 32 hidden units. net = slim.fully_connected(inputs, 32, scope='fc1') end_points['fc1'] = net # Adds a dropout layer to prevent over-fitting. net = slim.dropout(net, 0.8, is_training=is_training) # Adds another fully connected layer with 16 hidden units. net = slim.fully_connected(net, 16, scope='fc2') end_points['fc2'] = net # Creates a fully-connected layer with a single hidden unit. Note that the # layer is made linear by setting activation_fn=None. predictions = slim.fully_connected(net, 1, activation_fn=None, scope='prediction') end_points['out'] = predictions return predictions, end_points ``` ### Let's create the model and examine its structure. We create a TF graph and call regression_model(), which adds nodes (tensors) to the graph. We then examine their shape, and print the names of all the model variables which have been implicitly created inside of each layer. We see that the names of the variables follow the scopes that we specified. ``` with tf.Graph().as_default(): # Dummy placeholders for arbitrary number of 1d inputs and outputs inputs = tf.placeholder(tf.float32, shape=(None, 1)) outputs = tf.placeholder(tf.float32, shape=(None, 1)) # Build model predictions, end_points = regression_model(inputs) # Print name and shape of each tensor. 
print("Layers") for k, v in end_points.items(): print('name = {}, shape = {}'.format(v.name, v.get_shape())) # Print name and shape of parameter nodes (values not yet initialized) print("\n") print("Parameters") for v in slim.get_model_variables(): print('name = {}, shape = {}'.format(v.name, v.get_shape())) ``` ### Let's create some 1d regression data . We will train and test the model on some noisy observations of a nonlinear function. ``` def produce_batch(batch_size, noise=0.3): xs = np.random.random(size=[batch_size, 1]) * 10 ys = np.sin(xs) + 5 + np.random.normal(size=[batch_size, 1], scale=noise) return [xs.astype(np.float32), ys.astype(np.float32)] x_train, y_train = produce_batch(200) x_test, y_test = produce_batch(200) plt.scatter(x_train, y_train) ``` ### Let's fit the model to the data The user has to specify the loss function and the optimizer, and slim does the rest. In particular, the slim.learning.train function does the following: - For each iteration, evaluate the train_op, which updates the parameters using the optimizer applied to the current minibatch. Also, update the global_step. - Occasionally store the model checkpoint in the specified directory. This is useful in case your machine crashes - then you can simply restart from the specified checkpoint. ``` def convert_data_to_tensors(x, y): inputs = tf.constant(x) inputs.set_shape([None, 1]) outputs = tf.constant(y) outputs.set_shape([None, 1]) return inputs, outputs # The following snippet trains the regression model using a mean_squared_error loss. ckpt_dir = '/tmp/regression_model/' with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.INFO) inputs, targets = convert_data_to_tensors(x_train, y_train) # Make the model. predictions, nodes = regression_model(inputs, is_training=True) # Add the loss function to the graph. loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions) # The total loss is the user's loss plus any regularization losses. 
total_loss = slim.losses.get_total_loss() # Specify the optimizer and create the train op: optimizer = tf.train.AdamOptimizer(learning_rate=0.005) train_op = slim.learning.create_train_op(total_loss, optimizer) # Run the training inside a session. final_loss = slim.learning.train( train_op, logdir=ckpt_dir, number_of_steps=5000, save_summaries_secs=5, log_every_n_steps=500) print("Finished training. Last batch loss:", final_loss) print("Checkpoint saved in %s" % ckpt_dir) ``` ### Training with multiple loss functions. Sometimes we have multiple objectives we want to simultaneously optimize. In slim, it is easy to add more losses, as we show below. (We do not optimize the total loss in this example, but we show how to compute it.) ``` with tf.Graph().as_default(): inputs, targets = convert_data_to_tensors(x_train, y_train) predictions, end_points = regression_model(inputs, is_training=True) # Add multiple loss nodes. mean_squared_error_loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions) absolute_difference_loss = slim.losses.absolute_difference(predictions, targets) # The following two ways to compute the total loss are equivalent regularization_loss = tf.add_n(slim.losses.get_regularization_losses()) total_loss1 = mean_squared_error_loss + absolute_difference_loss + regularization_loss # Regularization Loss is included in the total loss by default. # This is good for training, but not for testing. total_loss2 = slim.losses.get_total_loss(add_regularization_losses=True) init_op = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init_op) # Will initialize the parameters with random weights. 
total_loss1, total_loss2 = sess.run([total_loss1, total_loss2]) print('Total Loss1: %f' % total_loss1) print('Total Loss2: %f' % total_loss2) print('Regularization Losses:') for loss in slim.losses.get_regularization_losses(): print(loss) print('Loss Functions:') for loss in slim.losses.get_losses(): print(loss) ``` ### Let's load the saved model and use it for prediction. ``` with tf.Graph().as_default(): inputs, targets = convert_data_to_tensors(x_test, y_test) # Create the model structure. (Parameters will be loaded below.) predictions, end_points = regression_model(inputs, is_training=False) # Make a session which restores the old parameters from a checkpoint. sv = tf.train.Supervisor(logdir=ckpt_dir) with sv.managed_session() as sess: inputs, predictions, targets = sess.run([inputs, predictions, targets]) plt.scatter(inputs, targets, c='r'); plt.scatter(inputs, predictions, c='b'); plt.title('red=true, blue=predicted') ``` ### Let's compute various evaluation metrics on the test set. In TF-Slim termiology, losses are optimized, but metrics (which may not be differentiable, e.g., precision and recall) are just measured. As an illustration, the code below computes mean squared error and mean absolute error metrics on the test set. Each metric declaration creates several local variables (which must be initialized via tf.initialize_local_variables()) and returns both a value_op and an update_op. When evaluated, the value_op returns the current value of the metric. The update_op loads a new batch of data, runs the model, obtains the predictions and accumulates the metric statistics appropriately before returning the current value of the metric. We store these value nodes and update nodes in 2 dictionaries. After creating the metric nodes, we can pass them to slim.evaluation.evaluation, which repeatedly evaluates these nodes the specified number of times. 
(This allows us to compute the evaluation in a streaming fashion across minibatches, which is usefulf for large datasets.) Finally, we print the final value of each metric. ``` with tf.Graph().as_default(): inputs, targets = convert_data_to_tensors(x_test, y_test) predictions, end_points = regression_model(inputs, is_training=False) # Specify metrics to evaluate: names_to_value_nodes, names_to_update_nodes = slim.metrics.aggregate_metric_map({ 'Mean Squared Error': slim.metrics.streaming_mean_squared_error(predictions, targets), 'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(predictions, targets) }) # Make a session which restores the old graph parameters, and then run eval. sv = tf.train.Supervisor(logdir=ckpt_dir) with sv.managed_session() as sess: metric_values = slim.evaluation.evaluation( sess, num_evals=1, # Single pass over data eval_op=names_to_update_nodes.values(), final_op=names_to_value_nodes.values()) names_to_values = dict(zip(names_to_value_nodes.keys(), metric_values)) for key, value in names_to_values.items(): print('%s: %f' % (key, value)) ``` # Reading Data with TF-Slim <a id='ReadingTFSlimDatasets'></a> Reading data with TF-Slim has two main components: A [Dataset](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/dataset.py) and a [DatasetDataProvider](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py). The former is a descriptor of a dataset, while the latter performs the actions necessary for actually reading the data. Lets look at each one in detail: ## Dataset A TF-Slim [Dataset](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/dataset.py) contains descriptive information about a dataset necessary for reading it, such as the list of data files and how to decode them. 
It also contains metadata including class labels, the size of the train/test splits and descriptions of the tensors that the dataset provides. For example, some datasets contain images with labels. Others augment this data with bounding box annotations, etc. The Dataset object allows us to write generic code using the same API, regardless of the data content and encoding type. TF-Slim's Dataset works especially well when the data is stored as a (possibly sharded) [TFRecords file](https://www.tensorflow.org/versions/r0.10/how_tos/reading_data/index.html#file-formats), where each record contains a [tf.train.Example protocol buffer](https://github.com/tensorflow/tensorflow/blob/r0.10/tensorflow/core/example/example.proto). TF-Slim uses a consistent convention for naming the keys and values inside each Example record. ## DatasetDataProvider A [DatasetDataProvider](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py) is a class which actually reads the data from a dataset. It is highly configurable to read the data in various ways that may make a big impact on the efficiency of your training process. For example, it can be single or multi-threaded. If your data is sharded across many files, it can read each files serially, or from every file simultaneously. ## Demo: The Flowers Dataset For convenience, we've include scripts to convert several common image datasets into TFRecord format and have provided the Dataset descriptor files necessary for reading them. We demonstrate how easy it is to use these dataset via the Flowers dataset below. ### Download the Flowers Dataset <a id='DownloadFlowers'></a> We've made available a tarball of the Flowers dataset which has already been converted to TFRecord format. 
```
import tensorflow.compat.v1 as tf

from datasets import dataset_utils

# Download the pre-converted (TFRecord) flowers dataset, once.
url = "http://download.tensorflow.org/data/flowers.tar.gz"
flowers_data_dir = '/tmp/flowers'

if not tf.gfile.Exists(flowers_data_dir):
    tf.gfile.MakeDirs(flowers_data_dir)

dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir)
```

### Display some of the data.

```
from datasets import flowers
import tensorflow.compat.v1 as tf

from tensorflow.contrib import slim

with tf.Graph().as_default():
    dataset = flowers.get_split('train', flowers_data_dir)
    # Queue-backed provider that yields one (image, label) pair per run.
    data_provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset, common_queue_capacity=32, common_queue_min=1)
    image, label = data_provider.get(['image', 'label'])

    with tf.Session() as sess:
        with slim.queues.QueueRunners(sess):
            # Pull and display 4 sample images with their class names.
            for i in range(4):
                np_image, np_label = sess.run([image, label])
                height, width, _ = np_image.shape
                class_name = name = dataset.labels_to_names[np_label]

                plt.figure()
                plt.imshow(np_image)
                plt.title('%s, %d x %d' % (name, height, width))
                plt.axis('off')
                plt.show()
```

# Convolutional neural nets (CNNs).
<a id='CNN'></a>

In this section, we show how to train an image classifier using a simple CNN.

### Define the model.

Below we define a simple CNN. Note that the output layer is linear function - we will apply softmax transformation externally to the model, either in the loss function (for training), or in the prediction function (during testing).

```
def my_cnn(images, num_classes, is_training):  # is_training is not used...
    """Two conv/pool stages followed by two fully connected layers.

    Returns unnormalized logits of shape [batch_size, num_classes];
    softmax is applied externally by the caller.
    """
    # All max-pool layers in this scope share a 3x3 kernel with stride 2.
    with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2):
        net = slim.conv2d(images, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.conv2d(net, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.flatten(net)
        net = slim.fully_connected(net, 192)
        # Linear output layer (activation_fn=None) -> logits.
        net = slim.fully_connected(net, num_classes, activation_fn=None)
        return net
```

### Apply the model to some randomly generated images.
``` import tensorflow as tf with tf.Graph().as_default(): # The model can handle any input size because the first layer is convolutional. # The size of the model is determined when image_node is first passed into the my_cnn function. # Once the variables are initialized, the size of all the weight matrices is fixed. # Because of the fully connected layers, this means that all subsequent images must have the same # input size as the first image. batch_size, height, width, channels = 3, 28, 28, 3 images = tf.random_uniform([batch_size, height, width, channels], maxval=1) # Create the model. num_classes = 10 logits = my_cnn(images, num_classes, is_training=True) probabilities = tf.nn.softmax(logits) # Initialize all the variables (including parameters) randomly. init_op = tf.global_variables_initializer() with tf.Session() as sess: # Run the init_op, evaluate the model outputs and print the results: sess.run(init_op) probabilities = sess.run(probabilities) print('Probabilities Shape:') print(probabilities.shape) # batch_size x num_classes print('\nProbabilities:') print(probabilities) print('\nSumming across all classes (Should equal 1):') print(np.sum(probabilities, 1)) # Each row sums to 1 ``` ### Train the model on the Flowers dataset. Before starting, make sure you've run the code to <a href="#DownloadFlowers">Download the Flowers</a> dataset. Now, we'll get a sense of what it looks like to use TF-Slim's training functions found in [learning.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/learning.py). First, we'll create a function, `load_batch`, that loads batches of dataset from a dataset. Next, we'll train a model for a single step (just to demonstrate the API), and evaluate the results. ``` from preprocessing import inception_preprocessing import tensorflow as tf from tensorflow.contrib import slim def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False): """Loads a single batch of data. 
Args: dataset: The dataset to load. batch_size: The number of images in the batch. height: The size of each image after preprocessing. width: The size of each image after preprocessing. is_training: Whether or not we're currently training or evaluating. Returns: images: A Tensor of size [batch_size, height, width, 3], image samples that have been preprocessed. images_raw: A Tensor of size [batch_size, height, width, 3], image samples that can be used for visualization. labels: A Tensor of size [batch_size], whose values range between 0 and dataset.num_classes. """ data_provider = slim.dataset_data_provider.DatasetDataProvider( dataset, common_queue_capacity=32, common_queue_min=8) image_raw, label = data_provider.get(['image', 'label']) # Preprocess image for usage by Inception. image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training) # Preprocess the image for display purposes. image_raw = tf.expand_dims(image_raw, 0) image_raw = tf.image.resize_images(image_raw, [height, width]) image_raw = tf.squeeze(image_raw) # Batch it up. images, images_raw, labels = tf.train.batch( [image, image_raw, label], batch_size=batch_size, num_threads=1, capacity=2 * batch_size) return images, images_raw, labels from datasets import flowers # This might take a few minutes. 
train_dir = '/tmp/tfslim_model/' print('Will save model to %s' % train_dir) with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.INFO) dataset = flowers.get_split('train', flowers_data_dir) images, _, labels = load_batch(dataset) # Create the model: logits = my_cnn(images, num_classes=dataset.num_classes, is_training=True) # Specify the loss function: one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes) slim.losses.softmax_cross_entropy(logits, one_hot_labels) total_loss = slim.losses.get_total_loss() # Create some summaries to visualize the training process: tf.summary.scalar('losses/Total Loss', total_loss) # Specify the optimizer and create the train op: optimizer = tf.train.AdamOptimizer(learning_rate=0.01) train_op = slim.learning.create_train_op(total_loss, optimizer) # Run the training: final_loss = slim.learning.train( train_op, logdir=train_dir, number_of_steps=1, # For speed, we just do 1 epoch save_summaries_secs=1) print('Finished training. Final batch loss %d' % final_loss) ``` ### Evaluate some metrics. As we discussed above, we can compute various metrics besides the loss. Below we show how to compute prediction accuracy of the trained model, as well as top-5 classification accuracy. (The difference between evaluation and evaluation_loop is that the latter writes the results to a log directory, so they can be viewed in tensorboard.) ``` from datasets import flowers # This might take a few minutes. 
with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.DEBUG) dataset = flowers.get_split('train', flowers_data_dir) images, _, labels = load_batch(dataset) logits = my_cnn(images, num_classes=dataset.num_classes, is_training=False) predictions = tf.argmax(logits, 1) # Define the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'eval/Recall@5': slim.metrics.streaming_recall_at_k(logits, labels, 5), }) print('Running evaluation Loop...') checkpoint_path = tf.train.latest_checkpoint(train_dir) metric_values = slim.evaluation.evaluate_once( master='', checkpoint_path=checkpoint_path, logdir=train_dir, eval_op=names_to_updates.values(), final_op=names_to_values.values()) names_to_values = dict(zip(names_to_values.keys(), metric_values)) for name in names_to_values: print('%s: %f' % (name, names_to_values[name])) ``` # Using pre-trained models <a id='Pretrained'></a> Neural nets work best when they have many parameters, making them very flexible function approximators. However, this means they must be trained on big datasets. Since this process is slow, we provide various pre-trained models - see the list [here](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models). You can either use these models as-is, or you can perform "surgery" on them, to modify them for some other task. For example, it is common to "chop off" the final pre-softmax layer, and replace it with a new set of weights corresponding to some new set of labels. You can then quickly fine tune the new model on a small new dataset. We illustrate this below, using inception-v1 as the base model. While models like Inception V3 are more powerful, Inception V1 is used for speed purposes. Take into account that VGG and ResNet final layers have only 1000 outputs rather than 1001. 
The ImageNet dataset provied has an empty background class which can be used to fine-tune the model to other tasks. VGG and ResNet models provided here don't use that class. We provide two examples of using pretrained models: Inception V1 and VGG-19 models to highlight this difference. ### Download the Inception V1 checkpoint ``` from datasets import dataset_utils url = "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz" checkpoints_dir = '/tmp/checkpoints' if not tf.gfile.Exists(checkpoints_dir): tf.gfile.MakeDirs(checkpoints_dir) dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir) ``` ### Apply Pre-trained Inception V1 model to Images. We have to convert each image to the size expected by the model checkpoint. There is no easy way to determine this size from the checkpoint itself. So we use a preprocessor to enforce this. ``` import numpy as np import os import tensorflow as tf try: import urllib2 as urllib except ImportError: import urllib.request as urllib from datasets import imagenet from nets import inception from preprocessing import inception_preprocessing from tensorflow.contrib import slim image_size = inception.inception_v1.default_image_size with tf.Graph().as_default(): url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg' image_string = urllib.urlopen(url).read() image = tf.image.decode_jpeg(image_string, channels=3) processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False) processed_images = tf.expand_dims(processed_image, 0) # Create the model, use the default arg scope to configure the batch norm parameters. 
with slim.arg_scope(inception.inception_v1_arg_scope()): logits, _ = inception.inception_v1(processed_images, num_classes=1001, is_training=False) probabilities = tf.nn.softmax(logits) init_fn = slim.assign_from_checkpoint_fn( os.path.join(checkpoints_dir, 'inception_v1.ckpt'), slim.get_model_variables('InceptionV1')) with tf.Session() as sess: init_fn(sess) np_image, probabilities = sess.run([image, probabilities]) probabilities = probabilities[0, 0:] sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])] plt.figure() plt.imshow(np_image.astype(np.uint8)) plt.axis('off') plt.show() names = imagenet.create_readable_names_for_imagenet_labels() for i in range(5): index = sorted_inds[i] print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index])) ``` ### Download the VGG-16 checkpoint ``` from datasets import dataset_utils import tensorflow as tf url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz" checkpoints_dir = '/tmp/checkpoints' if not tf.gfile.Exists(checkpoints_dir): tf.gfile.MakeDirs(checkpoints_dir) dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir) ``` ### Apply Pre-trained VGG-16 model to Images. We have to convert each image to the size expected by the model checkpoint. There is no easy way to determine this size from the checkpoint itself. So we use a preprocessor to enforce this. Pay attention to the difference caused by 1000 classes instead of 1001. 
``` import numpy as np import os import tensorflow as tf try: import urllib2 except ImportError: import urllib.request as urllib from datasets import imagenet from nets import vgg from preprocessing import vgg_preprocessing from tensorflow.contrib import slim image_size = vgg.vgg_16.default_image_size with tf.Graph().as_default(): url = 'https://upload.wikimedia.org/wikipedia/commons/d/d9/First_Student_IC_school_bus_202076.jpg' image_string = urllib.urlopen(url).read() image = tf.image.decode_jpeg(image_string, channels=3) processed_image = vgg_preprocessing.preprocess_image(image, image_size, image_size, is_training=False) processed_images = tf.expand_dims(processed_image, 0) # Create the model, use the default arg scope to configure the batch norm parameters. with slim.arg_scope(vgg.vgg_arg_scope()): # 1000 classes instead of 1001. logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=False) probabilities = tf.nn.softmax(logits) init_fn = slim.assign_from_checkpoint_fn( os.path.join(checkpoints_dir, 'vgg_16.ckpt'), slim.get_model_variables('vgg_16')) with tf.Session() as sess: init_fn(sess) np_image, probabilities = sess.run([image, probabilities]) probabilities = probabilities[0, 0:] sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])] plt.figure() plt.imshow(np_image.astype(np.uint8)) plt.axis('off') plt.show() names = imagenet.create_readable_names_for_imagenet_labels() for i in range(5): index = sorted_inds[i] # Shift the index of a class name by one. print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index+1])) ``` ### Fine-tune the model on a different set of labels. We will fine tune the inception model on the Flowers dataset. ``` # Note that this may take several minutes. 
import os from datasets import flowers from nets import inception from preprocessing import inception_preprocessing from tensorflow.contrib import slim image_size = inception.inception_v1.default_image_size def get_init_fn(): """Returns a function run by the chief worker to warm-start the training.""" checkpoint_exclude_scopes=["InceptionV1/Logits", "InceptionV1/AuxLogits"] exclusions = [scope.strip() for scope in checkpoint_exclude_scopes] variables_to_restore = [] for var in slim.get_model_variables(): for exclusion in exclusions: if var.op.name.startswith(exclusion): break else: variables_to_restore.append(var) return slim.assign_from_checkpoint_fn( os.path.join(checkpoints_dir, 'inception_v1.ckpt'), variables_to_restore) train_dir = '/tmp/inception_finetuned/' with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.INFO) dataset = flowers.get_split('train', flowers_data_dir) images, _, labels = load_batch(dataset, height=image_size, width=image_size) # Create the model, use the default arg scope to configure the batch norm parameters. with slim.arg_scope(inception.inception_v1_arg_scope()): logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True) # Specify the loss function: one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes) slim.losses.softmax_cross_entropy(logits, one_hot_labels) total_loss = slim.losses.get_total_loss() # Create some summaries to visualize the training process: tf.summary.scalar('losses/Total Loss', total_loss) # Specify the optimizer and create the train op: optimizer = tf.train.AdamOptimizer(learning_rate=0.01) train_op = slim.learning.create_train_op(total_loss, optimizer) # Run the training: final_loss = slim.learning.train( train_op, logdir=train_dir, init_fn=get_init_fn(), number_of_steps=2) print('Finished training. Last batch loss %f' % final_loss) ``` ### Apply fine tuned model to some images. 
``` import numpy as np import tensorflow as tf from datasets import flowers from nets import inception from tensorflow.contrib import slim image_size = inception.inception_v1.default_image_size batch_size = 3 with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.INFO) dataset = flowers.get_split('train', flowers_data_dir) images, images_raw, labels = load_batch(dataset, height=image_size, width=image_size) # Create the model, use the default arg scope to configure the batch norm parameters. with slim.arg_scope(inception.inception_v1_arg_scope()): logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True) probabilities = tf.nn.softmax(logits) checkpoint_path = tf.train.latest_checkpoint(train_dir) init_fn = slim.assign_from_checkpoint_fn( checkpoint_path, slim.get_variables_to_restore()) with tf.Session() as sess: with slim.queues.QueueRunners(sess): sess.run(tf.initialize_local_variables()) init_fn(sess) np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels]) for i in range(batch_size): image = np_images_raw[i, :, :, :] true_label = np_labels[i] predicted_label = np.argmax(np_probabilities[i, :]) predicted_name = dataset.labels_to_names[predicted_label] true_name = dataset.labels_to_names[true_label] plt.figure() plt.imshow(image.astype(np.uint8)) plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name)) plt.axis('off') plt.show() ```
github_jupyter
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D5_DimensionalityReduction/student/W1D5_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Neuromatch Academy: Week 1, Day 5, Tutorial 1 # Dimensionality Reduction: Geometric view of data --- Tutorial objectives In this notebook we'll explore how multivariate data can be represented in different orthonormal bases. This will help us build intuition that will be helpful in understanding PCA in the following tutorial. Steps: 1. Generate correlated multivariate data. 2. Define an arbitrary orthonormal basis. 3. Project data onto new basis. --- ``` #@title Video: Geometric view of data from IPython.display import YouTubeVideo video = YouTubeVideo(id="emLW0F-VUag", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` # Setup Run these cells to get the tutorial started. ``` #library imports import time # import time import numpy as np # import numpy import scipy as sp # import scipy import math # import basic math functions import random # import basic random number generator functions import matplotlib.pyplot as plt # import matplotlib from IPython import display #@title Figure Settings %matplotlib inline fig_w, fig_h = (8, 8) plt.rcParams.update({'figure.figsize': (fig_w, fig_h)}) plt.style.use('ggplot') %config InlineBackend.figure_format = 'retina' #@title Helper functions def get_data(cov_matrix): """ Returns a matrix of 1000 samples from a bivariate, zero-mean Gaussian Note that samples are sorted in ascending order for the first random variable. 
Args: cov_matrix (numpy array of floats): desired covariance matrix Returns: (numpy array of floats) : samples from the bivariate Gaussian, with each column corresponding to a different random variable """ mean = np.array([0,0]) X = np.random.multivariate_normal(mean,cov_matrix,size = 1000) indices_for_sorting = np.argsort(X[:,0]) X = X[indices_for_sorting,:] return X def plot_data(X): """ Plots bivariate data. Includes a plot of each random variable, and a scatter plot of their joint activity. The title indicates the sample correlation calculated from the data. Args: X (numpy array of floats): Data matrix each column corresponds to a different random variable Returns: Nothing. """ fig = plt.figure(figsize=[8,4]) gs = fig.add_gridspec(2,2) ax1 = fig.add_subplot(gs[0,0]) ax1.plot(X[:,0],color='k') plt.ylabel('Neuron 1') plt.title('Sample var 1: {:.1f}'.format(np.var(X[:,0]))) ax1.set_xticklabels([]) ax2 = fig.add_subplot(gs[1,0]) ax2.plot(X[:,1],color='k') plt.xlabel('Sample Number') plt.ylabel('Neuron 2') plt.title('Sample var 2: {:.1f}'.format(np.var(X[:,1]))) ax3 = fig.add_subplot(gs[:, 1]) ax3.plot(X[:,0],X[:,1],'.',markerfacecolor=[.5,.5,.5], markeredgewidth=0) ax3.axis('equal') plt.xlabel('Neuron 1 activity') plt.ylabel('Neuron 2 activity') plt.title('Sample corr: {:.1f}'.format(np.corrcoef(X[:,0],X[:,1])[0,1])) def plot_basis_vectors(X,W): """ Plots bivariate data as well as new basis vectors. Args: X (numpy array of floats): Data matrix each column corresponds to a different random variable W (numpy array of floats): Square matrix representing new orthonormal basis each column represents a basis vector Returns: Nothing. 
""" plt.figure(figsize=[4,4]) plt.plot(X[:,0],X[:,1],'.',color=[.5,.5,.5],label='Data') plt.axis('equal') plt.xlabel('Neuron 1 activity') plt.ylabel('Neuron 2 activity') plt.plot([0,W[0,0]],[0,W[1,0]],color='r',linewidth=3,label = 'Basis vector 1') plt.plot([0,W[0,1]],[0,W[1,1]],color='b',linewidth=3,label = 'Basis vector 2') plt.legend() def plot_data_new_basis(Y): """ Plots bivariate data after transformation to new bases. Similar to plot_data but with colors corresponding to projections onto basis 1 (red) and basis 2 (blue). The title indicates the sample correlation calculated from the data. Note that samples are re-sorted in ascending order for the first random variable. Args: Y (numpy array of floats): Data matrix in new basis each column corresponds to a different random variable Returns: Nothing. """ fig = plt.figure(figsize=[8,4]) gs = fig.add_gridspec(2,2) ax1 = fig.add_subplot(gs[0,0]) ax1.plot(Y[:,0],'r') plt.xlabel plt.ylabel('Projection \n basis vector 1') plt.title('Sample var 1: {:.1f}'.format(np.var(Y[:,0]))) ax1.set_xticklabels([]) ax2 = fig.add_subplot(gs[1,0]) ax2.plot(Y[:,1],'b') plt.xlabel('Sample number') plt.ylabel('Projection \n basis vector 2') plt.title('Sample var 2: {:.1f}'.format(np.var(Y[:,1]))) ax3 = fig.add_subplot(gs[:, 1]) ax3.plot(Y[:,0],Y[:,1],'.',color=[.5,.5,.5]) ax3.axis('equal') plt.xlabel('Projection basis vector 1') plt.ylabel('Projection basis vector 2') plt.title('Sample corr: {:.1f}'.format(np.corrcoef(Y[:,0],Y[:,1])[0,1])) ``` # Generate correlated multivariate data ``` #@title Video: Multivariate data from IPython.display import YouTubeVideo video = YouTubeVideo(id="YOan2BQVzTQ", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` To study multivariate data, first we generate it. In this exercise we generate data from a *bivariate normal distribution*. 
This is an extension of the one-dimensional normal distribution to two dimensions, in which each $x_i$ is marginally normal with mean $\mu_i$ and variance $\sigma_i^2$: \begin{align} x_i \sim \mathcal{N}(\mu_i,\sigma_i^2) \end{align} Additionally, the joint distribution for $x_1$ and $x_2$ has a specified correlation coefficient $\rho$. Recall that the correlation coefficient is a normalized version of the covariance, and ranges between -1 and +1. \begin{align} \rho = \frac{\text{cov}(x_1,x_2)}{\sqrt{\sigma_1^2 \sigma_2^2}} \end{align} For simplicity, we will assume that the mean of each variable has already been subtracted, so that $\mu_i=0$. The remaining parameters can be summarized in the covariance matrix: \begin{equation*} {\bf \Sigma} = \begin{pmatrix} \text{var}(x_1) & \text{cov}(x_1,x_2) \\ \text{cov}(x_1,x_2) &\text{var}(x_2) \end{pmatrix} \end{equation*} Note that this is a symmetric matrix with the variances $\text{var}(x_i) = \sigma_i^2$ on the diagonal, and the covariance on the off-diagonal. ### Exercise We have provided code to draw random samples from a zero-mean bivariate normal distribution. These samples could be used to simulate changes in firing rates for two neurons. Fill in the function below to calculate the covariance matrix given the desired variances and correlation coefficient. The covariance can be found by rearranging the equation above: \begin{align} \text{cov}(x_1,x_2) = \rho \sqrt{\sigma_1^2 \sigma_2^2} \end{align} Use these functions to generate and plot data while varying the parameters. You should get a feel for how changing the correlation coefficient affects the geometry of the simulated data. **Suggestions** * Fill in the function `calculate_cov_matrix` to calculate the covariance. * Generate and plot the data for $\sigma_1^2 =1$, $\sigma_1^2 =1$, and $\rho = .8$. Try plotting the data for different values of the correlation coefficent: $\rho = -1, -.5, 0, .5, 1$. 
``` help(plot_data) help(get_data) def calculate_cov_matrix(var_1,var_2,corr_coef): """ Calculates the covariance matrix based on the variances and correlation coefficient. Args: var_1 (scalar): variance of the first random variable var_2 (scalar): variance of the second random variable corr_coef (scalar): correlation coefficient Returns: (numpy array of floats) : covariance matrix """ ################################################################### ## Insert your code here to: ## calculate the covariance from the variances and correlation # cov = ... cov_matrix = np.array([[var_1,cov],[cov,var_2]]) #uncomment once you've filled in the function raise NotImplementedError("Student excercise: calculate the covariance matrix!") ################################################################### return cov ################################################################### ## Insert your code here to: ## generate and plot bivariate Gaussian data with variances of 1 ## and a correlation coefficients of: 0.8 ## repeat while varying the correlation coefficient from -1 to 1 ################################################################### variance_1 = 1 variance_2 = 1 corr_coef = 0.8 #uncomment to test your code and plot #cov_matrix = calculate_cov_matrix(variance_1,variance_2,corr_coef) #X = get_data(cov_matrix) #plot_data(X) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_62df7ae6.py) *Example output:* <img alt='Solution hint' align='left' width=510 height=303 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial1_Solution_62df7ae6_0.png> # Define a new orthonormal basis ``` #@title Video: Orthonormal bases from IPython.display import YouTubeVideo video = YouTubeVideo(id="dK526Nbn2Xo", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" 
+ video.id) video ``` Next, we will define a new orthonormal basis of vectors ${\bf u} = [u_1,u_2]$ and ${\bf w} = [w_1,w_2]$. As we learned in the video, two vectors are orthonormal if: 1. They are orthogonal (i.e., their dot product is zero): \begin{equation} {\bf u\cdot w} = u_1 w_1 + u_2 w_2 = 0 \end{equation} 2. They have unit length: \begin{equation} ||{\bf u} || = ||{\bf w} || = 1 \end{equation} In two dimensions, it is easy to make an arbitrary orthonormal basis. All we need is a random vector ${\bf u}$, which we have normalized. If we now define the second basis vector to be ${\bf w} = [-u_2,u_1]$, we can check that both conditions are satisfied: \begin{equation} {\bf u\cdot w} = - u_1 u_2 + u_2 u_1 = 0 \end{equation} and \begin{equation} {|| {\bf w} ||} = \sqrt{(-u_2)^2 + u_1^2} = \sqrt{u_1^2 + u_2^2} = 1, \end{equation} where we used the fact that ${\bf u}$ is normalized. So, with an arbitrary input vector, we can define an orthonormal basis, which we will write in matrix by stacking the basis vectors horizontally: \begin{equation} {{\bf W} } = \begin{pmatrix} u_1 & w_1 \\ u_2 & w_2 \end{pmatrix}. \end{equation} ### Exercise In this exercise you will fill in the function below to define an orthonormal basis, given a single arbitrary 2-dimensional vector as an input. **Suggestions** * Modify the function `define_orthonormal_basis` to first normalize the first basis vector $\bf u$. * Then complete the function by finding a basis vector $\bf w$ that is orthogonal to $\bf u$. * Test the function using initial basis vector ${\bf u} = [3,1]$. Plot the resulting basis vectors on top of the data scatter plot using the function `plot_basis_vectors`. (For the data, use $\sigma_1^2 =1$, $\sigma_1^2 =1$, and $\rho = .8$). ``` help(plot_basis_vectors) def define_orthonormal_basis(u): """ Calculates an orthonormal basis given an arbitrary vector u. 
Args: u (numpy array of floats): arbitrary 2-dimensional vector used for new basis Returns: (numpy array of floats) : new orthonormal basis columns correspond to basis vectors """ ################################################################### ## Insert your code here to: ## normalize vector u ## calculate vector w that is orthogonal to w #u = .... #w = ... #W = np.column_stack((u,w)) #comment this once you've filled the function raise NotImplementedError("Student excercise: implement the orthonormal basis function") ################################################################### return W variance_1 = 1 variance_2 = 1 corr_coef = 0.8 cov_matrix = calculate_cov_matrix(variance_1,variance_2,corr_coef) X = get_data(cov_matrix) u = np.array([3,1]) #uncomment and run below to plot the basis vectors ##define_orthonormal_basis(u) #plot_basis_vectors(X,W) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_c9ca4afa.py) *Example output:* <img alt='Solution hint' align='left' width=286 height=281 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial1_Solution_c9ca4afa_0.png> # Project data onto new basis ``` #@title Video: Change of basis from IPython.display import YouTubeVideo video = YouTubeVideo(id="5MWSUtpbSt0", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` Finally, we will express our data in the new basis that we have just found. Since $\bf W$ is orthonormal, we can project the data into our new basis using simple matrix multiplication : \begin{equation} {\bf Y = X W}. \end{equation} We will explore the geometry of the transformed data $\bf Y$ as we vary the choice of basis. 
#### Exercise In this exercise you will fill in the function below to define an orthonormal basis, given a single arbitrary vector as an input. **Suggestions** * Complete the function `change_of_basis` to project the data onto the new basis. * Plot the projected data using the function `plot_data_new_basis`. * What happens to the correlation coefficient in the new basis? Does it increase or decrease? * What happens to variance? ``` def change_of_basis(X,W): """ Projects data onto new basis W. Args: X (numpy array of floats) : Data matrix each column corresponding to a different random variable W (numpy array of floats): new orthonormal basis columns correspond to basis vectors Returns: (numpy array of floats) : Data matrix expressed in new basis """ ################################################################### ## Insert your code here to: ## project data onto new basis described by W #Y = ... #comment this once you've filled the function raise NotImplementedError("Student excercise: implement change of basis") ################################################################### return Y ## Unomment below to transform the data by projecting it into the new basis ## Plot the projected data # Y = change_of_basis(X,W) # plot_data_new_basis(Y) # disp(...) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_b434bc0d.py) *Example output:* <img alt='Solution hint' align='left' width=544 height=303 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial1_Solution_b434bc0d_0.png> #### Exercise To see what happens to the correlation as we change the basis vectors, run the cell below. The parameter $\theta$ controls the angle of $\bf u$ in degrees. Use the slider to rotate the basis vectors. **Questions** * What happens to the projected data as you rotate the basis? 
* How does the correlation coefficient change? How does the variance of the projection onto each basis vector change? * Are you able to find a basis in which the projected data is uncorrelated? ``` ###### MAKE SURE TO RUN THIS CELL VIA THE PLAY BUTTON TO ENABLE SLIDERS ######## import ipywidgets as widgets def refresh(theta = 0): u = [1,np.tan(theta * np.pi/180.)] W = define_orthonormal_basis(u) Y = change_of_basis(X,W) plot_basis_vectors(X,W) plot_data_new_basis(Y) _ = widgets.interact(refresh, theta = (0, 90, 5)) ```
github_jupyter
# Django UnChained <img src="images/django.jpg"> # View <img src="https://mdn.mozillademos.org/files/13931/basic-django.png"> # EXP1 # URLs ``` from django.conf.urls import url from . import views app_name = 'polls' urlpatterns = [ url(r'^$', views.index, name='index'), url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'), url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='results'), url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'), ] ``` # models ``` from django.db import models class Question(models.Model): question_text = models.CharField(max_length=200) pub_date = models.DateTimeField('date published') class Choice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) ``` # HTTP Request, HTTP Response ``` from django.http import HttpResponse def index(request): return HttpResponse("Hello, world. You're at the polls index.") def detail(request, question_id): return HttpResponse("You're looking at question %s." % question_id) def results(request, question_id): response = "You're looking at the results of question %s." return HttpResponse(response % question_id) def vote(request, question_id): return HttpResponse("You're voting on question %s." 
% question_id) ``` # views and models ``` def list_view(request): objs=models.ModelName.objects.all() return HttpResponse("You're looking at list_view" %objs) from django.shortcuts import get_object_or_404 def detail_view(request, pk): obj = get_object_or_404(ModelName, pk=pk) return HttpResponse("You're looking at detail_view", pk, obj) def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] output = ', '.join([q.question_text for q in latest_question_list]) return HttpResponse(output) ``` # views and templates ``` def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] template = loader.get_template('polls/index.html') context = { 'latest_question_list': latest_question_list, } return HttpResponse(template.render(context, request)) def index(request): latest_question_list = Question.objects.order_by('-pub_date')[:5] context = {'latest_question_list': latest_question_list} return render(request, 'polls/index.html', context) {% if latest_question_list %} <ul> {% for question in latest_question_list %} <li><a href="/polls/{{ question.id }}/">{{ question.question_text }}</a></li> <li><a href="{% url 'detail' question.id %}">{{ question.question_text }}</a></li> <li><a href="{% url 'polls:detail' question.id %}">{{ question.question_text }}</a></li> {% endfor %} </ul> {% else %} <p>No polls are available.</p> {% endif %} ``` # detail_view ``` from django.shortcuts import render def detail_view(request, pk): obj = get_object_or_404(ModelName, pk=pk) return render(request, 'app/template.html', {'obj': obj}) def detail(request, question_id): question = get_object_or_404(Question, pk=question_id) return render(request, 'polls/detail.html', {'question': question}) <h1>{{ question.question_text }}</h1> <ul> {% for choice in question.choice_set.all %} <li>{{ choice.choice_text }}</li> {% endfor %} </ul> ``` # Forms ``` <h1>{{ question.question_text }}</h1> {% if error_message %}<p><strong>{{ error_message 
}}</strong></p>{% endif %} <form action="{% url 'polls:vote' question.id %}" method="post"> {% csrf_token %} {% for choice in question.choice_set.all %} <input type="radio" name="choice" id="choice{{ forloop.counter }}" value="{{ choice.id }}" /> <label for="choice{{ forloop.counter }}">{{ choice.choice_text }}</label><br /> {% endfor %} <input type="submit" value="Vote" /> </form> def vote(request, question_id): question = get_object_or_404(Question, pk=question_id) try: selected_choice = question.choice_set.get(pk=request.POST['choice']) except (KeyError, Choice.DoesNotExist): # Redisplay the question voting form. return render(request, 'polls/detail.html', { 'question': question, 'error_message': "You didn't select a choice.", }) else: selected_choice.votes += 1 selected_choice.save() # Always return an HttpResponseRedirect after successfully dealing # with POST data. This prevents data from being posted twice if a # user hits the Back button. return HttpResponseRedirect(reverse('polls:results', args=(question.id,))) ``` # Final Step ``` def results(request, question_id): question = get_object_or_404(Question, pk=question_id) return render(request, 'polls/results.html', {'question': question}) <h1>{{ question.question_text }}</h1> <ul> {% for choice in question.choice_set.all %} <li>{{ choice.choice_text }} -- {{ choice.votes }} vote{{ choice.votes|pluralize }}</li> {% endfor %} </ul> <a href="{% url 'polls:detail' question.id %}">Vote again?</a> ``` # Built-in class-based views API ### Base vs Generic views¶ Base class-based views can be thought of as parent views, which can be used by themselves or inherited from. They may not provide all the capabilities required for projects, in which case there are Mixins which extend what base views can do. Django’s generic views are built off of those base views, and were developed as a shortcut for common usage patterns such as displaying the details of an object. 
They take certain common idioms and patterns found in view development and abstract them so that you can quickly write common views of data without having to repeat yourself. https://docs.djangoproject.com/en/2.0/ref/class-based-views/ # Generic display views ``` class IndexView(generic.ListView): template_name = 'polls/index.html' context_object_name = 'latest_question_list' def get_queryset(self): """Return the last five published questions.""" return Question.objects.order_by('-pub_date')[:5] class DetailView(generic.DetailView): model = Question template_name = 'polls/detail.html' class ResultsView(generic.DetailView): model = Question template_name = 'polls/results.html' app_name = 'polls' urlpatterns = [ url(r'^$', views.IndexView.as_view(), name='index'), url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'), url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'), url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'), ] ```
github_jupyter
# Introduction to C++ ## Hello world There are many lessons in writing a simple "Hello world" program - C++ programs are normally written using a text editor or integrated development environment (IDE) - we use the %%file magic to simulate this - The #include statement literally pulls in and prepends the source code from the `iostream` header file - Types must be declared - note the function return type is `int` - There is a single function called `main` - every program has `main` as the entry point although you can write libraries without a `main` function - Notice the use of braces to delimit blocks - Notice the use of semi-colons to delimit expressions - Unlike Python, white space is not used to delimit blocks or expressions only tokens - Note the use of the `std` *namespace* - this is similar to Python except C++ uses `::` rather than `.` (like R) - The I/O shown here uses *streaming* via the `<<` operator to send output to `cout`, which is the name for the standard output - `std::endl` provides a line break and flushes the input buffer ``` %%file hello.cpp #include <iostream> int main() { std::cout << "Hello, world!" << std::endl; } ``` ### Compilation - The source file must be compiled to machine code before it can be exeuted - Compilation is done with a C++ compiler - here we use one called `g++` - By default, the output of compilation is called `a.out` - we use `-o` to change the output executable filename to `hello.exe` - Note the use of `.exe` is a Windows convention; Unix executables typically have no extension - for example, just be the name `hello` ``` %%bash g++ hello.cpp -o hello.exe ``` ### Execution ``` %%bash ./hello.exe ``` ### C equivalent Before we move on, we briefly show the similar `Hello world` program in C. C is a precursor to C++ that is still widely used. While C++ is derived from C, it is a much richer and more complex language. 
We focus on C++ because the intent is to show how to wrap C++ code using `pybind11` and take advantage of C++ numerical libraries that do not exist in C. ``` %%file hello01.c #include <stdio.h> int main() { printf("Hello, world from C!\n"); } %%bash gcc hello01.c %%bash ./a.out ``` ## Namespaces Just like Python, C++ has namespaces that allow us to build large libraries without worrying about name collisions. In the `Hello world` program, we used the explicit name `std::cout` indicating that `cout` is a member of the standard workspace. We can also use the `using` keyword to import selected functions or classes from a namespace. ```c++ using std::cout; int main() { cout << "Hello, world!\n"; } ``` For small programs, we sometimes import the entire namespace for convenience, but this may cause namespace collisions in larger programs. ```c++ using namespace std; int main() { cout << "Hello, world!\n"; } ``` You can easily create your own namespace. ```c++ namespace sta_663 { const double pi=2.14159; void greet(string name) { cout << "\nTraditional first program\n"; cout << "Hello, " << name << "\n"; } } int main() { cout << "\nUsing namespaces\n"; string name = "Tom"; cout << sta_663::pi << "\n"; sta_663::greet(name); } ``` #### Using qualified imports ``` %%file hello02.cpp #include <iostream> using std::cout; using std::endl; int main() { cout << "Hello, world!" << endl; } %%bash g++ hello02.cpp -o hello02 %%bash ./hello02 ``` #### Global imports of a namespace Wholesale imports of namespace is generally frowned upon, similar to how `from X import *` is frowned upon in Python. ``` %%file hello03.cpp #include <iostream> using namespace std; int main() { cout << "Hello, world!" 
<< endl; } %%bash g++ hello03.cpp -o hello03 %%bash ./hello03 ``` ## Types ``` %%file dtypes.cpp #include <iostream> #include <complex> using std::cout; int main() { // Boolean bool a = true, b = false; cout << "and " << (a and b) << "\n"; cout << "&& " << (a && b) << "\n"; cout << "or " << (a or b) << "\n"; cout << "|| " << (a || b) << "\n"; cout << "not " << not (a or b) << "\n"; cout << "! " << !(a or b) << "\n"; // Integral numbers cout << "char " << sizeof(char) << "\n"; cout << "short int " << sizeof(short int) << "\n"; cout << "int " << sizeof(int) << "\n"; cout << "long " << sizeof(long) << "\n"; // Floating point numbers cout << "float " << sizeof(float) << "\n"; cout << "double " << sizeof(double) << "\n"; cout << "long double " << sizeof(long double) << "\n"; cout << "complex double " << sizeof(std::complex<double>) << "\n"; // Characters and strings char c = 'a'; // Note single quotes char word[] = "hello"; // C char arrays std::string s = "hello"; // C++ string cout << c << "\n"; cout << word << "\n"; cout << s << "\n"; } %%bash g++ dtypes.cpp -o dtypes.exe ./dtypes.exe ``` ## Type conversions Converting between types can get pretty complicated in C++. We will show some simple versions. 
``` %%file type.cpp #include <iostream> using std::cout; using std::string; using std::stoi; int main() { char c = '3'; // A char is an integer type string s = "3"; // A string is not an integer type int i = 3; float f = 3.1; double d = 3.2; cout << c << "\n"; cout << i << "\n"; cout << f << "\n"; cout << d << "\n"; cout << "c + i is " << c + i << "\n"; cout << "c + i is " << c - '0' + i << "\n"; // Casting string to number cout << "s + i is " << stoi(s) + i << "\n"; // Use std::stod to convert to double // Two ways to cast float to int cout << "f + i is " << f + i << "\n"; cout << "f + i is " << int(f) + i << "\n"; cout << "f + i is " << static_cast<int>(f) + i << "\n"; } %%bash g++ -o type.exe type.cpp -std=c++14 %%bash ./type.exe ``` ## Header, source, and driver files C++ allows separate compilation of functions and programs that use those functions. The way it does this is to write functions in *source* files that can be compiled. To use these compiled functions, the calling program includes *header* files that contain the function signatures - this provides enough information for the compiler to link to the compiled function machine code when executing the program. 
- Here we show a toy example of typical C++ program organization - We build a library of math functions in `my_math.cpp` - We add a header file for the math functions in `my_math.hpp` - We build a library of stats functions in `my_stats.cpp` - We add a header file for the stats functions in `my_stats.hpp` - We write a program that uses math and stats functions called `my_driver.cpp` - We pull in the function signatures with `#include` for the header files - Once you understand the code, move on to see how compilation is done - Note that it is customary to include the header file in the source file itself to let the compiler catch any mistakes in the function signatures ``` %%file my_math.hpp #pragma once int add(int a, int b); int multiply(int a, int b); %%file my_math.cpp #include "my_math.hpp" int add(int a, int b) { return a + b; } int multiply(int a, int b) { return a * b; } %%file my_stats.hpp #pragma once int mean(int xs[], int n); %%file my_stats.cpp #include "my_math.hpp" int mean(int xs[], int n) { double s = 0; for (int i=0; i<n; i++) { s += xs[i]; } return s/n; } %%file my_driver.cpp #include <iostream> #include "my_math.hpp" #include "my_stats.hpp" int main() { int xs[] = {1,2,3,4,5}; int n = 5; int a = 3, b= 4; std::cout << "sum = " << add(a, b) << "\n"; std::cout << "prod = " << multiply(a, b) << "\n"; std::cout << "mean = " << mean(xs, n) << "\n"; } ``` Compilation - Notice in the first 2 compile statements, that the source files are compiled to *object* files with default extension `.o` by usin gthe flag `-c` - The 3rd compile statement builds an *executable* by linking the `main` file with the recently created object files - The function signatures in the included header files tells the compiler how to match the function calls `add`, `multiply` and `mean` with the matching compiled functions ``` %%bash g++ -c my_math.cpp g++ -c my_stats.cpp g++ my_driver.cpp my_math.o my_stats.o %%bash ./a.out ``` ### Using `make` As building C++ programs can 
quickly become quite complicated, there are *builder* programs that help simplify this task. One of the most widely used is `make`, which uses a file normally called `Makefile` to coordinate the instructions for building a program - Note that `make` can be used for more than compiling programs; for example, you can use it to automatically rebuild tables and figures for a manuscript whenever the data is changed - Another advantage of `make` is that it keeps track of dependencies, and only re-compiles files that have changed or depend on another changed file since the last compilation We will build a simple `Makefile` to build the `my_driver` executable: - Each section consists of a make target denoted by `<target>:` followed by files the target depends on - The next line is the command given to build the target. This must begin with a TAB character (it MUST be a TAB and not spaces) - If a target has dependencies that are not met, `make` will see if each dependency itself is a target and build that first - It uses timestamps to decide whether to rebuild a target (not actual content changes) - By default, `make` builds the first target, but can also build named targets How to get the TAB character. Copy and paste the blank space between `a` and `b`. ``` ! 
echo "a\tb" %%file Makefile driver: my_math.o my_stats.o g++ my_driver.cpp my_math.o my_stats.o -o my_driver my_math.o: my_math.cpp my_math.hpp g++ -c my_math.cpp my_stats.o: my_stats.cpp my_stats.hpp g++ -c my_stats.cpp ``` - We first start with a clean slate ``` %%capture logfile %%bash rm *\.o rm my_driver %%bash make %%bash ./my_driver ``` - Re-building does not trigger re-compilation of source files since the timestamps have not changed ``` %%bash make %%bash touch my_stats.hpp ``` - As `my_stats.hpp` was listed as a dependency of the target `my_stats.o`, `touch`, which updates the timestamp, forces a recompilation of `my_stats.o` ``` %%bash make ``` #### Use of variables in Makefile ``` %%file Makefile2 CC=g++ CFLAGS=-Wall -std=c++14 driver: my_math.o my_stats.o $(CC) $(CFLAGS) my_driver.cpp my_math.o my_stats.o -o my_driver2 my_math.o: my_math.cpp my_math.hpp $(CC) $(CFLAGS) -c my_math.cpp my_stats.o: my_stats.cpp my_stats.hpp $(CC) $(CFLAGS) -c my_stats.cpp ``` ### Compilation Note that no re-compilation occurs! ``` %%bash make -f Makefile2 ``` ### Execution ``` %%bash ./my_driver2 ``` ## Input and output #### Arguments to main ``` %%file main_args.cpp #include <iostream> using std::cout; int main(int argc, char* argv[]) { for (int i=0; i<argc; i++) { cout << i << ": " << argv[i] << "\n"; } } %%bash g++ main_args.cpp -o main_args %%bash ./main_args hello 1 2 3 ``` **Exercise** Write, compile and execute a progrm called `greet` that when called on the command line with ```bash greet Santa 3 ``` gives the output ``` Hello Santa! Hello Santa! Hello Santa! ``` #### Reading from files ``` %%file data.txt 9 6 %%file io.cpp #include <fstream> #include "my_math.hpp" int main() { std::ifstream fin("data.txt"); std::ofstream fout("result.txt"); double a, b; fin >> a >> b; fin.close(); fout << add(a, b) << std::endl; fout << multiply(a, b) << std::endl; fout.close(); } %%bash g++ io.cpp -o io.exe my_math.cpp %%bash ./io.exe ! 
cat result.txt ``` ## Arrays ``` %%file array.cpp #include <iostream> using std::cout; using std::endl; int main() { int N = 3; double counts[N]; counts[0] = 1; counts[1] = 3; counts[2] = 3; double avg = (counts[0] + counts[1] + counts[2])/3; cout << avg << endl; } %%bash g++ -o array.exe array.cpp %%bash ./array.exe ``` ## Loops ``` %%file loop.cpp #include <iostream> using std::cout; using std::endl; using std::begin; using std::end; int main() { int x[] = {1, 2, 3, 4, 5}; cout << "\nTraditional for loop\n"; for (int i=0; i < sizeof(x)/sizeof(x[0]); i++) { cout << i << endl; } cout << "\nUsing iterators\n"; for (auto it=begin(x); it != end(x); it++) { cout << *it << endl; } cout << "\nRanged for loop\n\n"; for (auto const &i : x) { cout << i << endl; } } %%bash g++ -o loop.exe loop.cpp -std=c++14 %%bash ./loop.exe ``` ## Function arguments - A value argument means that the argument is copied in the body of the function - A reference argument means that the address of the value is used in the function. Reference or pointer arguments are used to avoid copying large objects. 
``` %%file func_arg.cpp #include <iostream> using std::cout; using std::endl; // Value parameter void f1(int x) { x *= 2; cout << "In f1 : x=" << x << endl; } // Reference parameter void f2(int &x) { x *= 2; cout << "In f2 : x=" << x << endl; } /* Note If you want to avoid side effects but still use references to avoid a copy operation use a const reference like this to indicate that x cannot be changed void f2(const int &x) */ /* Note Raw pointers are prone to error and generally avoided in modern C++ See unique_ptr and shared_ptr */ // Raw pointer parameter void f3(int *x) { *x *= 2; cout << "In f3 : x=" << *x << endl; } int main() { int x = 1; cout << "Before f1: x=" << x << "\n"; f1(x); cout << "After f1 : x=" << x << "\n"; cout << "Before f2: x=" << x << "\n"; f2(x); cout << "After f2 : x=" << x << "\n"; cout << "Before f3: x=" << x << "\n"; f3(&x); cout << "After f3 : x=" << x << "\n"; } %%bash c++ -o func_arg.exe func_arg.cpp --std=c++14 %%bash ./func_arg.exe ``` ## Arrays, pointers and dynamic memory A pointer is a number that represents an address in computer memory. What is stored at the address is a bunch of binary numbers. How those binary numbers are interpreted depends on the type of the pointer. To get the value at the pointer address, we *dereference* the pointer using `*ptr`. Pointers are often used to indicate the start of a block of values - the name of a plain C-style array is essentially a pointer to the start of the array. For example, the argument `char** argv` means that `argv` has type pointer to pointer to `char`. The pointer to `char` can be thought of as an array of `char`, hence the argument is also sometimes written as `char* argv[]` to indicate pointer to `char` array. So conceptually, it refers to an array of `char` arrays - or a collection of strings. We generally avoid using raw pointers in C++, but this is standard in C and you should at least understand what is going on. 
In C++, we typically use smart pointers, STL containers or convenient array constructs provided by libraries such as Eigen and Armadillo. ### Pointers and addresses ``` %%file p01.cpp #include <iostream> using std::cout; int main() { int x = 23; int *xp; xp = &x; cout << "x " << x << "\n"; cout << "Address of x " << &x << "\n"; cout << "Pointer to x " << xp << "\n"; cout << "Value at pointer to x " << *xp << "\n"; } %%bash g++ -o p01.exe p01.cpp -std=c++14 ./p01.exe ``` ### Arrays ``` %%file p02.cpp #include <iostream> using std::cout; using std::begin; using std::end; int main() { int xs[] = {1,2,3,4,5}; int ys[5]; // must hold the 5 elements written below for (int i=0; i<5; i++) { ys[i] = i*i; } for (auto x=begin(xs); x!=end(xs); x++) { cout << *x << " "; } cout << "\n"; for (auto x=begin(ys); x!=end(ys); x++) { cout << *x << " "; } cout << "\n"; } %%bash g++ -o p02.exe p02.cpp -std=c++14 ./p02.exe ``` ### Dynamic memory - Use `new` and `delete` for dynamic memory allocation in C++. - Do not use the C style `malloc`, `calloc` and `free` - Absolutely never mix the C++ and C style dynamic memory allocation ``` %%file p03.cpp #include <iostream> using std::cout; using std::begin; using std::end; int main() { // declare memory int *z = new int; // single integer *z = 23; // Allocate on heap int *zs = new int[3]; // array of 3 integers for (int i=0; i<3; i++) { zs[i] = 10*i; } cout << *z << "\n"; for (int i=0; i < 3; i++) { cout << zs[i] << " "; } cout << "\n"; // need for manual management of dynamically assigned memory delete z; delete[] zs; } %%bash g++ -o p03.exe p03.cpp -std=c++14 ./p03.exe ``` ### Pointer arithmetic When you increment or decrement a pointer, it moves to the preceding or next location in memory as appropriate for the pointer type. You can also add or subtract a number, since that is equivalent to multiple increments/decrements. This is known as pointer arithmetic. 
``` %%file p04.cpp #include <iostream> using std::cout; using std::begin; using std::end; int main() { int xs[] = {100,200,300,400,500,600,700,800,900,1000}; cout << xs << ": " << *xs << "\n"; cout << &xs << ": " << *xs << "\n"; cout << &xs[3] << ": " << xs[3] << "\n"; cout << xs+3 << ": " << *(xs+3) << "\n"; } %%bash g++ -std=c++11 -o p04.exe p04.cpp ./p04.exe ``` ### C style dynamic memory for jagged array ("matrix") ``` %%file p05.cpp #include <iostream> using std::cout; using std::begin; using std::end; int main() { int m = 3; int n = 4; int **xss = new int*[m]; // assign memory for m pointers to int for (int i=0; i<m; i++) { xss[i] = new int[n]; // assign memory for array of n ints for (int j=0; j<n; j++) { xss[i][j] = i*10 + j; } } for (int i=0; i<m; i++) { for (int j=0; j<n; j++) { cout << xss[i][j] << "\t"; } cout << "\n"; } // Free memory for (int i=0; i<m; i++) { delete[] xss[i]; } delete[] xss; } %%bash g++ -std=c++11 -o p05.exe p05.cpp ./p05.exe ``` ## Functions ``` %%file func01.cpp #include <iostream> double add(double x, double y) { return x + y; } double mult(double x, double y) { return x * y; } int main() { double a = 3; double b = 4; std::cout << add(a, b) << std::endl; std::cout << mult(a, b) << std::endl; } %%bash g++ -o func01.exe func01.cpp -std=c++14 ./func01.exe ``` ### Function parameters In the example below, the space allocated *inside* a function is deleted *outside* the function. Such code in practice will almost certainly lead to memory leakage. This is why C++ functions often put the *output* as an argument to the function, so that all memory allocation can be controlled outside the function. 
``` void add(double *x, double *y, double *res, n) ``` ``` %%file func02.cpp #include <iostream> double* add(double *x, double *y, int n) { double *res = new double[n]; for (int i=0; i<n; i++) { res[i] = x[i] + y[i]; } return res; } int main() { double a[] = {1,2,3}; double b[] = {4,5,6}; int n = 3; double *c = add(a, b, n); for (int i=0; i<n; i++) { std::cout << c[i] << " "; } std::cout << "\n"; delete[] c; // Note difficulty of book-keeping when using raw pointers! } %%bash g++ -o func02.exe func02.cpp -std=c++14 ./func02.exe %%file func03.cpp #include <iostream> using std::cout; // Using value void foo1(int x) { x = x + 1; } // Using pointer void foo2(int *x) { *x = *x + 1; } // Using ref void foo3(int &x) { x = x + 1; } int main() { int x = 0; cout << x << "\n"; foo1(x); cout << x << "\n"; foo2(&x); cout << x << "\n"; foo3(x); cout << x << "\n"; } %%bash g++ -o func03.exe func03.cpp -std=c++14 ./func03.exe ``` ## Generic programming with templates In C, you need to write a *different* function for each input type - hence resulting in duplicated code like ```C int iadd(int a, int b) float fadd(float a, float b) ``` In C++, you can make functions *generic* by using *templates*. Note: When you have a template function, the entire funciton must be written in the header file, and not the source file. Hence, heavily templated libaries are often "header-only". 
``` %%file template.cpp #include <iostream> template<typename T> T add(T a, T b) { return a + b; } int main() { int m =2, n =3; double u = 2.5, v = 4.5; std::cout << add(m, n) << std::endl; std::cout << add(u, v) << std::endl; } %%bash g++ -o template.exe template.cpp %%bash ./template.exe ``` ## Anonymous functions ``` %%file lambda.cpp #include <iostream> using std::cout; using std::endl; int main() { int a = 3, b = 4; int c = 0; // Lambda function with no capture auto add1 = [] (int a, int b) { return a + b; }; // Lambda function with value capture auto add2 = [c] (int a, int b) { return c * (a + b); }; // Lambda funciton with reference capture auto add3 = [&c] (int a, int b) { return c * (a + b); }; // Change value of c after function definition c += 5; cout << "Lambda function\n"; cout << add1(a, b) << endl; cout << "Lambda function with value capture\n"; cout << add2(a, b) << endl; cout << "Lambda function with reference capture\n"; cout << add3(a, b) << endl; } %%bash c++ -o lambda.exe lambda.cpp --std=c++14 %%bash ./lambda.exe ``` ## Function pointers ``` %%file func_pointer.cpp #include <iostream> #include <vector> #include <functional> using std::cout; using std::endl; using std::function; using std::vector; int main() { cout << "\nUsing generalized function pointers\n"; using func = function<double(double, double)>; auto f1 = [](double x, double y) { return x + y; }; auto f2 = [](double x, double y) { return x * y; }; auto f3 = [](double x, double y) { return x + y*y; }; double x = 3, y = 4; vector<func> funcs = {f1, f2, f3,}; for (auto& f : funcs) { cout << f(x, y) << "\n"; } } %%bash g++ -o func_pointer.exe func_pointer.cpp -std=c++14 %%bash ./func_pointer.exe ``` ## Standard template library (STL) The STL provides templated containers and gneric algorithms acting on these containers with a consistent API. 
``` %%file stl.cpp #include <iostream> #include <vector> #include <map> #include <unordered_map> using std::vector; using std::map; using std::unordered_map; using std::string; using std::cout; using std::endl; struct Point{ int x; int y; Point(int x_, int y_) : x(x_), y(y_) {}; }; int main() { vector<int> v1 = {1,2,3}; v1.push_back(4); v1.push_back(5); cout << "Vecotr<int>" << endl; for (auto n: v1) { cout << n << endl; } cout << endl; vector<Point> v2; v2.push_back(Point(1, 2)); v2.emplace_back(3,4); cout << "Vector<Point>" << endl; for (auto p: v2) { cout << "(" << p.x << ", " << p.y << ")" << endl; } cout << endl; map<string, int> v3 = {{"foo", 1}, {"bar", 2}}; v3["hello"] = 3; v3.insert({"goodbye", 4}); // Note the a C++ map is ordered // Note using (traditional) iterators instead of ranged for loop cout << "Map<string, int>" << endl; for (auto iter=v3.begin(); iter != v3.end(); iter++) { cout << iter->first << ": " << iter->second << endl; } cout << endl; unordered_map<string, int> v4 = {{"foo", 1}, {"bar", 2}}; v4["hello"] = 3; v4.insert({"goodbye", 4}); // Note the unordered_map is similar to Python' dict.' 
// Note using ranged for loop with const ref to avoid copying or mutation cout << "Unordered_map<string, int>" << endl; for (const auto& i: v4) { cout << i.first << ": " << i.second << endl; } cout << endl; } %%bash g++ -o stl.exe stl.cpp -std=c++14 %%bash ./stl.exe ``` ## STL algorithms ``` %%file stl_algorithm.cpp #include <vector> #include <iostream> #include <numeric> using std::cout; using std::endl; using std::vector; using std::begin; using std::end; int main() { vector<int> v(10); // iota is somewhat like range std::iota(v.begin(), v.end(), 1); for (auto i: v) { cout << i << " "; } cout << endl; // C++ version of reduce cout << std::accumulate(begin(v), end(v), 0) << endl; // Accumulate with lambda cout << std::accumulate(begin(v), end(v), 1, [](int a, int b){return a * b; }) << endl; } %%bash g++ -o stl_algorithm.exe stl_algorithm.cpp -std=c++14 %%bash ./stl_algorithm.exe ``` ## Random numbers ``` %%file random.cpp #include <iostream> #include <random> #include <functional> using std::cout; using std::random_device; using std::mt19937; using std::default_random_engine; using std::uniform_int_distribution; using std::poisson_distribution; using std::student_t_distribution; using std::bind; // start random number engine with fixed seed // Note default_random_engine may give differnet values on different platforms // default_random_engine re(1234); // or // Using a named engine will work the same on differnt platforms // mt19937 re(1234); // start random number generator with random seed random_device rd; mt19937 re(rd()); uniform_int_distribution<int> uniform(1,6); // lower and upper bounds poisson_distribution<int> poisson(30); // rate student_t_distribution<double> t(10); // degrees of freedom int main() { cout << "\nGenerating random numbers\n"; auto runif = bind (uniform, re); auto rpois = bind(poisson, re); auto rt = bind(t, re); for (int i=0; i<10; i++) { cout << runif() << ", " << rpois() << ", " << rt() << "\n"; } } %%bash g++ -o random.exe 
random.cpp -std=c++14 %%bash ./random.exe ``` ## Numerics ### Using Armadillo ``` %%file test_arma.cpp #include <iostream> #include <armadillo> using std::cout; using std::endl; int main() { using namespace arma; vec u = linspace<vec>(0,1,5); vec v = ones<vec>(5); mat A = randu<mat>(4,5); // uniform random deviates mat B = randn<mat>(4,5); // normal random deviates cout << "\nVecotrs in Armadillo\n"; cout << u << endl; cout << v << endl; cout << u.t() * v << endl; cout << "\nRandom matrices in Armadillo\n"; cout << A << endl; cout << B << endl; cout << A * B.t() << endl; cout << A * v << endl; cout << "\nQR in Armadillo\n"; mat Q, R; qr(Q, R, A.t() * A); cout << Q << endl; cout << R << endl; } %%bash g++ -o test_arma.exe test_arma.cpp -std=c++14 -larmadillo %%bash ./test_arma.exe ``` ### Using Eigen ``` %%file test_eigen.cpp #include <iostream> #include <fstream> #include <random> #include <Eigen/Dense> #include <functional> using std::cout; using std::endl; using std::ofstream; using std::default_random_engine; using std::normal_distribution; using std::bind; // start random number engine with fixed seed default_random_engine re{12345}; normal_distribution<double> norm(5,2); // mean and standard deviation auto rnorm = bind(norm, re); int main() { using namespace Eigen; VectorXd x1(6); x1 << 1, 2, 3, 4, 5, 6; VectorXd x2 = VectorXd::LinSpaced(6, 1, 2); VectorXd x3 = VectorXd::Zero(6); VectorXd x4 = VectorXd::Ones(6); VectorXd x5 = VectorXd::Constant(6, 3); VectorXd x6 = VectorXd::Random(6); double data[] = {6,5,4,3,2,1}; Map<VectorXd> x7(data, 6); VectorXd x8 = x6 + x7; MatrixXd A1(3,3); A1 << 1 ,2, 3, 4, 5, 6, 7, 8, 9; MatrixXd A2 = MatrixXd::Constant(3, 4, 1); MatrixXd A3 = MatrixXd::Identity(3, 3); Map<MatrixXd> A4(data, 3, 2); MatrixXd A5 = A4.transpose() * A4; MatrixXd A6 = x7 * x7.transpose(); MatrixXd A7 = A4.array() * A4.array(); MatrixXd A8 = A7.array().log(); MatrixXd A9 = A8.unaryExpr([](double x) { return exp(x); }); MatrixXd A10 = 
MatrixXd::Zero(3,4).unaryExpr([](double x) { return rnorm(); }); VectorXd x9 = A1.colwise().norm(); VectorXd x10 = A1.rowwise().sum(); MatrixXd A11(x1.size(), 3); A11 << x1, x2, x3; MatrixXd A12(3, x1.size()); A12 << x1.transpose(), x2.transpose(), x3.transpose(); JacobiSVD<MatrixXd> svd(A10, ComputeThinU | ComputeThinV); cout << "x1: comman initializer\n" << x1.transpose() << "\n\n"; cout << "x2: linspace\n" << x2.transpose() << "\n\n"; cout << "x3: zeors\n" << x3.transpose() << "\n\n"; cout << "x4: ones\n" << x4.transpose() << "\n\n"; cout << "x5: constant\n" << x5.transpose() << "\n\n"; cout << "x6: rand\n" << x6.transpose() << "\n\n"; cout << "x7: mapping\n" << x7.transpose() << "\n\n"; cout << "x8: element-wise addition\n" << x8.transpose() << "\n\n"; cout << "max of A1\n"; cout << A1.maxCoeff() << "\n\n"; cout << "x9: norm of columns of A1\n" << x9.transpose() << "\n\n"; cout << "x10: sum of rows of A1\n" << x10.transpose() << "\n\n"; cout << "head\n"; cout << x1.head(3).transpose() << "\n\n"; cout << "tail\n"; cout << x1.tail(3).transpose() << "\n\n"; cout << "slice\n"; cout << x1.segment(2, 3).transpose() << "\n\n"; cout << "Reverse\n"; cout << x1.reverse().transpose() << "\n\n"; cout << "Indexing vector\n"; cout << x1(0); cout << "\n\n"; cout << "A1: comma initilizer\n"; cout << A1 << "\n\n"; cout << "A2: constant\n"; cout << A2 << "\n\n"; cout << "A3: eye\n"; cout << A3 << "\n\n"; cout << "A4: mapping\n"; cout << A4 << "\n\n"; cout << "A5: matrix multiplication\n"; cout << A5 << "\n\n"; cout << "A6: outer product\n"; cout << A6 << "\n\n"; cout << "A7: element-wise multiplication\n"; cout << A7 << "\n\n"; cout << "A8: ufunc log\n"; cout << A8 << "\n\n"; cout << "A9: custom ufucn\n"; cout << A9 << "\n\n"; cout << "A10: custom ufunc for normal deviates\n"; cout << A10 << "\n\n"; cout << "A11: np.c_\n"; cout << A11 << "\n\n"; cout << "A12: np.r_\n"; cout << A12 << "\n\n"; cout << "2x2 block startign at (0,1)\n"; cout << A1.block(0,1,2,2) << "\n\n"; cout << 
"top 2 rows of A1\n"; cout << A1.topRows(2) << "\n\n"; cout << "bottom 2 rows of A1"; cout << A1.bottomRows(2) << "\n\n"; cout << "leftmost 2 cols of A1"; cout << A1.leftCols(2) << "\n\n"; cout << "rightmost 2 cols of A1"; cout << A1.rightCols(2) << "\n\n"; cout << "Diagonal elements of A1\n"; cout << A1.diagonal() << "\n\n"; A1.diagonal() = A1.diagonal().array().square(); cout << "Transforming diagonal eelemtns of A1\n"; cout << A1 << "\n\n"; cout << "Indexing matrix\n"; cout << A1(0,0) << "\n\n"; cout << "singular values\n"; cout << svd.singularValues() << "\n\n"; cout << "U\n"; cout << svd.matrixU() << "\n\n"; cout << "V\n"; cout << svd.matrixV() << "\n\n"; } import os if not os.path.exists('./eigen'): ! git clone https://gitlab.com/libeigen/eigen.git %%bash g++ -o test_eigen.exe test_eigen.cpp -std=c++11 -I./eigen %%bash ./test_eigen.exe ``` ### Check SVD ``` import numpy as np A10 = np.array([ [5.17237, 3.73572, 6.29422, 6.55268], [5.33713, 3.88883, 1.93637, 4.39812], [8.22086, 6.94502, 6.36617, 6.5961] ]) U, s, Vt = np.linalg.svd(A10, full_matrices=False) s U Vt.T ``` ## Probability distributions and statistics A nicer library for working with probability distributions. Show integration with Armadillo. Integration with Eigen is also possible. ``` import os if not os.path.exists('./stats'): ! git clone https://github.com/kthohr/stats.git if not os.path.exists('./gcem'): ! 
git clone https://github.com/kthohr/gcem.git %%file stats.cpp #define STATS_ENABLE_STDVEC_WRAPPERS #define STATS_ENABLE_ARMA_WRAPPERS // #define STATS_ENABLE_EIGEN_WRAPPERS #include <iostream> #include <vector> #include "stats.hpp" using std::cout; using std::endl; using std::vector; // set seed for randome engine to 1776 std::mt19937_64 engine(1776); int main() { // evaluate the normal PDF at x = 1, mu = 0, sigma = 1 double dval_1 = stats::dnorm(1.0,0.0,1.0); // evaluate the normal PDF at x = 1, mu = 0, sigma = 1, and return the log value double dval_2 = stats::dnorm(1.0,0.0,1.0,true); // evaluate the normal CDF at x = 1, mu = 0, sigma = 1 double pval = stats::pnorm(1.0,0.0,1.0); // evaluate the Laplacian quantile at p = 0.1, mu = 0, sigma = 1 double qval = stats::qlaplace(0.1,0.0,1.0); // draw from a normal distribution with mean 100 and sd 15 double rval = stats::rnorm(100, 15); // Use with std::vectors vector<int> pois_rvs = stats::rpois<vector<int> >(1, 10, 3); cout << "Poisson draws with rate=3 inton std::vector" << endl; for (auto &x : pois_rvs) { cout << x << ", "; } cout << endl; // Example of Armadillo usage: only one matrix library can be used at a time arma::mat beta_rvs = stats::rbeta<arma::mat>(5,5,3.0,2.0); // matrix input arma::mat beta_cdf_vals = stats::pbeta(beta_rvs,3.0,2.0); /* Example of Eigen usage: only one matrix library can be used at a time Eigen::MatrixXd gamma_rvs = stats::rgamma<Eigen::MatrixXd>(10, 5,3.0,2.0); */ cout << "evaluate the normal PDF at x = 1, mu = 0, sigma = 1" << endl; cout << dval_1 << endl; cout << "evaluate the normal PDF at x = 1, mu = 0, sigma = 1, and return the log value" << endl; cout << dval_2 << endl; cout << "evaluate the normal CDF at x = 1, mu = 0, sigma = 1" << endl; cout << pval << endl; cout << "evaluate the Laplacian quantile at p = 0.1, mu = 0, sigma = 1" << endl; cout << qval << endl; cout << "draw from a normal distribution with mean 100 and sd 15" << endl; cout << rval << endl; cout << "draws from a 
beta distribuiotn to populate Armadillo matrix" << endl; cout << beta_rvs << endl; cout << "evaluaate CDF for beta draws from Armadillo inputs" << endl; cout << beta_cdf_vals << endl; /* If using Eigen cout << "draws from a Gamma distribuiotn to populate Eigen matrix" << endl; cout << gamma_rvs << endl; */ } %%bash g++ -std=c++11 -I./stats/include -I./gcem/include -I./eigen stats.cpp -o stats.exe %%bash ./stats.exe ``` **Solution to exercise** ``` %%file greet.cpp #include <iostream> #include <string> using std::string; using std::cout; int main(int argc, char* argv[]) { string name = argv[1]; int n = std::stoi(argv[2]); for (int i=0; i<n; i++) { cout << "Hello " << name << "!" << "\n"; } } %%bash g++ -std=c++11 greet.cpp -o greet %%bash ./greet Santa 3 ```
github_jupyter
``` import numpy as np import pandas as pd from matplotlib import pyplot as plt from tqdm import tqdm as tqdm %matplotlib inline import torch import torchvision import torchvision.transforms as transforms import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import random transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform) type(trainset.targets) type(trainset.data) index1 = [np.where(np.array(trainset.targets)==0)[0] , np.where(np.array(trainset.targets)==1)[0], np.where(np.array(trainset.targets)==2)[0] ] index1 = np.concatenate(index1,axis=0) len(index1) #15000 #index1 disp = np.array(trainset.targets) true = 100 total = 35000 sin = total-true sin epochs= 100 indices = np.random.choice(index1,true) _,count = np.unique(disp[indices],return_counts=True) print(count, indices.shape) index = np.where(np.logical_and(np.logical_and(np.array(trainset.targets)!=0, np.array(trainset.targets)!=1), np.array(trainset.targets)!=2))[0] #35000 len(index) req_index = np.random.choice(index.shape[0], sin, replace=False) index = index[req_index] index.shape values = np.random.choice([0,1,2],size= len(index)) #labeling others as 0,1,2 print(sum(values ==0),sum(values==1), sum(values==2)) # trainset.data = torch.tensor( trainset.data ) # trainset.targets = torch.tensor(trainset.targets) trainset.data = np.concatenate((trainset.data[indices],trainset.data[index])) trainset.targets = np.concatenate((np.array(trainset.targets)[indices],values)) trainset.targets.shape, trainset.data.shape # mnist_trainset.targets[index] = torch.Tensor(values).type(torch.LongTensor) j =20078 # Without Shuffle upto True Training numbers correct , after that corrupted 
print(plt.imshow(trainset.data[j]),trainset.targets[j]) trainloader = torch.utils.data.DataLoader(trainset, batch_size=256,shuffle=True, num_workers=2) testloader = torch.utils.data.DataLoader(testset, batch_size=256,shuffle=False, num_workers=2) classes = ('zero', 'one','two') dataiter = iter(trainloader) images, labels = dataiter.next() images[:4].shape # def imshow(img): # img = img / 2 + 0.5 # unnormalize # npimg = img.numpy() # plt.imshow(np.transpose(npimg, (1, 2, 0))) # plt.show() def imshow(img): img = img / 2 + 0.5 # unnormalize npimg = img#.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() imshow(torchvision.utils.make_grid(images[:10])) print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(10))) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') class Conv_module(nn.Module): def __init__(self,inp_ch,f,s,k,pad): super(Conv_module,self).__init__() self.inp_ch = inp_ch self.f = f self.s = s self.k = k self.pad = pad self.conv = nn.Conv2d(self.inp_ch,self.f,k,stride=s,padding=self.pad) self.bn = nn.BatchNorm2d(self.f) self.act = nn.ReLU() def forward(self,x): x = self.conv(x) x = self.bn(x) x = self.act(x) return x class inception_module(nn.Module): def __init__(self,inp_ch,f0,f1): super(inception_module, self).__init__() self.inp_ch = inp_ch self.f0 = f0 self.f1 = f1 self.conv1 = Conv_module(self.inp_ch,self.f0,1,1,pad=0) self.conv3 = Conv_module(self.inp_ch,self.f1,1,3,pad=1) #self.conv1 = nn.Conv2d(3,self.f0,1) #self.conv3 = nn.Conv2d(3,self.f1,3,padding=1) def forward(self,x): x1 = self.conv1.forward(x) x3 = self.conv3.forward(x) #print(x1.shape,x3.shape) x = torch.cat((x1,x3),dim=1) return x class downsample_module(nn.Module): def __init__(self,inp_ch,f): super(downsample_module,self).__init__() self.inp_ch = inp_ch self.f = f self.conv = Conv_module(self.inp_ch,self.f,2,3,pad=0) self.pool = nn.MaxPool2d(3,stride=2,padding=0) def forward(self,x): x1 = self.conv(x) #print(x1.shape) 
x2 = self.pool(x) #print(x2.shape) x = torch.cat((x1,x2),dim=1) return x,x1 class inception_net(nn.Module): def __init__(self): super(inception_net,self).__init__() self.conv1 = Conv_module(3,96,1,3,0) self.incept1 = inception_module(96,32,32) self.incept2 = inception_module(64,32,48) self.downsample1 = downsample_module(80,80) self.incept3 = inception_module(160,112,48) self.incept4 = inception_module(160,96,64) self.incept5 = inception_module(160,80,80) self.incept6 = inception_module(160,48,96) self.downsample2 = downsample_module(144,96) self.incept7 = inception_module(240,176,60) self.incept8 = inception_module(236,176,60) self.pool = nn.AvgPool2d(5) self.linear = nn.Linear(236,10) def forward(self,x): x = self.conv1.forward(x) #act1 = x x = self.incept1.forward(x) #act2 = x x = self.incept2.forward(x) #act3 = x x,act4 = self.downsample1.forward(x) x = self.incept3.forward(x) #act5 = x x = self.incept4.forward(x) #act6 = x x = self.incept5.forward(x) #act7 = x x = self.incept6.forward(x) #act8 = x x,act9 = self.downsample2.forward(x) x = self.incept7.forward(x) #act10 = x x = self.incept8.forward(x) #act11 = x #print(x.shape) x = self.pool(x) #print(x.shape) x = x.view(-1,1*1*236) x = self.linear(x) return x inc = inception_net() inc = inc.to("cuda") criterion_inception = nn.CrossEntropyLoss() optimizer_inception = optim.SGD(inc.parameters(), lr=0.01, momentum=0.9) acti = [] loss_curi = [] for epoch in range(epochs): # loop over the dataset multiple times ep_lossi = [] running_loss = 0.0 for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") # zero the parameter gradients optimizer_inception.zero_grad() # forward + backward + optimize outputs = inc(inputs) loss = criterion_inception(outputs, labels) loss.backward() optimizer_inception.step() # print statistics running_loss += loss.item() if i % 50 == 49: # print every 50 mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, 
running_loss / 50)) ep_lossi.append(running_loss/50) # loss per minibatch running_loss = 0.0 loss_curi.append(np.mean(ep_lossi)) #loss per epoch if(np.mean(ep_lossi)<=0.03): break # if (epoch%5 == 0): # _,actis= inc(inputs) # acti.append(actis) print('Finished Training') correct = 0 total = 0 with torch.no_grad(): for data in trainloader: images, labels = data images, labels = images.to("cuda"), labels.to("cuda") outputs = inc(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 35000 train images: %d %%' % ( 100 * correct / total)) total,correct correct = 0 total = 0 out = [] pred = [] with torch.no_grad(): for data in testloader: images, labels = data images, labels = images.to("cuda"),labels.to("cuda") out.append(labels.cpu().numpy()) outputs= inc(images) _, predicted = torch.max(outputs.data, 1) pred.append(predicted.cpu().numpy()) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total)) out = np.concatenate(out,axis=0) pred = np.concatenate(pred,axis=0) index = np.logical_or(np.logical_or(out ==1,out==0),out == 2) print(index.shape) acc = sum(out[index] == pred[index])/sum(index) print('Accuracy of the network on the 0-1-2 test images: %d %%' % ( 100*acc)) np.unique(out[index],return_counts = True) #== pred[index]) np.unique(pred[index],return_counts = True) #== pred[index]) sum(out[index] == pred[index]) cnt = np.zeros((3,3)) true = out[index] predict = pred[index] for i in range(len(true)): cnt[true[i]][predict[i]] += 1 cnt # torch.save(inc.state_dict(),"/content/drive/My Drive/Research/CIFAR Random/model_True_"+str(true_data_count)+"_epoch_"+str(epochs)+".pkl") ```
github_jupyter
**※ GPU環境で利用してください** ``` !pip install timm import argparse import operator import os import time from collections import OrderedDict import timm import torch import torch.nn as nn import torchvision import torchvision.transforms as transforms from PIL import Image from timm.data import create_dataset, create_loader, resolve_data_config from timm.optim import create_optimizer from timm.utils import AverageMeter, accuracy from timm.utils.summary import update_summary from torch.autograd import Variable from IPython.display import display parser = argparse.ArgumentParser(description="Training Config", add_help=False) parser.add_argument( "--opt", default="sgd", type=str, metavar="OPTIMIZER", help='Optimizer (default: "sgd"', ) parser.add_argument( "--weight-decay", type=float, default=0.0001, help="weight decay (default: 0.0001)" ) parser.add_argument( "--lr", type=float, default=0.01, metavar="LR", help="learning rate (default: 0.01)" ) parser.add_argument( "--momentum", type=float, default=0.9, metavar="M", help="Optimizer momentum (default: 0.9)", ) parser.add_argument( "--input-size", default=None, nargs=3, type=int, metavar="N N N", help="Input all image dimensions (d h w, e.g. 
--input-size 3 224 224), uses model default if empty", ) args = parser.parse_args(["--input-size", "3", "224", "224"]) EPOCHS = 30 BATCH_SIZE = 32 NUM_WORKERS = 2 # 適宜GoogleColab上のデータセットディレクトリ(train, validation, testが含まれれるディレクトリ)のパスを指定してください dataset_path = '/content/drive/MyDrive/VisionTransformer/' # 対応モデルを確認 model_names = timm.list_models(pretrained=True) model_names NUM_FINETUNE_CLASSES = 2 # {'clear': 0, 'cloudy': 1} の2種類 model = timm.create_model('vit_base_patch16_224', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) model.cuda() data_config = resolve_data_config(vars(args), model=model) dataset_train = create_dataset('train', root=os.path.join(dataset_path, 'train'), is_training=True, batch_size=BATCH_SIZE) dataset_eval = create_dataset('validation', root=os.path.join(dataset_path, 'validation'), is_training=False, batch_size=BATCH_SIZE) dataset_test = create_dataset('test', root=os.path.join(dataset_path, 'test'), is_training=False, batch_size=BATCH_SIZE) loader_train = create_loader(dataset_train, input_size=data_config['input_size'], batch_size=BATCH_SIZE, is_training=True, num_workers=NUM_WORKERS) loader_eval = create_loader(dataset_eval, input_size=data_config['input_size'], batch_size=BATCH_SIZE, is_training=False, num_workers=NUM_WORKERS) loader_test = create_loader(dataset_test, input_size=data_config['input_size'], batch_size=BATCH_SIZE, is_training=False, num_workers=NUM_WORKERS) train_loss_fn = nn.CrossEntropyLoss().cuda() validate_loss_fn = nn.CrossEntropyLoss().cuda() optimizer = create_optimizer(args, model) def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, output_dir=None): second_order = hasattr(optimizer, "is_second_order") and optimizer.is_second_order batch_time_m = AverageMeter() data_time_m = AverageMeter() losses_m = AverageMeter() model.train() end = time.time() num_updates = epoch * len(loader) for _, (input, target) in enumerate(loader): data_time_m.update(time.time() - end) output = model(input) loss = 
loss_fn(output, target) optimizer.zero_grad() loss.backward(create_graph=second_order) optimizer.step() torch.cuda.synchronize() num_updates += 1 batch_time_m.update(time.time() - end) end = time.time() if hasattr(optimizer, "sync_lookahead"): optimizer.sync_lookahead() return OrderedDict([("loss", losses_m.avg)]) def validate(model, loader, loss_fn, args): batch_time_m = AverageMeter() losses_m = AverageMeter() accuracy_m = AverageMeter() model.eval() end = time.time() with torch.no_grad(): for _, (input, target) in enumerate(loader): input = input.cuda() target = target.cuda() output = model(input) if isinstance(output, (tuple, list)): output = output[0] loss = loss_fn(output, target) acc1, _ = accuracy(output, target, topk=(1, 2)) reduced_loss = loss.data torch.cuda.synchronize() losses_m.update(reduced_loss.item(), input.size(0)) accuracy_m.update(acc1.item(), output.size(0)) batch_time_m.update(time.time() - end) end = time.time() metrics = OrderedDict([("loss", losses_m.avg), ("accuracy", accuracy_m.avg)]) return metrics num_epochs = EPOCHS eval_metric = "accuracy" best_metric = None best_epoch = None compare = operator.gt # 学習結果CSVファイルやファインチューニング後のモデルデータの出力先 output_dir = "/content/drive/MyDrive/VisionTransformer/output" for epoch in range(0, num_epochs): train_metrics = train_one_epoch( epoch, model, loader_train, optimizer, train_loss_fn, args, output_dir=output_dir ) eval_metrics = validate(model, loader_eval, validate_loss_fn, args) if output_dir is not None: update_summary( epoch, train_metrics, eval_metrics, os.path.join(output_dir, "summary.csv"), write_header=best_metric is None, ) metric = eval_metrics[eval_metric] if best_metric is None or compare(metric, best_metric): best_metric = metric best_epoch = epoch torch.save(model.state_dict(), os.path.join(output_dir, "best_model.pth")) print(epoch) print(eval_metrics) print("Best metric: {0} (epoch {1})".format(best_metric, best_epoch)) model.load_state_dict( torch.load( os.path.join(output_dir, 
"best_model.pth"), map_location=torch.device("cuda") ) ) model.eval() image_size = data_config["input_size"][-1] loader = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor()]) def image_loader(image_name): image = Image.open(image_name).convert("RGB") image = loader(image) image = Variable(image, requires_grad=True) image = image.unsqueeze(0) return image.cuda() m = nn.Softmax(dim=1) clear_image_path = os.path.join(dataset_path, 'test/clear/12_3542_1635.png') predicted_clear_image = image_loader(clear_image_path) display(Image.open(clear_image_path)) m(model(predicted_clear_image)) cloudy_image_path = os.path.join(dataset_path, 'test/cloudy/12_3503_1735.png') predicted_cloudy_image = image_loader(cloudy_image_path) display(Image.open(cloudy_image_path)) m(model(predicted_cloudy_image)) def test(model, loader, args): batch_time_m = AverageMeter() accuracy_m = AverageMeter() model.eval() end = time.time() with torch.no_grad(): for _, (input, target) in enumerate(loader): input = input.cuda() target = target.cuda() output = model(input) if isinstance(output, (tuple, list)): output = output[0] acc1, _ = accuracy(output, target, topk=(1, 2)) torch.cuda.synchronize() accuracy_m.update(acc1.item(), output.size(0)) batch_time_m.update(time.time() - end) end = time.time() return {'accuracy': accuracy_m.avg} test(model, loader_test, args) ```
github_jupyter
# Tutorial 6: Population Level Modeling (with PopNet) In this tutorial we will focus on modeling of populations and population firing rates. This is done with the PopNet simulator application of bmtk which uses [DiPDE](https://github.com/AllenInstitute/dipde) engine as a backend. We will first build our networks using the bmtk NetworkBuilder and save them into the SONATA data format. Then we will show how to simulate the firing rates over a given time-source. Requirements: * BMTK * DiPDE ## 1. Building the network #### Converting existing networks Like BioNet for biophysically detailed modeling, and PointNet with point-based networks, PopNet stores networks in the SONATA data format. PopNet supports simulating networks of individual cells at the population level. First thing you have to do is modify the node-types and edge-types of an existing network to use Population level models (rather than models of individual cells. <div class="alert alert-warning"> **WARNING** - Converting a network of individual nodes into population of nodes is good for a quick and naive simulation, but for faster and more reliable results it's best to build a network from scratch (next section). </div> Here is the node-types csv file of a network set to work with BioNet ``` import pandas as pd pd.read_csv('sources/chapter06/converted_network/V1_node_types_bionet.csv', sep=' ') ``` vs the equivelent form for PopNet ``` pd.read_csv('sources/chapter06/converted_network/V1_node_types_popnet.csv', sep=' ') ``` Some things to note: * **model_type** is now a population for all nodes, rather than individual biophysical/point types * We have set **model_template** to dipde:Internal which will tell the simulator to use special DiPDE model types * We are using new **dynamic_params** files with parameters that have been adjusted to appropiate range for DiPDE models. 
* **morophology_file** and **model_processing**, which were used to set and processes individual cell morphologies, is no longer applicable. We must make similar adjustments to our edge_types.csv files. And finally when we run the simulation we must tell PopNet to cluster nodes together using the **group_by** property ```python network = popnet.PopNetwork.from_config(configure, group_by='node_type_id') ``` #### Building a network We will create a network of two populations, one population of excitatory cells and another of inhibitory cells. Then we will save the network into SONATA formated data files. The first step is to use the NetworkBuilder to instantiate a new network with two populations: ``` from bmtk.builder import NetworkBuilder net = NetworkBuilder('V1') net.add_nodes(pop_name='excitatory', # name of specific population optional ei='e', # Optional location='VisL4', # Optional model_type='population', # Required, indicates what types of cells are being model model_template='dipde:Internal', # Required, instructs what DiPDE objects will be created dynamics_params='exc_model.json' # Required, contains parameters used by DiPDE during initialization of object ) net.add_nodes(pop_name='inhibitory', ei='i', model_type='population', model_template='dipde:Internal', dynamics_params='inh_model.json') ``` Next we will create connections between the two populations: ``` net.add_edges(source={'ei': 'e'}, target={'ei': 'i'}, syn_weight=0.005, nsyns=20, delay=0.002, dynamics_params='ExcToInh.json') net.add_edges(source={'ei': 'i'}, target={'ei': 'e'}, syn_weight=-0.002, nsyns=10, delay=0.002, dynamics_params='InhToExc.json') ``` and finally we must build and save the network ``` net.build() net.save_nodes(output_dir='network') net.save_edges(output_dir='network') ``` ##### External Nodes The *dipde:Internal* nodes we created don't carry intrinsic firing rates, and instead we will use External Populations to drive the network activity. 
To do this we will create a separate network of 'virtual' populations, or alternativly use model_type=dipde:External, that connect to our excitatory population. Note: we could add 'virtual' populations directly to our V1 network. However creating them as a separate network provides a great advantage if/when we want to replace our external connections with a different model (Or if we want to remove the reccurrent connections and simulation with only feed-foward activity). ``` input_net = NetworkBuilder('LGN') input_net.add_nodes(pop_name='tON', ei='e', model_type='virtual') input_net.add_edges(target=net.nodes(ei='e'), syn_weight=0.0025, nsyns=10, delay=0.002, dynamics_params='input_ExcToExc.json') input_net.build() input_net.save_nodes(output_dir='network') input_net.save_edges(output_dir='network') ``` ## 2. Setting up the PopNet environment Before running the simulation we need to set up our simulation environment, inlcuding setting up run-scripts, configuration parameters, and placing our parameter files in their appropiate location. The easiest way to do this is through the command-line: ```bash $ python -m bmtk.utils.sim_setup -n network --run-time 1500.0 popnet ``` Which creates initial files to run a 1500 ms simulation using the network files found in our ./network directory. #### Inputs We next need to set the firing rates of the External Population. There are multiple ways to set this value which will be discussed later. The best way is to set the firing rates using a input-rates file for each External Population, we can fetch an existing one using the command: ```bash $ wget https://github.com/AllenInstitute/bmtk/raw/develop/docs/examples/pop_2pops/lgn_rates.csv ``` Then we must open the simulation_config.json file with a text editor and add the lgn_rates.csv file as a part of our inputs: ```json { "inputs": { "LGN_pop_rates": { "input_type": "csv", "module": "pop_rates", "rates": "${BASE_DIR}/lgn_rates.csv", "node_set": "LGN" } } } ``` ## 3. 
Running the simulation The call to sim_setup created a file run_pointnet.py which we can run directly in a command line: ```bash $ python run_popnet.py config.json ``` Or we can run it directly using the following python code: ``` from bmtk.simulator import popnet configure = popnet.config.from_json('simulation_config.json') configure.build_env() network = popnet.PopNetwork.from_config(configure) sim = popnet.PopSimulator.from_config(configure, network) sim.run() ``` ## 4. Analyzing results As specified in the "output" section of simulation_config.json, the results will be written to ouput/spike_rates.csv. The BMTK analyzer includes code for ploting and analyzing the firing rates of our network: ``` from bmtk.analyzer.visualization.spikes import plot_rates_popnet plot_rates_popnet('network/V1_node_types.csv', 'output/firing_rates.csv', model_keys='pop_name') ```
github_jupyter
``` import pandas as pd import numpy as np import plotly import matplotlib.pyplot as plt from random import seed from random import randrange from csv import reader from google.colab import drive drive.mount('/content/drive') ''' We proceed as follows: 1. Compute the Gini Index of the Dataset. 2. Create a split of each node based on the Gini Index calculated. 3. Build a decision tree. 4. Make predictions with the decision tree. ''' def Gini_Index(groups, classes): ''' To compute the Gini Index ''' n_instances = float(sum([len(group) for group in groups])) gini = 0.0 for group in groups: size = float(len(group)) if size == 0: continue score = 0.0 for class_val in classes: p = [row[-1] for row in group].count(class_val) / size score += p * p gini += (1.0 - score) * (size / n_instances) return gini print(Gini_Index([[[1, 1], [1, 0]], [[1, 1], [1, 0]]], [0, 1])) def test_split(index, value, dataset): left, right = list(), list() for row in dataset: if row[index] < value: left.append(row) else: right.append(row) return left, right def get_split(dataset): class_values = list(set(row[-1] for row in dataset)) b_index, b_value, b_score, b_groups = 999, 999, 999, None for index in range(len(dataset[0])-1): for row in dataset: groups = test_split(index, row[index], dataset) gini = Gini_Index(groups, class_values) if gini < b_score: b_index, b_value, b_score, b_groups = index, row[index], gini, groups return {'index':b_index, 'value':b_value, 'groups':b_groups} def to_terminal(group): outcomes = [row[-1] for row in group] return max(set(outcomes), key=outcomes.count) def split(node, max_depth, min_size, depth): left, right = node['groups'] del(node['groups']) # check for a no split if not left or not right: node['left'] = node['right'] = to_terminal(left + right) return # check for max depth if depth >= max_depth: node['left'], node['right'] = to_terminal(left), to_terminal(right) return # process left child if len(left) <= min_size: node['left'] = to_terminal(left) else: 
node['left'] = get_split(left) split(node['left'], max_depth, min_size, depth+1) # process right child if len(right) <= min_size: node['right'] = to_terminal(right) else: node['right'] = get_split(right) split(node['right'], max_depth, min_size, depth+1) def build_tree(train, max_depth, min_size): root = get_split(train) split(root, max_depth, min_size, 1) return root def predict(node, row): if row[node['index']] < node['value']: if isinstance(node['left'], dict): return predict(node['left'], row) else: return node['left'] else: if isinstance(node['right'], dict): return predict(node['right'], row) else: return node['right'] def load_csv(filename): file = open(filename, "rt") lines = reader(file) dataset = list(lines) return dataset def str_column_to_float(dataset, column): for row in dataset: row[column] = float(row[column].strip()) def cross_validation_split(dataset, n_folds): dataset_split = list() dataset_copy = list(dataset) fold_size = int(len(dataset) / n_folds) for i in range(n_folds): fold = list() while len(fold) < fold_size: index = randrange(len(dataset_copy)) fold.append(dataset_copy.pop(index)) dataset_split.append(fold) return dataset_split def accuracy_metric(actual, predicted): correct = 0 for i in range(len(actual)): if actual[i] == predicted[i]: correct += 1 return correct / float(len(actual)) * 100.0 def evaluate_algorithm(dataset, algorithm, n_folds, *args): folds = cross_validation_split(dataset, n_folds) scores = list() for fold in folds: train_set = list(folds) train_set.remove(fold) train_set = sum(train_set, []) test_set = list() for row in fold: row_copy = list(row) test_set.append(row_copy) row_copy[-1] = None predicted = algorithm(train_set, test_set, *args) actual = [row[-1] for row in fold] accuracy = accuracy_metric(actual, predicted) scores.append(accuracy) return scores def decision_tree(train, test, max_depth, min_size): tree = build_tree(train, max_depth, min_size) predictions = list() for row in test: prediction = predict(tree, 
row) predictions.append(prediction) return(predictions) file_path = "/content/drive/My Drive/banknotes.csv" dataset = load_csv(file_path) for i in range(len(dataset[0])): str_column_to_float(dataset, i) n_folds = 5 max_depth = 5 min_size = 10 scores = evaluate_algorithm(dataset, decision_tree, n_folds, max_depth, min_size) print('Scores: %s' % scores) print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores)))) ```
github_jupyter
Evaluation of the frame-based matching algorithm ================================================ This notebook aims at evaluating the performance of the Markov Random Field (MRF) algorithm implemented in `stereovis/framed/algorithms/mrf.py` on the three datasets presented above. For each, the following experiments have been done: * running MRF on each dataset without any SNN-based prior * running MRF with prior initialisation from best-performing SNN configuration * running MRF with prior initialisation and adjustment from motion * comparing difference between the above scenarios and visually assessing their quality for no ground truth is recorded or computed. A slightly altered and abbreviated version of this notebook can also be found under `notebooks/evaluation.ipynb`. ``` %matplotlib inline import numpy as np import sys import skimage.io as skio import os import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import Normalize from skimage import transform, filters, feature, morphology sys.path.append("../") from stereovis.framed.algorithms.mrf import StereoMRF from stereovis.spiking.algorithms.vvf import VelocityVectorField from stereovis.utils.frames_io import load_frames, load_ground_truth, generate_frames_from_spikes, split_frames_by_time from stereovis.utils.spikes_io import load_spikes from stereovis.utils.config import load_config ``` In the next we define some usefull functions to load, compute and plot data. They should be used for each dataset independetly and although they export some experiment-specific parametes to the user, other configuration options are "hard-coded" into configuration files -- at least one for each dataset. They define the data paths, camera resolution, frame rate and similar parameters and can be found under `experiments/config/hybrid/experiment_name.yaml`, where `experiment_name` shoud be substituded with the respective name of the experiment. 
``` def load_mrf_frames(config): """ Load the images used for the frame-based matching. Args: config: dict, configuration object. Should be loaded beforehand. Returns: A tuple of numpy arrays with the left-camera frames, right-camera frames and the timestamps provided by the left-camera. """ frames_left, times = load_frames(input_path=os.path.join('..', config['input']['frames_path'], 'left'), resolution=config['input']['resolution'], crop_region=config['input']['crop'], scale_down_factor=config['input']['scale_down_factor'], simulation_time=config['simulation']['duration'], timestamp_unit=config['input']['timestamp_unit'], adjust_contrast=True) frames_right, _ = load_frames(input_path=os.path.join('..', config['input']['frames_path'], 'right'), resolution=config['input']['resolution'], crop_region=config['input']['crop'], scale_down_factor=config['input']['scale_down_factor'], simulation_time=config['simulation']['duration'], timestamp_unit=config['input']['timestamp_unit'], adjust_contrast=True) return frames_left, frames_right, times def load_retina_spikes(config, build_frames=True, pivots=None, buffer_length=10): """ Load the events used for visualisation purposes. Args: config: dict, configuration object. build_frames: bool, whether to load the events in buffered frame-wise manner or as a continuous stream. pivots: list, timestamps which serve as ticks to buffer the events in frames at precise locations. Otherwise, equdistant buffering will be performed, according to the buffer length. buffer_length: int, buffer span time in ms. Returns: Buffered left and right retina events, or non-buffered numpy array. Notes: The SNN's output is assumed fixed for this evaluation and only the MRF tests are performed. To experiment with the SNN, please see the framework. 
""" retina_spikes = load_spikes(input_file=os.path.join('..', config['input']['spikes_path']), resolution=config['input']['resolution'], crop_region=config['input']['crop'], simulation_time=config['simulation']['duration'], timestep_unit=config['input']['timestamp_unit'], dt_thresh=1, scale_down_factor=config['input']['scale_down_factor'], as_spike_source_array=False) if not build_frames: return retina_spikes effective_frame_resolution = (np.asarray(config['input']['resolution']) / config['input']['scale_down_factor']).astype(np.int32) retina_frames_l, times_l = \ generate_frames_from_spikes(resolution=effective_frame_resolution, xs=retina_spikes['left'][:, 1], ys=retina_spikes['left'][:, 2], ts=retina_spikes['left'][:, 0], zs=retina_spikes['left'][:, 3], time_interval=buffer_length, pivots=pivots, non_pixel_value=-1) retina_frames_r, times_r = \ generate_frames_from_spikes(resolution=effective_frame_resolution, xs=retina_spikes['right'][:, 1], ys=retina_spikes['right'][:, 2], ts=retina_spikes['right'][:, 0], zs=retina_spikes['right'][:, 3], time_interval=buffer_length, pivots=pivots, non_pixel_value=-1) assert retina_frames_l.shape == retina_frames_r.shape return retina_spikes, retina_frames_l, retina_frames_r def load_snn_spikes(spikes_file, build_frames=True, pivots=None, buffer_length=10, non_pixel_value=-1): """ Load the SNN output events used as a prior for the frame-based matching. Args: spikes_file: str, filepath for the SNN output events. build_frames: bool, whether to buffer the events as frames. pivots: list, timestamps for the frames. buffer_length: int, buffered frame time span in ms non_pixel_value: numerical value for the frame pixels for which there is no event Returns: Buffered frames, timestamps and indices of the events that hass been buffered in each frame accordingly. 
""" prior_disparities = load_spikes(spikes_file) if not build_frames: return prior_disparities effective_frame_resolution = prior_disparities['meta']['resolution'] prior_frames, timestamps, prior_frame_indices = \ generate_frames_from_spikes(resolution=effective_frame_resolution, xs=prior_disparities['xs'], ys=prior_disparities['ys'], ts=prior_disparities['ts'], zs=prior_disparities['disps'], time_interval=buffer_length, pivots=pivots, non_pixel_value=non_pixel_value, return_time_indices=True) return prior_frames, timestamps, prior_frame_indices def eval_mrf(left_img, right_img, max_disp, prior=None, prior_mode='adaptive', prior_const=1.0, n_iter=10, show_outline=False, show_plots=True): """ Run the MRF frame-based matching from given frames and algorithm parameters. Args: left_img: 2d array with the pre-processed left image right_img: 2d array with the pre-processed right image max_dist: int, largest detectable disparity value prior: optionally a 2d array with the prior frame oriented to the left image prior_mode: str, mode of incorporating the prior frame. Can be 'adaptive' for mixing proportionally to the data cost, or 'const' for normal mixing. prior_const: float, if the prior mode is 'const', this is the mixing coefficient. n_iter: int, number of BP iterations show_outline: bool, whether to plot the outline of the objects (using Canny filter) show_plots: bool, whether to plot the results Returns: A 2d numpy array with the resulted disparity map. 
""" img_res = left_img.shape mrf = StereoMRF(img_res, n_levels=max_disp) disp_map = mrf.lbp(left_img, right_img, prior=prior, prior_influence_mode=prior_mode, prior_trust_factor=prior_const, n_iter=n_iter).astype(np.float32) disp_map[:, :max_disp] = np.nan if not show_plots: return disp_map fig, axs = plt.subplots(2, 2) fig.set_size_inches(10, 8) axs[0, 0].imshow(left_img, interpolation='none', cmap='gray') axs[0, 0].set_title("Left frame") axs[0, 1].imshow(right_img, interpolation='none', cmap='gray') axs[0, 1].set_title("Right frame") print("Image resolution is: {}".format(img_res)) if show_outline: val = filters.threshold_otsu(left_img) ref_shape = (left_img > val).reshape(img_res).astype(np.float32) ref_outline = feature.canny(ref_shape, sigma=1.0) > 0 disp_map[ref_outline] = np.nan cmap = plt.cm.jet cmap.set_bad((1, 1, 1, 1)) depth_map_im = axs[1, 0].imshow(disp_map, interpolation='none') axs[1, 0].set_title("Depth frame") depth_map_pos = axs[1, 0].get_position() cbaxes = plt.axes([depth_map_pos.x0*1.05 + depth_map_pos.width * 1.05, depth_map_pos.y0, 0.01, depth_map_pos.height]) fig.colorbar(depth_map_im, cax=cbaxes) axs[1, 1].set_visible(False) return disp_map def eval_snn(experiment_name, disparity_max, frame_id, buffer_len=20): """ Visualise the pre-computed SNN output along with the retina input. Args: experiment_name: str, the name of the experiment which also match an existing config file. disparity_max: int, maximum computable disparity frame_id: int, the index of the frame (pair of frames) which are used to produce a depth map. buffer_len: int, time in ms for the buffer length of retina events Returns: The bufferen SNN output at the timestamps of the frames. 
""" print("Sample images from experiment: {}".format(experiment_name)) config = load_config(os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml")) left_frames, right_frames, timestamps = load_mrf_frames(config) left_img = left_frames[frame_id] right_img = right_frames[frame_id] # remove the _downsampled suffix in the experiment name for the pivots pivots = np.load(os.path.join("..", "data", "input", "frames", experiment_name[:-12], "left", "timestamps.npy")) / 1000. retina_spikes, left_retina, right_retina = \ load_retina_spikes(config, build_frames=True, pivots=pivots, buffer_length=buffer_len) snn_spikes_file = os.path.join("..", "data", "output", "experiments", "best_snn_spikes", experiment_name + '.pickle') prior_frames, _, prior_frame_indices = \ load_snn_spikes(snn_spikes_file, build_frames=True, pivots=pivots, buffer_length=buffer_len) fig, axs = plt.subplots(3, 2) fig.set_size_inches(11, 11) # fig.tight_layout() axs[0, 0].imshow(left_img, interpolation='none', cmap='gray') axs[0, 0].set_title("Left frame") axs[0, 1].imshow(right_img, interpolation='none', cmap='gray') axs[0, 1].set_title("Right frame") axs[1, 0].imshow(left_retina[frame_id], interpolation='none') axs[1, 0].set_title("Left retina frame") axs[1, 1].imshow(right_retina[frame_id], interpolation='none') axs[1, 1].set_title("Right retina frame") depth_map_snn = axs[2, 0].imshow(prior_frames[frame_id], interpolation='none', vmin=0, vmax=disparity_max) depth_map_pos = axs[2, 0].get_position() cbaxes = plt.axes([depth_map_pos.x0*1.05 + depth_map_pos.width * 1.05, depth_map_pos.y0, 0.01, depth_map_pos.height]) fig.colorbar(depth_map_snn, cax=cbaxes) axs[2, 0].set_title("Network depth map") axs[2, 1].set_visible(False) return prior_frames def compute_optical_flow(experiment_name, background=None): pivots = np.load(os.path.join("..", "data", "input", "frames", experiment_name[:-12], "left", "timestamps.npy")) / 1000. 
config = load_config(os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml")) vvf = VelocityVectorField(time_interval=20, neighbourhood_size=(3, 3), rejection_threshold=0.005, convergence_threshold=1e-5, max_iter_steps=5, min_num_events_in_timespace_interval=30) events = load_spikes(input_file=os.path.join('..', config['input']['spikes_path']), resolution=config['input']['resolution'], crop_region=config['input']['crop'], simulation_time=config['simulation']['duration'], timestep_unit=config['input']['timestamp_unit'], dt_thresh=1, scale_down_factor=config['input']['scale_down_factor'], as_spike_source_array=False) time_ind, _ = split_frames_by_time(ts=events['left'][:, 0], time_interval=50, pivots=pivots) velocities = vvf.fit_velocity_field(events['left'][time_ind[frame_id_head], :], assume_sorted=False, concatenate_polarity_groups=True) xs, ys, us, vs = events['left'][time_ind[frame_id_head], 1], \ events['left'][time_ind[frame_id_head], 2], \ velocities[:, 0], velocities[:, 1] fig, axs = plt.subplots(1, 1) # fig.set_size_inches(5, 5) if background is not None: plt.imshow(background) colors = np.arctan2(us, vs) norm = Normalize() if colors.size > 0: norm.autoscale(colors) colormap = cm.inferno axs.invert_yaxis() plt.quiver(xs, ys, us, vs, angles='xy', scale_units='xy', scale=1, color=colormap(norm(colors))) return xs, ys, us, vs def adjust_events_from_motion(prior_frame, velocities): """ Modify the position of the events according to the detected motion. As the algorithm for optical flow operates on the 3d non-buffered retina events, some additional parameters such as frame resolution etc. will be required (unfortunately they cannot be inferred). Args: prior_frame: ndarray, the buffered SNN output used as a prior. velocities: tuple, xs, ys, us, vs -- start and end positions of the velocity vectors. Returns: One adjusted prior frame. 
""" xs, ys, us, vs = velocities # store the velocities onto a 2D image plane which will be queried for a shift velocity_lut = np.zeros(prior_frame.shape + (2,)) for x, y, u, v in zip(xs, ys, us, vs): velocity_lut[int(y), int(x), :] = np.array([u, v]) # compute shift based on 8 directional compass shifts = np.asarray([(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)], dtype=np.int32) compute_shift = lambda x, y: shifts[int(np.floor(np.round(8 * np.arctan2(y, x) / (2 * np.pi)))) % 8] \ if np.linalg.norm([x, y]) > 1. else np.array([0, 0]) adjusted_frame = np.ones_like(prior_frame) * -1 # compute the corresponding shift for all detected disparity event_frames for row, col in np.argwhere(prior_frame >= 0): x, y = velocity_lut[row, col] dcol, drow = compute_shift(y, x) # going up in the image is equivalent to decrementing the row number, hence the minus in row - drow if 0 <= col + dcol < prior_frame.shape[1] and 0 <= row - drow < prior_frame.shape[0]: adjusted_frame[row - drow, col + dcol] = prior_frame[row, col] return adjusted_frame def run_mrf_without_prior(experiment_name, disparity_max, frame_id=0, n_iter=5): """ Perform the MRF depth map computation on a pair of images without any prior knowledge. The experiment parameters are loaded from the corresponding configuration yaml file. Args: experiment_name: str, the name of the experiment which also match an existing config file. disparity_max: int, maximum computable disparity. frame_id: int, the index of the frame (pair of frames) which are used to produce a depth map. n_iter: int, number of MRF BP iterations. Returns: The resolved depth map. 
""" print("Sample images from experiment: {}".format(experiment_name)) config = load_config(os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml")) left_frames, right_frames, timestamps = load_mrf_frames(config) left_img = left_frames[frame_id] right_img = right_frames[frame_id] depth_map_raw = eval_mrf(left_img, right_img, disparity_max, n_iter=n_iter) return depth_map_raw def run_mrf_with_prior(experiment_name, disparity_max, prior_frames, frame_id=0, n_iter=5, prior_mode='const', prior_const=1): """ Run the MRF computation on an image pair using a SNN prior frame in the initialisation phase. Again, load the experiment parameters from a configuration file. Args: experiment_name: str, the name of the experiment which also match an existing config file. disparity_max: int, maximum computable disparity prior_frames: ndarray, list of all buffered frames from the SNN output. frame_id: int, the index of the frame (pair of frames) which are used to produce a depth map. n_iter: int, number of MRF BP iterations. prior_mode: str, the way of incorporating the prior. Can be `adaptive` or `const`. prior_const: float, if the chosen mode is `const` than this is the influence of the prior. Returns: The depth map of the MRF using the prior frame. """ config = load_config(os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml")) left_frames, right_frames, timestamps = load_mrf_frames(config) left_img = left_frames[frame_id] right_img = right_frames[frame_id] depth_map_prior = eval_mrf(left_img, right_img, disparity_max, prior=prior_frames[frame_id], prior_mode=prior_mode, prior_const=prior_const, n_iter=n_iter, show_plots=False) return depth_map_prior def plot_difference_prior_raw(depth_map_raw, depth_map_prior, disparity_max): """ Visualise the outcome from the MRF with the prior and without and show the absolute value difference. Args: depth_map_raw: ndarray, depth map result of the MRF applied on the frames only. 
depth_map_prior: ndarray, depth map of the MRF applied on the image and prior frames. disparity_max: int, maximum detectable disparity, used to normalise the plot colors. """ fig, axs = plt.subplots(1, 3) fig.set_size_inches(12, 20) axs[0].imshow(depth_map_prior, interpolation='none', vmax=disparity_max) axs[0].set_title("With prior") axs[1].imshow(depth_map_raw, interpolation='none', vmax=disparity_max) axs[1].set_title("Without prior") axs[2].imshow(np.abs(depth_map_raw - depth_map_prior), interpolation='none', vmax=disparity_max) axs[2].set_title("Absolute value difference") def plot_adjusted_prior(experiment_name, frame_id=0): """ Visualise the prior before and after the adjustment. Args: experiment_name: str, name of the experiment to load frame_id: int, the index of the frame to plot as background """ config = load_config(os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml")) left_frames, _, _ = load_mrf_frames(config) left_img = left_frames[frame_id] fig, axs = plt.subplots(1, 2) fig.set_size_inches(10, 16) axs[0].imshow(left_img, interpolation='none', cmap='gray') axs[0].imshow(prior_frames_head[frame_id_head], interpolation='none', alpha=0.7) axs[0].set_title("Reference frame with prior overlayed") axs[1].imshow(left_img, interpolation='none', cmap='gray') axs[1].imshow(adjusted_events, interpolation='none', alpha=0.7) axs[1].set_title("Reference frame with adjusted prior overlayed") ``` ## MRF on frames without prior information The following experiment provides a baseline for the stereo-matching performance of the MRF algorothm. For an algorithm test on a standard stereo benchmark dataset see the notebook `MRF_StereoMatching.ipynb`. These results also provide a baseline for the next experiment in which prior information is included. For the sake of completeness, a [third-party algorithm](http://www.ivs.auckland.ac.nz/quick_stereo/index.php) was applied on a subset of the data to compare against our MRF implementation. 
The results are included in the submitted data (see `data/output/demo/online_algorithm`). ### Head experiment ``` experiment_name = 'head_downsampled' disparity_max_head = 30 # note that these should be scaled if the scale factor in the config file is changed. frame_id_head = 40 depth_map_raw_head = run_mrf_without_prior(experiment_name, disparity_max_head, frame_id=frame_id_head, n_iter=5) ``` **Result Analysis:** The head is mostly correctly matched, with some errors in the middle. However, if one increases the number of iterations, then in some cases (different `frame_id`s) these spots tend to disappear. Another interesting effect is the misclassified backgorund area on the left side of the head and the correctly classified right side. This can be explained as follows: when comparing the left and right images for the zero disparity case, the background of the two images overlap and due to the homogeneity of the color, the energy values for the right-side background pixels are quite small and the algorithm correctly assigns the small disparity. On the left side however, the background, albeit not really shifted, is occluded from the object in the right image and the nearest matching point to the left of the object (the direction of search) is some 26-27 pixels apart from the reference location. This inevitably produces the wrong depth values on the left side of the reference object. Altough the situation below the head statue is different, the algorithm produces unsatisfying results due to the absence of corresponding pixels (as the shadow is not the same in the left and the right image, and the signal from neighbours from above gets propagated to the lower rows of the depth image). ### Checkerboard experiment ``` experiment_name = 'checkerboard_downsampled' disparity_max_checkerboard = 22 # note that these should be scaled if the scale factor in the config file is changed. 
frame_id_checkerboard = 40
depth_map_raw_checkerboard = run_mrf_without_prior(experiment_name, disparity_max_checkerboard,
                                                   frame_id=frame_id_checkerboard, n_iter=5)
```

**Result Analysis:** The outcome of this experiment shows that the MRF is producing good results for the regions which can be matched unambiguously, such as object edges. The detected disparities for the regions with homogeneous colors, e.g. the floor or the wall are mostly incorrect. Nevertheless, the pixel correspondence there is not trivially computable and without any additional knowledge, such as "the floor spans perpendicularly to the image plane", no matching algorithm known to us will be able to generate an adequate depth map.

In the experiment with the checkerboard, special difficulty is posed by the repetitive patterns, which in some frames (e.g. No. 40) are fully visible and have therefore a globally optimal matching configuration. There is, however, no guarantee that this configuration will be found by the algorithm and in practice we see that only a small portion is correctly matched.

### Boxes and cones experiment

```
experiment_name = 'boxes_and_cones_downsampled'
disparity_max_boxes = 20    # note that these should be scaled if the scale factor in the config file is changed.
It is worth putting more effort into pre-processing, such the contrast it normalised locally and overexposed areas do not affect the global contrast normalisation. ## MRF on frames with prior information from SNN output This experiment will take the pre-computed depth events from the spiking network and will run the MRF on the same data. This time however the initial state of the random field will be computed as a convex combination between the data (i.e. image differences) and the prior. The reader is encouraged to play with the parameters. The arguably well-performing parameters are set as the default in the cell below. ### Head experiment ``` experiment_name = 'head_downsampled' prior_frames_head = eval_snn(experiment_name, disparity_max_head, frame_id=frame_id_head, buffer_len=20) ``` The prior frame, obtained from the buffered SNN output in the time interval `buffer_len` ms before the actual frames, is mixed with the data-term computed in the image difference operation. The mixing coefficient can be proportional to the difference term, which has the following interpretation: _the lower the matching confidence from the data, the higher the prior influence should be_. ``` depth_map_prior_head = run_mrf_with_prior(experiment_name, disparity_max_head, prior_frames_head, frame_id=frame_id_head, n_iter=5, prior_mode='const', prior_const=1) plot_difference_prior_raw(depth_map_raw=depth_map_raw_head, depth_map_prior=depth_map_prior_head, disparity_max=disparity_max_head) ``` Part of this experiment is to evaluate the contribution of the prior with varying prior constants. Below we plot the results from several independent evaluations with the `prior_const` ranging in [0, 0.1, 0.5, 1, 2, 10, 100] and finaly the result from the adaptive mode. 
``` prior_consts = [0, 0.5, 1, 10, 100] depth_maps = [] fig, axs = plt.subplots(1, len(prior_consts)+1) fig.set_size_inches(40, 40) for i, p_c in enumerate(prior_consts): depth_map = run_mrf_with_prior(experiment_name, disparity_max_head, prior_frames_head, frame_id=frame_id_head, n_iter=5, prior_mode='const', prior_const=p_c) axs[i].imshow(depth_map, interpolation='none', vmax=disparity_max_head) axs[i].set_title("Prior const: {}".format(p_c)) depth_map = run_mrf_with_prior(experiment_name, disparity_max_head, prior_frames_head, frame_id=frame_id_head, n_iter=5, prior_mode='adaptive') axs[i+1].imshow(depth_map, interpolation='none', vmax=disparity_max_head) axs[i+1].set_title("Adaptive Prior const") ``` **Result Analysis:** In some examples the prior has visually deteriorated the results (especially if taken with great influence, i.e, >> 1) and in the rest of the cases it hasn't change much of the quality of the depth map. The former is due to the noisy output that the SNN produces on these datasets and the latter - due to its sparsity. In any case, these results do not support the claim that using SNN as prior initialisation for the MRF will improve the quality of the depth map. ### Checkerboard experiment ``` experiment_name = 'checkerboard_downsampled' prior_frames = eval_snn(experiment_name, disparity_max_checkerboard, frame_id=frame_id_checkerboard, buffer_len=20) depth_map_prior_checkerboard = run_mrf_with_prior(experiment_name, disparity_max_checkerboard, prior_frames, frame_id=frame_id_checkerboard, n_iter=5, prior_mode='const', prior_const=1) plot_difference_prior_raw(depth_map_raw=depth_map_raw_checkerboard, depth_map_prior=depth_map_prior_checkerboard, disparity_max=disparity_max_checkerboard) ``` **Result Analysis:** The same observations as in the _head experiment_: prior doesn't change much, and if it does, then the depth map has not become better in quality. 
### Boxes and cones experiment ``` experiment_name = 'boxes_and_cones_downsampled' prior_frames = eval_snn(experiment_name, disparity_max_boxes, frame_id=frame_id_boxes, buffer_len=20) depth_map_prior_boxes = run_mrf_with_prior(experiment_name, disparity_max_boxes, prior_frames, frame_id=frame_id_boxes, n_iter=5, prior_mode='const', prior_const=1) plot_difference_prior_raw(depth_map_raw=depth_map_raw_boxes, depth_map_prior=depth_map_prior_boxes, disparity_max=disparity_max_boxes) ``` **Result Analysis:** Same as above. ## Inspecting the spatial precision of the prior Since the prior is an accumulated information from the past, and motion is present, it can happend that the SNN output will have spikes on locations which are slightly off form the gray-scale image. If this is the case (which, by the way, is not easily detectable in an automatic fashion) then one can try to compute the motion of the object and adapt the SNN output accordingly. An optical flow algorithm on the SNN events is applied to estimate the future posiiton of the object and the shift is added to the prior. We will perform this experiment on the _head_ dataset only, as this is rather unnecessary evaluation and serves only to show that this approach has been considered. Feel free to try on different frames and/or datasets. The optical flow algorithm is implemented according to _Benosman, Ryad, et al., "Event-based visual flow."_ [10], which in short is based on fitting a plane in 3D space-time (2D image space and 1D time dimensions), where the inverse of the slopes of the plane in the orthogonal _x_, _y_ directions (partial derivatives) are used to compute the velocities. ``` experiment_name = 'head_downsampled' xs, ys, us, vs = compute_optical_flow(experiment_name) pivots = np.load(os.path.join("..", "data", "input", "frames", experiment_name[:-12], "left", "timestamps.npy")) / 1000. 
adjusted_events = adjust_events_from_motion(prior_frames_head[frame_id_head], (xs, ys, us, vs)) plot_adjusted_prior(experiment_name, frame_id=frame_id_head) ``` **Result Analysis:** Since the prior adjustment did not turn out to be beneficial, we decided to stop any furhter analysis of the performance. In a different application or under different circumstances (e.g. when immediate depth SNN spikes cannot be computed and older result should be extrapolated in the future) this technique might prove helpful.
github_jupyter
``` %matplotlib inline import numpy as np from sklearn.svm import LinearSVC, SVC from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA import matplotlib.pyplot as plt class OVRSVC(): def __init__(self, **kwargs): self.c2svc = {} self.kwargs = kwargs def fit(self, X, y): for c in set(i for i in y): self.c2svc[c] = LinearSVC(**self.kwargs) self.c2svc[c].fit(X, y==c) return self def predict(self, X): confidences = np.zeros((X.shape[0], len(self.c2svc))) for c, svc in self.c2svc.items(): confidences[:, int(c)] = svc.decision_function(X) result = confidences.argmax(axis=1) return result def score(self, X, y): result = self.predict(X) score = (result == y).sum() / len(y) return score scaler = MinMaxScaler(copy=False) X_train = np.load('data_hw2/train_data.npy') y_train = np.load('data_hw2/train_label.npy').astype(int) + 1 # X_train, X_dev, y_train, y_dev = train_test_split(X_train, y_train, test_size=0.5) X_test = np.load('data_hw2/test_data.npy') y_test = np.load('data_hw2/test_label.npy').astype(int) + 1 scaler.fit(X_train) X_train = scaler.transform(X_train) # X_dev = scaler.transform(X_dev) X_test = scaler.transform(X_test) pca = PCA(n_components=2) pca.fit(X_train) # pca.fit(np.concatenate((X_train, X_test))) X_train_r = pca.transform(X_train) X_dev_r = pca.transform(X_dev) X_test_r = pca.transform(X_test) np.random.shuffle(X_train_r) np.random.shuffle(X_dev_r) np.random.shuffle(X_test_r) X_train_r_0 = np.random.permutation(pca.transform(X_train[y_train == 0])) X_test_r_0 = np.random.permutation(pca.transform(X_test[y_test == 0])) X_train_r_1 = np.random.permutation(pca.transform(X_train[y_train == 1])) X_test_r_1 = np.random.permutation(pca.transform(X_test[y_test == 1])) X_train_r_2 = np.random.permutation(pca.transform(X_train[y_train == 2])) X_test_r_2 = np.random.permutation(pca.transform(X_test[y_test == 2])) legends = [] plt.scatter(X_train_r_0[:50, 0], X_train_r_0[:50, 1], 
label="train_0", color='#EC5D57') plt.scatter(X_train_r_1[:50, 0], X_train_r_1[:50, 1], label="train_1", color='#70BF41') plt.scatter(X_train_r_2[:50, 0], X_train_r_2[:50, 1], label="train_2", color='#51A7F9') plt.scatter(X_test_r_0[:50, 0], X_test_r_0[:50, 1], label="test_0", color='#F5D328') plt.scatter(X_test_r_1[:50, 0], X_test_r_1[:50, 1], label="test_1", color='#B36AE2') plt.scatter(X_test_r_2[:50, 0], X_test_r_2[:50, 1], label="test_2", color='#F39019') plt.legend() plt.savefig('pca.pdf') plt.scatter(X_train_r_1[:, 0], X_train_r_1[:, 1], color='navy') plt.scatter(X_dev_r_1[:, 0], X_dev_r_1[:, 1], color='turquoise') plt.scatter(X_test_r_1[:, 0], X_test_r_1[:, 1], color='darkorange') plt.scatter(X_train_r_2[:, 0], X_train_r_2[:, 1], color='navy') plt.scatter(X_dev_r_2[:, 0], X_dev_r_2[:, 1], color='turquoise') plt.scatter(X_test_r_2[:, 0], X_test_r_2[:, 1], color='darkorange') plt.scatter(X_train_r[:70, 0], X_train_r[:70, 1], color='navy') plt.scatter(X_dev_r[:70, 0], X_dev_r[:70, 1], color='turquoise') plt.scatter(X_test_r[:70, 0], X_test_r[:70, 1], color='darkorange') ```
github_jupyter
``` import os import glob import sys import numpy as np import pickle import tensorflow as tf import PIL import ipywidgets import io """ make sure this notebook is running from root directory """ while os.path.basename(os.getcwd()) in ('notebooks', 'src'): os.chdir('..') assert ('README.md' in os.listdir('./')), 'Can not find project root, please cd to project root before running the following code' import src.tl_gan.generate_image as generate_image import src.tl_gan.feature_axis as feature_axis import src.tl_gan.feature_celeba_organize as feature_celeba_organize """ load feature directions """ path_feature_direction = './asset_results/pg_gan_celeba_feature_direction_40' pathfile_feature_direction = glob.glob(os.path.join(path_feature_direction, 'feature_direction_*.pkl'))[-1] with open(pathfile_feature_direction, 'rb') as f: feature_direction_name = pickle.load(f) feature_direction = feature_direction_name['direction'] feature_name = feature_direction_name['name'] num_feature = feature_direction.shape[1] import importlib importlib.reload(feature_celeba_organize) feature_name = feature_celeba_organize.feature_name_celeba_rename feature_direction = feature_direction_name['direction']* feature_celeba_organize.feature_reverse[None, :] """ start tf session and load GAN model """ # path to model code and weight path_pg_gan_code = './src/model/pggan' path_model = './asset_model/karras2018iclr-celebahq-1024x1024.pkl' sys.path.append(path_pg_gan_code) """ create tf session """ yn_CPU_only = False if yn_CPU_only: config = tf.ConfigProto(device_count = {'GPU': 0}, allow_soft_placement=True) else: config = tf.ConfigProto(allow_soft_placement=True) config.gpu_options.allow_growth = True sess = tf.InteractiveSession(config=config) try: with open(path_model, 'rb') as file: G, D, Gs = pickle.load(file) except FileNotFoundError: print('before running the code, download pre-trained model to project_root/asset_model/') raise len_z = Gs.input_shapes[0][1] z_sample = 
np.random.randn(len_z) x_sample = generate_image.gen_single_img(z_sample, Gs=Gs) def img_to_bytes(x_sample): imgObj = PIL.Image.fromarray(x_sample) imgByteArr = io.BytesIO() imgObj.save(imgByteArr, format='PNG') imgBytes = imgByteArr.getvalue() return imgBytes z_sample = np.random.randn(len_z) x_sample = generate_image.gen_single_img(Gs=Gs) w_img = ipywidgets.widgets.Image(value=img_to_bytes(x_sample), format='png', width=512, height=512) class GuiCallback(object): counter = 0 # latents = z_sample def __init__(self): self.latents = z_sample self.feature_direction = feature_direction self.feature_lock_status = np.zeros(num_feature).astype('bool') self.feature_directoion_disentangled = feature_axis.disentangle_feature_axis_by_idx( self.feature_direction, idx_base=np.flatnonzero(self.feature_lock_status)) def random_gen(self, event): self.latents = np.random.randn(len_z) self.update_img() def modify_along_feature(self, event, idx_feature, step_size=0.01): self.latents += self.feature_directoion_disentangled[:, idx_feature] * step_size self.update_img() def set_feature_lock(self, event, idx_feature, set_to=None): if set_to is None: self.feature_lock_status[idx_feature] = np.logical_not(self.feature_lock_status[idx_feature]) else: self.feature_lock_status[idx_feature] = set_to self.feature_directoion_disentangled = feature_axis.disentangle_feature_axis_by_idx( self.feature_direction, idx_base=np.flatnonzero(self.feature_lock_status)) def update_img(self): x_sample = generate_image.gen_single_img(z=self.latents, Gs=Gs) x_byte = img_to_bytes(x_sample) w_img.value = x_byte guicallback = GuiCallback() step_size = 0.4 def create_button(idx_feature, width=96, height=40): """ function to built button groups for one feature """ w_name_toggle = ipywidgets.widgets.ToggleButton( value=False, description=feature_name[idx_feature], tooltip='{}, Press down to lock this feature'.format(feature_name[idx_feature]), layout=ipywidgets.Layout(height='{:.0f}px'.format(height/2), 
width='{:.0f}px'.format(width), margin='2px 2px 2px 2px') ) w_neg = ipywidgets.widgets.Button(description='-', layout=ipywidgets.Layout(height='{:.0f}px'.format(height/2), width='{:.0f}px'.format(width/2), margin='1px 1px 5px 1px')) w_pos = ipywidgets.widgets.Button(description='+', layout=ipywidgets.Layout(height='{:.0f}px'.format(height/2), width='{:.0f}px'.format(width/2), margin='1px 1px 5px 1px')) w_name_toggle.observe(lambda event: guicallback.set_feature_lock(event, idx_feature)) w_neg.on_click(lambda event: guicallback.modify_along_feature(event, idx_feature, step_size=-1 * step_size)) w_pos.on_click(lambda event: guicallback.modify_along_feature(event, idx_feature, step_size=+1 * step_size)) button_group = ipywidgets.VBox([w_name_toggle, ipywidgets.HBox([w_neg, w_pos])], layout=ipywidgets.Layout(border='1px solid gray')) return button_group list_buttons = [] for idx_feature in range(num_feature): list_buttons.append(create_button(idx_feature)) yn_button_select = True def arrange_buttons(list_buttons, yn_button_select=True, ncol=4): num = len(list_buttons) if yn_button_select: feature_celeba_layout = feature_celeba_organize.feature_celeba_layout layout_all_buttons = ipywidgets.VBox([ipywidgets.HBox([list_buttons[item] for item in row]) for row in feature_celeba_layout]) else: layout_all_buttons = ipywidgets.VBox([ipywidgets.HBox(list_buttons[i*ncol:(i+1)*ncol]) for i in range(num//ncol+int(num%ncol>0))]) return layout_all_buttons # w_button.on_click(on_button_clicked) guicallback.update_img() w_button_random = ipywidgets.widgets.Button(description='random face', button_style='success', layout=ipywidgets.Layout(height='40px', width='128px', margin='1px 1px 5px 1px')) w_button_random.on_click(guicallback.random_gen) w_box = ipywidgets.HBox([w_img, ipywidgets.VBox([w_button_random, arrange_buttons(list_buttons, yn_button_select=True)]) ], layout=ipywidgets.Layout(height='1024}px', width='1024px') ) print('press +/- to adjust feature, toggle feature name to 
lock the feature') display(w_box) ```
github_jupyter
# Similarity Functions This notebook describes about the similarity functions that can be used to measure the similarity between two sets. Firstly we import the shingling functions and other helpful functions. ``` from src.shingle import * from math import ceil, floor import numpy as np ``` We will then count how frequent a shingle is in the document. For this I have calculated the frequencies in the document called `data/portuguese/two_ends.txt`. Here we are using portuguese corpus. Then we create a dictionary called `frequencies` which goes like from the word to its frequency. ``` # Initialize counts frequencies = {} text = open("data/portuguese/two_ends.txt", "r+") for line in text: word = line.strip().split(' ') frequencies[word[0]] = float(word[1]) ``` ## TF - IDF TF-IDF (Term-frequency and Inverse Document Frequency) measures similarity using this: <img src="utils/tfidf.png" alt="tfidf" width="400px"/> Firstly, we define `tf` using this, which is just the frequency counts in the intersection. ``` def tf(intersection, query): '''Counts term frequency''' tf = [query.count(word) for word in intersection] return np.array(tf) ``` Afterwards, we compute `idf`, which is inverse document frequency. Here we will make use of the dictionary that we created earlier in order to compute document frequencies. ``` def idf(intersection, document, N): '''Counts inverse document frequency''' idf = np.array([frequencies[word] for word in intersection]) idf = np.log10(np.divide(N + 1, idf + 0.5)) return idf ``` Finally we simulate the function `tf_idf` which takes the dot product of `tf` and `idf` arrays. 
```
def tf_idf(query, document, N):
    # score = sum over the shared shingles of tf(query) * idf, i.e. the
    # dot product of the two arrays built by tf() and idf()
    intersection = [word for word in document if word in query] # intersection
    score = np.dot(tf(intersection, query), idf(intersection, document, N))
    return score
```

We can then run the similarity function in the following manner:

```
query = two_ends("pizza", 2)
document = two_ends("pizza", 2)
tf_idf(query, document, 50000) # number of documents are around 50000
```

## BM25

The formula of BM25 is given like this:

<img src="utils/bm25.png" alt="tfidf" width="800px"/>

Here we define the `bm25_tf` in the following manner:

```
def bm25_tf(intersection, query, document, k1, b, avgdl, N):
    # saturated term-frequency component of BM25: k1 caps the tf growth,
    # b controls the length normalisation against the average length avgdl
    tf_ = tf(intersection, document)
    numerator = tf_ * (k1 + 1.0)
    denominator = tf_ + k1 * (1.0 - b + b * (len(query) / avgdl))
    bm25_tf = np.divide(numerator, denominator)
    return bm25_tf
```

Finally we will take the dot product of `bm25_tf` and `idf` to get this:

```
def bm25(query, document, k1 = 1.2, b = 0.75, avgdl = 8.3, N = 50000):
    # BM25 score with the usual defaults k1=1.2, b=0.75
    intersection = [word for word in document if word in query] # intersection
    score = np.dot(bm25_tf(intersection, query, document, k1, b, avgdl, N),
                   idf(intersection, document, N))
    return score
```

We can run the function in the following manner:

```
query = two_ends("pizza", 2)
document = two_ends("pizza", 2)
bm25(query, document)
```

## Dirichlet

The formula of Dirichlet is given like this:

<img src="utils/dir.png" alt="tfidf" width="800px"/>

Firstly, we will compute the sum dependent function here in the form of `smooth`.

```
# total number of distinct shingles in the corpus, used to turn the raw
# frequency counts into collection probabilities
shingles = 470751

def smooth(intersection, document, mu):
    # per-shingle Dirichlet-smoothed log term; mu is the smoothing weight.
    # Raises KeyError for shingles absent from the corpus frequency file.
    smooth = []
    for word in intersection:
        prob = 1.0 + np.divide(document.count(word), mu * frequencies[word] / shingles)
        smooth.append(np.log10(prob))
    smooth = np.array(smooth)
    return smooth
```

We will add the sum independent function to `smooth` and take the dot product to `tf`.
```
def dirichlet(query, document, mu = 100.0):
    intersection = [word for word in document if word in query] # intersection
    add = len(query) * np.log10(np.divide(mu, mu + len(document)))
    score = np.dot(tf(intersection, query), smooth(intersection, document, mu)) + add
    return score
```

We can run this function in the following manner:

```
query = two_ends("pizzzza", 2)
document = two_ends("pizzza", 2)
print(dirichlet(query, document))
```
github_jupyter
<h2><center>Predicting the probability of citation </center></h2>

In this section, we predict the probability of receiving citation for the particular violation. We use the random forest model to make the prediction.

```
# Import necessary modules
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from joblib import dump, load

# Read the accident data from csv
df_traffic = pd.read_csv('data_wrangled.csv', index_col = 'Date Of Stop_Time Of Stop',
                         parse_dates = True, infer_datetime_format = True)
# Min-max scale the vehicle age into [0, 1]
df_traffic['V_sage'] = (df_traffic.V_Age - df_traffic.V_Age.min())/\
                       (df_traffic.V_Age.max() - df_traffic.V_Age.min())
# Keep the raw description text aside before dropping it from the features
des_col = df_traffic.Description
not_relevant = ['Latitude', 'Longitude', 'Year', 'Description', 'Make', 'Model',
                'Color', 'datetime', 'V_Age']
df_traffic.drop(labels = not_relevant, axis = 1, inplace = True)

# Get X and y from data
y = df_traffic['Violation Type_Warning']
df_X1 = df_traffic.drop('Violation Type_Warning', axis = 1)
X = df_X1.values

# Split into train and test sets (the classifier itself is a pre-trained
# random forest loaded further below, not fitted here)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5, random_state=77)

# The description column is full of text. We vectorize the text so that we
# can fit it into the model.
corpus = des_col
vectorizer = CountVectorizer(stop_words = 'english', strip_accents = 'ascii', min_df = 0.025,
                             max_features = 100, token_pattern='[a-z]+', max_df = 0.25,
                             binary = True)
X = vectorizer.fit_transform(corpus)

"""This translates text in each column as a vector array of following basis (keywords)"""
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
# favour of get_feature_names_out() — confirm the pinned sklearn version
print(vectorizer.get_feature_names())
```

To calculate citation probability, we first copy the entire row from a random violation and then replace the description column of our choice. Then we make a prediction some of the unique violations. Now, let us take some violations of our choice and make some prediction.

```
plt.style.use('seaborn-dark')
# Template feature row: an arbitrary stop whose description slots get overwritten
random_driver = df_X1.iloc[777777, :]
some_violations = np.array(['DRIVING UNDER THE INFLUENCE OF ALCOHOL',
                            'SUSPENDED LICENSE AND PRIVILEGE',
                            'FAILURE TO CONTROL VEHICLE TO AVOID COLLISION',
                            'HOLDER OF LEARNERS PERMIT DRIVING W/O REQ. SUPERVISION',
                            'NEGLIGENT DRIVING ENDANGERING PROPERTY, LIFE AND PERSON',
                            'OPERATOR NOT RESTRAINED BY SEATBELT',
                            'KNOWINGLY DRIVING UNINSURED VEHICLE',
                            'DRIVER CHANGING LANES WHEN UNSAFE',
                            'FAILURE TO STOP AT STOP SIGN',
                            'FAILURE TO DISPLAY TWO LIGHTED FRONT LAMPS '
                            ])
vec_array = vectorizer.transform(some_violations).toarray()
# One copy of the template row per candidate violation
driver = len(some_violations)*[random_driver]
driver = np.vstack(driver)
# NOTE(review): assumes columns 20..82 of df_X1 hold the vectorizer's
# keyword features — confirm against the wrangled column layout
driver[:,20:83] = vec_array
random_forest = load('RANDOM_FOREST.joblib')
# Column 0 of predict_proba = probability of NOT being a warning, i.e. a citation
prob = random_forest.predict_proba(driver)[:,0]
df_prob = pd.DataFrame(data = prob, index = some_violations, columns=['Prob'])
df_prob.sort_values(by = 'Prob', ascending = False, inplace = True)
ax = df_prob.plot(kind = 'barh', rot = 0, legend = False, figsize = (5,5))
plt.xlabel('Citation Probability', fontsize = 15, fontweight = 'bold')
for tick in ax.xaxis.get_major_ticks():
    tick.label1.set_fontsize(15)
    tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
    tick.label1.set_fontsize(15)
    tick.label1.set_fontweight('bold')
plt.savefig('prob.png', dpi = 100, bbox_inches = 'tight')
```
github_jupyter
# Numerical solution to the 1-dimensional Time Independent Schroedinger Equation Based on the paper "Matrix Numerov method for solving Schroedinger's equation" by Mohandas Pillai, Joshua Goglio, and Thad G. Walker, _American Journal of Physics_ **80** (11), 1017 (2012). [doi:10.1119/1.4748813](http://dx.doi.org/10.1119/1.4748813) ``` # import some needed libraries import numpy as np import matplotlib.pyplot as plt import scipy.optimize as opt %matplotlib inline autoscale = False # set this equal to true to use Pillai's recommended step sizes # values of constants hbar = 1.0 mass = 1.0 # changing the mass will also change the energy scale omega = 1.0 L = 1.0 # width of SW # bounds (These are overwritten if autoscale=True) xmin = -L # lower bound of position xmax = 5.0 # upper bound of position n = 100 # number of steps (may be overwritten if autoscale == True) dx = (xmax-xmin)/(n-1) # the function V is the potential energy function def V(x): # make sure there is no division by zero # this also needs to be a "vectorizable" function # uncomment one of the examples below, or write your own. 
return 0.5*mass*omega**2*x*x*(0.5*(x+np.abs(x))) # half harmonic oscillator if (autoscale): #Emax is the maximum energy for which to check for eigenvalues Emax = 20.0 #The next lines make some reasonable choices for the position grid size and spacing xt = opt.brentq(lambda x: V(x)-Emax ,0,5*Emax) #classical turning point dx = 1.0/np.sqrt(2*Emax) #step size # bounds and number of steps n = np.int(0.5+2*(xt/dx + 4.0*np.pi)) #number of steps xmin = -dx*(n+1)/2 xmax = dx*(n+1)/2 xmin, xmax, n #show the limits and number of steps #define the x coordinates x = np.linspace(xmin,xmax,n) #define the numerov matrices B = np.matrix((np.eye(n,k=-1)+10.0*np.eye(n,k=0)+np.eye(n,k=1))/12.0) A = np.matrix((np.eye(n,k=-1)-2.0*np.eye(n,k=0)+np.eye(n,k=1))/(dx**2)) #calculate kinetic energy operator using Numerov's approximation KE = -0.5*hbar**2/mass*B.I*A #calculate hamiltonian operator approximation H = KE + np.diag(V(x)) #Calculate eigenvalues and eigenvectors of H energies, wavefunctions = np.linalg.eigh(H) # "wavefunctions" is a matrix with one eigenvector in each column. energies[0:5] #display the lowest four energies # extract color settings to help plotting prop_cycle = plt.rcParams['axes.prop_cycle'] colors = prop_cycle.by_key()['color'] plt.figure(figsize=(6,8)) number = [0,1,2,3,4,5] #which wavefunctions to plot, starting counting from zero zoom = -3.0 # zoom factor for wavefunctions to make them more visible plt.plot(x,V(x),'-k',label="V(x)") # plot the potential plt.vlines(-1,0,15,color="black") plt.vlines(0,0,15,color="black",lw=0.5) for num in number: plt.plot(x,zoom*wavefunctions[:,num]+energies[num],label="n={}".format(num)) #plot the num-th wavefunction plt.hlines(energies[num],-1,5,lw=0.5, color=colors[num]) plt.ylim(-1,15); # set limits of vertical axis for plot plt.legend(); plt.xlabel("x"); plt.ylabel("Energy or ϕ(x)"); ```
github_jupyter
<a href="https://colab.research.google.com/github/MinCiencia/Datos-COVID19/blob/master/DataObservatory_ex3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <p><img alt="Data Observatory logo" height="150px" src="http://dataobservatory.io/wp-content/themes/data-observatory-01/img/do-logo-web-01.png" align="left" hspace="10px" vspace="0px"></p> <h1 align="center">Bienvenido al jupyter notebook: ejemplos para ayudar a usar los datos publicados</h1> <h4 align="center">Una contribución del Data Observatory</h4> ## **Para empezar** Este documento te permite interactuar con los datos que se encuentran en el [repositorio](https://github.com/MinCiencia/Datos-COVID19) del [Ministerio de Ciencia, Tecnología, Conocimiento e Innovación](http://www.minciencia.gob.cl/COVID19). Estos datos son recopilados de las cifras oficiales publicadas por el [Ministerio de Salud](https://www.gob.cl/coronavirus/cifrasoficiales/#informes) sobre la pandemia del COVID-19 en Chile. Este notebook es un ejemplo y puedes usarlo como base para generar tus propios gráficos y/o productos. Los datos publicados están en https://colab.research.google.com/github/MinCiencia/output/blob/master # <h2>Para trabajar con los productos</h2> Este notebook está escrito utilizando el lenguaje de programación [Python](https://www.python.org/about/) versión [3.x](https://www.python.org/download/releases/3.0/), cuya lectura se facilita a programadores no expertos. Cada bloque de ejecución está separado en distintas celdas, es necesario "ejecutar" cada una en secuencia haciendo click en botón "play" que aparece al posicionar el mouse sobre el recuadro [ ] al inicio de cada celda. Una vez que la celda se ejecuta, aparece un número que indica el órden en que se ha ejecutado. 
Recomendamos ver los notebooks anteriores para utilizar algunas funciones útiles de python y las bibliotecas que hemos ido utilizando Una manera comun de manipular los datos, es usando [pandas](https://pandas.pydata.org/). Para cargar uno de los archivos en un dataframe. También es necesario utilizar [numpy](https://numpy.org/) para los distintos cálculos ``` import numpy as np import pandas as pd ``` **Nota:** Los datos están almacenados como tablas en formato csv. Algunas tablas están almacenadas por día. Es importante mantener el formato 'año-mes-día' (las comillas simples o dobles indican que es texto y no se ejecutará como una operación matemática) # <h4>Graficar datos por región de varios días</h4> En los notebook DataObservatory-ex1 se leyeron los datos totales del país en los que está el desgloce por región para una fecha dada. Ahora queremos ver el acumulado por cada región por un rango de fechas. Primero establecemos la fecha de interés en la variable 'date' y la primera fecha ('first_date') es cuando se reportó el primer contagio en el país. ``` # el símbolo # al inicio de la línea nos indica que esto es un comentario, no se ejecuta con el código # date indica la fecha de los datos que queremos utilizar date = '2020-05-05' first_date = '2020-03-03' ``` Contamos el rango de días ``` # contamos los días considerados con la siguiente instrucción total_days = (pd.to_datetime(date)-pd.to_datetime(first_date)).days total_days ``` Ahora leemos el archivo (tabla) dado en el producto 4 correpondiente a la fecha 'date' ``` dataTotalRegion = pd.read_csv("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto4/" + date + "-CasosConfirmados-totalRegional.csv",index_col=0) ``` Revisamos cuales son las columnas en este archivo ``` columnas = list(dataTotalRegion.columns.values) columnas ``` El nombre de las columnas ha variado durante el curso de las publicaciones del Ministerio de Salud debido a la complejidad de la enfermedad. 
Esto puede agregar complejidad para utilizar los datos. Aquí tratamos de cubrir una variedad de bases para que se pueda trabajar con los datos sin mayores inconvenientes. Por ejemplo, revisamos las columnas del archivo csv del primer día: ``` dataTotalRegion = pd.read_csv("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto4/2020-03-03-CasosConfirmados-totalRegional.csv",index_col=0) columnas = list(dataTotalRegion.columns.values) columnas ``` Se nota que las tablas ahora entregan mucha más información que las primeras. Para converger ambos estilos, optamos por utilizar la secuencia de instrucciones: 1. Declaramos nuevos dataFrames donde se asignaran las series de tiempo ``` # dataNew corresponde a los nuevos casos por día # dataCum a los datos acumulados al día dataNew = pd.DataFrame() dataCum = pd.DataFrame() ``` 2. Utilizamos reglas para clasificar los datos que siguen la misma serie de tiempo. Por ejemplo, los 'Casos nuevos' a partir del 29 de abril se llaman 'Casos nuevos totales'. Y los 'Casos totales' se llaman 'Casos totales acumulados' a partir de esa misma fecha. También consideramos ciertos "typos" en los nombres, algunos tienen espacios intercalados. 
Por ello, la asignación en los nuevos dataFrames la planteamos de la siguiente manera: ``` for i in np.arange(total_days+1): date = (pd.to_datetime(first_date)+pd.DateOffset(i)).strftime('%Y-%m-%d') s = "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto4/" + date + "-CasosConfirmados-totalRegional.csv" dataTotalRegion_by_date = pd.read_csv(s,index_col=0) columnas = list(dataTotalRegion.columns.values) if "Casos nuevos" in dataTotalRegion_by_date.columns: dataNew[date] = dataTotalRegion_by_date['Casos nuevos'].values elif 'Casos nuevos' in dataTotalRegion_by_date.columns: dataNew[date] = dataTotalRegion_by_date["Casos nuevos"].values elif " Casos nuevos" in dataTotalRegion_by_date.columns: dataNew[date] = dataTotalRegion_by_date[" Casos nuevos"].values if "Casos nuevos totales" in dataTotalRegion_by_date.columns: dataNew[date] = dataTotalRegion_by_date['Casos nuevos totales'].values elif 'Casos nuevos totales' in dataTotalRegion_by_date.columns: dataNew[date] = dataTotalRegion_by_date["Casos nuevos totales"].values elif " Casos nuevos totales" in dataTotalRegion_by_date.columns: dataNew[date] = dataTotalRegion_by_date[" Casos nuevos totales"].values if "Casos totales" in dataTotalRegion_by_date.columns: dataCum[date] = dataTotalRegion_by_date['Casos totales'].values elif 'Casos totales' in dataTotalRegion_by_date.columns: dataCum[date] = dataTotalRegion_by_date["Casos totales"].values elif " Casos totales" in dataTotalRegion_by_date.columns: dataCum[date] = dataTotalRegion_by_date[" Casos totales"].values if "Casos totales acumulados" in dataTotalRegion_by_date.columns: dataCum[date] = dataTotalRegion_by_date['Casos totales acumulados'].values elif 'Casos totales acumulados' in dataTotalRegion_by_date.columns: dataCum[date] = dataTotalRegion_by_date["Casos totales acumulados"].values ``` Antes de hacer el gráfico, preparamos las etiquetas a usar en la leyenda para que nos brinde mayor información. 
Queremos visualizar las regiones y el total de casos acumulados a la fecha escogida en la leyenda del gráfico ``` label_region = list() temp = dataCum[date].values.tolist() temp2 = dataTotalRegion.index.tolist() for i in range(len(temp)): label_region.append(temp2[i]+' '+str(temp[i])) ``` Para hacer gráficos utilizamos matplotlib, con el estilo (opcional) 'fivethirtyeight' ``` import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') # Definimos el tipo de línea explícitamente ara evitar la duplicación de # color/tipo de línea en las regiones filename = date+'-COVID-19-Chile-TotalConfirmados-Region.png' fig, ax = plt.subplots(tight_layout=True,figsize=(14,7)) lsRegion = ['-','--','-.',':',':','-.','--','-','-','--','-.',':',':','-.','--','-'] for i in np.arange(len(dataCum.index)-1): dataCum.iloc[i].plot(ax=ax,ls=lsRegion[i]) plt.legend(fontsize='medium', labels=label_region, handlelength=3.0, title='Region - total confirmados') ax.set_yscale('log') ax.set_title(f'COVID-19 en Chile: Número total confirmados por Región '+date, fontsize='large') ax.set_xlabel(f'fecha (año-mes-día)', fontsize='medium') ax.set_ylabel(f'total confirmados por Region', fontsize='medium') plt.annotate('Source:\nhttps://github.com/MinCiencia/Datos-COVID19', (0,0), (-80,-20), fontsize='medium', xycoords='axes fraction', textcoords='offset points', va='top') plt.savefig(filename, bbox_inches='tight', format='png', dvi=700) # ejecutar esta celda solo si se quiere descargar el gráfico anterior from google.colab import files files.download(filename) ``` Estas son figuras básicas para trabajar con los productos del repositorio. La idea es que generes tus propios gráficos modificando las columnas, estilos, colores, etc. En los próximos notebooks encontrarás ejemplos para utilizar los datos que se encuentran disponibles en [https://github.com/MinCiencia/Datos-COVID19/tree/master/output](https://github.com/MinCiencia/Datos-COVID19/tree/master/output)
github_jupyter
<a href="https://colab.research.google.com/github/rs-delve/tti-explorer/blob/master/notebooks/tti-experiment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # TTI Explorer #### `tti_explorer` is a library for simulating infection spread. This library is built to explore the impact of various test-trace-isolate strategies and social distancing measures on the spread of COVID-19 in the UK. This notebook is an introduction to the functionality offered by `tti-explorer`. ``` %pwd %cd ~/Desktop/College\ Work/Fourth\ Year/L48/L48Project/tti-explorer %pip install -q . import os import numpy as np import pandas as pd from tqdm.notebook import trange from tqdm import tqdm from tti_explorer import config, utils from tti_explorer.case import simulate_case, CaseFactors from tti_explorer.contacts import EmpiricalContactsSimulator from tti_explorer.strategies import TTIFlowModel, RETURN_KEYS def print_doc(func): print(func.__doc__) ``` Before we do anything, let's make a random state ``` rng = np.random.RandomState(0) ``` We will first do a short tour of the functionality, then show how this is put together to generate simulation results. ## Generate a case The function we use for this is `simulate_case` in `case.py` ``` print_doc(simulate_case) ``` We store our config values in `config.py`. You can retrieve them as follows ``` case_config = config.get_case_config("delve") case_config ``` We use these parameters to simulate a case ``` primary_case = simulate_case(rng, **case_config) print_doc(primary_case) ``` Returned is a `case.Case` with stochastically generated attributes. ### Deeper: Case attributes Let's go through the simulated attributes of a `case.Case`. The attributes `.under18`, `.covid` and `.symptomatic` are `bool` types indicating whether the generated `case.Case` is under 18, COVID positive and symptomatic respectively. 
All possible values of these attributes are possible apart from the combination `.covid = False` and `.symptomatic = False` (a configuration irrelevant for the purpose of simulating infection spread). The primary case we just simulated has the following attributes: ``` print(f'Under 18: {primary_case.under18}.') print(f'COVID positive: {primary_case.covid}.') print(f'Symptomatic: {primary_case.symptomatic}.') ``` Each `case.Case` also has an attribute `.day_noticed_symptoms` of type `int`, indicating the number of days from start of infectiousness until the `case.Case` noticed the symptoms. If a `case.Case` is asymptomatic, the attribute `.day_noticed_symptoms` is set to `-1`. ``` print(f'primary_case noticed symptoms {primary_case.day_noticed_symptoms} days after start of infectiousness.') ``` Finally, the attribute `.inf_profile` is a `list` describing the relative infectiousness of the case for each day of the infectious period. If `.covid = False` for a `case.Case`, this is `0` throughout. ``` print(f'inf_profile is: {primary_case.inf_profile}') ``` As mentioned above, the configuration for simulating these attributes are stored in `config.py`. This includes the distributions used for sampling attributes. For instance, the attribute `.under18` is sampled from a Bernoulli distribution with probability `0.21`: ``` print(f'Probability of case being under 18: {case_config["p_under18"]}') ``` As another example, if `case.Case` is symptomatic, the attribute `.days_noticed_symptoms` is sampled from a categorical distribution over the set {0, 1, ..., 9} (since we model an infection period of ten days in this configuration) with probabilities: ``` print(f'Probability distribution of .day_noticed_symptoms: {case_config["p_day_noticed_symptoms"]}') ``` ## Generate contacts Social contacts are represented by `Contacts` and defined in `contacts.py`. To simulate social contacts, we use the BBC Pandemic Dataset. 
This is stratified as over/under 18 to give different patterns of social contact depending on the age of the case. ``` def load_csv(pth): return np.loadtxt(pth, dtype=int, skiprows=1, delimiter=",") path_to_bbc_data = os.path.join("..", "data", "bbc-pandemic") over18 = load_csv(os.path.join(path_to_bbc_data, "contact_distributions_o18.csv")) under18 = load_csv(os.path.join(path_to_bbc_data, "contact_distributions_u18.csv")) ``` Now that we have the data loaded, we use `EmpiricalContactsSimulator` to sample these tables for contacts of the primary case, then simulate their infection under a no measures scenario (i.e. no government intervention) ``` print_doc(EmpiricalContactsSimulator.__init__) simulate_contacts = EmpiricalContactsSimulator(over18, under18, rng) ``` We can now use the callable `simulate_contacts` to simulate social contacts of the primary case ``` print_doc(simulate_contacts.__call__) ``` To do this we need some more parameters, which we also load from `config.py`. The user can, of course, specify this themselves if they would like. ``` contacts_config = config.get_contacts_config("delve") contacts_config.keys() ``` We now do the same as we did with when simulating a primary case. ``` social_contacts = simulate_contacts(primary_case, **contacts_config) print_doc(social_contacts) ``` ### Deeper: Contacts attributes Let's examine the attributes of `social_contacts`, which is an instance of `contacts.Contacts`. Note that `social_contacts` is simulated above by calling `simulate_contacts` which takes `primary_case` as in argument, so contact generation of course depends on the case simulated first. The first attribute to note is `.n_daily`, which is a `dict` containing the average number of daily contacts (split into three categories) of the case. This is simulated by sampling one row of the tables `over18` or `under18` depending on the value of `primary_case.under18`. 
In the case of `primary_case`, we can look at `social_contacts.n_daily`: ``` print(f'Average number of daily contacts for primary_case:') print(f'Home: {social_contacts.n_daily["home"]}') print(f'Work: {social_contacts.n_daily["work"]}') print(f'Other: {social_contacts.n_daily["other"]}') ``` The three remaining attributes `.home`, `.work` and `.other` are arrays containing information about each contact made by the case, with one row per contact. More specifically, for each contact, the row contains the first day (always measured relative to the start of infectiousness) of encounter between the case and contact and, if transmission occurred, then the day of transmission. Also, recall that home contacts are assumed to repeat every day of the infectious period, whereas work/other contacts are new for each day. This means the lengths of the arrays `.work` and `.other` are `10 * .n_daily['work']` and `10 * .n_daily['other']` respectively (recalling the infection period is assumed to last ten days, a parameter set in `contacts_config['period']`). Whereas, the length of the `.home` array is just `.n_daily['home']`. ``` print(f'Lengths of .home, .work and .other attributes:') print(f'Home: {len(social_contacts.home)}') print(f'Work: {len(social_contacts.work)}') print(f'Other: {len(social_contacts.other)}') ``` Digging further into the array, each row contains two integers. The first integer indicates the day of transmission, which is set to `-1` if no transmission occurred. The second integer contains the day of first encounter. So for instance, looking at one of the home contacts, we see transmission didn't occur and the day of first encounter is `0`, i.e. the first day of the infection period: ``` print(social_contacts.home[0]) ``` Looking at the first six work contacts, we see none of them were infected either. This is consistent with the fact that `primary_case.covid = False` so no transmission can occur in this case. 
``` print(social_contacts.work[:6]) ``` In simulations where `case.Case` is COVID positive, each contact may get infected and the probability of getting infected depends on parameters such as the secondary attack rates (SARs), all of which are set in `contacts_config`. For details on the precise simulation procedure used to generate `contacts.Contacts`, see either Appendix A of the report or the `__call__` method of `EmpiricalContactsSimulator`. ## TTI Strategies All of the information about the primary case's infection and how they infect their social contacts (under no government intervention) is now contained in `primary_case` and `social_contacts`. Now we run a simulation, which works as follows. We start by generating a large number of cases, each with associated contacts. Given a particular strategy (e.g. test-based TTI with NPI of stringency level S3), each case is passed through the strategy, which computes various metrics for the case. For example, it computes the number of secondary cases due to primary case (reproduction number) and the number of tests required. We then collect the results for each case and average them, returning the final evaluation of the strategy. ## Running a Simulation ``` from tti_explorer.strategies import TTIFlowModel ``` We will analyse the `S3_test_based_TTI` strategy from our report. For clarity, we will show the whole process. 
First get the configurations: ``` name = 'S3_test_based_TTI' case_config = config.get_case_config("delve") print(case_config) contacts_config = config.get_contacts_config("delve") policy_config = config.get_strategy_configs("delve", name)[name] factor_config = utils.get_sub_dictionary(policy_config, config.DELVE_CASE_FACTOR_KEYS) strategy_config = utils.get_sub_dictionary(policy_config, config.DELVE_STRATEGY_FACTOR_KEYS) ``` Set a random state: ``` rng = np.random.RandomState(42) ``` Make contact simulator: ``` simulate_contacts = EmpiricalContactsSimulator(over18, under18, rng) ``` Make the TTI Model: ``` tti_model = TTIFlowModel(rng, **strategy_config) ``` Generate cases, contacts and run simulation: ``` n_cases = 10000 outputs = list() for i in tqdm(range(n_cases)): case = simulate_case(rng, **case_config) case_factors = CaseFactors.simulate_from(rng, case, **factor_config) contacts = simulate_contacts(case, **contacts_config) res = tti_model(case, contacts, case_factors) outputs.append(res) ``` Collate and average results across the cases simulated: ``` # This cell is mosltly just formatting results... to_show = [ RETURN_KEYS.base_r, RETURN_KEYS.reduced_r, RETURN_KEYS.man_trace, RETURN_KEYS.app_trace, RETURN_KEYS.tests ] # scale factor to turn simulation numbers into UK population numbers nppl = case_config['infection_proportions']['nppl'] scales = [1, 1, nppl, nppl, nppl] results = pd.DataFrame( outputs ).mean( 0 ).loc[ to_show ].mul( scales ).to_frame( name=f"Simulation results: {name.replace('_', ' ')}" ).rename( index=lambda x: x + " (k per day)" if x.startswith("#") else x ) results.round(1) ```
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Image Classification using tf.keras <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> In this Colab you will classify images of flowers. You will build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`. # Importing Packages Let's start by importing required packages. **os** package is used to read files and directory structure, **numpy** is used to convert python list to numpy array and to perform required matrix operations and **matplotlib.pyplot** is used to plot the graph and display images in our training and validation data. 
``` from __future__ import absolute_import, division, print_function, unicode_literals import os import numpy as np import glob import shutil import matplotlib.pyplot as plt ``` ### TODO: Import TensorFlow and Keras Layers In the cell below, import Tensorflow and the Keras layers and models you will use to build your CNN. Also, import the `ImageDataGenerator` from Keras so that you can perform image augmentation. ``` import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras.preprocessing.image import ImageDataGenerator ``` # Data Loading In order to build our image classifier, we can begin by downloading the flowers dataset. We first need to download the archive version of the dataset and after the download we are storing it to "/tmp/" directory. After downloading the dataset, we need to extract its contents. ``` _URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz" zip_file = tf.keras.utils.get_file(origin=_URL, fname="flower_photos.tgz", extract=True) base_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos') ``` The dataset we downloaded contains images of 5 types of flowers: 1. Rose 2. Daisy 3. Dandelion 4. Sunflowers 5. Tulips So, let's create the labels for these 5 classes: ``` classes = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips'] ``` Also, The dataset we have downloaded has following directory structure. <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" > <b>flower_photos</b> |__ <b>diasy</b> |__ <b>dandelion</b> |__ <b>roses</b> |__ <b>sunflowers</b> |__ <b>tulips</b> </pre> As you can see there are no folders containing training and validation data. Therefore, we will have to create our own training and validation set. Let's write some code that will do this. 
The code below creates a `train` and a `val` folder each containing 5 folders (one for each type of flower). It then moves the images from the original folders to these new folders such that 80% of the images go to the training set and 20% of the images go into the validation set. In the end our directory will have the following structure: <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" > <b>flower_photos</b> |__ <b>diasy</b> |__ <b>dandelion</b> |__ <b>roses</b> |__ <b>sunflowers</b> |__ <b>tulips</b> |__ <b>train</b> |______ <b>daisy</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>dandelion</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>roses</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>sunflowers</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>tulips</b>: [1.jpg, 2.jpg, 3.jpg ....] |__ <b>val</b> |______ <b>daisy</b>: [507.jpg, 508.jpg, 509.jpg ....] |______ <b>dandelion</b>: [719.jpg, 720.jpg, 721.jpg ....] |______ <b>roses</b>: [514.jpg, 515.jpg, 516.jpg ....] |______ <b>sunflowers</b>: [560.jpg, 561.jpg, 562.jpg .....] |______ <b>tulips</b>: [640.jpg, 641.jpg, 642.jpg ....] </pre> Since we don't delete the original folders, they will still be in our `flower_photos` directory, but they will be empty. The code below also prints the total number of flower images we have for each type of flower. 
``` for cl in classes: img_path = os.path.join(base_dir, cl) images = glob.glob(img_path + '/*.jpg') print("{}: {} Images".format(cl, len(images))) train, val = images[:round(len(images)*0.8)], images[round(len(images)*0.8):] for t in train: if not os.path.exists(os.path.join(base_dir, 'train', cl)): os.makedirs(os.path.join(base_dir, 'train', cl)) shutil.move(t, os.path.join(base_dir, 'train', cl)) for v in val: if not os.path.exists(os.path.join(base_dir, 'val', cl)): os.makedirs(os.path.join(base_dir, 'val', cl)) shutil.move(v, os.path.join(base_dir, 'val', cl)) ``` For convenience, let us set up the path for the training and validation sets ``` train_dir = os.path.join(base_dir, 'train') val_dir = os.path.join(base_dir, 'val') ``` # Data Augmentation Overfitting generally occurs when we have small number of training examples. One way to fix this problem is to augment our dataset so that it has sufficient number of training examples. Data augmentation takes the approach of generating more training data from existing training samples, by augmenting the samples via a number of random transformations that yield believable-looking images. The goal is that at training time, your model will never see the exact same picture twice. This helps expose the model to more aspects of the data and generalize better. In **tf.keras** we can implement this using the same **ImageDataGenerator** class we used before. We can simply pass different transformations we would want to our dataset as a form of arguments and it will take care of applying it to the dataset during our training process. ## Experiment with Various Image Transformations In this section you will get some practice doing some basic image transformations. Before we begin making transformations let's define the our `batch_size` and our image size. Remember that the input to our CNN are images of the same size. We therefore have to resize the images in our dataset to the same size. 
### TODO: Set Batch and Image Size In the cell below, create a `batch_size` of 100 images and set a value to `IMG_SHAPE` such that our training data consists of images with width of 150 pixels and height of 150 pixels. ``` batch_size = 100 IMG_SHAPE = 150 ``` ### TODO: Apply Random Horizontal Flip In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random horizontal flip. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images. ``` image_gen = ImageDataGenerator(rescale=1./255, horizontal_flip=True) train_data_gen = image_gen.flow_from_directory( batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE,IMG_SHAPE) ) ``` Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action. ``` # This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column. def plotImages(images_arr): fig, axes = plt.subplots(1, 5, figsize=(20,20)) axes = axes.flatten() for img, ax in zip( images_arr, axes): ax.imshow(img) plt.tight_layout() plt.show() augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### TODO: Apply Random Rotation In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random 45 degree rotation. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images. 
``` image_gen = ImageDataGenerator(rescale=1./255, rotation_range=45) train_data_gen = image_gen.flow_from_directory(batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE)) ``` Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action. ``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### TODO: Apply Random Zoom In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random zoom of up to 50%. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images. ``` image_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5) train_data_gen = image_gen.flow_from_directory( batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE) ) ``` Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action. ``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### TODO: Put It All Together In the cell below, use ImageDataGenerator to create a transformation rescales the images by 255 and that applies: - random 45 degree rotation - random zoom of up to 50% - random horizontal flip - width shift of 0.15 - height shfit of 0.15 Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. 
Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images,to shuffle the images, and to set the class mode to `sparse`. ``` image_gen_train = ImageDataGenerator( rescale=1./255, rotation_range=45, width_shift_range=.15, height_shift_range=.15, horizontal_flip=True, zoom_range=0.5 ) train_data_gen = image_gen_train.flow_from_directory( batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE,IMG_SHAPE), class_mode='sparse' ) ``` Let's visualize how a single image would look like 5 different times, when we pass these augmentations randomly to our dataset. ``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### TODO: Create a Data Generator for the Validation Set Generally, we only apply data augmentation to our training examples. So, in the cell below, use ImageDataGenerator to create a transformation that only rescales the images by 255. Then use the `.flow_from_directory` method to apply the above transformation to the images in our validation set. Make sure you indicate the batch size, the path to the directory of the validation images, the target size for the images, and to set the class mode to `sparse`. Remember that it is not necessary to shuffle the images in the validation set. ``` image_gen_val = ImageDataGenerator(rescale=1./255) val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size, directory=val_dir, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode='sparse') ``` # TODO: Create the CNN In the cell below, create a convolutional neural network that consists of 3 convolution blocks. Each convolutional block contains a `Conv2D` layer followed by a max pool layer. The first convolutional block should have 16 filters, the second one should have 32 filters, and the third one should have 64 filters. All convolutional filters should be 3 x 3. All max pool layers should have a `pool_size` of `(2, 2)` . 
After the 3 convolutional blocks you should have a flatten layer followed by a fully connected layer with 512 units. The CNN should output class probabilities based on 5 classes which is done by the **softmax** activation function. All other layers should use a **relu** activation function. You should also add Dropout layers with a probability of 20%, where appropriate. ``` model = Sequential() model.add(Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_SHAPE,IMG_SHAPE, 3))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(32, 3, padding='same', activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, 3, padding='same', activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(5, activation='softmax')) ``` # TODO: Compile the Model In the cell below, compile your model using the ADAM optimizer, the sparse cross entropy function as a loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so make sure you also pass the metrics argument. ``` model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) ``` # TODO: Train the Model In the cell below, train your model using the **fit_generator** function instead of the usual **fit** function. We have to use the `fit_generator` function because we are using the **ImageDataGenerator** class to generate batches of training and validation data for our model. Train the model for 80 epochs and make sure you use the proper parameters in the `fit_generator` function . ``` epochs = 80 history = model.fit_generator( train_data_gen, steps_per_epoch=int(np.ceil(train_data_gen.n / float(batch_size))), epochs=epochs, validation_data=val_data_gen, validation_steps=int(np.ceil(val_data_gen.n / float(batch_size))) ) ``` # TODO: Plot Training and Validation Graphs. 
In the cell below, plot the training and validation accuracy/loss graphs. ``` acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(epochs) plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() ```
github_jupyter
<center> <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%202/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" /> </center> # Area Plots, Histograms, and Bar Plots Estimated time needed: **30** minutes ## Objectives After completing this lab you will be able to: * Create additional labs namely area plots, histogram and bar charts ## Table of Contents <div class="alert alert-block alert-info" style="margin-top: 20px"> 1. [Exploring Datasets with *pandas*](#0)<br> 2. [Downloading and Prepping Data](#2)<br> 3. [Visualizing Data using Matplotlib](#4) <br> 4. [Area Plots](#6) <br> 5. [Histograms](#8) <br> 6. [Bar Charts](#10) <br> </div> # Exploring Datasets with *pandas* and Matplotlib<a id="0"></a> Toolkits: The course heavily relies on [**pandas**](http://pandas.pydata.org/?utm_medium=Exinfluencer\&utm_source=Exinfluencer\&utm_content=000026UJ\&utm_term=10006555\&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) and [**Numpy**](http://www.numpy.org/?utm_medium=Exinfluencer\&utm_source=Exinfluencer\&utm_content=000026UJ\&utm_term=10006555\&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) for data wrangling, analysis, and visualization. The primary plotting library that we are exploring in the course is [Matplotlib](http://matplotlib.org/?utm_medium=Exinfluencer\&utm_source=Exinfluencer\&utm_content=000026UJ\&utm_term=10006555\&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01). 
Dataset: Immigration to Canada from 1980 to 2013 - [International migration flows to and from selected countries - The 2015 revision](http://www.un.org/en/development/desa/population/migration/data/empirical2/migrationflows.shtml?utm_medium=Exinfluencer\&utm_source=Exinfluencer\&utm_content=000026UJ\&utm_term=10006555\&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) from United Nations' website. The dataset contains annual data on the flows of international migrants as recorded by the countries of destination. The data presents both inflows and outflows according to the place of birth, citizenship or place of previous / next residence both for foreigners and nationals. For this lesson, we will focus on the Canadian Immigration data. # Downloading and Prepping Data <a id="2"></a> Import Primary Modules. The first thing we'll do is import two key data analysis modules: `pandas` and `numpy`. ``` import numpy as np # useful for many scientific computing in Python import pandas as pd # primary data structure library ``` Let's download and import our primary Canadian Immigration dataset using *pandas*'s `read_excel()` method. Normally, before we can do that, we would need to download a module which *pandas* requires for reading in Excel files. This module was **openpyxl** (formerly **xlrd**). For your convenience, we have pre-installed this module, so you would not have to worry about that. Otherwise, you would need to run the following line of code to install the **openpyxl** module: ``` ! pip3 install openpyxl ``` Download the dataset and read it into a *pandas* dataframe.
``` df_can = pd.read_excel( 'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/Canada.xlsx', sheet_name='Canada by Citizenship', skiprows=range(20), skipfooter=2) print('Data downloaded and read into a dataframe!') ``` Let's take a look at the first five items in our dataset. ``` df_can.head() ``` Let's find out how many entries there are in our dataset. ``` # print the dimensions of the dataframe print(df_can.shape) ``` Clean up data. We will make some modifications to the original dataset to make it easier to create our visualizations. Refer to `Introduction to Matplotlib and Line Plots` lab for the rationale and detailed description of the changes. #### 1. Clean up the dataset to remove columns that are not informative to us for visualization (eg. Type, AREA, REG). ``` df_can.drop(['AREA', 'REG', 'DEV', 'Type', 'Coverage'], axis=1, inplace=True) # let's view the first five elements and see how the dataframe was changed df_can.head() ``` Notice how the columns Type, Coverage, AREA, REG, and DEV got removed from the dataframe. #### 2. Rename some of the columns so that they make sense. ``` df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent','RegName':'Region'}, inplace=True) # let's view the first five elements and see how the dataframe was changed df_can.head() ``` Notice how the column names now make much more sense, even to an outsider. #### 3. For consistency, ensure that all column labels are of type string. ``` # let's examine the types of the column labels all(isinstance(column, str) for column in df_can.columns) ``` Notice how the above line of code returned *False* when we tested if all the column labels are of type **string**. So let's change them all to **string** type. ``` df_can.columns = list(map(str, df_can.columns)) # let's check the column labels types now all(isinstance(column, str) for column in df_can.columns) ``` #### 4.
Set the country name as index - useful for quickly looking up countries using .loc method. ``` df_can.set_index('Country', inplace=True) # Let's view the first five elements and see how the dataframe was changed df_can.head() ``` Notice how the country names now serve as indices. #### 5. Add total column. ``` df_can['Total'] = df_can.sum(axis=1) # let's view the first five elements and see how the dataframe was changed df_can.head() ``` Now the dataframe has an extra column that presents the total number of immigrants from each country in the dataset from 1980 - 2013. So if we print the dimension of the data, we get: ``` print('data dimensions:', df_can.shape) ``` So now our dataframe has 38 columns instead of 37 columns that we had before. ``` # finally, let's create a list of years from 1980 - 2013 # this will come in handy when we start plotting the data years = list(map(str, range(1980, 2014))) years ``` # Visualizing Data using Matplotlib<a id="4"></a> Import the `matplotlib` library. ``` # use the inline backend to generate the plots within the browser # % matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.style.use('ggplot') # optional: for ggplot-like style # check for latest version of Matplotlib print('Matplotlib version: ', mpl.__version__) # >= 2.0.0 %matplotlib inline ``` # Area Plots<a id="6"></a> In the last module, we created a line plot that visualized the top 5 countries that contributed the most immigrants to Canada from 1980 to 2013. With a little modification to the code, we can visualize this plot as a cumulative plot, also known as a **Stacked Line Plot** or **Area plot**. ``` df_can.sort_values(['Total'], ascending=False, axis=0, inplace=True) # get the top 5 entries df_top5 = df_can.head() # transpose the dataframe df_top5 = df_top5[years].transpose() df_top5.head() ``` Area plots are stacked by default.
And to produce a stacked area plot, each column must be either all positive or all negative values (any `NaN`, i.e. not a number, values will default to 0). To produce an unstacked plot, set parameter `stacked` to value `False`. ``` # let's change the index values of df_top5 to type integer for plotting df_top5.index = df_top5.index.map(int) df_top5.plot(kind='area', stacked=False, figsize=(20, 10)) # pass a tuple (x, y) size plt.title('Immigration Trend of Top 5 Countries') plt.ylabel('Number of Immigrants') plt.xlabel('Years') plt.show() ``` The unstacked plot has a default transparency (alpha value) at 0.5. We can modify this value by passing in the `alpha` parameter. ``` df_top5.plot(kind='area', alpha=0.25, # 0 - 1, default value alpha = 0.5 stacked=False, figsize=(20, 10)) plt.title('Immigration Trend of Top 5 Countries') plt.ylabel('Number of Immigrants') plt.xlabel('Years') plt.show() ``` ### Two types of plotting As we discussed in the video lectures, there are two styles/options of plotting with `matplotlib`, plotting using the Artist layer and plotting using the scripting layer. \*\*Option 1: Scripting layer (procedural method) - using matplotlib.pyplot as 'plt' \*\* You can use `plt` i.e. `matplotlib.pyplot` and add more elements by calling different methods procedurally; for example, `plt.title(...)` to add title or `plt.xlabel(...)` to add label to the x-axis. ```python # Option 1: This is what we have been using so far df_top5.plot(kind='area', alpha=0.35, figsize=(20, 10)) plt.title('Immigration trend of top 5 countries') plt.ylabel('Number of immigrants') plt.xlabel('Years') ``` \*\*Option 2: Artist layer (Object oriented method) - using an `Axes` instance from Matplotlib (preferred) \*\* You can use an `Axes` instance of your current plot and store it in a variable (eg. `ax`). You can add more elements by calling methods with a little change in syntax (by adding "`set_`" to the previous methods). 
For example, use `ax.set_title()` instead of `plt.title()` to add title, or `ax.set_xlabel()` instead of `plt.xlabel()` to add label to the x-axis. This option sometimes is more transparent and flexible to use for advanced plots (in particular when having multiple plots, as you will see later). In this course, we will stick to the **scripting layer**, except for some advanced visualizations where we will need to use the **artist layer** to manipulate advanced aspects of the plots. ``` # option 2: preferred option with more flexibility ax = df_top5.plot(kind='area', alpha=0.35, figsize=(20, 10)) ax.set_title('Immigration Trend of Top 5 Countries') ax.set_ylabel('Number of Immigrants') ax.set_xlabel('Years') ``` **Question**: Use the scripting layer to create a stacked area plot of the 5 countries that contributed the least to immigration to Canada **from** 1980 to 2013. Use a transparency value of 0.45. ``` ### type your answer here #The correct answer is: # get the 5 countries with the least contribution df_least5 = df_can.tail(5) # transpose the dataframe df_least5 = df_least5[years].transpose() df_least5.head() df_least5.index = df_least5.index.map(int) # let's change the index values of df_least5 to type integer for plotting df_least5.plot(kind='area', alpha=0.45, figsize=(20, 10)) plt.title('Immigration Trend of 5 Countries with Least Contribution to Immigration') plt.ylabel('Number of Immigrants') plt.xlabel('Years') plt.show() ``` <details><summary>Click here for a sample python solution</summary> ```python #The correct answer is: # get the 5 countries with the least contribution df_least5 = df_can.tail(5) # transpose the dataframe df_least5 = df_least5[years].transpose() df_least5.head() df_least5.index = df_least5.index.map(int) # let's change the index values of df_least5 to type integer for plotting df_least5.plot(kind='area', alpha=0.45, figsize=(20, 10)) plt.title('Immigration Trend of 5 Countries with Least Contribution to Immigration') 
plt.ylabel('Number of Immigrants') plt.xlabel('Years') plt.show() ``` </details> **Question**: Use the artist layer to create an unstacked area plot of the 5 countries that contributed the least to immigration to Canada **from** 1980 to 2013. Use a transparency value of 0.55. ``` ### type your answer here #The correct answer is: # get the 5 countries with the least contribution df_least5 = df_can.tail(5) # transpose the dataframe df_least5 = df_least5[years].transpose() df_least5.head() df_least5.index = df_least5.index.map(int) # let's change the index values of df_least5 to type integer for plotting ax = df_least5.plot(kind='area', alpha=0.55, stacked=False, figsize=(20, 10)) ax.set_title('Immigration Trend of 5 Countries with Least Contribution to Immigration') ax.set_ylabel('Number of Immigrants') ax.set_xlabel('Years') ``` <details><summary>Click here for a sample python solution</summary> ```python #The correct answer is: # get the 5 countries with the least contribution df_least5 = df_can.tail(5) # transpose the dataframe df_least5 = df_least5[years].transpose() df_least5.head() df_least5.index = df_least5.index.map(int) # let's change the index values of df_least5 to type integer for plotting ax = df_least5.plot(kind='area', alpha=0.55, stacked=False, figsize=(20, 10)) ax.set_title('Immigration Trend of 5 Countries with Least Contribution to Immigration') ax.set_ylabel('Number of Immigrants') ax.set_xlabel('Years') ``` </details> # Histograms<a id="8"></a> A histogram is a way of representing the *frequency* distribution of numeric dataset. The way it works is it partitions the x-axis into *bins*, assigns each data point in our dataset to a bin, and then counts the number of data points that have been assigned to each bin. So the y-axis is the frequency or the number of data points in each bin. Note that we can change the bin size and usually one needs to tweak it so that the distribution is displayed nicely. 
**Question:** What is the frequency distribution of the number (population) of new immigrants from the various countries to Canada in 2013? Before we proceed with creating the histogram plot, let's first examine the data split into intervals. To do this, we will use **Numpy**'s `histogram` method to get the bin ranges and frequency counts as follows: ``` # let's quickly view the 2013 data df_can['2013'].head() # np.histogram returns 2 values count, bin_edges = np.histogram(df_can['2013']) print(count) # frequency count print(bin_edges) # bin ranges, default = 10 bins ``` By default, the `histogram` method breaks up the dataset into 10 bins. The figure below summarizes the bin ranges and the frequency distribution of immigration in 2013. We can see that in 2013: * 178 countries contributed between 0 to 3412.9 immigrants * 11 countries contributed between 3412.9 to 6825.8 immigrants * 1 country contributed between 6825.8 to 10238.7 immigrants, and so on.. <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%202/images/Mod2Fig1-Histogram.JPG" align="center" width=800> We can easily graph this distribution by passing `kind=hist` to `plot()`. ``` df_can['2013'].plot(kind='hist', figsize=(8, 5)) # add a title to the histogram plt.title('Histogram of Immigration from 195 Countries in 2013') # add y-label plt.ylabel('Number of Countries') # add x-label plt.xlabel('Number of Immigrants') plt.show() ``` In the above plot, the x-axis represents the population range of immigrants in intervals of 3412.9. The y-axis represents the number of countries that contributed to the aforementioned population. Notice that the x-axis labels do not match with the bin size.
This can be fixed by passing in a `xticks` keyword that contains the list of the bin sizes, as follows: ``` # 'bin_edges' is a list of bin intervals count, bin_edges = np.histogram(df_can['2013']) df_can['2013'].plot(kind='hist', figsize=(8, 5), xticks=bin_edges) plt.title('Histogram of Immigration from 195 countries in 2013') # add a title to the histogram plt.ylabel('Number of Countries') # add y-label plt.xlabel('Number of Immigrants') # add x-label plt.show() ``` *Side Note:* We could use `df_can['2013'].plot.hist()`, instead. In fact, throughout this lesson, using `some_data.plot(kind='type_plot', ...)` is equivalent to `some_data.plot.type_plot(...)`. That is, passing the type of the plot as argument or method behaves the same. See the *pandas* documentation for more info http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.plot.html. We can also plot multiple histograms on the same plot. For example, let's try to answer the following questions using a histogram. **Question**: What is the immigration distribution for Denmark, Norway, and Sweden for years 1980 - 2013? ``` # let's quickly view the dataset df_can.loc[['Denmark', 'Norway', 'Sweden'], years] # generate histogram df_can.loc[['Denmark', 'Norway', 'Sweden'], years].plot.hist() ``` That does not look right! Don't worry, you'll often come across situations like this when creating plots. The solution often lies in how the underlying dataset is structured. Instead of plotting the population frequency distribution of the population for the 3 countries, *pandas* instead plotted the population frequency distribution for the `years`. This can be easily fixed by first transposing the dataset, and then plotting as shown below. 
``` # transpose dataframe df_t = df_can.loc[['Denmark', 'Norway', 'Sweden'], years].transpose() df_t.head() # generate histogram df_t.plot(kind='hist', figsize=(10, 6)) plt.title('Histogram of Immigration from Denmark, Norway, and Sweden from 1980 - 2013') plt.ylabel('Number of Years') plt.xlabel('Number of Immigrants') plt.show() ``` Let's make a few modifications to improve the impact and aesthetics of the previous plot: * increase the bin size to 15 by passing in `bins` parameter; * set transparency to 60% by passing in `alpha` parameter; * label the x-axis by passing in `x-label` parameter; * change the colors of the plots by passing in `color` parameter. ``` # let's get the x-tick values count, bin_edges = np.histogram(df_t, 15) # un-stacked histogram df_t.plot(kind ='hist', figsize=(10, 6), bins=15, alpha=0.6, xticks=bin_edges, color=['coral', 'darkslateblue', 'mediumseagreen'] ) plt.title('Histogram of Immigration from Denmark, Norway, and Sweden from 1980 - 2013') plt.ylabel('Number of Years') plt.xlabel('Number of Immigrants') plt.show() ``` Tip: For a full listing of colors available in Matplotlib, run the following code in your python shell: ```python import matplotlib for name, hex in matplotlib.colors.cnames.items(): print(name, hex) ``` If we do not want the plots to overlap each other, we can stack them using the `stacked` parameter. Let's also adjust the min and max x-axis labels to remove the extra gap on the edges of the plot. We can pass a tuple (min,max) using the `xlim` paramater, as show below. 
``` count, bin_edges = np.histogram(df_t, 15) xmin = bin_edges[0] - 10 # first bin value is 31.0, adding buffer of 10 for aesthetic purposes xmax = bin_edges[-1] + 10 # last bin value is 308.0, adding buffer of 10 for aesthetic purposes # stacked Histogram df_t.plot(kind='hist', figsize=(10, 6), bins=15, xticks=bin_edges, color=['coral', 'darkslateblue', 'mediumseagreen'], stacked=True, xlim=(xmin, xmax) ) plt.title('Histogram of Immigration from Denmark, Norway, and Sweden from 1980 - 2013') plt.ylabel('Number of Years') plt.xlabel('Number of Immigrants') plt.show() ``` **Question**: Use the scripting layer to display the immigration distribution for Greece, Albania, and Bulgaria for years 1980 - 2013? Use an overlapping plot with 15 bins and a transparency value of 0.35. ``` ### type your answer here #The correct answer is: # create a dataframe of the countries of interest (cof) df_cof = df_can.loc[['Greece', 'Albania', 'Bulgaria'], years] # transpose the dataframe df_cof = df_cof.transpose() # let's get the x-tick values count, bin_edges = np.histogram(df_cof, 15) # Un-stacked Histogram df_cof.plot(kind ='hist', figsize=(10, 6), bins=15, alpha=0.35, xticks=bin_edges, color=['coral', 'darkslateblue', 'mediumseagreen'] ) plt.title('Histogram of Immigration from Greece, Albania, and Bulgaria from 1980 - 2013') plt.ylabel('Number of Years') plt.xlabel('Number of Immigrants') plt.show() ``` <details><summary>Click here for a sample python solution</summary> ```python #The correct answer is: # create a dataframe of the countries of interest (cof) df_cof = df_can.loc[['Greece', 'Albania', 'Bulgaria'], years] # transpose the dataframe df_cof = df_cof.transpose() # let's get the x-tick values count, bin_edges = np.histogram(df_cof, 15) # Un-stacked Histogram df_cof.plot(kind ='hist', figsize=(10, 6), bins=15, alpha=0.35, xticks=bin_edges, color=['coral', 'darkslateblue', 'mediumseagreen'] ) plt.title('Histogram of Immigration from Greece, Albania, and Bulgaria from 1980 
- 2013') plt.ylabel('Number of Years') plt.xlabel('Number of Immigrants') plt.show() ``` </details> # Bar Charts (Dataframe) <a id="10"></a> A bar plot is a way of representing data where the *length* of the bars represents the magnitude/size of the feature/variable. Bar graphs usually represent numerical and categorical variables grouped in intervals. To create a bar plot, we can pass one of two arguments via `kind` parameter in `plot()`: * `kind=bar` creates a *vertical* bar plot * `kind=barh` creates a *horizontal* bar plot **Vertical bar plot** In vertical bar graphs, the x-axis is used for labelling, and the length of bars on the y-axis corresponds to the magnitude of the variable being measured. Vertical bar graphs are particularly useful in analyzing time series data. One disadvantage is that they lack space for text labelling at the foot of each bar. **Let's start off by analyzing the effect of Iceland's Financial Crisis:** The 2008 - 2011 Icelandic Financial Crisis was a major economic and political event in Iceland. Relative to the size of its economy, Iceland's systemic banking collapse was the largest experienced by any country in economic history. The crisis led to a severe economic depression in 2008 - 2011 and significant political unrest. **Question:** Let's compare the number of Icelandic immigrants (country = 'Iceland') to Canada from year 1980 to 2013. ``` # step 1: get the data df_iceland = df_can.loc['Iceland', years] df_iceland.head() # step 2: plot data df_iceland.plot(kind='bar', figsize=(10, 6)) plt.xlabel('Year') # add to x-label to the plot plt.ylabel('Number of immigrants') # add y-label to the plot plt.title('Icelandic immigrants to Canada from 1980 to 2013') # add title to the plot plt.show() ``` The bar plot above shows the total number of immigrants broken down by each year. We can clearly see the impact of the financial crisis; the number of immigrants to Canada started increasing rapidly after 2008. 
Let's annotate this on the plot using the `annotate` method of the **scripting layer** or the **pyplot interface**. We will pass in the following parameters: * `s`: str, the text of annotation. * `xy`: Tuple specifying the (x,y) point to annotate (in this case, end point of arrow). * `xytext`: Tuple specifying the (x,y) point to place the text (in this case, start point of arrow). * `xycoords`: The coordinate system that xy is given in - 'data' uses the coordinate system of the object being annotated (default). * `arrowprops`: Takes a dictionary of properties to draw the arrow: * `arrowstyle`: Specifies the arrow style, `'->'` is standard arrow. * `connectionstyle`: Specifies the connection type. `arc3` is a straight line. * `color`: Specifies color of arrow. * `lw`: Specifies the line width. I encourage you to read the Matplotlib documentation for more details on annotations: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.annotate. ``` df_iceland.plot(kind='bar', figsize=(10, 6), rot=90) # rotate the xticks(labelled points on x-axis) by 90 degrees plt.xlabel('Year') plt.ylabel('Number of Immigrants') plt.title('Icelandic Immigrants to Canada from 1980 to 2013') # Annotate arrow plt.annotate('', # s: str. Will leave it blank for no text xy=(32, 70), # place head of the arrow at point (year 2012 , pop 70) xytext=(28, 20), # place base of the arrow at point (year 2008 , pop 20) xycoords='data', # will use the coordinate system of the object being annotated arrowprops=dict(arrowstyle='->', connectionstyle='arc3', color='blue', lw=2) ) plt.show() ``` Let's also annotate a text to go over the arrow.
We will pass in the following additional parameters: * `rotation`: rotation angle of text in degrees (counter clockwise) * `va`: vertical alignment of text \[‘center’ | ‘top’ | ‘bottom’ | ‘baseline’] * `ha`: horizontal alignment of text \[‘center’ | ‘right’ | ‘left’] ``` df_iceland.plot(kind='bar', figsize=(10, 6), rot=90) plt.xlabel('Year') plt.ylabel('Number of Immigrants') plt.title('Icelandic Immigrants to Canada from 1980 to 2013') # Annotate arrow plt.annotate('', # s: str. will leave it blank for no text xy=(32, 70), # place head of the arrow at point (year 2012 , pop 70) xytext=(28, 20), # place base of the arrow at point (year 2008 , pop 20) xycoords='data', # will use the coordinate system of the object being annotated arrowprops=dict(arrowstyle='->', connectionstyle='arc3', color='blue', lw=2) ) # Annotate Text plt.annotate('2008 - 2011 Financial Crisis', # text to display xy=(28, 30), # start the text at at point (year 2008 , pop 30) rotation=72.5, # based on trial and error to match the arrow va='bottom', # want the text to be vertically 'bottom' aligned ha='left', # want the text to be horizontally 'left' algned. ) plt.show() ``` **Horizontal Bar Plot** Sometimes it is more practical to represent the data horizontally, especially if you need more room for labelling the bars. In horizontal bar graphs, the y-axis is used for labelling, and the length of bars on the x-axis corresponds to the magnitude of the variable being measured. As you will see, there is more room on the y-axis to label categorical variables. **Question:** Using the scripting layer and the `df_can` dataset, create a *horizontal* bar plot showing the *total* number of immigrants to Canada from the top 15 countries, for the period 1980 - 2013. Label each country with the total immigrant count. Step 1: Get the data pertaining to the top 15 countries.
``` ### type your answer here # sort dataframe on 'Total' column (descending) df_can.sort_values(by='Total', ascending=True, inplace=True) # get top 15 countries df_top15 = df_can['Total'].tail(15) df_top15 ``` <details><summary>Click here for a sample python solution</summary> ```python #The correct answer is: # sort dataframe on 'Total' column (descending) df_can.sort_values(by='Total', ascending=True, inplace=True) # get top 15 countries df_top15 = df_can['Total'].tail(15) df_top15 ``` </details> Step 2: Plot data: 1. Use `kind='barh'` to generate a bar chart with horizontal bars. 2. Make sure to choose a good size for the plot and to label your axes and to give the plot a title. 3. Loop through the countries and annotate the immigrant population using the anotate function of the scripting interface. ``` ### type your answer here # generate plot df_top15.plot(kind='barh', figsize=(12, 12), color='steelblue') plt.xlabel('Number of Immigrants') plt.title('Top 15 Conuntries Contributing to the Immigration to Canada between 1980 - 2013') # annotate value labels to each country for index, value in enumerate(df_top15): label = format(int(value), ',') # format int with commas # place text at the end of bar (subtracting 47000 from x, and 0.1 from y to make it fit within the bar) plt.annotate(label, xy=(value - 47000, index - 0.10), color='white') plt.show() ``` <details><summary>Click here for a sample python solution</summary> ```python #The correct answer is: # generate plot df_top15.plot(kind='barh', figsize=(12, 12), color='steelblue') plt.xlabel('Number of Immigrants') plt.title('Top 15 Conuntries Contributing to the Immigration to Canada between 1980 - 2013') # annotate value labels to each country for index, value in enumerate(df_top15): label = format(int(value), ',') # format int with commas # place text at the end of bar (subtracting 47000 from x, and 0.1 from y to make it fit within the bar) plt.annotate(label, xy=(value - 47000, index - 0.10), color='white') 
plt.show() ``` </details>
github_jupyter
# Automated ML with azureml The dependencies are imported ``` import os import pandas as pd from azureml.core import Dataset, Datastore, Workspace, Experiment # from azureml.train.automl import AutoMLConfig from azureml.widgets import RunDetails ``` ## Dataset ### Overview We will try to predict the rating of a modified version of the **Kaggle Trip advisor dataset**. The Dataset contains a Trip Advisor hotel review text column as well as a Rating column with Ratings from 0 - 5 stars. > The Tripadvisor Hotel Review Dataset file is derived from the publication: > >_Alam, M. H., Ryu, W.-J., Lee, S., 2016. Joint multi-grain topic sentiment: modeling semantic aspects for online >reviews. Information Sciences 339, 206–223._ > > You can download the Dataset with the link: > [trip-advisor-hotel-reviews](https://www.kaggle.com/andrewmvd/trip-advisor-hotel-reviews) In the original Dataset the target **Rating** column contains the values 0* - 5*. In a modified version of the dataset we will try to predict the **norm_rating** column based on the **Review** text column as a **classification task** with: * class 0 - Negative reviews (1* & 2* rating) * class 1 - Neutral reviews (3* rating) * class 2 - Positive reviews (4* & 5* rating) ## Initialize the Workspace and create an Experiment ``` ws = Workspace.from_config() # choose a name for experiment experiment_name = 'automl_review_classifier' experiment=Experiment(ws, experiment_name) experiment print(f"subscription key {ws.subscription_id}") print(f"resource group {ws.resource_group}") print(f"workspace name {ws.name}") ``` ## Load the Dataset and perform a train test split ``` import pandas as pd from sklearn.model_selection import train_test_split filepath_2_dataset = r"hotel_reviews_featurized_roberta.csv" # Read the Dataset as a pandas dataframe hotel_review_dataset = pd.read_csv(filepath_2_dataset) print(f"Dataset Shape: {hotel_review_dataset.shape}") hotel_review_dataset.describe() ``` ### First the same train test split
is performed for the Dataset to make it available to both AutoML and Hyperdrive ``` # Get hotel review text and normalized rating X = hotel_review_dataset.drop(columns=['norm_rating']) y = list(hotel_review_dataset.norm_rating) X_train, X_test, y_train, y_test = train_test_split(hotel_review_dataset, y, test_size=0.2, random_state=42) print(f"X_train: {X_train.shape}\nX_test: {X_test.shape}\ny_train: {len(y_train)}\ny_test: {len(y_test)}") ``` ### The training set and test sets will be registered separately to ensure strict separation ``` X_train['norm_rating'] = y_train X_test['norm_rating'] = y_test print(X_train.shape) print(X_test.shape) ``` ### The AutoML train/testsets should contain just the text column and norm rating column (no feature engineering) #### Upload the different train/test sets ``` X_train_automl = X_train.loc[:, ['text', 'norm_rating']] X_test_automl = X_test.loc[:, ['text', 'norm_rating']] os.makedirs("data", exist_ok=True) # Upload the training/test data in the default datastore train_dataset_path_automl = "data/train_set_automl_clean.csv" X_train_automl.to_csv(train_dataset_path_automl, index=False) test_dataset_path_automl = "data/test_set_automl_clean.csv" X_test_automl.to_csv(test_dataset_path_automl, index=False) X_train_hyper = X_train.drop(columns =["text"]) X_test_hyper = X_test.drop(columns = ["text"]) train_dataset_path = "data/train_set_hyper_clean.csv" X_train_hyper.to_csv(train_dataset_path, index=False) test_dataset_path = "data/test_set_hyper_clean.csv" X_test_hyper.to_csv(test_dataset_path, index=False) datastore = ws.get_default_datastore() datastore.upload(src_dir="data", target_path="data") ``` ### Load the training and test Datasets and register them ``` dataset_training = Dataset.Tabular.from_delimited_files(path = [(datastore, ("data/train_set_automl_clean.csv"))]) dataset_training = dataset_training.register(workspace=ws, name="auto-ml-training-data", description="Hotel Review AutoML Training Data") dataset_test = 
Dataset.Tabular.from_delimited_files(path = [(datastore, ("data/test_set_automl_clean.csv"))]) dataset_test = dataset_test.register(workspace=ws, name="auto-ml-test-data", description="Hotel Review AutoML Test Data") pandas_df = dataset_test.to_pandas_dataframe() ``` ### Hotel Review example >outstanding cleanliness value location wanted stay central location london luna simone hotels fell search area checked reviews trip advisor decided book certainly not dissapointed location ideal short walk transit important sight seeing locations absolutely cleanest hotels stayed breakfast good served management gives chance talk owners management staff outstanding friendly assist site seeing plans no additional charge fact probably save money got london bus tour tickets hotel vendor rate thinking trip uk europe luna simone london hotel staying going london reccomend luna simone' ## Define a Compute Target for AutoML ``` ## Define a Compute Target for AutoML from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException cpu_cluster_name = "cpu-cluster-1" try: compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name) print("Found existing Compute Target") except ComputeTargetException: compute_config = AmlCompute.provisioning_configuration(vm_size = "Standard_D2_V2", max_nodes=4) compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config) compute_target.wait_for_completion(show_output=True) ``` ## AutoML Configuration * _experiment_timeout_minutes_: was set to prevent the experiment from running for long time periods with high cost * _max_concurrent_iterations_: was set to 4 since only 4 compute target nodes are available for parallel child runs * _primary_metric_: accuracy is used in the configuration below (AUC_weighted would also be a reasonable choice, since it balances the false positive and true positive rates) * _n_cross_validations_: 5 cross-validations were selected, since this results in a more robust mean/std estimation for each model
* _enable_early_stopping_: to prevent unproductive runs which lead to no improvement and costs * _compute_target_: needs to be defined to perform the AutoML computations * _task_: needs to be classification since the label column is defining separate classes * _training_data_: corresponds to the training set * _label_column_name_: corresponds to the target/label column defining the separate classes * _debug_log_: defined to enable detailed logging of automl errors ``` from azureml.train.automl.automlconfig import AutoMLConfig ## Define key AutoML Settings automl_settings = { "experiment_timeout_minutes": 20, "max_concurrent_iterations": 4, "primary_metric": "accuracy", "n_cross_validations": 5 } ## Setup an AutoMLConfig object automl_config = AutoMLConfig( compute_target=compute_target, task="classification", training_data=dataset_training, label_column_name="norm_rating", enable_early_stopping=True, debug_log="automl_errors.log", **automl_settings ) # The Experiment needs to be submitted in order to execute the AutoML run automl_run = experiment.submit(automl_config) ``` ## Run Details Write about the different models trained and their performance. Why do you think some models did better than others? ``` from azureml.widgets import RunDetails RunDetails(automl_run).show() automl_run.wait_for_completion(show_output=True) ``` ## Performance metrics and Best Model TODO: In the cell below, get the best model from the automl experiments and display all the properties of the model. ### Get the best model and the best run ``` best_child = automl_run.get_best_child() print(best_child.get_file_names()) best_model = best_child.register_model(model_name="best-automl-model", model_path="outputs/model.pkl") ``` ## Model Deployment In the cell below, register the model, create an inference config and deploy the model as a web service.
``` from azureml.core.environment import Environment from azureml.core.model import Model from azureml.core.conda_dependencies import CondaDependencies from azureml.core.model import InferenceConfig from azureml.core.webservice import AciWebservice # Create the environment myenv = Environment(name="myenv") conda_dep = CondaDependencies() # Define the packages needed by the model and scripts conda_dep.add_conda_package("pandas") conda_dep.add_conda_package("numpy") conda_dep.add_conda_package("scikit-learn") conda_dep.add_conda_package("xgboost") # You must list azureml-defaults as a pip dependency conda_dep.add_pip_package("azureml-defaults") # Adds dependencies to PythonSection of myenv myenv.python.conda_dependencies=conda_dep inference_config = InferenceConfig(entry_script="automl_score.py", environment=myenv) service_name = 'automl-review-classification' aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1) service = Model.deploy(workspace=ws, name=service_name, models=[best_model], inference_config=inference_config, deployment_config=aci_config, overwrite=True) service.wait_for_deployment(show_output=True) print("scoring URI: " + service.scoring_uri) ``` ### Safely enter the API key via getpass so it is not shown in plain text ``` import getpass key = getpass.getpass("Enter the API Key of the endpoint") import requests import json # Get an example text from the test set pandas dataframe and create a HTTP request payload example_text = pandas_df.iloc[0, 0] data = json.dumps({"data": [{'text': example_text}]}) input_data = bytes(data, encoding="utf-8") # Set the content type headers = {'Content-Type': 'application/json'} # authentication is enabled, so we set the authorization header headers['Authorization'] = f'Bearer {key}' scoring_uri = "http://824c9ffc-835d-4d97-990f-692ecc24aae0.southcentralus.azurecontainer.io/score" mapping_dict = {0: "Negative", 1: "Neutral", 2: "Positive"} # Make the request and display the classification results 
response = requests.post(scoring_uri, input_data, headers=headers) print(f"Prediction for hotel review: \n\n{example_text}\n") print(f"It is a: {mapping_dict[json.loads(response.json())['result'][0]]} hotel review!") ``` ### Print the logs of the webservice ``` print(service.get_logs()) ``` ### Delete the webservice ``` service.delete() ```
github_jupyter
<a href="https://colab.research.google.com/drive/1F22gG4PqDIuM0R4zbzEKu1DlGbnHeNxM?usp=sharing" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> By [Ibrahim Sobh](https://www.linkedin.com/in/ibrahim-sobh-phd-8681757/) ## In this code, we are going to implement a basic image classifier: - Load the dataset (MNIST hand written digits) - Design a deep learning model and inspect its learnable parameters - Train the model on the training data and inspect learning curves - Evaluate the trained model on the never seen testing data - Save the model for later use - Load and use the model ``` import keras from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.utils import plot_model from PIL import Image from keras import backend as K import matplotlib.pyplot as plt batch_size = 128 num_classes = 10 epochs = 10 #50 # input image dimensions img_rows, img_cols = 28, 28 ``` ## Load the data ![MNIST](https://upload.wikimedia.org/wikipedia/commons/2/27/MnistExamples.png) ``` # load data, split into train and test sets (x_train, y_train), (x_test, y_test) = mnist.load_data() if K.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 # small data data_size = 10000 x_train = x_train[:data_size] y_train = y_train[:data_size] x_test = x_test[:data_size] y_test = y_test[:data_size] print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 
'test samples') # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) ``` ## Build the DNN model ``` model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) model.summary() plot_model(model, to_file="mnistcnn.png", show_shapes=True) img = Image.open('./mnistcnn.png') img ``` ## Train the model ``` history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) ``` ## Evalaute the model ``` score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) plt.figure(figsize=(10, 7)) plt.plot(history.history['loss'], label='Train') plt.plot(history.history['val_loss'], label='Test') plt.title('Learning curve') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend() ``` ## Save and load the trained model ``` from keras.models import load_model # save the model model.save('my_model.h5') # creates a HDF5 file 'my_model.h5' del model # deletes the existing model !ls -l # load the saved model myloadednewmodel = load_model('my_model.h5') myloadednewmodel.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) ```
github_jupyter
# Searching and sorting Now we're getting more into the 'numerical methods' part of the course! Today, we will delve into the following: * how to write **pseudo code** * **computational complexity** (big-O notion). * **search algorithms** (sequential, binary) * **sort algorithms** (bubble, insertion, quick) **Search** and **sort** algos are at the heart of computer science. Understanding these is the first thing you get into at DIKU or DTU, so we are also going to get a taste of them. **Links to further material:** If you feel inspired by the material here, you can try your hand at solving algorithmic challenges at [Project Euler](https://projecteuler.net). (there are both easy and harder exercises to choose from) ``` import numpy as np import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') import time import string import random import sys from IPython.display import Image ``` # Algorithms - what are they even? **Technically:** An unambigious specification of how to solve a class of problems. **In a nut shell:** *An algo is a recipe.* Even a simple cooking recipe is an algorithm.. 1. Preheat the oven 2. Mix flour, sugar and eggs 3. Pour into a baking pan etc. **Properties of an algorithm:** 1. Unambigious termination criteria 1. Pre-defined inputs 2. Pre-defined ouputs 3. Guaranteed finite runtime 4. Correct result ## Simple example: $\max\{ \ell\}$ **Problem:** Given a list of positive numbers, return the largest number in the list. **Inputs:** A list `L` of positive numbers. **Outputs:** A number. **Algorithm:** `find_max()` 1. Set `maxL` to 0. 2. For each `x` in the list `L`, compare it to `maxL`. If `x` is larger, set `maxL` to `x`. 3. `maxL` is now set to the largest number in the list. > **Note:** The above is called **pseudo-code** (understandable across programming languages). 
**Implementation** in Python: ``` def find_max(L): maxL = 0 for x in L: if x > maxL: maxL = x return maxL ``` **Question:** An error *might* occur if `L` is not restricted to contain strictly positive numbers. What could happen? **Bonus info:** Python, and other modern languages, actually tries to **predict** the result of an `if` statement before it is reached and prepares the following set of instructions. This is called *branch prediction* and is a major source of computational improvement. If you have a lot of `if-statements` that are not predictable, eg. because of randomized data, it may be a drag on computation time. ## Algorithmic complexity Algorithms can be characterized by the number of operations needed to perform them. This is called their complexity. The `find_max()` algorithm has `n = len(L)` operations each making a *comparison* (`x > max`) and (perhaps) an *assignment* (`max = x`). The number of operations increase linearily in the length of the input list (the order of the function is linear). **Mathematically** we say that `find_max()` has linear complexity, \\(O(n)\\) where $n$ is the input size (length of L). Other **common levels of complexity** are: 1. Constant, $O(1)$ (i.e. independent of input size) 2. Logarithmic, $O(\log n)$ 3. Linear, $O(n)$ 4. Log-linear, $O(n \log n)$ 5. Quadratic, $O(n^2)$ 6. Cubic, $O(n^3)$ 7. Exponential, $O(2^n)$ (**curse of dimensionality**) If the performance of an algorithm **depends on the exact values of the input** we differentiate between 1. **Best** case 2. **Average** case (across all possible inputs) 3. **Worst** case Complexity is an **asymptotic** measure, 1. Only the number of operations matter (not their type or cost) 2. Only the highest order matter <img src="https://github.com/NumEconCopenhagen/lectures-2019/raw/master/08/bigO.png" alt="bigO" width=40% /> **In practice however:** * The cost of each operation matters for fixed input size. 
* The amount and flow of **memory** matter for speed (cache vs. RAM vs. disc). * Therefore, it is **not guaranteed** that an algorithm of lower complexity executes faster than that of higher complexity for all cases. Especially, there may be differences in the costs of memory allocation and deletion which are not counted into the measure of complexity. In the case above, we were not counting in the *deletion* of objects, that would necessarily follow. ## Example of a complexity calculation ``` def demo_algorithm(n): # a. 3 assignments a = 5 b = 6 c = 10 # b. 3*n^2 multiplications and 3*n^2 assignments for i in range(n): for j in range(n): x = i * i y = j * j z = i * j # c. n multiplications, additions, and assignments # + n multiplications and assignments for k in range(n): w = a*k + 45 v = b*b # d. 1 assignment d = 33 ``` The **total number of operations** are: $T(n) = 3 + 6n^2 + 5n + 1 = 6n^2 + 5n + 4$ Notice: this is an exposition of operations. There are of course also operations involved in multiplication itself, which means that the number above is not indicative of the *total* number of operations that the computer must handle. **In big-O notation**: `demo_algorithm()` is $O(n^2)$, i.e. *quadratic complexity* **$\large \color{purple}{Question}$:** What is the complexity of these two algoritms? ``` def algorithm_a(n): s = 0 for i in range(n): for j in range(n): for k in range(n): s += 1 def algorithm_b(n): s = 0 for i in range(n): s *= 2 for j in range(n): s *= 2 for k in range(n): s *= 2 ``` ## The complexity of operations on data containers ### How are lists and dictionaries structured? The fact that our data containers have a certain structure in memory matters *greatly* for the speed of the methods (read: algos) that we apply on them. Let's have a look at how lists and dictionaries are organized. **Lists:** * A list is an ordered set of references to objects (eg. floats). * Each reference *points* to an address in memory where values are stored. 
* The reference variables of addresses (called pointers) of data in a list are ligned up next to each other in memory, such that they are increments of `1` apart. A bit like a train, if you will. * Need therefore **only** to keep track of the reference to the address of the **first element**, `l[0]`, and the rest follows in line. * If by $a$ we denote the address of the first element of `l`, then looking up element `l[i]` means accessing the $a+i$ address in memory using its reference variable. * Therefore, the algorithmic complexity of looking up an element `l[i]` does **not depend** on the size of `l`. *Which is nice.* ``` # A demonstration of addresses of elements in a list x = [5, 21, 30, 35] x_ref = [] x_id = [] # The addresses of x's elements for i in x: x_id.append(id(i)) # Each object has its own unique id x_ref.append(hex(x_id[-1])) # The memory address is a hexadecimal of the id # The addresses printed below are NOT lined up next to each other in memory. # Only the reference variables are lined up, but those we cannot see directly in Python. print('Id of each element in x:') for i in x_id: print(i) print('\nMemory address of elements in x: ', x_ref) ``` ### A quick overview of list operations |Operation | Code | Complexity | |:----------|:------------------|:--------------:| |**Index:** | `l[i]` | $O(1)$ | |**Store:** | `l[i] = 0` | $O(1)$ | |**Length:** | `len(l)` | $O(1)$ | |**Append:** | `l.append(n)` | $O(1)$ | |**Slice:** | `l[a:b]` | $O(b-a)$ | |**Pop last:** | `l.pop()` | $O(1)$ | |**Pop i:** | `l.pop(i)` | $O(N)$ | |**Clear:** | `l.clear()` | $O(N)$ | |**check:** | `l1 == l2` | $O(N)$ | |**Insert:** | `l[a:b] = ...` | $O(N)$ | |**Delete:** | `del l[i]` | $O(N)$ | |**Containment:** | x `in/not in l` | $O(N)$ | |**Copy:** | `l.copy()` | $O(N)$ | |**Sort:** | `l.sort()` | $O(N $Log$ N)$ | **A few notes:** * Getting the length of a list is $O(1)$ because Python keeps track of a list's size as it created and expanded. 
The length is stored as an attribute to the list. * Popping (getting the last element) is $O(1)$ because it only requires detaching the last reference in the "train" of references that comprises a list. * Inserting an element into, or removing it from, the middle of a list requires moving around all the references in memory "behind" the inserted element and is therefore $O(N)$. * Checking for containment of an element is $O(N)$ because all elements in the list may have to be visited. ### A beautiful solution **Question:** how do you delete element `i` from list `l` in $O(1)$? (*even when it says above that `del` is an $O(N)$ operation*) **Answer:** `l[i] = l.pop()` The `pop` operation will delete the last element of `l` while also using it to overwrite element `i` in `l`. Hence, last element is preserved while element `i` disappears. **Note** this won't work if `i` is the last element. A full implementation needs to account for this, but it will still be $O(1)$. **Dictionaries:** * A dictionary is a set of *buckets* (think lists) which can store items. * A dictionary with 1 element and 5 buckets: `[] - [] - [] - [<key,value>] - []` * Contrary to lists, there is no explicit indexing of a dictionary. No `d[i]`, we can use a string instead, `d[str]`. * However, the buckets of a dictionary are lined up just like a the references in a list. * Python therefore needs to locate a bucket, when adding a `<key,value>` pair. * Buckets are located using a **hash function** on the key of an element. * This **hash function** converts the key to a integer number, which can then serve as an index. * Obviously, a useful hash function must be very fast and work on strings as well as floats. * A fast hash function enables $O(1)$ lookup in a dictionary. * Hashing also implies that `key in dict.keys()` is $O(1)$, thus independent of dictionary size! (Very handy) * When an empty dictionary is created, it contains 5 buckets. 
As a 6th element is added to the dictionary, it is rescaled to 10 buckets. At 11 elements, rescaled to 20 buckets and so on. * Dictionaries thus **pre-allocate** memory to be efficient when adding the next element. * *Taking up memory in favor of fast execution is a basic trade-off in algorithms!* ``` d = {'x': 1, 'z': 2} print('size of md in bytes:', sys.getsizeof(d)) # Start adding elements to d and see how memory usage changes for i in range(25): key = random.choice(string.ascii_letters) value = random.random() d[key] = value print(f"key: {key} value: {value: 1.3f} \t size: {i+1:2.0f} bytes: {sys.getsizeof(d)} \t hashed key: {hash(key)}") # Notice that there may be collisions as some keys are similar, and therefore get same hash value. # Python can handle such collisions, but they do create a drag on performance. ``` ### A quick overview of dictionary operations |Operation | Code | Complexity | |:----------|:------------------|:--------------:| |**Index:** | `d[k]` | $O(1)$ | |**Store:** | `d[k] = v` | $O(1)$ | |**Delete:** | `del d[k]` | $O(1)$ | |**Length:** | `len(d)` | $O(1)$ | |**Clear:** | `d.clear()` | $O(1)$ | |**View:** | `d.keys()` | $O(1)$ | Notice the difference in complexity for **deletions**. Faster in dictionaries because they are unordered. You can checkout a [comprehensive table](https://www.ics.uci.edu/~pattis/ICS-33/lectures/complexitypython.txt) of Python operations' complexity. ## Multiplication and Karatsuba's algorithm Ever wondered how Python multiplies two numbers? It actually depends on the size of those numbers! **Small numbers:** 3rd grade algorithm. **Large numbers:** Karatsuba's algorithm. ### Demonstration Consider the multiplication $2275 \times 5013 = 11,404,575$ **3rd grade algorithm** (this one we all know - although it's been a while) The 3rd grade algorithm is $O(n^2)$. To see this, think of the multiplication part as nested for-loops throughout the 10s, 100s, 1000s etc. 
Then there is the addition part, which is also $O(n^2)$. ``` Image(filename = "ThirdGradeMultiplication.jpg", width = 230, height = 230) ``` **Karatsuba's algorithm** It is not super intuitive what goes on here. But basically, it's splitting the numbers to be multiplied into multiples of 10s and then performs operations on those splits. The algorithm is only $O(n^{log_3})$, so better than 3rd grade algorithm for large $n$. **Some preparation:** $x = 2275$, $y = 5013$ Note the identities: $x = 22 \times 10^2 + 75$ $y = 50 \times 10^2 + 13$ We denote: $x_a = 22, \: x_b = 75$ $y_a = 50, \: y_b = 13$ **The algorithm** *First compute:* $A = x_a \times y_a$ $B = x_b \times y_b$ $C = (x_a + x_b) \times (y_a +y_b) - A - B$ *Then we have that* $x \times y = A \times 10^4 + C\times 10^2 + B$ **In numbers** $A = 22 \times 50 = 1100$ $B = 75 \times 13 = 975$ $C = (22 + 75)(50 + 13) - 1100 - 975 = 4036$ $x \times y = 1100 \times 10^4 + 4036\times 10^2 + 975 = 11,404,575$ ## Linear search (also called sequential search) **Problem:** Check whether element is in list. See the `containment` row in the list of complexity above. **Inputs:** A list `L` and a potential element `x`. **Outputs:** Boolean. **Algorithm:** `linear_search()` 1. Set variable `found == False` 2. For each `y` in the list `L`, compare it to `x`. If `x == y` set `found = True` and break loop. 3. `found` now shows whether the element is in the list or not ``` L = [1, 2, 32, 8, 17, 19, 42, 13, 0] # test list def linear_search(L,x): pass print('found 3:',linear_search(L,3)) print('found 13:',linear_search(L,13)) def linear_search(L,x): """ linear search Args: L (list): List to search in. x (any): Element to search for. Returns: found (bool): Boolean for whether element is in list or not. """ # a. prep i = 0 N = len(L) found = False # b. main while i < N and not found: if L[i] == x: # comparison found = True else: i += 1 # increment # c. 
return return found print('found 3:',linear_search(L,3)) print('found 13:',linear_search(L,13)) ``` **Terminology:** The linear search algorithm is called a **brute force** algorithm (we solve the problem without any intermediate steps). **Analysis:** Each operation consists of a *comparision* and an *incremenet*: 1. **Best case:** $O(1)$ (element present and first in list) 2. **Average case:** * $O(\frac{n}{2})=O(n)$ (if element present), or * $O(n)$ (if element *not* present) 3. **Worst case:** $O(n)$ (element not present or last in list) **Note:** Much faster ($O(1)$) on a dictionary, because we just apply the hash function to `x`. ## Binary search ("the phonebook search") **Problem:** You know that a list is sorted. Check whether an element is contained in it. **Inputs:** A list `L` and a potential element `x`. **Outputs:** Boolean. **Algorithm:** `binary_search()` 1. Set `found` to `False`, 2. Locate the `midpoint` of the list part that remains to be searched. 2. Check whether the `midpoint` is the one we are searching for: * If yes, set `found=True` and go to step 3. * If no, and the `midpoint` is *larger*, restrict attention to the *left* part of the list and restart step 2 if not empty. * If no, and the `midpoint` is *smaller*, restrict attention to the *right* part of the list and restart step 2 if not empty. 3. `found` now shows whether the element is in the list or not **Middle element:** Define the midpoint between index `i` and index `j >= i` as `i + (j-i)/2`, rounded down if necessary. ``` for i in [0,2,4]: for j in [4,5,9]: print(f'(i,j) = {i,j} -> midpoint = {i+((j-i)//2)}') # note integer division with // L = [0, 1, 2, 8, 13, 17, 19, 32, 42] # test list def binary_search(L,x): pass print('found 3:',binary_search(L,3)) print('found 13:',binary_search(L,13)) def binary_search(L,x,do_print=False): """ binary search Args: L (list): List to search in. x (any): Element to search for. do_print (bool): Indicator for printing progress. 
Returns: found (bool): Boolean for whether element is in list or not. """ # a. initialize found = False # b. start with whole list first = 0 last = len(L)-1 # c. main while first <= last and not found: # i. find midpoint midpoint = first + (last - first) // 2 # // is integer division if do_print: print(L[first:last+1],L[midpoint]) # ii. check if x found or smaller or larger than midpoint if L[midpoint] == x: found = True else: if L[midpoint] > x: last = midpoint-1 else: first = midpoint+1 return found print('found 3:',binary_search(L,3)) print('found 13:',binary_search(L,13)) binary_search(L,32,do_print=True) ``` **Terminology:** This is called a **divide-and-conquer** algorithm. **Analysis:** * After 1 comparison there is approximately $\frac{n}{2}$ elements left. * After 2 comparisons there is approximately $\frac{n}{4}$ elements left. * After 3 comparisons there is approximately $\frac{n}{8}$ elements left. * ... * After $j$ comparisons there is approximately $\frac{n}{2^j}$ number of elements left. **When is there one element left?** $\frac{n}{2^j} = 1 \Leftrightarrow j = \frac{\log n}{\log 2}$ **Result:** The binary search algorithm is $O(\log n)$, i.e. logarithmic complexity. # Recursion **Problem:** Sum the elements in a list. ``` L = [1,3,5,7,9] ``` **Simple:** Just sum them: ``` def listsum(L): result = 0 for x in L: result += x return result print(listsum(L)) ``` **Recursion:** The sum of a list is the sum of the first element and the sum of the rest of the list: ``` def listsum_recursive(L): if len(L) == 1: return L[0] else: return L[0] + listsum_recursive(L[1:]) print(listsum_recursive(L)) ``` This is also a divide-and-conquor strategy. Avoids loops. 
## Fibonacci numbers **Definition:** $$ \begin{aligned} F_0 &= 0 \\ F_1 &= 1 \\ F_n &= F_{n-1} + F_{n-2} \\ \end{aligned} $$ **Implementation:** ``` def fibonacci(n): if n == 0: return 0 elif n == 1: return 1 return fibonacci(n-1)+fibonacci(n-2) fibonacci(5) #for n in range(4): #print(fibonacci(n)) ``` ### Caution! This implementation is for demonstration purposes only. It can be greatly sped up by using the `@cache` decorator, which stores the previous return value of a function call. If you ever want to use recursion, you must rely on **caching** of function values. Because ***recursion on itself is sloow***. **Test approximate formula:** ``` def fibonacci_approx(n): return 1/np.sqrt(5)*( ((1+np.sqrt(5))/2)**n - ((1-np.sqrt(5))/2)**n) for n in [5,10,15,20,25]: print(f'n = {n:3d}: true = {fibonacci(n):6d}, approximate = {fibonacci_approx(n):20.12f}') ``` ## Advanced: Binary search with recursion ``` L = [0, 1, 2, 8, 13, 17, 19, 32, 42,] # test list def binary_search_recursive(L,x): pass print('found 3:',binary_search_recursive(L,3)) print('found 13:',binary_search_recursive(L,13)) def binary_search_recursive(L,x): """ recursive binary search Args: L (list): List to search in. x (any): Element to search for. Returns: found (bool): Boolean for whether element is in list or not. """ if len(L) == 0: return False # not found else: # a. find midpoint midpoint = len(L)//2 # b. check if x found or smaller or larger than midpoint if L[midpoint] == x: # found return True else: if L[midpoint] > x: newL = L[:midpoint] else: newL = L[midpoint+1:] return binary_search_recursive(newL,x) print('found 3:',binary_search_recursive(L,3)) print('found 13:',binary_search_recursive(L,13)) ``` # Sorting Sorting is a super central task of computing. IBM invented it's first computers in the 30s to sort data. Would be hard to keep track of data without sorting. Thus, many algorithms have been developed for this purpose. 
We will look at a simple algorithm first, the bubble sort, which relies on swapping elements iteratively. Function for **swapping** element `L[i]` with element `L[j]` in-place: ``` def swap(L,i,j): temp = L[i] # save value in place holder variable L[i] = L[j] # overwrite value at i with value at j L[j] = temp # write original value at i to value at j ``` **Example:** ``` L = [1, 3, 4, 9, 13] swap(L,i=0,j=1) print('after swap',L) ``` ## Bubble sort **Problem:** Sort a list of numbers in-place. **Inputs:** List of numbers. **Outputs:** None. **Algorithm:** `bubble_sort()` 1. Loop through the first n-1 elements in list, swap with next element if current is larger. 2. Loop through the first n-2 elements in list, swap with next element if current is larger. <br> ... <br> 4. Loop through the first 3 elements in list, swap with next element if current is larger. 5. Swap the two first elements if the first is larger than the second 6. List is sorted ``` L = [54, 26, 93, 17, 77, 31, 44, 55, 20] # test list def bubble_sort(L): pass bubble_sort(L) print(L) def bubble_sort(L): """ bubble sort Args: L (list): List of numbers """ # k starts being len(L)-1 and is decreased by 1 until hitting 0 for k in range(len(L)-1,0,-1): for i in range(k): if L[i] > L[i+1]: swap(L,i,i+1) L = [54, 26, 93, 17, 77, 31, 44, 55, 20] bubble_sort(L) print('sorted L:',L) from IPython.display import YouTubeVideo YouTubeVideo('lyZQPjUT5B4', width=800, height=600, start=45) ``` **Another visualization of bubble sort** ![bubble](https://upload.wikimedia.org/wikipedia/commons/5/54/Sorting_bubblesort_anim.gif) **Illustration with printout:** ``` def bubble_sort_with_print(L): for k in range(len(L)-1,0,-1): print(f'step = {len(L)-k}') for i in range(k): if L[i] > L[i+1]: swap(L,i,i+1) print(L) print('') L = [54, 26, 93, 17, 77, 31, 44, 55, 20] print('original',L,'\n') bubble_sort_with_print(L) ``` **Analysis:** Bubble sort is $O(n^2)$ - do you have an intuition? 
## Insertion sort **Algorithm:** `insertion_sort()` 1. Consider the *second* element. Insert it correctly in the list of the numbers before the *second* element. 2. Consider the *third* element. Insert it correctly in the list of the numbers before the *third* element. <br> ... <br> 4. Consider the n'th element. Insert it correctly in the list of the numbers before the *n'th* element. 5. List is sorted **Illustration:** <img src="https://github.com/NumEconCopenhagen/lectures-2019/raw/master/08/insertionsort.png" alt="insertionsort" width=50% /> ``` L = [54, 26, 93, 17, 77, 31, 44, 55, 20] # test list def insertion_sort(L): pass insertion_sort(L) print(L) def insertion_sort(L): """ insertion sort Args: L (list): List of numbers """ # loop over last n-1 elements, skipping the 1st element (see range func). n = len(L) for k in range(1,n): # a. current value and position x = L[k] i = k # b. move left while larger: a bubble sort at heart while i > 0 and L[i-1] > x: L[i] = L[i-1] # move i = i-1 # c. insert current vlaue L[i] = x L = [54, 26, 93, 17, 77, 31, 44, 55, 20] insertion_sort(L) print('sorted',L) ``` **Analysis:** Still $O(n^2)$.. **Benefits relative to bubble sort:** 1. Moves instead of swaps, 1 operation less. 2. Data is often **partially sorted** to begin with. Insertion sort benefits from that. ## Partition (+) *Intermezzo: Solving the partition problem is useful for a so-called quicksort.* **Problem:** Permute a list and return a splitpoint such that all elements before the point is larger than or equal to the first element in the original list, and all elements afterwards are strictly larger. **Input:** List of numbers. **Output:** Integer. **Algorithm:** 0. Let splitting point be first element of list. 1. From the *left* find the first element larger than split point (leftmark). 2. From the *right* find the first element smaller than split point (rightmark). 3. Swap these two elements. 4. Repeat 1-3 starting from previous leftmark and rightmark. 
Continue until leftmark is larger than rightmark. 5. Swap first and rightmark element. 6. Return the rightmark. <img src="https://github.com/NumEconCopenhagen/lectures-2019/raw/master/08/quicksort.png" alt="quicksort" width=60% /> ``` def partition(L,first,last): """ partition Permute a list and return a splitpoint, such that all elements before it are less than or equal to the first element in the original list, and all elements afterwards are at least as large. Args: L (list): List of numbers first (integer): Startpoint last (integer): Endpoint Returns: splitpoint (integer): final index of the pivot (the original first element) """ # a. initialize splitvalue = L[first] leftmark = first+1 rightmark = last # b. find splitpoint done = False while not done: # i. find leftmark: first element from the left strictly larger than splitvalue while leftmark <= rightmark and L[leftmark] <= splitvalue: leftmark = leftmark + 1 # ii. find rightmark: first element from the right strictly smaller than splitvalue while L[rightmark] >= splitvalue and rightmark >= leftmark: rightmark = rightmark -1 # iii. check if done or swap left and right if rightmark < leftmark: done = True else: swap(L,leftmark,rightmark) # c. final swap: put the pivot at its final position swap(L,first,rightmark) return rightmark L = [54, 26, 93, 17, 77, 31, 44, 55, 20] print('before',L) splitpoint = partition(L,0,len(L)-1) print('after',L) print('split',L[:splitpoint+1],L[splitpoint+1:]) ``` ## Quicksort (+) **Algorithm:** `quick_sort()` 1. Recursively partition the list and the sub-lists when splitting at the splitpoint. 2. The list is now sorted. ``` def quick_sort(L): _quick_sort(L,0,len(L)-1) def _quick_sort(L,first,last): if first < last: splitpoint = partition(L,first,last) _quick_sort(L,first,splitpoint-1) # left part _quick_sort(L,splitpoint+1,last) # right part L = [54, 26, 93, 17, 77, 31, 44, 55, 20] quick_sort(L) print('sorted',L) ``` **Analysis:** $O(n \log n)$ on average, but still $O(n^2)$ in the worst case [we don't derive this, just trust me].
**Visualization of quicksort** ![quicksort](https://upload.wikimedia.org/wikipedia/commons/6/6a/Sorting_quicksort_anim.gif) ## Advanced: Comparison of performance Let us compare the different sorting algorithms: 1. Bubble 2. Insertion 3. Quick 4. Quick (as implemented in Numpy) ``` # a. settings n_vec = np.array([100,200,300,400,500,750,1000,1500,2000,4000,8000,16000]) # number of elements in list K = 50 # number of repetitions when timing # b. allocate vectors for results bubble = np.empty(len(n_vec)) insertion = np.empty(len(n_vec)) quick = np.empty(len(n_vec)) quicknp = np.empty(len(n_vec)) # c. run time trials np.random.seed(1999) for i,n in enumerate(n_vec): # i. draw K random lists of lenght n L_bubble = [] L_insertion = [] L_quick = [] L_quicknp = [] for k in range(K): L = np.random.uniform(size=n) np.random.shuffle(L) L_bubble.append(L.copy()) L_insertion.append(L.copy()) L_quick.append(L.copy()) L_quicknp.append(L.copy()) # ii. bubble sort if n <= 500: t0 = time.time() # start timer for k in range(K): bubble_sort(L_bubble[k]) bubble[i] = time.time()-t0 # calculate time since start else: bubble[i] = np.nan # ii. insertion sort if n <= 500: t0 = time.time() for k in range(K): insertion_sort(L_insertion[k]) insertion[i] = time.time()-t0 else: insertion[i] = np.nan # iii. quicksort if n <= 2000: t0 = time.time() for k in range(K): quick_sort(L_quick[k]) quick[i] = time.time()-t0 else: quick[i] = np.nan # iii. quicksort (numpy implementation) t0 = time.time() for k in range(K): L_quicknp[k].sort() # built-in numpy method quicknp[i] = time.time()-t0 # iv. check that all sorted lists are the same for k in range(K): if n <= 500: assert np.all(L_bubble[k] == L_quick[k]) assert np.all(L_insertion[k] == L_quick[k]) if n <= 2000: assert np.all(L_quicknp[k] == L_quick[k]) # d.
figure I = n_vec <= 2000 fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(n_vec[I],bubble[I],label='bubble') ax.plot(n_vec[I],insertion[I],label='insertion') ax.plot(n_vec[I],quick[I],label='quick') ax.plot(n_vec[I],quicknp[I],label='quick (numpy)') ax.set_xlabel('number of elements') ax.set_ylabel('seconds') ax.legend(facecolor='white',frameon=True); fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(n_vec,quicknp,label='quick (numpy)') ax.set_xlabel('number of elements') ax.set_ylabel('seconds') ax.legend(facecolor='white',frameon=True); ``` **Take-aways:** 1. Complexity matters 2. Implementation matters (and the built-in functions and methods are hard to beat) # Summary **This lecture:** 1. Algorithms and their complexity (big-O notation) 2. Function recursion (functions calling themselves) 3. Searching algorithms (linear, binary) 4. Sorting algorithms (bubble, insertion, quick) **Your work:** The problem set is closely related to the algorithms presented here. **Next lecture:** Solving equations (single vs. system, linear vs. non-linear, numerically vs. symbolically)
github_jupyter
``` import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt from sklearn.linear_model import Ridge from sklearn.metrics import mean_squared_error from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import Ridge import lightgbm as lgb import xgboost as xgb import warnings warnings.filterwarnings("ignore") import pickle def cv_sklearn (model_name, fun_create_model, df, cols, score_column, early_stopping_rounds = None): pred = np.zeros ((df.shape[0])) for fold in range(5): train = df.query ("kfold != @fold") valid = df.query ("kfold == @fold") X_train = train [cols].values y_train = train[score_column].values X_valid = valid [cols].values y_valid = valid[score_column].values model = fun_create_model () if not early_stopping_rounds is None: model.fit(X_train, y_train, early_stopping_rounds = early_stopping_rounds, eval_set=[(X_train, y_train), (X_valid, y_valid)], verbose=0) else: model.fit(X_train, y_train) pred_train = model.predict ( X_train ) pred_valid = model.predict ( X_valid ) pred [valid.index.values] = pred_valid rmse_train = mean_squared_error ( y_train, pred_train, squared = False ) rmse_valid = mean_squared_error ( y_valid, pred_valid, squared = False ) print (f"fold:{fold} rmse_train:{rmse_train:.5f}, rmse_valid:{rmse_valid:.5f}") pickle.dump(model, open(f"../models/{model_name}_{fold}.pkl", 'wb')) y_true = df[score_column] rmse_tot = mean_squared_error ( y_true, pred, squared = False ) print (f"tot rmse_tot:{rmse_tot:.5f}") return pred def show_rmse (y_true, pred, model_name): rmse_tot = mean_squared_error ( y_true, pred, squared = False ) plt.figure (figsize = (6,6)) plt.scatter (y_true, pred) plt.title(f"{model_name}: rmse_tot:{rmse_tot:.5f}") plt.show() def create_validation_features (): df_valid_pair = pd.read_csv("../processed/validation.csv") df_valid_fe = pd.read_csv("../processed/valid_text_detoxify_fe.csv") df_valid_fe_2 = 
pd.read_csv("../processed/valid_text_jc_tfidf_fe.csv") df_valid_fe_2 = df_valid_fe_2[["jc_tfidf_toxic","jc_tfidf_severe_toxic","jc_tfidf_obscene","jc_tfidf_threat","jc_tfidf_insult","jc_tfidf_identity_hate"]] df_valid_fe_3 = pd.read_csv("../processed/valid_text_juc_tfidf_fe.csv") df_valid_fe_3 = df_valid_fe_3[['juc_tfidf_toxicity', 'juc_tfidf_severe_toxicity','juc_tfidf_obscene', 'juc_tfidf_sexual_explicit','juc_tfidf_identity_attack', 'juc_tfidf_insult', 'juc_tfidf_threat']] df_valid_fe = pd.concat ( [df_valid_fe,df_valid_fe_2, df_valid_fe_3], axis=1) return df_valid_pair, df_valid_fe def create_rud_features (): df_valid_pair = pd.read_csv("../processed/rud_pair.csv") df_valid_fe = pd.read_csv("../processed/rud_text_detoxify_fe.csv") df_valid_fe_2 = pd.read_csv("../processed/rud_text_jc_tfidf_fe.csv") df_valid_fe_2 = df_valid_fe_2[["jc_tfidf_toxic","jc_tfidf_severe_toxic","jc_tfidf_obscene","jc_tfidf_threat","jc_tfidf_insult","jc_tfidf_identity_hate"]] df_valid_fe_3 = pd.read_csv("../processed/rud_text_juc_tfidf_fe.csv") df_valid_fe_3 = df_valid_fe_3[['juc_tfidf_toxicity', 'juc_tfidf_severe_toxicity','juc_tfidf_obscene', 'juc_tfidf_sexual_explicit','juc_tfidf_identity_attack', 'juc_tfidf_insult', 'juc_tfidf_threat']] df_valid_fe = pd.concat ( [df_valid_fe,df_valid_fe_2, df_valid_fe_3], axis=1) return df_valid_pair, df_valid_fe def validate(df_valid_pair,df_valid_text, pred): ### rud if "comment_id" in df_valid_text.columns: m = { a:b for a, b in zip(df_valid_text["comment_id"].values, pred) } p_less = df_valid_pair["less_toxic_id"].map(lambda x: m[x] ) p_more = df_valid_pair["more_toxic_id"].map(lambda x: m[x] ) return (p_less < p_more).mean() else: m = { a:b for a, b in zip(df_valid_text["text"].values, pred) } df_valid_pair = df_valid_pair.copy() #df_valid_pair = df_valid_pair.query("count == 3").copy() df_valid_pair["less_toxic_score"] = df_valid_pair["less_toxic"].map(lambda x: m[x] ) df_valid_pair["more_toxic_score"] = df_valid_pair["more_toxic"].map(lambda 
x: m[x] ) d_a = df_valid_pair.query("less_toxic_score < more_toxic_score and avg_agreement == 1.0") d_b = df_valid_pair.query("less_toxic_score < more_toxic_score and avg_agreement < 1.0") d_c = df_valid_pair.query("less_toxic_score > more_toxic_score and avg_agreement < 1.0") return (d_a["count"].sum() + d_b.shape[0]*2 + d_c.shape[0])/df_valid_pair["count"].sum() def avg_predict (df_valid_text, cols): for k, col in enumerate(cols): p = rankdata(df_valid_text[col].values, method='ordinal') if k == 0: y = p else: y = y + p return y def sklearn_predict (df_valid_text, model_path, model_name, cols, folds = 5): pred = np.zeros ((df_valid_text.shape[0])) X = df_valid_text [cols].values for fold in range(5): model = pickle.load(open(f"{model_path}/{model_name}_{fold}.pkl", 'rb')) pred += model.predict ( X ) return pred cols = ['original_toxicity', 'original_severe_toxicity', 'original_obscene', 'original_threat', 'original_insult', 'original_identity_attack', 'unbiased_toxicity', 'unbiased_severe_toxicity', 'unbiased_obscene', 'unbiased_identity_attack', 'unbiased_insult', 'unbiased_threat', 'unbiased_sexual_explicit', 'multilingual_toxicity', 'multilingual_severe_toxicity', 'multilingual_obscene', 'multilingual_identity_attack', 'multilingual_insult', 'multilingual_threat', 'multilingual_sexual_explicit', 'original-small_toxicity', 'original-small_severe_toxicity', 'original-small_obscene', 'original-small_threat', 'original-small_insult', 'original-small_identity_attack', 'unbiased-small_toxicity', 'unbiased-small_severe_toxicity', 'unbiased-small_obscene', 'unbiased-small_identity_attack', 'unbiased-small_insult', 'unbiased-small_threat', 'unbiased-small_sexual_explicit', "jc_tfidf_toxic","jc_tfidf_severe_toxic","jc_tfidf_obscene","jc_tfidf_threat","jc_tfidf_insult","jc_tfidf_identity_hate"] def lgb_model (): return lgb.LGBMRegressor(random_state=2022, learning_rate=0.1, subsample=0.8, colsample_bytree=0.8, num_leaves=6, ) def xgb_model (): return 
xgb.XGBRegressor(random_state=2022, learning_rate=0.1, subsample=0.6, colsample_bytree=0.6, max_depth=4, reg_alpha=1.0, ) def rf_model(): return RandomForestRegressor(random_state=2022, max_features=3, max_depth=8) def ridge_model(): return Ridge(alpha=1.0) rud_pair, rud_text = create_rud_features () val_pair, val_text = create_validation_features () y_true = rud_text["offensiveness_score"] model_name = "ridge_rud" print(f"MODEL {model_name}") pred_ridge = cv_sklearn ( model_name = model_name, fun_create_model = ridge_model, df = rud_text , cols = cols , score_column = "offensiveness_score") show_rmse (y_true, pred_ridge, model_name) pred_val_ridge = sklearn_predict (val_text, model_path = "../models/", model_name =model_name, cols=cols) val_score = validate (val_pair,val_text, pred_val_ridge) print(f"{model_name} VAL: {val_score:.5f}") print() print() model_name = "lgb_rud" print(f"MODEL {model_name}") pred_lgb = cv_sklearn ( model_name = model_name, fun_create_model = lgb_model, df = rud_text , cols = cols , score_column = "offensiveness_score", early_stopping_rounds = 50) show_rmse (y_true, pred_lgb, model_name) pred_val_lgb = sklearn_predict (val_text, model_path = "../models/", model_name =model_name, cols=cols) val_score = validate (val_pair,val_text, pred_val_lgb) print(f"{model_name} VAL: {val_score:.5f}") print() print() model_name = "rf_rud" print(f"MODEL {model_name}") pred_rf = cv_sklearn ( model_name = model_name, fun_create_model = rf_model, df = rud_text , cols = cols , score_column = "offensiveness_score") show_rmse (y_true, pred_rf, model_name) pred_val_rf = sklearn_predict (val_text, model_path = "../models/", model_name =model_name, cols=cols) val_score = validate (val_pair,val_text, pred_val_rf) print(f"{model_name} VAL: {val_score:.5f}") print() print() model_name = "xgb_rud" print(f"MODEL {model_name}") pred_xgb = cv_sklearn ( model_name = model_name, fun_create_model = xgb_model, df = rud_text , cols = cols , score_column = 
"offensiveness_score", early_stopping_rounds = 50) show_rmse (y_true, pred_xgb, model_name) pred_val_xgb = sklearn_predict (val_text, model_path = "../models/", model_name =model_name, cols=cols) val_score = validate (val_pair,val_text, pred_val_xgb) print(f"{model_name} VAL: {val_score:.5f}") print() print() pred_val_ensemble = pred_val_ridge + pred_val_lgb + pred_val_rf + pred_val_xgb val_score = validate (val_pair,val_text, pred_val_ensemble) print(f"Ensemble VAL: {val_score:.5f}") ```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/image_overview.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_overview.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/image_overview.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium. ``` import subprocess try: import geehydro except ImportError: print('geehydro package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro']) ``` Import libraries ``` import ee import folium import geehydro ``` Authenticate and initialize Earth Engine API. 
You only need to authenticate the Earth Engine API once. ``` try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`. ``` Map = folium.Map(location=[40, -100], zoom_start=4) Map.setOptions('HYBRID') ``` ## Add Earth Engine Python script ## Display Earth Engine data layers ``` Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True) Map ```
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import sys from src.config import config from scipy.stats import pearsonr cdr_dhs_other = pd.DataFrame(pd.read_csv('data/processed/civ/correlation/master_cdr_dhs_other.csv')) urb = cdr_dhs_other[cdr_dhs_other['Z_Med'] >= np.median(cdr_dhs_other['Z_Med'])] rur = cdr_dhs_other[cdr_dhs_other['Z_Med'] < np.median(cdr_dhs_other['Z_Med'])] # For each Administrative level for i in ['Adm_4']: cdr_sum_urb = urb.groupby(i)['Vol', 'Vol_in', 'Vol_out', 'Dur', 'Dur_in', 'Dur_out'].sum().reset_index() cdr_mean_urb = urb.groupby(i)['Entropy', 'Med_degree', 'Pagerank', 'Introversion'].median().reset_index() cdr_urb = cdr_sum_urb.merge(cdr_mean_urb, on=i) dhs_urb = urb.groupby(i)['BloodPosRate', 'RapidPosRate', 'DeathRate', 'HIVPosRate', 'HealthAccessDifficulty'].mean().reset_index() data_urb = cdr_urb.merge(dhs_urb, on=i) cdr_sum_rur = rur.groupby(i)['Vol', 'Vol_in', 'Vol_out', 'Dur', 'Dur_in', 'Dur_out'].sum().reset_index() cdr_mean_rur = rur.groupby(i)['Entropy', 'Med_degree', 'Pagerank', 'Introversion'].median().reset_index() cdr_rur = cdr_sum_rur.merge(cdr_mean_rur, on=i) dhs_rur = rur.groupby(i)['BloodPosRate', 'RapidPosRate', 'DeathRate', 'HIVPosRate', 'HealthAccessDifficulty'].mean().reset_index() data_rur = cdr_rur.merge(dhs_rur, on=i) for j in ['Vol', 'Vol_in', 'Vol_out', 'Entropy', 'Med_degree', 'Pagerank', 'Introversion']: for k in ['BloodPosRate', 'RapidPosRate', 'DeathRate', 'HIVPosRate', 'HealthAccessDifficulty']: a = np.array(data_urb[j]) b = np.array(data_urb[k]) outliers = np.where(a > 0) a = a[outliers] b = b[outliers] outliers2 = np.where(b > 0) a = a[outliers2] b = b[outliers2] c = np.array(data_rur[j]) d = np.array(data_rur[k]) outliers3 = np.where(c > 0) c = c[outliers3] d = d[outliers3] outliers4 = np.where(d > 0) c = c[outliers4] d = d[outliers4] print i, j, k print pearsonr(a, b) plt.scatter(a, b) # plt.scatter(c, d, c='r') plt.show() print pearsonr(a, b) plt.scatter(np.log(a), b) # 
plt.scatter(np.log(c), np.log(d), c='r') plt.show() ```
github_jupyter
# Curve-fit to estimate final dissipation ``` %run base.py %run paths.py from base import * from paths import * %matplotlib ipympl import matplotlib.pyplot as plt def get_teps(short_name): path = paths_sim[short_name] d = SpatialMeansSW1L._load(path) t = d['t'] eps = d['epsK'] + d ['epsA'] idx = _index_where(eps, eps.max()) return t, eps, idx def plot_eps(short_name): t, eps,idx = get_teps(short_name) plt.plot(t, eps, label=short_name) plt.text(t[idx], eps[idx], short_name) # plt.legend() ## Some extreme cases #plot_eps('noise_c400nh7680Buinf') #plot_eps('vortex_grid_c100nh1920Bu2.0efr1.00e+00') #plot_eps('noise_c20nh7680Buinf') #plot_eps('vortex_grid_c100nh1920Bu1.0efr1.00e+00') #plot_eps('noise_c40nh7680Buinf') #plot_eps('vortex_grid_c100nh1920Buinfefr1.00e+02') ## Lognorm like # plot_eps('vortex_grid_c20nh1920Buinfefr1.00e+01') # plot_eps('vortex_grid_c100nh1920Buinfefr1.00e+02') # plot_eps('vortex_grid_c100nh1920Bu4.0efr1.00e+02') # plot_eps('vortex_grid_c400nh1920Buinfefr1.00e+02') #for short in df_vort['short name']: # plot_eps(short) plot_eps('noise_c400nh3840Buinf') # plot_eps('noise_c100nh3840Buinf') # plot_eps('vortex_grid_c100nh1920Bu2.0efr1.00e+00') %matplotlib ipympl from scipy.signal import medfilt from scipy.special import erf t, eps, idx = get_teps("vortex_grid_c400nh1920Buinfefr1.00e+02") # plt.figure() # plt.plot(t, np.tanh(t/10)) # plt.plot(t, np.tanh((t/10)**4)) # plt.plot(t, erf((t/10)**4)) # plt.show() eps_filt = medfilt(eps, 7) plt.figure() plt.plot(t, eps) plt.plot(t, eps_filt) ``` # Curve fit ``` %matplotlib ipympl from scipy.optimize import curve_fit from scipy.signal import lti, step2 from scipy import stats from matplotlib.pyplot import * option = 2 # short = 'vortex_grid_c100nh1920Buinfefr1.00e-01' # short = 'vortex_grid_c100nh1920Buinfefr1.00e+02' # short = 'vortex_grid_c20nh1920Buinfefr1.00e+01' # short = 'vortex_grid_c400nh1920Buinfefr1.00e+02' # short = 'noise_c20nh7680Buinf' # short = 'vortex_grid_c20nh960Buinfefr1.00e+00' # short 
= 'noise_c20nh3840Buinf' # short = 'noise_c400nh3840Buinf' short = 'vortex_grid_c20nh1920Bu4.0efr1.00e-02' short = 'vortex_grid_c20nh1920Bu20.0efr1.00e+00' short = 'noise_c400nh1920Buinf' # short = 'vortex_grid_c100nh1920Bu4.0efr1.00e+02' # short = 'vortex_grid_c100nh1920Bu2.0efr1.00e+00' t, eps,_ = get_teps(short) if option == 1: def f(x, amptan, ttan): return amptan * pl.tanh(2 * (x / ttan)**4) guesses = [pl.median(eps), t[eps==eps.max()]] else: # def f(x, amptan, ttan, amplog, tlog, sigma): def f(x, amptan, ttan, amplog, sigma): return ( amptan * np.tanh(2 * (x/ttan)**4) + amplog * stats.lognorm.pdf(x, scale=np.exp(ttan), s=sigma) ) guesses = { 'amptan': np.median(eps), 'ttan': t[eps==eps.max()], 'amplog': eps.max(), # 'tlog': t[eps==eps.max()], 'sigma': eps.std() } guesses = np.array(list(guesses.values()), dtype=float) bounds = (0, guesses * 1.5) # popt, pcov = curve_fit(f, t, eps) # popt, pcov = curve_fit(f, t, eps, sigma=1./t) popt, pcov = curve_fit(f, t, eps, guesses) # popt, pcov = curve_fit(f, t, eps, guesses, bounds=bounds, method="trf") plot(t, eps, label='original') plot(t, f(t, *popt), label='curve fit') plot(t, np.median(eps) * np.ones_like(eps), 'g', label='median_all') plot(t, np.median(eps[t>40]) * np.ones_like(eps), 'r:', label='median') plot(t, np.mean(eps[t>40]) * np.ones_like(eps), 'r--', label='mean') # df = df_vort if 'vortex' in short else df_noise # eps_chosen = get_row(df, 'short name', short)['$\epsilon$'].iloc[0] # plot(t, eps_chosen * np.ones_like(eps), 'k', label='chosen') # plot(t, popt[2] * stats.lognorm.pdf(t, *popt[-2:]), label='lognorm') legend() eps_fit = f(t, *popt) dt = t[1]-t[0] # dt = np.median(np.gradient(t)) deps_fit = np.gradient(eps_fit, dt) ddeps_fit = np.gradient(deps_fit, dt) curv = ddeps_fit / (1 + deps_fit) ** 1.5 # curv = curv*eps.max()/curv.max() figure() plot(t, eps_fit) plot(t, curv) # plot(t, deps_fit) ``` ### Kneedle algorithm ``` def locate_knee(time, eps_fit, eps_stat): from kneed import KneeLocator while 
not np.array_equal(time, np.sort(time)): idx_del = np.where(np.diff(time) < 0)[0] + 1 time = np.delete(time, idx_del) eps_fit = np.delete(eps_fit, idx_del) if eps_fit.max() > 2 * eps_stat: # log-norm + tanh knee = KneeLocator(time, eps_fit, direction='decreasing') idx = knee.knee_x else: knee = KneeLocator(time, eps_fit) idx = knee.knee_x if idx is None: # non-stationary case idx = -1 time_stat = time[idx] return time_stat locate_knee(t, eps_fit, eps_fit[-1]) from kneed import KneeLocator while not np.array_equal(t, np.sort(t)): idx_del = np.where(np.diff(t) < 0)[0] + 1 t = np.delete(t, idx_del) eps_fit = np.delete(eps_fit, idx_del) print(idx_del) knee = KneeLocator(t, eps_fit) knee.plot_knee() t[knee.knee_x], knee.direction %matplotlib np.where(np.gradient(t) <= 0)[0] # plt.plot(t, np.gradient(t)) idx_neq = np.where(t != np.sort(t))[0] print(idx_neq) print(t[idx_neq]) idx = _index_where(eps_fit, np.median(eps)); t[idx] idx = _index_where(abs(curv), 1e-5); t[idx] idx = np.argmin(abs(curv)); t[idx] ``` ### Histogram of curvatures ``` curv.std()*0.01 %matplotlib ipympl n, bins, patches = plt.hist(curv, 10, normed=1, facecolor='green', alpha=0.75) idx = _index_flat(eps_fit, t, 1e-4); t[idx] popt ``` # Cumulative average ``` from fluidsim.base.output.spect_energy_budget import cumsum_inv import numpy as np def cummean(x): """Cumulative average from the reversed array.""" sum_fwd = x.cumsum() idx_fwd = np.arange(1, x.shape[0]+1) return sum_fwd / idx_fwd def cummean_inv(x): """Cumulative average from the reversed array.""" sum_inv = cumsum_inv(x) idx_inv = np.arange(x.shape[0], 0, -1) return sum_inv / idx_inv eps_mean = cummean(eps) eps_mean_inv = cummean_inv(eps) plt.figure() plt.plot(t, eps) plt.plot(t, eps_mean) plt.plot(t, eps_mean_inv) ``` # Moving average (from SciPy cookbook) ``` import numpy def smooth(x,window_len=11,window='hanning'): """smooth the data using a window with requested size. 
This method is based on the convolution of a scaled window with the signal. The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that transient parts are minimized in the begining and end part of the output signal. input: x: the input signal window_len: the dimension of the smoothing window; should be an odd integer window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' flat window will produce a moving average smoothing. output: the smoothed signal example: t=linspace(-2,2,0.1) x=sin(t)+randn(len(t))*0.1 y=smooth(x) see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve scipy.signal.lfilter TODO: the window parameter could be the window itself if an array instead of a string NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y. """ if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.") if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.") if window_len<3: return x if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'") s=numpy.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]] #print(len(s)) if window == 'flat': #moving average w=numpy.ones(window_len,'d') else: w=eval('numpy.'+window+'(window_len)') y=numpy.convolve(w/w.sum(),s,mode='valid') return y from scipy.stats import linregress linregress(t[100:200], eps[100:200]) def f(x, a): return a * np.ones_like(x) curve_fit(f, t[300:], eps[300:]) eps[300:].mean() plt.figure() plt.plot(eps) eps_filt = medfilt(eps, 51) eps_mavg = smooth(eps, 21)[20:] plt.figure() plt.plot(t, eps) plt.plot(t, eps_filt, label="median filtered") plt.plot(t, eps_mavg, label="averaged") plt.legend() ``` # Using FFT ``` Ts = t[1] - t[0]; # sampling interval Fs = 1.0/Ts; # sampling rate tvec = t # time 
vector y = eps n = len(y) # length of the signal k = np.arange(n) T = n/Fs frq = k/T # two sides frequency range frq = frq[0:n//2] # one side frequency range Y = np.fft.fft(y)/n # fft computing and normalization Y = Y[0:n//2] plt.clf() fig, ax = plt.subplots(2, 1) ax[0].plot(tvec,y) ax[0].set_xlabel('Time') ax[0].set_ylabel('Amplitude') ax[1].loglog(frq[1:],abs(Y[1:]),'r') # plotting the spectrum ax[1].set_xlabel('Freq (Hz)') ax[1].set_ylabel('|Y(freq)|') ``` # Peak detection ``` %matplotlib ipympl from matplotlib.pyplot import * from scipy.signal import find_peaks_cwt, find_peaks widths = np.diff(t) peaks = find_peaks(eps)[0] plot(t, eps) scatter(t[peaks], eps[peaks]) eps[-1], eps_filt[-1] def step_info(t, yout, thresh_percent=20): thresh = 1 + thresh_percent / 100 result = dict( overshoot_percent=(yout.max() / yout[-1] - 1) * 100, rise_time=( t[next(i for i in range(0,len(yout)-1) if yout[i]>yout[-1]*.90)] - t[0] ), settling_time=( t[next(len(yout)-i for i in range(2,len(yout)-1) if abs(yout[-i]/yout[-1])>thresh)] - t[0] ), ) return result step_info(t, eps_filt) yout = eps thresh_settling = 1.20 idx = np.where(np.abs(yout / yout[-1]) > thresh_settling)[0][-1] settling_time = t[idx] - t[0] settling_time == step_info(t, yout)["settling_time"] ```
github_jupyter
``` import math import os import nemo from nemo.utils.lr_policies import WarmupAnnealing import nemo.collections.nlp as nemo_nlp from nemo.collections.nlp.data import NemoBertTokenizer, SentencePieceTokenizer from nemo.collections.nlp.callbacks.token_classification_callback import \ eval_iter_callback, eval_epochs_done_callback from nemo.backends.pytorch.common.losses import CrossEntropyLossNM from nemo.collections.nlp.nm.trainables import TokenClassifier ``` You can download data from [here](https://github.com/kyzhouhzau/BERT-NER/tree/master/data) and use [this](https://github.com/NVIDIA/NeMo/blob/master/examples/nlp/token_classification/import_from_iob_format.py) script to preprocess it. ``` BATCHES_PER_STEP = 1 BATCH_SIZE = 32 CLASSIFICATION_DROPOUT = 0.1 DATA_DIR = "PATH TO WHERE THE DATA IS" WORK_DIR = "PATH_TO_WHERE_TO_STORE_CHECKPOINTS_AND_LOGS" MAX_SEQ_LENGTH = 128 NUM_EPOCHS = 3 LEARNING_RATE = 0.00005 LR_WARMUP_PROPORTION = 0.1 OPTIMIZER = "adam" # Instantiate neural factory with supported backend neural_factory = nemo.core.NeuralModuleFactory( backend=nemo.core.Backend.PyTorch, # If you're training with multiple GPUs, you should handle this value with # something like argparse. See examples/nlp/token_classification.py for an example. local_rank=None, # If you're training with mixed precision, this should be set to mxprO1 or mxprO2. # See https://nvidia.github.io/apex/amp.html#opt-levels for more details. optimization_level="O0", # Define path to the directory you want to store your results log_dir=WORK_DIR, # If you're training with multiple GPUs, this should be set to # nemo.core.DeviceType.AllGpu placement=nemo.core.DeviceType.GPU) # If you're using a standard BERT model, you should do it like this. 
To see the full # list of BERT model names, check out nemo_nlp.huggingface.BERT.list_pretrained_models() tokenizer = NemoBertTokenizer(pretrained_model="bert-base-cased") bert_model = nemo_nlp.nm.trainables.huggingface.BERT( pretrained_model_name="bert-base-cased") # Describe training DAG train_data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationDataLayer( tokenizer=tokenizer, text_file=os.path.join(DATA_DIR, 'text_train.txt'), label_file=os.path.join(DATA_DIR, 'labels_train.txt'), max_seq_length=MAX_SEQ_LENGTH, batch_size=BATCH_SIZE) label_ids = train_data_layer.dataset.label_ids num_classes = len(label_ids) hidden_size = bert_model.hidden_size ner_classifier = TokenClassifier(hidden_size=hidden_size, num_classes=num_classes, dropout=CLASSIFICATION_DROPOUT) ner_loss = CrossEntropyLossNM(logits_dim=3) input_ids, input_type_ids, input_mask, loss_mask, _, labels = train_data_layer() hidden_states = bert_model(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask) logits = ner_classifier(hidden_states=hidden_states) loss = ner_loss(logits=logits, labels=labels, loss_mask=loss_mask) # Describe evaluation DAG eval_data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationDataLayer( tokenizer=tokenizer, text_file=os.path.join(DATA_DIR, 'text_dev.txt'), label_file=os.path.join(DATA_DIR, 'labels_dev.txt'), max_seq_length=MAX_SEQ_LENGTH, batch_size=BATCH_SIZE, label_ids=label_ids) eval_input_ids, eval_input_type_ids, eval_input_mask, _, eval_subtokens_mask, eval_labels \ = eval_data_layer() hidden_states = bert_model( input_ids=eval_input_ids, token_type_ids=eval_input_type_ids, attention_mask=eval_input_mask) eval_logits = ner_classifier(hidden_states=hidden_states) callback_train = nemo.core.SimpleLossLoggerCallback( tensors=[loss], print_func=lambda x: logging.info("Loss: {:.3f}".format(x[0].item()))) train_data_size = len(train_data_layer) # If you're training on multiple GPUs, this should be # train_data_size / (batch_size * 
batches_per_step * num_gpus) steps_per_epoch = int(train_data_size / (BATCHES_PER_STEP * BATCH_SIZE)) # Callback to evaluate the model callback_eval = nemo.core.EvaluatorCallback( eval_tensors=[eval_logits, eval_labels, eval_subtokens_mask], user_iter_callback=lambda x, y: eval_iter_callback(x, y), user_epochs_done_callback=lambda x: eval_epochs_done_callback(x, label_ids), eval_step=steps_per_epoch) # Callback to store checkpoints # Checkpoints will be stored in checkpoints folder inside WORK_DIR ckpt_callback = nemo.core.CheckpointCallback( folder=neural_factory.checkpoint_dir, epoch_freq=1) lr_policy = WarmupAnnealing(NUM_EPOCHS * steps_per_epoch, warmup_ratio=LR_WARMUP_PROPORTION) neural_factory.train( tensors_to_optimize=[loss], callbacks=[callback_train, callback_eval, ckpt_callback], lr_policy=lr_policy, batches_per_step=BATCHES_PER_STEP, optimizer=OPTIMIZER, optimization_params={ "num_epochs": NUM_EPOCHS, "lr": LEARNING_RATE }) ```
github_jupyter
# Vectorized Execution in SparkR This notebook demonstrates Arrow optimization with some small data (~10 MB) so that people can actually try it out and refer to it when they run the benchmark in an actual cluster. **Note that** the performance improvement is far greater when the data is large. Given my benchmark with the [500000 Records](http://eforexcel.com/wp/downloads-16-sample-csv-files-data-sets-for-testing/) dataset, I have observed up to around **1000% ~ 5000%** improvement. For more details, see [Databricks' blog](https://databricks.com/blog/2018/08/15/100x-faster-bridge-between-spark-and-r-with-user-defined-functions-on-databricks.html). ## Preparation First, enable R cell magic to execute R code in Jupyter. ``` import rpy2.rinterface %load_ext rpy2.ipython ``` After that, prepare the data to use. In this simple benchmark, the [10000 Records](http://eforexcel.com/wp/downloads-16-sample-csv-files-data-sets-for-testing/) dataset is used. ``` import urllib.request from zipfile import ZipFile from io import BytesIO # `rownum` can be 10000, 50000, 100000, .... # See http://eforexcel.com/wp/downloads-16-sample-csv-files-data-sets-for-testing/ rownum = 10000 url = "http://eforexcel.com/wp/wp-content/uploads/2017/07/%s-Records.zip" % rownum ZipFile(BytesIO(urllib.request.urlopen(url).read())).extractall() ``` Initialize SparkR with enough memory and load the libraries used for benchmarking. In this benchmark, 1 is used for the shuffle and default parallelism to mimic the case when a large dataset is processed. ``` %%R library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib"))) # Let's limit core; otherwise, Jupyter might die on some heavy workload. sparkR.session(master = "local[1]") library(microbenchmark) ``` Prepare R DataFrame to test from the data downloaded above. ``` %%R # `rownum` can be 10000, 50000, 100000, ....
# See http://eforexcel.com/wp/downloads-16-sample-csv-files-data-sets-for-testing/ rownum <- 10000 df <- read.csv(paste0(rownum, " Records.csv")) print(object.size(df), units = "MB") # To load `createDataFrame` faster, let's turn on Arrow optimization sparkR.session(sparkConfig = list(spark.sql.execution.arrow.sparkr.enabled = "true")) spark_df <- cache(createDataFrame(df)) num <- count(spark_df) # trigger the count to make sure input DataFrame is cached. ``` Prepare the common function to use for the benchmarking. ``` %%R benchmark_arrow <- function(func) { microbenchmark("No Arrow" = { sparkR.session(sparkConfig = list(spark.sql.execution.arrow.sparkr.enabled = "false")) func() }, "Arrow" = { sparkR.session(sparkConfig = list(spark.sql.execution.arrow.sparkr.enabled = "true")) func() }, times = 3L) } ``` ## R DataFrame to Spark DataFrame ``` %%R func <- function() { createDataFrame(df) } boxplot(benchmark_arrow(func)) ``` ## Spark DataFrame to R DataFrame ``` %%R func <- function() { collect(spark_df) } boxplot(benchmark_arrow(func)) ``` ## `dapply` ``` %%R func <- function() { count(dapply(spark_df, function(rdf) { rdf }, schema(spark_df))) } boxplot(benchmark_arrow(func)) ``` ## `gapply` ``` %%R func <- function() { count(gapply(spark_df, "Month_of_Joining", function(key, group) { group }, schema(spark_df))) } boxplot(benchmark_arrow(func)) ```
github_jupyter
``` #!/usr/bin/env python # coding: utf-8 %matplotlib inline %reload_ext autoreload %autoreload 2 import sys sys.path.insert(0, '../') from pyMulticopterSim.simulation.env import * # execute only if run as a script env = simulation_env() env.proceed_motor_speed("uav1", np.array([1100.0,1100.0,1100.0,1100.0]),0.1) env.plot_state("uav1") #!/usr/bin/env python # coding: utf-8 %matplotlib inline %reload_ext autoreload %autoreload 2 import os, sys, time, copy, yaml from scipy.special import factorial, comb, perm import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np import pandas as pd import h5py sys.path.insert(0, '../') from pyTrajectoryUtils.pyTrajectoryUtils.utils import * from multicopter_dynamics_sim import MulticopterDynamicsSim as uav_dyns from pyMulticopterSim.simulation.env import * from pyTrajectoryUtils.pyTrajectoryUtils.PIDcontroller import * def plot_state(time_array, state, state_ref, label_txt='vel', dim=3, flag_save=False): start_idx = 0 if failure_idx >= 0: end_idx = min(status_ref.shape[0], state.shape[0], time_array.shape[0], failure_idx) else: end_idx = min(status_ref.shape[0], state.shape[0], time_array.shape[0]) time_array_t = time_array[start_idx:end_idx] plt.ioff() fig = plt.figure(figsize=(10,5)) ax = fig.add_subplot(111) for i in range(dim): ax.plot(time_array_t, state[start_idx:end_idx,i], '-', label='{} dim {}'.format(label_txt,i)) ax.plot(time_array_t, state_ref[start_idx:end_idx,i], '-', label='{} ref dim {}'.format(label_txt,i)) ax.legend() ax.grid() plt.show() plt.pause(0.1) if flag_save: plt.savefig('{}/{}_{}.png'.format(save_dir,save_idx,label_txt)) plt.close() if __name__ == "__main__": env = simulation_env() controller = UAV_pid_tracking() traj_ref_path = '../test/sample_trajectory.csv' df = pd.read_csv(traj_ref_path, sep=',', header=None) status_ref = df.values[1:,:] print(status_ref.shape) freq_ctrl=200 freq_sim=2000 max_time = 100 dt_micro_ctrl = np.int(1e6/freq_ctrl) 
freq_sim_update = np.int(freq_sim/freq_ctrl) N = min(status_ref.shape[0], max_time*freq_ctrl) traj_ref = status_ref[0,:] curr_time = 0 env.set_state_vehicle("uav1", position=status_ref[0,2:5], velocity=status_ref[0,5:8]) state_t = env.get_state("uav1") pos = state_t["position"] vel = state_t["velocity"] acc = state_t["acceleration"] att_q = state_t["attitude"] att = state_t["attitude_euler_angle"] angV = state_t["angular_velocity"] angA = state_t["angular_acceleration"] ms = state_t["motor_speed"] ma = state_t["motor_acceleration"] raw_acc = state_t["acceleration_raw"] raw_gyro = state_t["gyroscope_raw"] raw_ms = state_t["motor_speed_raw"] pos_array = np.zeros((N,3)) vel_array = np.zeros((N,3)) acc_array = np.zeros((N,3)) att_array = np.zeros((N,3)) att_q_array = np.zeros((N,4)) raw_acc_array = np.zeros((N,3)) raw_gyro_array = np.zeros((N,3)) filtered_acc_array = np.zeros((N,3)) filtered_gyro_array = np.zeros((N,3)) ms_array = np.zeros((N,4)) ms_c_array = np.zeros((N,4)) time_array = np.zeros(N) pos_err_array = np.zeros(N) yaw_err_array = np.zeros(N) failure_idx = -1 failure_start_idx = -1 failure_end_idx = -1 for it in range(N): curr_time = np.int(1.0*(it+1)/freq_ctrl*1e6) traj_ref = status_ref[it,2:] pos_ref = traj_ref[:3] vel_ref = traj_ref[3:6] ms_c = controller.control_update(traj_ref, pos, vel, acc, att, angV, angA, 1.0/freq_ctrl) env.proceed_motor_speed("uav1", ms_c, 1.0/freq_ctrl) state_t = env.get_state("uav1") pos = state_t["position"] vel = state_t["velocity"] acc = state_t["acceleration"] att_q = state_t["attitude"] att = state_t["attitude_euler_angle"] angV = state_t["angular_velocity"] angA = state_t["angular_acceleration"] ms = state_t["motor_speed"] ma = state_t["motor_acceleration"] raw_acc = state_t["acceleration_raw"] raw_gyro = state_t["gyroscope_raw"] raw_ms = state_t["motor_speed"] time_array[it] = 1.0*(it+1)/freq_ctrl pos_array[it,:] = pos vel_array[it,:] = vel acc_array[it,:] = acc att_array[it,:] = att att_q_array[it,:] = att_q 
raw_acc_array[it,:] = raw_acc raw_gyro_array[it,:] = raw_gyro filtered_acc_array[it,:] = acc filtered_gyro_array[it,:] = angV ms_array[it,:] = ms ms_c_array[it,:] = ms_c plot_state(time_array, pos_array, status_ref[:,2:5], label_txt='pos', dim=3) #!/usr/bin/env python # coding: utf-8 %matplotlib inline %reload_ext autoreload %autoreload 2 import os, sys, time, copy, yaml sys.path.insert(0, '../') from pyTrajectoryUtils.pyTrajectoryUtils.trajectorySimulation import * traj_sim = TrajectorySimulation() res = traj_sim.run_simulation(traj_ref_path='./sample_trajectory.csv', N_trial=1, max_pos_err=5.0, min_pos_err=0.5, max_yaw_err=30.0, min_yaw_err=5.0, freq_ctrl=200) traj_sim.plot_result(debug_value=res[0], flag_save=False) ```
github_jupyter
##### Copyright 2021 The Cirq Developers ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://quantumai.google/cirq/qcvv/xeb_theory"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/qcvv/xeb_theory.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/qcvv/xeb_theory.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/qcvv/xeb_theory.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a> </td> </table> ``` try: import cirq except ImportError: print("installing cirq...") !pip install --quiet cirq --pre print("installed cirq.") ``` # Cross Entropy Benchmarking Theory Cross entropy benchmarking uses the properties of random quantum programs to determine the fidelity of a wide variety of circuits. When applied to circuits with many qubits, XEB can characterize the performance of a large device. 
When applied to deep, two-qubit circuits it can be used to accurately characterize a two-qubit interaction potentially leading to better calibration. ``` # Standard imports import numpy as np import cirq from cirq.contrib.svg import SVGCircuit ``` ## The action of random circuits with noise An XEB experiment collects data from the execution of random circuits subject to noise. The effect of applying a random circuit with unitary $U$ is modeled as $U$ followed by a depolarizing channel. The result is that the initial state $|𝜓⟩$ is mapped to a density matrix $ρ_U$ as follows: $$ |𝜓⟩ → ρ_U = f |𝜓_U⟩⟨𝜓_U| + (1 - f) I / D $$ where $|𝜓_U⟩ = U|𝜓⟩$, $D$ is the dimension of the Hilbert space, $I / D$ is the maximally mixed state, and $f$ is the fidelity with which the circuit is applied. For this model to be accurate, we require $U$ to be a random circuit that scrambles errors. In practice, we use a particular circuit ansatz consisting of random single-qubit rotations interleaved with entangling gates. ### Possible single-qubit rotations These 8*8 possible rotations are chosen randomly when constructing the circuit. Geometrically, we choose 8 axes in the XY plane to perform a quarter-turn (pi/2 rotation) around. This is followed by a rotation around the Z axis of 8 different magnitudes. ``` exponents = np.linspace(0, 7/4, 8) exponents import itertools SINGLE_QUBIT_GATES = [ cirq.PhasedXZGate(x_exponent=0.5, z_exponent=z, axis_phase_exponent=a) for a, z in itertools.product(exponents, repeat=2) ] SINGLE_QUBIT_GATES[:10], '...' ``` ### Random circuit We use `random_rotations_between_two_qubit_circuit` to generate a random two-qubit circuit. Note that we provide the possible single-qubit rotations from above and declare that our two-qubit operation is the $\sqrt{i\mathrm{SWAP}}$ gate. 
``` import cirq.google as cg from cirq.experiments import random_quantum_circuit_generation as rqcg SQRT_ISWAP = cirq.ISWAP**0.5 q0, q1 = cirq.LineQubit.range(2) circuit = rqcg.random_rotations_between_two_qubit_circuit( q0, q1, depth=4, two_qubit_op_factory=lambda a, b, _: SQRT_ISWAP(a, b), single_qubit_gates=SINGLE_QUBIT_GATES ) SVGCircuit(circuit) ``` ## Estimating fidelity Let $O_U$ be an observable that is diagonal in the computational basis. Then the expectation value of $O_U$ on $ρ_U$ is given by $$ Tr(ρ_U O_U) = f ⟨𝜓_U|O_U|𝜓_U⟩ + (1 - f) Tr(O_U / D). $$ This equation shows how $f$ can be estimated, since $Tr(ρ_U O_U)$ can be estimated from experimental data, and $⟨𝜓_U|O_U|𝜓_U⟩$ and $Tr(O_U / D)$ can be computed. Let $e_U = ⟨𝜓_U|O_U|𝜓_U⟩$, $u_U = Tr(O_U / D)$, and $m_U$ denote the experimental estimate of $Tr(ρ_U O_U)$. We can write the following linear equation (equivalent to the expression above): $$ m_U = f e_U + (1-f) u_U \\ m_U - u_U = f (e_U - u_U) $$ ``` # Make long circuits (which we will truncate) MAX_DEPTH = 100 circuits = [ rqcg.random_rotations_between_two_qubit_circuit( q0, q1, depth=MAX_DEPTH, two_qubit_op_factory=lambda a, b, _: SQRT_ISWAP(a, b), single_qubit_gates=SINGLE_QUBIT_GATES) for _ in range(10) ] # We will truncate to these lengths cycle_depths = np.arange(3, MAX_DEPTH, 9) cycle_depths ``` ### Execute circuits Cross entropy benchmarking requires sampled bitstrings from the device being benchmarked *as well as* the true probabilities from a noiseless simulation. We find these quantities for all `(cycle_depth, circuit)` permutations. 
``` pure_sim = cirq.Simulator() P_DEPOL = 5e-3 noisy_sim = cirq.DensityMatrixSimulator(noise=cirq.depolarize(P_DEPOL)) # These two qubit circuits have 2^2 = 4 probabilities DIM = 4 records = [] for cycle_depth in cycle_depths: for circuit_i, circuit in enumerate(circuits): # Truncate the long circuit to the requested cycle_depth circuit_depth = cycle_depth * 2 + 1 assert circuit_depth <= len(circuit) trunc_circuit = circuit[:circuit_depth] # Pure-state simulation psi = pure_sim.simulate(trunc_circuit) psi = psi.final_state_vector pure_probs = np.abs(psi)**2 # Noisy execution meas_circuit = trunc_circuit + cirq.measure(q0, q1) sampled_inds = noisy_sim.sample(meas_circuit, repetitions=10_000).values[:,0] sampled_probs = np.bincount(sampled_inds, minlength=DIM) / len(sampled_inds) # Save the results records += [{ 'circuit_i': circuit_i, 'cycle_depth': cycle_depth, 'circuit_depth': circuit_depth, 'pure_probs': pure_probs, 'sampled_probs': sampled_probs, }] print('.', end='', flush=True) ``` ## What's the observable What is $O_U$? Let's define it to be the observable that gives the sum of all probabilities, i.e. $$ O_U |x \rangle = p(x) |x \rangle $$ for any bitstring $x$. We can use this to derive expressions for our quantities of interest. $$ e_U = \langle \psi_U | O_U | \psi_U \rangle \\ = \sum_x a_x^* \langle x | O_U | x \rangle a_x \\ = \sum_x p(x) \langle x | O_U | x \rangle \\ = \sum_x p(x) p(x) $$ $e_U$ is simply the sum of squared ideal probabilities. $u_U$ is a normalizing factor that only depends on the operator. Since this operator has the true probabilities in the definition, they show up here anyways. $$ u_U = \mathrm{Tr}[O_U / D] \\ = 1/D \sum_x \langle x | O_U | x \rangle \\ = 1/D \sum_x p(x) $$ For the measured values, we use the definition of an expectation value $$ \langle f(x) \rangle_\rho = \sum_x p(x) f(x) $$ It becomes notationally confusing because remember: our operator on basis states returns the ideal probability of that basis state $p(x)$. 
The probability of observing a measured basis state is estimated from samples and denoted $p_\mathrm{est}(x)$ here. $$ m_U = \mathrm{Tr}[\rho_U O_U] \\ = \langle O_U \rangle_{\rho_U} = \sum_{x} p_\mathrm{est}(x) p(x) $$ ``` for record in records: e_u = np.sum(record['pure_probs']**2) u_u = np.sum(record['pure_probs']) / DIM m_u = np.sum(record['pure_probs'] * record['sampled_probs']) record.update( e_u=e_u, u_u=u_u, m_u=m_u, ) ``` Remember: $$ m_U - u_U = f (e_U - u_U) $$ We estimate f by performing least squares minimization of the quantity $$ f (e_U - u_U) - (m_U - u_U) $$ over different random circuits. The solution to the least squares problem is given by $$ f = (∑_U (m_U - u_U) * (e_U - u_U)) / (∑_U (e_U - u_U)^2) $$ ``` import pandas as pd df = pd.DataFrame(records) df['y'] = df['m_u'] - df['u_u'] df['x'] = df['e_u'] - df['u_u'] df['numerator'] = df['x'] * df['y'] df['denominator'] = df['x'] ** 2 df.head() ``` ### Fit We'll plot the linear relationship and least-squares fit while we transform the raw DataFrame into one containing fidelities. 
``` %matplotlib inline from matplotlib import pyplot as plt # Color by cycle depth import seaborn as sns colors = sns.cubehelix_palette(n_colors=len(cycle_depths)) colors = {k: colors[i] for i, k in enumerate(cycle_depths)} _lines = [] def per_cycle_depth(df): fid_lsq = df['numerator'].sum() / df['denominator'].sum() cycle_depth = df.name xx = np.linspace(0, df['x'].max()) l, = plt.plot(xx, fid_lsq*xx, color=colors[cycle_depth]) plt.scatter(df['x'], df['y'], color=colors[cycle_depth]) global _lines _lines += [l] # for legend return pd.Series({'fid_lsq': fid_lsq}) fids = df.groupby('cycle_depth').apply(per_cycle_depth).reset_index() plt.xlabel(r'$e_U - u_U$', fontsize=18) plt.ylabel(r'$m_U - u_U$', fontsize=18) _lines = np.asarray(_lines) plt.legend(_lines[[0,-1]], cycle_depths[[0,-1]], loc='best', title='Cycle depth') plt.tight_layout() ``` ### Fidelities ``` plt.plot(fids['cycle_depth'], fids['fid_lsq'], label='LSq') xx = np.linspace(0, fids['cycle_depth'].max()) plt.plot(xx, (1-P_DEPOL)**(4*xx), label=r'$(1-\mathrm{depol})^{4d}$') plt.ylabel('Circuit fidelity', fontsize=18) plt.xlabel('Cycle Depth $d$', fontsize=18) plt.legend(loc='best') plt.tight_layout() ```
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder from sklearn.cross_validation import train_test_split import seaborn as sns from itertools import combinations_with_replacement sns.set() df = pd.read_csv('TempLinkoping2016.csv') df.head() X = df.iloc[:, 0:1].values Y = df.iloc[:, 1].values n_features = X.shape[1] degree = 15 combs = [combinations_with_replacement(range(n_features), i) for i in range(0, degree + 1)] flat_combs = [item for sublist in combs for item in sublist] X_new = np.empty((X.shape[0], len(flat_combs))) for i, index_combs in enumerate(flat_combs): X_new[:, i] = np.prod(X[:, index_combs], axis=1) train_X, test_X, train_Y, test_Y = train_test_split(X_new, Y, test_size = 0.2) def divide(X, i, threshold): more_than = np.where(X[:,i]>=threshold)[0] less_than = np.setxor1d(np.arange(X.shape[0]), more_than) return np.array([X[more_than,:], X[less_than,:]]) class Node: def __init__(self, feature=None, threshold=None, val=None, true_b=None, false_b=None): self.feature = feature self.threshold = threshold self.val = val self.true_b = true_b self.false_b = false_b class RegressionTree: def __init__(self, min_samples_split=2, min_impurity = 1e-8, max_depth=float('inf')): self.root = None self.min_samples_split = min_samples_split self.min_impurity = min_impurity self.max_depth = max_depth self.impurity_calculation = None self.leaf_value_calculation = None def _cal_variance_reduction(self, Y, y_more, y_less): var = np.var(Y) var_more = np.var(y_more) var_less = np.var(y_less) frac_more = len(y_more) / len(Y) frac_less = len(y_less) / len(Y) return np.sum(var - (frac_more * var_more + frac_less * var_less)) def _mean_y(self, Y): return np.mean(Y[:,0]) def build_tree(self, X, Y, current_depth=0): largest_impurity = 0 best_criteria = None best_sets = None if len(Y.shape) == 1: Y = np.expand_dims(Y, axis=1) XY = np.concatenate((X, Y), axis=1) n_samples, n_features = np.shape(X) if n_samples 
>= self.min_samples_split and current_depth <= self.max_depth: for i in range(n_features): feature_values = np.expand_dims(X[:, i], axis=1) unique_values = np.unique(feature_values) for threshold in unique_values: XY_more, XY_less = divide(XY, i, threshold) if XY_more.shape[0] > 0 and XY_less.shape[0]: y_more = XY_more[:, n_features:] y_less = XY_less[:, n_features:] impurity = self._cal_variance_reduction(Y, y_more, y_less) if impurity > largest_impurity: largest_impurity = impurity best_criteria = {"i": i, "threshold": threshold} best_sets = { 'left_X': XY_more[:, :n_features], 'left_Y': XY_more[:, n_features:], 'right_X': XY_less[:, :n_features], 'right_Y': XY_less[:, n_features:], } if largest_impurity > self.min_impurity: T_branch = self.build_tree(best_sets["left_X"], best_sets["left_Y"], current_depth + 1) F_branch = self.build_tree(best_sets["right_X"], best_sets["right_Y"], current_depth + 1) return Node(feature=best_criteria['i'],threshold=best_criteria['threshold'], true_b=T_branch, false_b=F_branch) selected_val = self._mean_y(Y) return Node(val=selected_val) def fit(self, X, Y): self.root = self.build_tree(X, Y) def _predict_val(self, X, tree=None): if tree is None: tree = self.root if tree.val is not None: return tree.val feature_val = X[tree.feature] branch = tree.false_b if feature_val >= tree.threshold: branch = tree.true_b return self._predict_val(X, branch) def predict(self, X): results = [] for i in range(X.shape[0]): results.append(self._predict_val(X[i,:])) return results regression_tree = RegressionTree() regression_tree.fit(train_X, train_Y) np.mean(np.square(test_Y - regression_tree.predict(test_X))) plt.scatter(X[:,0],Y) plt.plot(X,regression_tree.predict(X_new), c='red') plt.show() ```
github_jupyter
# Announcements - __Please familiarize yourself with the term projects, and sign up for your (preliminary) choice__ using [this form](https://forms.gle/ByLLpsthrpjCcxG89). _You may revise your choice, but I'd recommend settling on a choice well before Thanksgiving._ - Recommended reading on ODEs: [Lecture notes by Prof. Hjorth-Jensen (University of Oslo)](https://www.asc.ohio-state.edu/physics/ntg/6810/readings/hjorth-jensen_notes2013_08.pdf) - Problem Set 5 will be posted on D2L on Oct 12, due Oct 20. - __Outlook__: algorithms for solving high-dimensional linear and non-linear equations; then Boundary Value Problems and Partial Differential Equations. - Conference for Undergraduate Women in Physics: online event in 2021, [applications accepted until 10/25](https://www.aps.org/programs/women/cuwip/) This notebook presents a selection of topics from the book "Numerical Linear Algebra" by Trefethen and Bau (SIAM, 1997), and uses notebooks by Kyle Mandli. # Numerical Linear Algebra Numerical methods for linear algebra problems lie at the heart of many numerical approaches and are something we will spend some time on. Roughly we can break down problems that we would like to solve into two general problems, solving a system of equations $$A \vec{x} = \vec{b}$$ and solving the eigenvalue problem $$A \vec{v} = \lambda \vec{v}.$$ We examine each of these problems separately and will evaluate some of the fundamental properties and methods for solving these problems. ## General Problem Specification The number and power of the different tools made available from the study of linear algebra makes it an invaluable field of study. Before we dive into numerical approximations we first consider some of the pivotal problems that numerical methods for linear algebra are used to address. 
For this discussion we will be using the common notation $m \times n$ to denote the dimensions of a matrix $A$. The $m$ refers to the number of rows and $n$ the number of columns. If a matrix is square, i.e. $m = n$, then we will use the notation that $A$ is $m \times m$. ### Systems of Equations The first type of problem is to find the solution to a linear system of equations. If we have $m$ equations for $m$ unknowns it can be written in matrix/vector form, $$A \vec{x} = \vec{b}.$$ For this example $A$ is an $m \times m$ matrix, denoted as being in $\mathbb{R}^{m\times m}$, and $\vec{x}$ and $\vec{b}$ are column vectors with $m$ entries, denoted as $\mathbb{R}^m$. #### Example: Vandermonde Matrix We have data $(x_i, y_i), ~~ i = 1, 2, \ldots, m$ that we want to fit a polynomial of order $m-1$. Solving the linear system $A p = y$ does this for us where $$A = \begin{bmatrix} 1 & x_1 & x_1^2 & \cdots & x_1^{m-1} \\ 1 & x_2 & x_2^2 & \cdots & x_2^{m-1} \\ \vdots & \vdots & \vdots & & \vdots \\ 1 & x_m & x_m^2 & \cdots & x_m^{m-1} \end{bmatrix} \quad \quad y = \begin{bmatrix} y_1 \\ y_2 \\ \vdots \\ y_m \end{bmatrix}$$ and $p$ are the coefficients of the interpolating polynomial $\mathcal{P}_N(x) = p_0 + p_1 x + p_2 x^2 + \cdots + p_m x^{m-1}$. The solution to this system satisfies $\mathcal{P}_N(x_i)=y_i$ for $i=1, 2, \ldots, m$. #### Example: Linear least squares 1 In a similar case as above, say we want to fit a particular function (could be a polynomial) to a given number of data points except in this case we have more data points than free parameters. In the case of polynomials this could be the same as saying we have $m$ data points but only want to fit a $n - 1$ order polynomial through the data where $n - 1 \leq m$. One of the common approaches to this problem is to minimize the "least-squares" error between the data and the resulting function: $$ E = \left( \sum^m_{i=1} |y_i - f(x_i)|^2 \right )^{1/2}. 
$$ But how do we do this if our matrix $A$ is now $m \times n$ and looks like $$ A = \begin{bmatrix} 1 & x_1 & x_1^2 & \cdots & x_1^{n-1} \\ 1 & x_2 & x_2^2 & \cdots & x_2^{n-1} \\ \vdots & \vdots & \vdots & & \vdots \\ 1 & x_m & x_m^2 & \cdots & x_m^{n-1} \end{bmatrix}? $$ Turns out if we solve the system $$A^T A x = A^T b$$ we can gaurantee that the error is minimized in the least-squares sense[<sup>1</sup>](#footnoteRegression). #### Practical Example: Linear least squares implementation Fitting a line through data that has random noise added to it. ``` %matplotlib inline %precision 3 import numpy import matplotlib.pyplot as plt # Linear Least Squares Problem # First define the independent and dependent variables. N = 20 x = numpy.linspace(-1.0, 1.0, N) y = x + numpy.random.random((N)) # Define the Vandermonde matrix based on our x-values A = numpy.ones((x.shape[0], 2)) A[:, 1] = x # Determine the coefficients of the polynomial that will # result in the smallest sum of the squares of the residual. p = numpy.linalg.solve(numpy.dot(A.transpose(), A), numpy.dot(A.transpose(), y)) print("Error in slope = %s, y-intercept = %s" % (numpy.abs(p[1] - 1.0), numpy.abs(p[0] - 0.5))) # Plot it out, cuz pictures are fun! fig = plt.figure() axes = fig.add_subplot(1, 1, 1) axes.plot(x, y, 'ko') axes.plot(x, p[0] + p[1] * x, 'r') axes.set_title("Least Squares Fit to Data") axes.set_xlabel("$x$") axes.set_ylabel("$f(x)$ and $y_i$") plt.show() ``` ### Eigenproblems Eigenproblems come up in a variety of contexts and often are integral to many problem of scientific and engineering interest. It is such a powerful idea that it is not uncommon for us to take a problem and convert it into an eigenproblem. 
We will covered detailed algorithms for eigenproblems in the next lectures, but for now let's remind ourselves of the problem and analytic solution: If $A \in \mathbb{C}^{m\times m}$ (a square matrix with complex values), a non-zero vector $\vec{v}\in\mathbb{C}^m$ is an **eigenvector** of $A$ with a corresponding **eigenvalue** $\lambda \in \mathbb{C}$ if $$A \vec{v} = \lambda \vec{v}.$$ One way to interpret the eigenproblem is that we are attempting to ascertain the "action" of the matrix $A$ on some subspace of $\mathbb{C}^m$ where this action acts like scalar multiplication. This subspace is called an **eigenspace**. #### Example Compute the eigenspace of the matrix $$ A = \begin{bmatrix} 1 & 2 \\ 2 & 1 \end{bmatrix} $$ Recall that we can find the eigenvalues of a matrix by computing $\det(A - \lambda I) = 0$. In this case we have $$\begin{aligned} A - \lambda I &= \begin{bmatrix} 1 & 2 \\ 2 & 1 \end{bmatrix} - \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix} \lambda\\ &= \begin{bmatrix} 1 - \lambda & 2 \\ 2 & 1 - \lambda \end{bmatrix}. \end{aligned}$$ The determinant of the matrix is $$\begin{aligned} \begin{vmatrix} 1 - \lambda & 2 \\ 2 & 1 - \lambda \end{vmatrix} &= (1 - \lambda) (1 - \lambda) - 2 \cdot 2 \\ &= 1 - 2 \lambda + \lambda^2 - 4 \\ &= \lambda^2 - 2 \lambda - 3. \end{aligned}$$ This result is sometimes referred to as the characteristic equation of the matrix, $A$. Setting the determinant equal to zero we can find the eigenvalues as $$\begin{aligned} & \\ \lambda &= \frac{2 \pm \sqrt{4 - 4 \cdot 1 \cdot (-3)}}{2} \\ &= 1 \pm 2 \\ &= -1 \mathrm{~and~} 3 \end{aligned}$$ The eigenvalues are used to determine the eigenvectors. The eigenvectors are found by going back to the equation $(A - \lambda I) \vec{v}_i = 0$ and solving for each vector. 
A trick that works some of the time is to normalize each vector such that the first entry is 1 ($\vec{v}_1 = 1$): $$ \begin{bmatrix} 1 - \lambda & 2 \\ 2 & 1 - \lambda \end{bmatrix} \begin{bmatrix} 1 \\ v_2 \end{bmatrix} = 0 $$ $$\begin{aligned} 1 - \lambda + 2 v_2 &= 0 \\ v_2 &= \frac{\lambda - 1}{2} \end{aligned}$$ We can check this by substituting $v_2$ into the second row: $$\begin{aligned} 2 + (1 - \lambda) \frac{\lambda - 1}{2} & = 0\\ (\lambda - 1)^2 - 4 &= 0 \end{aligned}$$ which by design is satisfied by our eigenvalues. Another sometimes easier approach is to plug-in the eigenvalues to find each corresponding eigenvector. The eigenvectors are therefore $$\vec{v} = \begin{bmatrix}1 \\ -1 \end{bmatrix}, \begin{bmatrix}1 \\ 1 \end{bmatrix}.$$ Note that these are linearly independent. ## Fundamentals ### Matrix-Vector Multiplication One of the most basic operations we can perform with matrices is to multiply them by a vector. This matrix-vector product $A \vec{x} = \vec{b}$ is defined as $$ b_i = \sum^n_{j=1} a_{ij} x_j \quad \text{where}\quad i = 1, \ldots, m $$ Writing the matrix-vector product this way we see that one interpretation of this product is that each column of $A$ is weighted by the value $x_j$, or in other words $\vec{b}$ is a linear combination of the columns of $A$ where each column's weighting is $x_j$. $$ \begin{align} \vec{b} &= A \vec{x}, \\ \vec{b} &= \begin{bmatrix} & & & \\ & & & \\ \vec{a}_1 & \vec{a}_2 & \cdots & \vec{a}_n \\ & & & \\ & & & \end{bmatrix} \begin{bmatrix} x_1 \\ x_2 \\ \vdots \\ x_n \end{bmatrix}, \\ \vec{b} &= x_1 \vec{a}_1 + x_2 \vec{a}_2 + \cdots + x_n \vec{a}_n. \end{align} $$ This view will be useful later when we are trying to interpret various types of matrices. One important property of the matrix-vector product is that it is a **linear** operation, also known as a **linear operator**. This means that for any $\vec{x}, \vec{y} \in \mathbb{C}^n$ and any $c \in \mathbb{C}$ we know that 1. $A (\vec{x} + \vec{y}) = A\vec{x} + A\vec{y}$ 1. 
$A\cdot (c\vec{x}) = c A \vec{x}$ #### Example: Vandermonde Matrix In the case where we have $m$ data points and want $m - 1$ order polynomial interpolant the matrix $A$ is a square, $m \times m$, matrix as before. Using the above interpretation the polynomial coefficients $p$ are the weights for each of the monomials that give exactly the $y$ values of the data. #### Example: Numerical matrix-vector multiply Write a matrix-vector multiply function and check it with the appropriate `numpy` routine. Also verify the linearity of the matrix-vector multiply. ``` #A x = b #(m x n) (n x 1) = (m x 1) def matrix_vector_product(A, x): m, n = A.shape b = numpy.zeros(m) for i in range(m): for j in range(n): b[i] += A[i, j] * x[j] return b m = 4 n = 3 A = numpy.random.uniform(size=(m,n)) x = numpy.random.uniform(size=(n)) y = numpy.random.uniform(size=(n)) c = numpy.random.uniform() b = matrix_vector_product(A, x) print(numpy.allclose(b, numpy.dot(A, x))) print(numpy.allclose(matrix_vector_product(A, (x + y)), matrix_vector_product(A, x) + matrix_vector_product(A, y))) print(numpy.allclose(matrix_vector_product(A, c * x), c*matrix_vector_product(A, x))) ``` ### Matrix-Matrix Multiplication The matrix product with another matrix $A C = B$ is defined as $$ b_{ij} = \sum^m_{k=1} a_{ik} c_{kj}. $$ Again, a useful interpretation of this operation is that the product result $B$ is the a linear combination of the columns of $A$. _What are the dimensions of $A$ and $C$ so that the multiplication works?_ #### Example: Outer Product The product of two vectors $\vec{u} \in \mathbb{C}^m$ and $\vec{v} \in \mathbb{C}^n$ is a $m \times n$ matrix where the columns are the vector $u$ multiplied by the corresponding value of $v$: $$ \begin{align} \vec{u} \vec{v}^T &= \begin{bmatrix} u_1 \\ u_2 \\ \vdots \\ u_n \end{bmatrix} \begin{bmatrix} v_1 & v_2 & \cdots & v_n \end{bmatrix}, \\ & = \begin{bmatrix} v_1u_1 & \cdots & v_n u_1 \\ \vdots & & \vdots \\ v_1 u_m & \cdots & v_n u_m \end{bmatrix}. 
\end{align} $$ It is useful to think of these as operations on the column vectors, and an equivalent way to express this relationship is $$ \begin{align} \vec{u} \vec{v}^T &= \begin{bmatrix} \\ \vec{u} \\ \\ \end{bmatrix} \begin{bmatrix} v_1 & v_2 & \cdots & v_n \end{bmatrix}, \\ &= \begin{bmatrix} & & & \\ & & & \\ \vec{u}v_1 & \vec{u} v_2 & \cdots & \vec{u} v_n \\ & & & \\ & & & \end{bmatrix}, \\ & = \begin{bmatrix} v_1u_1 & \cdots & v_n u_1 \\ \vdots & & \vdots \\ v_1 u_m & \cdots & v_n u_m \end{bmatrix}. \end{align} $$ #### Example: Upper Triangular Multiplication Consider the multiplication of a matrix $A \in \mathbb{C}^{m\times n}$ and the **upper-triangular** matrix $R$ defined as the $n \times n$ matrix with entries $r_{ij} = 1$ for $i \leq j$ and $r_{ij} = 0$ for $i > j$. The product can be written as $$ \begin{bmatrix} \\ \\ \vec{b}_1 & \cdots & \vec{b}_n \\ \\ \\ \end{bmatrix} = \begin{bmatrix} \\ \\ \vec{a}_1 & \cdots & \vec{a}_n \\ \\ \\ \end{bmatrix} \begin{bmatrix} 1 & \cdots & 1 \\ & \ddots & \vdots \\ & & 1 \end{bmatrix}. $$ The columns of $B$ are then $$ \vec{b}_j = A \vec{r}_j = \sum^j_{k=1} \vec{a}_k $$ so that $\vec{b}_j$ is the sum of the first $j$ columns of $A$. #### Example: Write Matrix-Matrix Multiplication Write a function that computes matrix-matrix multiplication and demonstrate the following properties: 1. $A (B + C) = AB + AC$ (for square matrices)) 1. $A (cB) = c AB$ where $c \in \mathbb{C}$ 1. 
$AB \neq BA$ in general ``` def matrix_matrix_product(A, B): C = numpy.zeros((A.shape[0], B.shape[1])) for i in range(A.shape[0]): for j in range(B.shape[1]): for k in range(A.shape[1]): C[i, j] += A[i, k] * B[k, j] return C m = 4 n = 4 p = 4 A = numpy.random.uniform(size=(m, n)) B = numpy.random.uniform(size=(n, p)) C = numpy.random.uniform(size=(m, p)) c = numpy.random.uniform() print(numpy.allclose(matrix_matrix_product(A, B), numpy.dot(A, B))) print(numpy.allclose(matrix_matrix_product(A, (B + C)), matrix_matrix_product(A, B) + matrix_matrix_product(A, C))) print(numpy.allclose(matrix_matrix_product(A, c * B), c*matrix_matrix_product(A, B))) print(numpy.allclose(matrix_matrix_product(A, B), matrix_matrix_product(B, A))) ``` ### Matrices in NumPy NumPy and SciPy contain routines that ware optimized to perform matrix-vector and matrix-matrix multiplication. Given two `ndarray`s you can take their product by using the `dot` function. ``` n = 10 m = 5 # Matrix vector with identity A = numpy.identity(n) x = numpy.random.random(n) print(numpy.allclose(x, numpy.dot(A, x))) # Matrix vector product A = numpy.random.random((m, n)) print(numpy.dot(A, x)) # Matrix matrix product B = numpy.random.random((n, m)) print(numpy.dot(A, B)) ``` ### Range and Null-Space #### Range - The **range** of a matrix $A \in \mathbb R^{m \times n}$ (similar to any function), denoted as $\text{range}(A)$, is the set of vectors that can be expressed as $A x$ for $x \in \mathbb R^n$. - We can also then say that that $\text{range}(A)$ is the space **spanned** by the columns of $A$. In other words the columns of $A$ provide a basis for $\text{range}(A)$, also called the **column space** of the matrix $A$. #### Null-Space - Similarly the **null-space** of a matrix $A$, denoted $\text{null}(A)$ is the set of vectors $x$ that satisfy $A x = 0$. - A similar concept is the **rank** of the matrix $A$, denoted as $\text{rank}(A)$, is the dimension of the column space. 
A matrix $A$ is said to have **full-rank** if $\text{rank}(A) = \min(m, n)$. This property also implies that the matrix mapping is **one-to-one**. ### Inverse A **non-singular** or **invertible** matrix is characterized as a matrix with full-rank. Because the mapping is one-to-one, we can use $A$ to transform a vector $\vec{x}$ and, using the inverse, denoted $A^{-1}$, map it back to the original vector. The familiar definition of this is \begin{align*} A \vec{x} &= \vec{b}, \\ A^{-1} A \vec{x} & = A^{-1} \vec{b}, \\ \vec{x} &= A^{-1} \vec{b}. \end{align*} Since $A$ has full rank, its columns form a basis for $\mathbb{R}^m$ and the vector $\vec{b}$ must be in the column space of $A$. There are a number of important properties of a non-singular matrix A. Here we list them as the following equivalent statements: 1. $A$ has an inverse $A^{-1}$ 1. $\text{rank}(A) = m$ 1. $\text{range}(A) = \mathbb{C}^m$ 1. $\text{null}(A) = \{0\}$ 1. 0 is not an eigenvalue of $A$ 1. $\text{det}(A) \neq 0$ #### Example: Properties of invertible matrices Show that, given an invertible matrix, the rest of the properties hold. Make sure to search the `numpy` package for relevant functions. ``` m = 3 for n in range(100): A = numpy.random.uniform(size=(m, m)) if numpy.linalg.det(A) != 0: break print(numpy.dot(numpy.linalg.inv(A), A)) print(numpy.linalg.matrix_rank(A)) print("range") print(numpy.linalg.solve(A, numpy.zeros(m))) print(numpy.linalg.eigvals(A)) ``` ### Orthogonal Vectors and Matrices Orthogonality is a very important concept in linear algebra that forms the basis of many of the modern methods used in numerical computations.
Two vectors are said to be orthogonal if their **inner-product** or **dot-product** defined as $$ < \vec{x}, \vec{y} > \equiv (\vec{x}, \vec{y}) \equiv \vec{x}^T\vec{y} \equiv \vec{x} \cdot \vec{y} = \sum^m_{i=1} x_i y_i $$ Here we have shown the various notations you may run into (the inner-product is in-fact a general term for a similar operation for mathematical objects such as functions). If $\langle \vec{x},\vec{y} \rangle = 0$ then we say $\vec{x}$ and $\vec{y}$ are orthogonal. The reason we use this terminology is that the inner-product of two vectors can also be written in terms of the angle between them where $$ \cos \theta = \frac{\langle \vec{x}, \vec{y} \rangle}{||\vec{x}||_2~||\vec{y}||_2} $$ and $||\vec{x}||_2$ is the Euclidean ($\ell^2$) norm of the vector $\vec{x}$. We can write this in terms of the inner-product as well as $$ ||\vec{x}||_2^2 = \langle \vec{x}, \vec{x} \rangle = \vec{x}^T\vec{x} = \sum^m_{i=1} |x_i|^2. $$ The generalization of the inner-product to complex spaces is defined as $$ \langle x, y \rangle = \sum^m_{i=1} x_i^* y_i $$ where $x_i^*$ is the complex-conjugate of the value $x_i$. #### Orthonormality Taking this idea one step further we can say a set of vectors $\vec{x} \in X$ are orthogonal to $\vec{y} \in Y$ if $\forall \vec{x},\vec{y}$ $< \vec{x}, \vec{y} > = 0$. If $\forall \vec{x},\vec{y}$ $||\vec{x}|| = 1$ and $||\vec{y}|| = 1$ then they are also called orthonormal. Note that we dropped the 2 as a subscript to the notation for the norm of a vector. Later we will explore other ways to define a norm of a vector other than the Euclidean norm defined above. Another concept that is related to orthogonality is linear-independence. A set of vectors $\vec{x} \in X$ are **linearly independent** if $\forall \vec{x} \in X$ that each $\vec{x}$ cannot be written as a linear combination of the other vectors in the set $X$. 
An equivalent statement is that there does not exist a set of scalars $c_i$ such that $$ \vec{x}_k = \sum^n_{i=1, i \neq k} c_i \vec{x}_i. $$ Another way to write this is that $\vec{x}_k \in X$ is orthogonal to all the rest of the vectors in the set $X$. This can be related directly through the idea of projection. If we have a set of vectors $\vec{x} \in X$ we can project another vector $\vec{v}$ onto the vectors in $X$ by using the inner-product. This is especially powerful if we have a set of linearly-independent vectors $X$, which are said to **span** a space (or provide a **basis** for a space), s.t. any vector in the space spanned by $X$ can be expressed as a linear combination of the basis vectors $X$ $$ \vec{v} = \sum^n_{i=1} \, \langle \vec{v}, \vec{x}_i \rangle \, \vec{x}_i. $$ Note if $\vec{v} \in X$ that $$ \langle \vec{v}, \vec{x}_i \rangle = 0 \quad \forall \vec{x}_i \in X \setminus \vec{v}. $$ Looping back to matrices, the column space of a matrix is spanned by its linearly independent columns. Any vector $v$ in the column space can therefore be expressed via the equation above. A special class of matrices are called **unitary** matrices when complex-valued and **orthogonal** when purely real-valued if the columns of the matrix are orthonormal to each other. Importantly this implies that for a unitary matrix $Q$ we know the following 1. $Q^* = Q^{-1}$ 1. $Q^*Q = I$ where $Q^*$ is called the **adjoint** of $Q$. The adjoint is defined as the transpose of the original matrix with the entries being the complex conjugate of each entry as the notation implies. ### Vector Norms Norms (and also measures) provide a means for measure the "size" or distance in a space. In general a norm is a function, denoted by $||\cdot||$, that maps $\mathbb{C}^m \rightarrow \mathbb{R}$. In other words we stick in a multi-valued object and get a single, real-valued number out the other end. All norms satisfy the properties: 1. 
$||\vec{x}|| \geq 0$, and $||\vec{x}|| = 0$ only if $\vec{x} = \vec{0}$ 1. $||\vec{x} + \vec{y}|| \leq ||\vec{x}|| + ||\vec{y}||$ (triangle inequality) 1. $||c \vec{x}|| = |c| ~ ||\vec{x}||$ where $c \in \mathbb{C}$ There are a number of relevant norms that we can define beyond the Euclidean norm, also known as the 2-norm or $\ell_2$ norm: 1. $\ell_1$ norm: $$ ||\vec{x}||_1 = \sum^m_{i=1} |x_i|, $$ 1. $\ell_2$ norm: $$ ||\vec{x}||_2 = \left( \sum^m_{i=1} |x_i|^2 \right)^{1/2}, $$ 1. $\ell_p$ norm: $$ ||\vec{x}||_p = \left( \sum^m_{i=1} |x_i|^p \right)^{1/p}, \quad \quad 1 \leq p < \infty, $$ 1. $\ell_\infty$ norm: $$ ||\vec{x}||_\infty = \max_{1\leq i \leq m} |x_i|, $$ 1. weighted $\ell_p$ norm: $$ ||\vec{x}||_{W_p} = \left( \sum^m_{i=1} |w_i x_i|^p \right)^{1/p}, \quad \quad 1 \leq p < \infty, $$ These are also related to other norms denoted by capital letters ($L_2$ for instance). In this case we use the lower-case notation to denote finite or discrete versions of the infinite dimensional counterparts. #### Example: Comparisons Between Norms Compute the norms given some vector $\vec{x}$ and compare their values. Verify the properties of the norm for one of the norms. ``` m = 10 p = 4 x = numpy.random.uniform(size=m) ell_1 = 0.0 for i in range(m): ell_1 += numpy.abs(x[i]) ell_2 = 0.0 for i in range(m): ell_2 += numpy.abs(x[i])**2 ell_2 = numpy.sqrt(ell_2) ell_p = 0.0 for i in range(m): ell_p += numpy.abs(x[i])**p ell_p = (ell_p)**(1.0 / p) ell_infty = numpy.max(numpy.abs(x)) print("L_1 = %s, L_2 = %s, L_%s = %s, L_infty = %s" % (ell_1, ell_2, p, ell_p, ell_infty)) y = numpy.random.uniform(size=m) print() print("Properties of norms:") print(numpy.max(numpy.abs(x + y)), numpy.max(numpy.abs(x)) + numpy.max(numpy.abs(y))) print(numpy.max(numpy.abs(0.1 * x)), 0.1 * numpy.max(numpy.abs(x))) ``` ### Matrix Norms The most direct way to consider a matrix norm is those induced by a vector-norm.
Given a vector norm, we can define a matrix norm as the smallest number $C$ that satisfies the inequality $$ ||A \vec{x}||_{m} \leq C ||\vec{x}||_{n}. $$ or as the supremum of the ratios so that $$ C = \sup_{\vec{x}\in\mathbb{C}^n ~ \vec{x}\neq\vec{0}} \frac{||A \vec{x}||_{m}}{||\vec{x}||_n}. $$ Noting that $||A \vec{x}||$ lives in the column space and $||\vec{x}||$ on the domain we can think of the matrix norm as the "size" of the matrix that maps the domain to the range. Also noting that if $||\vec{x}||_n = 1$ we also satisfy the condition we can write the induced matrix norm as $$ ||A||_{(m,n)} = \sup_{\vec{x} \in \mathbb{C}^n ~ ||\vec{x}||_{n} = 1} ||A \vec{x}||_{m}. $$ #### Example: Induced Matrix Norms Consider the matrix $$ A = \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix}. $$ Compute the induced-matrix norm of $A$ for the vector norms $\ell_2$ and $\ell_\infty$. $\ell^2$: For both of the requested norms the unit-length vectors $[1, 0]$ and $[0, 1]$ can be used to give an idea of what the norm might be and provide a lower bound. $$ ||A||_2 = \sup_{x \in \mathbb{R}^n} \left( ||A \cdot [1, 0]^T||_2, ||A \cdot [0, 1]^T||_2 \right ) $$ computing each of the norms we have $$\begin{aligned} \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix} \cdot \begin{bmatrix} 1 \\ 0 \end{bmatrix} &= \begin{bmatrix} 1 \\ 0 \end{bmatrix} \\ \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix} \cdot \begin{bmatrix} 0 \\ 1 \end{bmatrix} &= \begin{bmatrix} 2 \\ 2 \end{bmatrix} \end{aligned}$$ which translates into the norms $||A \cdot [1, 0]^T||_2 = 1$ and $||A \cdot [0, 1]^T||_2 = 2 \sqrt{2}$. This implies that the $\ell_2$ induced matrix norm of $A$ is at least $||A||_{2} = 2 \sqrt{2} \approx 2.828427125$. The exact value of $||A||_2$ can be computed using the spectral radius defined as $$ \rho(A) = \max_{i} |\lambda_i|, $$ where $\lambda_i$ are the eigenvalues of $A$. 
With this we can compute the $\ell_2$ norm of $A$ as $$ ||A||_2 = \sqrt{\rho(A^\ast A)} $$ Computing this product we find $$ A^\ast A = \begin{bmatrix} 1 & 0 \\ 2 & 2 \end{bmatrix} \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix} = \begin{bmatrix} 1 & 2 \\ 2 & 8 \end{bmatrix} $$ which has eigenvalues $$ \lambda = \frac{1}{2}\left(9 \pm \sqrt{65}\right ) $$ so $||A||_2 \approx 2.9208096$. $\ell^\infty$: We can again bound $||A||_\infty$ by looking at the unit vectors, which give us the matrix lower bound of 2. To compute it exactly, it turns out that $||A||_{\infty} = \max_{1 \leq i \leq m} ||a^\ast_i||_1$ where $a^\ast_i$ is the $i$th row of $A$. This is then the maximum of the row sums of $A$. Therefore $||A||_\infty = 3$. ``` A = numpy.array([[1, 2], [0, 2]]) print(numpy.linalg.norm(A, ord=2)) print(numpy.linalg.norm(A, ord=numpy.infty)) ``` #### Example: General Norms of a Matrix Compute a bound on the induced norm of the $m \times n$ dimensional matrix $A$ using $\ell_1$ and $\ell_\infty$. One of the most useful ways to think about matrix norms is as a transformation of a unit-ball to an ellipse. Depending on the norm in question, the norm will be some combination of the resulting ellipse. For the above cases we have some nice relations based on these ideas. 1. $||A \vec{x}||_1 = || \sum^n_{j=1} x_j \vec{a}_j ||_1 \leq \sum^n_{j=1} |x_j| ||\vec{a}_j||_1 \leq \max_{1\leq j\leq n} ||\vec{a}_j||_1$ 1. $||A \vec{x}||_\infty = || \sum^n_{j=1} x_j \vec{a_j} ||_\infty \leq \sum^n_{j=1} |x_j| ||\vec{a}_j||_\infty \leq \max_{1 \leq i \leq m} ||a^*_i||_1$ ``` # Note: that this code is a bit fragile to angles that go beyond pi # due to the use of arccos.
import matplotlib.patches as patches A = numpy.array([[1, 2], [0, 2]]) def draw_unit_vectors(axes, A, head_width=0.1): head_length = 1.5 * head_width image_e = numpy.empty(A.shape) angle = numpy.empty(A.shape[0]) image_e[:, 0] = numpy.dot(A, numpy.array((1.0, 0.0))) image_e[:, 1] = numpy.dot(A, numpy.array((0.0, 1.0))) for i in range(A.shape[0]): angle[i] = numpy.arccos(image_e[0, i] / numpy.linalg.norm(image_e[:, i], ord=2)) axes.arrow(0.0, 0.0, image_e[0, i] - head_length * numpy.cos(angle[i]), image_e[1, i] - head_length * numpy.sin(angle[i]), head_width=head_width, color='b', alpha=0.5) head_width = 0.2 head_length = 1.5 * head_width # ============ # 1-norm # Unit-ball fig = plt.figure() fig.set_figwidth(fig.get_figwidth() * 2) fig.suptitle("1-Norm") axes = fig.add_subplot(1, 2, 1, aspect='equal') axes.plot((1.0, 0.0, -1.0, 0.0, 1.0), (0.0, 1.0, 0.0, -1.0, 0.0), 'r') draw_unit_vectors(axes, numpy.eye(2)) axes.set_title("Unit Ball") axes.set_xlim((-1.1, 1.1)) axes.set_ylim((-1.1, 1.1)) axes.grid(True) # Image axes = fig.add_subplot(1, 2, 2, aspect='equal') axes.plot((1.0, 2.0, -1.0, -2.0, 1.0), (0.0, 2.0, 0.0, -2.0, 0.0), 'r') draw_unit_vectors(axes, A, head_width=0.2) axes.set_title("Images Under A") axes.grid(True) plt.show() # ============ # 2-norm # Unit-ball fig = plt.figure() fig.suptitle("2-Norm") fig.set_figwidth(fig.get_figwidth() * 2) axes = fig.add_subplot(1, 2, 1, aspect='equal') axes.add_artist(plt.Circle((0.0, 0.0), 1.0, edgecolor='r', facecolor='none')) draw_unit_vectors(axes, numpy.eye(2)) axes.set_title("Unit Ball") axes.set_xlim((-1.1, 1.1)) axes.set_ylim((-1.1, 1.1)) axes.grid(True) # Image # Compute some geometry u, s, v = numpy.linalg.svd(A) theta = numpy.empty(A.shape[0]) ellipse_axes = numpy.empty(A.shape) theta[0] = numpy.arccos(u[0][0]) / numpy.linalg.norm(u[0], ord=2) theta[1] = theta[0] - numpy.pi / 2.0 for i in range(theta.shape[0]): ellipse_axes[0, i] = s[i] * numpy.cos(theta[i]) ellipse_axes[1, i] = s[i] * numpy.sin(theta[i]) axes = 
fig.add_subplot(1, 2, 2, aspect='equal') axes.add_artist(patches.Ellipse((0.0, 0.0), 2 * s[0], 2 * s[1], theta[0] * 180.0 / numpy.pi, edgecolor='r', facecolor='none')) for i in range(A.shape[0]): axes.arrow(0.0, 0.0, ellipse_axes[0, i] - head_length * numpy.cos(theta[i]), ellipse_axes[1, i] - head_length * numpy.sin(theta[i]), head_width=head_width, color='k') draw_unit_vectors(axes, A, head_width=0.2) axes.set_title("Images Under A") axes.set_xlim((-s[0] + 0.1, s[0] + 0.1)) axes.set_ylim((-s[0] + 0.1, s[0] + 0.1)) axes.grid(True) plt.show() # ============ # infty-norm # Unit-ball fig = plt.figure() fig.suptitle("$\infty$-Norm") fig.set_figwidth(fig.get_figwidth() * 2) axes = fig.add_subplot(1, 2, 1, aspect='equal') axes.plot((1.0, -1.0, -1.0, 1.0, 1.0), (1.0, 1.0, -1.0, -1.0, 1.0), 'r') draw_unit_vectors(axes, numpy.eye(2)) axes.set_title("Unit Ball") axes.set_xlim((-1.1, 1.1)) axes.set_ylim((-1.1, 1.1)) axes.grid(True) # Image # Geometry - Corners are A * ((1, 1), (1, -1), (-1, 1), (-1, -1)) # Symmetry implies we only need two. Here we just plot two u = numpy.empty(A.shape) u[:, 0] = numpy.dot(A, numpy.array((1.0, 1.0))) u[:, 1] = numpy.dot(A, numpy.array((-1.0, 1.0))) theta[0] = numpy.arccos(u[0, 0] / numpy.linalg.norm(u[:, 0], ord=2)) theta[1] = numpy.arccos(u[0, 1] / numpy.linalg.norm(u[:, 1], ord=2)) axes = fig.add_subplot(1, 2, 2, aspect='equal') axes.plot((3, 1, -3, -1, 3), (2, 2, -2, -2, 2), 'r') for i in range(A.shape[0]): axes.arrow(0.0, 0.0, u[0, i] - head_length * numpy.cos(theta[i]), u[1, i] - head_length * numpy.sin(theta[i]), head_width=head_width, color='k') draw_unit_vectors(axes, A, head_width=0.2) axes.set_title("Images Under A") axes.set_xlim((-4.1, 4.1)) axes.set_ylim((-3.1, 3.1)) axes.grid(True) plt.show() ``` #### General Matrix Norms (induced and non-induced) In general matrix-norms have the following properties whether they are induced from a vector-norm or not: 1. $||A|| \geq 0$ and $||A|| = 0$ only if $A = 0$ 1. 
$||A + B|| \leq ||A|| + ||B||$ (Triangle Inequality) 1. $||c A|| = |c| ||A||$ The most widely used matrix norm not induced by a vector norm is the **Frobenius norm** defined by $$ ||A||_F = \left( \sum^m_{i=1} \sum^n_{j=1} |A_{ij}|^2 \right)^{1/2}. $$ #### Invariance under unitary multiplication One important property of the matrix 2-norm (and Forbenius norm) is that multiplication by a unitary matrix does not change the product (kind of like multiplication by 1). In general for any $A \in \mathbb{C}^{m\times n}$ and unitary matrix $Q \in \mathbb{C}^{m \times m}$ we have \begin{align*} ||Q A||_2 &= ||A||_2 \\ ||Q A||_F &= ||A||_F. \end{align*} ## Singular Value Decomposition Definition: Let $A \in \mathbb R^{m \times n}$, then $A$ can be factored as $$ A = U\Sigma V^{T} $$ where, * $U \in \mathbb R^{m \times m}$ and is the orthogonal matrix whose columns are the eigenvectors of $AA^{T}$ * $V \in \mathbb R^{n \times n}$ and is the orthogonal matrix whose columns are the eigenvectors of $A^{T}A$ * $\Sigma \in \mathbb R^{m \times n}$ and is a diagonal matrix with elements $\sigma_{1}, \sigma_{2}, \sigma_{3}, ... \sigma_{r}$ where $r = rank(A)$ corresponding to the square roots of the eigenvalues of $A^{T}A$. They are called the singular values of $A$ and are non negative arranged in descending order. ($\sigma_{1} \geq \sigma_{2} \geq \sigma_{3} \geq ... \sigma_{r} \geq 0$). The SVD has a number of applications mostly related to reducing the dimensionality of a matrix. ### Full SVD example Consider the matrix $$ A = \begin{bmatrix} 2 & 0 & 3 \\ 5 & 7 & 1 \\ 0 & 6 & 2 \end{bmatrix}. $$ The example below demonstrates the use of the `numpy.linalg.svd` function and shows the numerical result. ``` A = numpy.array([ [2.0, 0.0, 3.0], [5.0, 7.0, 1.0], [0.0, 6.0, 2.0] ]) U, sigma, V_T = numpy.linalg.svd(A, full_matrices=True) print(numpy.dot(U, numpy.dot(numpy.diag(sigma), V_T))) ``` ### Eigenvalue Decomposition vs. 
SVD Decomposition Let the matrix $X$ contain the eigenvectors of $A$ which are linearly independent, then we can write a decomposition of the matrix $A$ as $$ A = X \Lambda X^{-1}. $$ How does this differ from the SVD? - The basis of the SVD representation differs from the eigenvalue decomposition - The basis vectors are not in general orthogonal for the eigenvalue decomposition whereas they are for the SVD - The SVD effectively contains two basis sets. - All matrices have an SVD decomposition whereas not all have eigenvalue decompositions. ### Existence and Uniqueness Every matrix $A \in \mathbb{C}^{m \times n}$ has a singular value decomposition. Furthermore, the singular values $\{\sigma_{j}\}$ are uniquely determined, and if $A$ is square and the $\sigma_{j}$ are distinct, the left and right singular vectors $\{u_{j}\}$ and $\{v_{j}\}$ are uniquely determined up to complex signs (i.e., complex scalar factors of absolute value 1). ### Matrix Properties via the SVD - The $\text{rank}(A) = r$ where $r$ is the number of non-zero singular values. - The $\text{range}(A) = [u_1, ... , u_r]$ and $\text{null}(A) = [v_{r+1}, ... , v_n]$. - The $|| A ||_2 = \sigma_1$ and $||A||_F = \sqrt{\sigma_{1}^{2}+\sigma_{2}^{2}+...+\sigma_{r}^{2}}$. - The nonzero singular values of A are the square roots of the nonzero eigenvalues of $A^{T}A$ or $AA^{T}$. - If $A = A^{T}$, then the singular values of $A$ are the absolute values of the eigenvalues of $A$.
- For a square matrix $A \in \mathbb{C}^{m \times m}$, $|\det(A)| = \Pi_{i=1}^{m} \sigma_{i}$ ### Low-Rank Approximations - $A$ is the sum of the $r$ rank-one matrices: $$ A = U \Sigma V^T = \sum_{j=1}^{r} \sigma_{j}u_{j}v_{j}^{T} $$ - For any $k$ with $0 \leq k \leq r$, define the rank-$k$ truncation $$ A_{k} = \sum_{j=1}^{k} \sigma_{j}u_{j}v_{j}^{T}. $$ If $k < \min(m,n)$, then $$ ||A - A_{k}||_{2} = \inf_{B \in \mathbb{C}^{m \times n},\ \text{rank}(B)\leq k} || A-B||_{2} = \sigma_{k+1} $$ - For any $k$ with $0 \leq k \leq r$, the matrix $A_{k}$ also satisfies $$ ||A - A_{k}||_{F} = \inf_{B \in \mathbb{C}^{m \times n},\ \text{rank}(B)\leq k} ||A-B||_{F} = \sqrt{\sigma_{k+1}^{2} + ... + \sigma_{r}^{2}} $$ #### Example: Putting the above equations into code How does this work in practice? ``` data = numpy.zeros((15,40)) #H data[2:10,2:4] = 1 data[5:7,4:6] = 1 data[2:10,6:8] = 1 #E data[3:11,10:12] = 1 data[3:5,12:16] = 1 data[6:8, 12:16] = 1 data[9:11, 12:16] = 1 #L data[4:12,18:20] = 1 data[10:12,20:24] = 1 #L data[5:13,26:28] = 1 data[11:13,28:32] = 1 #0 data[6:14,34:36] = 1 data[6:8, 36:38] = 1 data[12:14, 36:38] = 1 data[6:14,38:40] = 1 plt.imshow(data) plt.show() u, diag, vt = numpy.linalg.svd(data, full_matrices=True) fig = plt.figure() fig.set_figwidth(fig.get_figwidth() * 3) fig.set_figheight(fig.get_figheight() * 4) for i in range(1, 16): diag_matrix = numpy.concatenate((numpy.zeros((len(diag[:i]) -1),), diag[i-1: i], numpy.zeros((40-i),))) reconstruct = numpy.dot(numpy.dot(u, numpy.diag(diag_matrix)[:15,]), vt) axes = fig.add_subplot(5, 3, i) mappable = axes.imshow(reconstruct, vmin=0.0, vmax=1.0) axes.set_title('Component = %s' % i) plt.show() u, diag, vt = numpy.linalg.svd(data, full_matrices=True) fig = plt.figure() fig.set_figwidth(fig.get_figwidth() * 3) fig.set_figheight(fig.get_figheight() * 4) for i in range(1, 16): diag_matrix = numpy.concatenate((diag[:i], numpy.zeros((40-i),))) reconstruct = numpy.dot(numpy.dot(u, numpy.diag(diag_matrix)[:15,]), vt) axes = fig.add_subplot(5, 3,
i) mappable = axes.imshow(reconstruct, vmin=0.0, vmax=1.0) axes.set_title('Component = %s' % i) plt.show() ``` <sup>1</sup><span id="footnoteRegression"> http://www.utstat.toronto.edu/~brunner/books/LinearModelsInStatistics.pdf</span>
github_jupyter
# Simpson paradoxes over time Copyright 2021 Allen B. Downey License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) [Click here to run this notebook on Colab](https://colab.research.google.com/github/AllenDowney/ProbablyOverthinkingIt2/blob/master/simpson_wages.ipynb) ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from simpson import * gss = pd.read_hdf('gss_simpson', 'gss') ``` Would you say that most of the time people try to be helpful, or that they are mostly just looking out for themselves? ``` xvarname = 'year' yvarname = 'helpful' gvarname = 'cohort10' run_subgroups(gss, xvarname, yvarname, gvarname) series_all, table = summarize(gss, xvarname, yvarname, gvarname) table.drop([1890, 1990], axis=1, inplace=True) table visualize(series_all, table) plt.title('') title = """Would you say that most of the time people try to be helpful, or that they are mostly just looking out for themselves? """ plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent saying "helpful"') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('helpful_vs_year_by_cohort10.jpg') ``` ## trust Generally speaking, would you say that most people can be trusted or that you can't be too careful in dealing with people? ``` xvarname = 'year' yvarname = 'trust' gvarname = 'cohort10' yvalue = 1 run_subgroups(gss, xvarname, yvarname, gvarname, yvalue) series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue) table.drop([1890, 1990], axis=1, inplace=True) table visualize(series_all, table) plt.title('') title = """Generally speaking, would you say that most people can be trusted or that you can't be too careful in dealing with people? 
""" plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent saying "can be trusted"') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('trust_vs_year_by_cohort10.jpg') ``` Do you think most people would try to take advantage of you if they got a chance, or would they try to be fair? ``` xvarname = 'year' yvarname = 'fair' gvarname = 'cohort10' yvalue = 2 run_subgroups(gss, xvarname, yvarname, gvarname, yvalue) series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue) table.drop([1890, 1990], axis=1, inplace=True) table visualize(series_all, table) plt.title('') title = """Do you think most people would try to take advantage of you if they got a chance, or would they try to be fair? """ plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent saying "would try to be fair"') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('fair_vs_year_by_cohort10.jpg') ``` Is there any area right around here--that is, within a mile--where you would be afraid to walk alone at night? ``` xvarname = 'year' yvarname = 'fear' gvarname = 'cohort10' yvalue = 2 run_subgroups(gss, xvarname, yvarname, gvarname, yvalue) series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue) table.drop([1890, 1990], axis=1, inplace=True) table visualize(series_all, table) plt.title('') title = """Is there any area right around here--that is, within a mile-- where you would be afraid to walk alone at night? 
""" plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent saying "no"') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('fear_vs_year_by_cohort10.jpg') ``` ## happy ``` xvarname = 'year' yvarname = 'happy' gvarname = 'cohort10' yvalue = [1,2] run_subgroups(gss, xvarname, yvarname, gvarname, yvalue) series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue) table.drop([1890, 1990], axis=1, inplace=True) table visualize(series_all, table) plt.title('') title = """Taken all together, how would you say things are these days-- would you say that you are very happy, pretty happy, or not too happy? """ plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent saying "very happy" or "pretty happy"') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('happy_vs_year_by_cohort10.jpg') ``` ## pornlaw https://gss.norc.org/Documents/quex/GSS2018%20Ballot%202%20-%20English.pdf Which of these statements comes closest to your feelings about pornography laws? 1. There should be laws against the distribution of pornography, whatever the age, or 2. There should be laws against the distribution of pornography to persons under 18, or 3. There should be no laws forbidding the distribution of pornography ``` xvarname = 'year' yvarname = 'pornlaw' gvarname = 'cohort10' yvalue = 1 run_subgroups(gss, xvarname, yvarname, gvarname, yvalue) series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue) table.drop([1890, 1990], axis=1, inplace=True) table visualize(series_all, table) plt.title('') title = """Do you think there should be laws against the distribution of pornography, whatever the age? 
""" plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent who agree') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('pornlaw_vs_year_by_cohort10.jpg') ``` Do you think the use of marijuana should be made legal or not? ``` xvarname = 'year' yvarname = 'fair' gvarname = 'cohort10' yvalue = 1 run_subgroups(gss, xvarname, yvarname, gvarname, yvalue) series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue) table.drop([1890, 1990], axis=1, inplace=True) table visualize(series_all, table) plt.title('') title = """Do you think the use of marijuana should be made legal or not? """ plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent saying "No legal"') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('grass_vs_year_by_cohort10.jpg') ``` Please tell me whether or not you think it should be possible for a pregnant woman to obtain a legal abortion if she is married and does not want any more children? ``` xvarname = 'year' yvarname = 'abnomore' gvarname = 'degree5' yvalue = 1 run_subgroups(gss, xvarname, yvarname, gvarname, yvalue) xvarname = 'year' yvarname = 'abnomore' gvarname = 'degree5' yvalue = 1 pre2002 = gss['year'] <= 2002 run_subgroups(gss[pre2002].copy(), xvarname, yvarname, gvarname, yvalue) series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue) table.sort_values(by=2017, axis=1, ascending=False, inplace=True) table visualize(series_all, table) plt.title('') title = """Do you think it should be possible for a pregnant woman to obtain a legal abortion if she is married and does not want any more children? 
""" plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent saying yes') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Highest degree', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('abnomore_vs_year_by_degree.jpg') ``` Suppose an admitted Communist wanted to make a speech in your community. Should he be allowed to speak, or not? ``` xvarname = 'year' yvarname = 'spkcom' gvarname = 'degree5' yvalue = 1 run_subgroups(gss, xvarname, yvarname, gvarname, yvalue) series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue) table.sort_values(by=2017, axis=1, ascending=False, inplace=True) table visualize(series_all, table) plt.title('') title = """Suppose an admitted Communist wanted to make a speech in your community. Should he be allowed to speak, or not? """ plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent saying "allowed"') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Highest degree', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('spkcom_vs_year_by_degree.jpg') ``` There are always some people whose ideas are considered bad or dangerous by other people. For instance, somebody who is against all churches and religion . . . If some people in your community suggested that a book he wrote against churches and religion should be taken out of your public library, would you favor removing this book, or not? ``` xvarname = 'year' yvarname = 'libath' gvarname = 'degree5' yvalue = 2 run_subgroups(gss, xvarname, yvarname, gvarname, yvalue) series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue) table.sort_values(by=2017, axis=1, ascending=False, inplace=True) table visualize(series_all, table) plt.title('') title = """If people object to a book by someone who is opposed to churches and religion, should it be removed from a public library, or not? 
""" plt.title(title, loc='left', fontdict=dict(fontsize=14)) plt.ylabel('Percent saying "not removed"') plt.xlabel('Year') x = y = 1.02 plt.legend(title='Highest degree', bbox_to_anchor=(x, y), loc='upper left', ncol=1) plt.tight_layout() plt.savefig('libath_vs_year_by_degree.jpg') ```
github_jupyter
# scikit-learn - Machine Learning in Python Scikit-learn is a machine learning library for Python. A key feature is that is has been designed to seamlessly interoperate with the scientific libraries NumPy and SciPy, which we have introduced in the previous notebooks, as well as with the graphical library Matplotlib. It collects a number of algorithms for supervised and unsupervised learning, including * Classification * SVM (`sklearn.svm`) * Nearest neighbors (`sklearn.neighbors`) * Random forests (`sklearn.ensemble`) * Regression * SVR (`sklearn.svm`) * Ridge regression (`sklearn.linear_model`) * Lasso (`sklearn.linear_model`) * Clustering (`sklearn.cluster`) * k-Means * Spectral clustering * Mean-shift * Dimensionality reduction * PCA (`sklearn.decomposition`) * Feature selection (`sklearn.feature_selection`) * Non-negative matrix factorization (`sklearn.decomposition`) Scikit-learn also offers modules for model evaluation and selection, including grid search, cross validation and metrics, and for data preprocessing (feature extraction and normalization). This tutorial has been adapted from materials at [scikit-learn](https://scikit-learn.org) (BSD License). To access the scikit-learn module, you can import the whole module: ``` import sklearn ``` or just the required component(s): ``` from sklearn import svm ``` # Basic commands ## Loading a dataset Scikit-learn ships with some standard datasets, that are ideal for tutorial purposes. For instance the `digits` dataset is a collection of handwritten digits, suitable for classification tasks: ``` from sklearn import datasets digits = datasets.load_digits() ``` This dataset is a dictionary-like object holding data and metadata. 
As typical when using scikit-learn, data are stored as a 2D Numpy array, namely the `data` member having shape `n_samples` by `n_features`: ``` type(digits.data) digits.data.shape print(digits.data) ``` Data need to be formatted as a 2D `(n_samples,n_features)` array to be used with the scikit-learn methods. However, original data can have a different shape; in the case of the `digits` dataset the 64 features correspond to the pixels of a 8x8 image. These original data can be accessed through the `images` member, e.g. to inspect the first image: ``` print(digits.images[0]) ``` In the case of a supervised problem, response variables are stored in the `target` member, a 1D Numpy array of size `n_samples`: ``` type(digits.target) digits.target.shape print(digits.target) ``` As we are dealing with a classification problem, the set of target classes is available, too, through the `target_names` member: ``` type(digits.target_names) digits.target_names.shape print(digits.target_names) ``` Scikit-learn allows to import external datasets in a variety of ways. Typical formats include Numpy arrays, Scipy sparse matrices, Pandas dataframes, and more. ## Learning and predicting In the case of the `digits` dataset, the task is to predict, given an image, which digit it represents. We are given samples of each of the 10 possible classes (the digits zero through nine) on which we fit a so called **estimator** to be able to predict the classes to which unseen samples belong. In scikit-learn, an estimator for classification is a Python object that implements the methods `fit(X, y)` and `predict(T)`. An example of estimator is `sklearn.svm.SVC`, which implements the Support Vector Classification algorithm. Let's set up a SVC model with fixed hyper-parameters `gamma` and `C`: ``` from sklearn import svm clf = svm.SVC(gamma=0.001, C=100.) ``` The estimator instance `clf` is trained to the model using a training set and the `fit` method. 
For the purposes of this tutorial let us use as training set all the images from the `digits` dataset but the last one, which we are keeping for the prediction step: ``` clf.fit(digits.data[:-1], digits.target[:-1]) ``` After the estimator has been trained, we can use it to predict new values. For instance, let us predict the last image from the dataset. This is the image: ``` import matplotlib.pyplot as plt %matplotlib inline plt.figure(1, figsize=(3, 3)) plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation='nearest') plt.show() ``` And this is the prediction: ``` clf.predict(digits.data[-1:]) ``` Image classification can be a challenging task, especially if the images are low resolution. Do you agree with the classifier? ## Displaying results of an image classification task Let us re-run the `digits` dataset example using the first half of the images for training and the second half for prediction. We are going to use Matplotlib to plot some of the images and get graphical insights. 
First, let us load the dataset and display the first 4 images from the training subset: ``` import matplotlib.pyplot as plt from sklearn import datasets, svm, metrics %matplotlib inline digits = datasets.load_digits() images_and_labels = list(zip(digits.images, digits.target)) for index, (image, label) in enumerate(images_and_labels[:4]): plt.subplot(2, 4, index + 1) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Training: %i' % label) ``` Then, let us proceed with some data preparation and the actual training: ``` n_samples = len(digits.images) data = digits.images.reshape((n_samples, -1)) classifier = svm.SVC(gamma=0.001) classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2]) ``` Finally, let's make predictions and visualise some of the outcomes: ``` expected = digits.target[n_samples // 2:] predicted = classifier.predict(data[n_samples // 2:]) print("Classification report for classifier %s:\n%s\n" % (classifier, metrics.classification_report(expected, predicted))) print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)) images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted)) for index, (image, prediction) in enumerate(images_and_predictions[:4]): plt.subplot(2, 4, index + 5) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Prediction: %i' % prediction) plt.show() ``` ## Model persistence What if we want to save a trained model for future use? One possibility is to use Python’s built-in persistence model, `pickle`. 
First let us instantiate and train an estimator: ``` from sklearn import svm from sklearn import datasets clf = svm.SVC(gamma='scale') iris = datasets.load_iris() X, y = iris.data, iris.target clf.fit(X, y) ``` Then we can save our trained model to a string: ``` import pickle s = pickle.dumps(clf) ``` And later on load it back and use it: ``` clf2 = pickle.loads(s) clf2.predict(X[0:1]) ``` In alternative, the `joblib` module can be used. It is more efficient on big data, but only allows to write to disk: ``` from joblib import dump, load dump(clf, 'filename.joblib') clf3 = load('filename.joblib') clf3.predict(X[0:1]) ``` ## Refitting the hyper-parameters Suppose we have trained an estimator: ``` import numpy as np from sklearn.svm import SVC rng = np.random.RandomState(0) X = rng.rand(100, 10) y = rng.binomial(1, 0.5, 100) X_test = rng.rand(5, 10) clf = SVC(kernel='linear') clf.fit(X, y) clf.predict(X_test) ``` Later on, we can update the hyper-parameters using the `set_params()` method. Re-calling the `fit()` method will then overwrite any previous training: ``` clf.set_params(kernel='rbf', gamma='scale').fit(X, y) clf.predict(X_test) ``` ## Choosing the hyper-parameters of the model To tune the model hyper-parameters, we can use tools such as grid search and cross validation. As an example, let us optimize the classifier estimator for the `digits` dataset using cross-validation and the `sklearn.model_selection.GridSearchCV` object. Again, half of the available data will be used for training, and the other half for evaluation. 
``` from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.metrics import classification_report from sklearn.svm import SVC # Loading the Digits dataset digits = datasets.load_digits() # To apply an classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.images) X = digits.images.reshape((n_samples, -1)) y = digits.target # Split the dataset in two equal parts X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.5, random_state=0) ``` Now we are setting the hyper-parameters to be tuned: ``` tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] ``` Then tuning for a `precision` score: ``` score = 'precision' print("# Tuning hyper-parameters for %s" % score) print() clf = GridSearchCV(SVC(), tuned_parameters, cv=5, scoring='%s_macro' % score) clf.fit(X_train, y_train) print("Best parameters set found on development set:") print() print(clf.best_params_) print() print("Grid scores on development set:") print() means = clf.cv_results_['mean_test_score'] stds = clf.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, clf.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print() print("Detailed classification report:") print() print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.") print() y_true, y_pred = y_test, clf.predict(X_test) print(classification_report(y_true, y_pred)) print() ``` And finally tuning for a `recall` score: ``` score = 'recall' print("# Tuning hyper-parameters for %s" % score) print() clf = GridSearchCV(SVC(), tuned_parameters, cv=5, scoring='%s_macro' % score) clf.fit(X_train, y_train) print("Best parameters set found on development set:") print() print(clf.best_params_) 
print() print("Grid scores on development set:") print() means = clf.cv_results_['mean_test_score'] stds = clf.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, clf.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) print() print("Detailed classification report:") print() print("The model is trained on the full development set.") print("The scores are computed on the full evaluation set.") print() y_true, y_pred = y_test, clf.predict(X_test) print(classification_report(y_true, y_pred)) print() ``` Note how this is just a toy problem: the hyper-parameter plateau is too flat and the output model is the same for precision and recall with ties in quality. # A clustering example for image segmentation Let us generate an image with connected circles. We will then apply a Spectral Clustering model to separate them. ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline from sklearn.feature_extraction import image from sklearn.cluster import spectral_clustering l = 100 x, y = np.indices((l, l)) center1 = (28, 24) center2 = (40, 50) center3 = (67, 58) center4 = (24, 70) radius1, radius2, radius3, radius4 = 16, 14, 15, 14 circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2 circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2 circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2 circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2 # 4 circles img = circle1 + circle2 + circle3 + circle4 # We use a mask that limits to the foreground: the problem that we are # interested in here is not separating the objects from the background, # but separating them one from the other. 
mask = img.astype(bool) img = img.astype(float) img += 1 + 0.2 * np.random.randn(*img.shape) ``` Here, the spectral clustering approach solves the problem known as "normalized graph cuts": the image is seen as a graph of connected voxels, and the algorithm amounts to choosing graph cuts defining regions while minimizing the ratio of the gradient along the cut, and the volume of the region. ``` # Convert the image into a graph with the value of the gradient on the # edges. graph = image.img_to_graph(img, mask=mask) # Take a decreasing function of the gradient: we take it weakly # dependent from the gradient the segmentation is close to a voronoi graph.data = np.exp(-graph.data / graph.data.std()) # Force the solver to be arpack, since amg is numerically # unstable on this example labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack') label_im = np.full(mask.shape, -1.) label_im[mask] = labels plt.matshow(img) plt.matshow(label_im) ```
github_jupyter
##### Copyright 2018 The TensorFlow Authors. [Licensed under the Apache License, Version 2.0](#scrollTo=Afd8bu4xJOgh). ``` // #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. ``` <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/swift/tutorials/custom_differentiation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/swift/blob/master/docs/site/tutorials/custom_differentiation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/swift/blob/master/docs/site/tutorials/custom_differentiation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> # Custom differentiation This tutorial will show you how to define your own custom derivatives, perform derivative surgery, and implement your own gradient checkpointing API in just 5 lines of Swift. ## Declaring custom derivatives You can define custom derivatives for any Swift function that has differentiable parameters and results. By doing that, you can even import a C function and make it differentiable. 
``` import Glibc func sillyExp(_ x: Float) -> Float { let 𝑒 = Float(M_E) print("Taking 𝑒(\(𝑒)) to the power of \(x)!") return pow(𝑒, x) } @differentiating(sillyExp) func sillyDerivative(_ x: Float) -> (value: Float, pullback: (Float) -> Float) { let y = sillyExp(x) return (value: y, pullback: { v in v * y }) } print("exp(3) =", sillyExp(3)) print("𝛁exp(3) =", gradient(of: sillyExp)(3)) ``` ## Stop derivatives from propagating Commonly known as "stop gradient" in machine learning use cases, method `withoutDerivative(at:)` stops derivatives from propagating. Plus, `withoutDerivative(at:)` can sometimes help the Swift compiler with identifying what not to differentiate and producing more efficient derivaitves. When it is detectable that the derivative of a function will always be zero, the Swift compiler will produce a warning. Explicitly using `withoutDerivative(at:)` silences that warning. ``` let x: Float = 2.0 let y: Float = 3.0 gradient(at: x, y) { x, y in sin(sin(sin(x))) + withoutDerivative(at: cos(cos(cos(y)))) } ``` ## Derivative surgery Method [`withDerivative(_:)`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable#/s:10TensorFlow14DifferentiablePAAE12withGradientyxy15CotangentVectorQzzcF) makes arbitrary operations (including mutation) run on the gradient at a value during the enclosing function’s backpropagation. Use this to debug or make experimental tweaks to backpropagation. ### It works anywhere All differentiation APIs provided by the standard library are defined generically over all types that conform to the `Differentiable` protocol: `Float`, `Double`, `Float80`, SIMD vectors, and even your own types! Read technical document [Differentiable Types](https://github.com/tensorflow/swift/blob/master/docs/DifferentiableTypes.md) for more insights on the `Differentiable` protocol. ``` var x: Float = 30 x.gradient { x -> Float in // Print the partial derivative with respect to the result of `sin(x)`. 
let a = sin(x).withDerivative { print("∂+/∂sin = \($0)") } // Force the partial derivative with respect to `x` to be `0.5`. let b = log(x.withDerivative { (dx: inout Float) in print("∂log/∂x = \(dx), but rewritten to 0.5"); dx = 0.5 }) return a + b } ``` ### Use it in a neural network module Just like how we used it in a simple `Float` function, we can use it in any numerical application, like the following neural network built using the [Swift for TensorFlow Deep Learning Library](https://github.com/tensorflow/swift-apis). ``` import TensorFlow struct MLP: Layer { var layer1 = Dense<Float>(inputSize: 2, outputSize: 10, activation: relu) var layer2 = Dense<Float>(inputSize: 10, outputSize: 1, activation: relu) @differentiable func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> { let h0 = layer1(input).withDerivative { print("∂L/∂layer1 =", $0) } return layer2(h0) } } var classifier = MLP() let optimizer = SGD(for: classifier, learningRate: 0.02) let x: Tensor<Float> = [[0, 0], [0, 1], [1, 0], [1, 1]] let y: Tensor<Float> = [0, 1, 1, 0] for _ in 0..<10 { let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in let ŷ = classifier(x).withDerivative { print("∂L/∂ŷ =", $0) } let loss = (ŷ - y).squared().mean() print("Loss: \(loss)") return loss } optimizer.update(&classifier, along: 𝛁model) } ``` ## Recomputing activations during backpropagation to save memory (checkpointing) Checkpointing is a traditional technique in reverse-mode automatic differentiation for saving memory. Rather than saving large intermediate values in the original computation for computing derivatives, the intermediate values are instead recomputed as needed during backpropagation. This technique has been realized in modern deep learning libraries as well. 
In Swift, API [`withRecomputationInPullbacks(_:)`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable#/s:10TensorFlow14DifferentiablePAAE28withRecomputationInPullbacksyqd__qd__xcAaBRd__lF) enables you to control what to recompute during backpropagation, and it is available on all `Differentiable` types. But today, let us learn how to define our own gradient checkpointing APIs from scratch, in just a few lines of code. ### Our gradient checkpointing API We can define our own gradient checkpointing API, `makeRecomputedInGradient(_:)`, in terms of standard library function [`differentiableFunction(from:)`](https://www.tensorflow.org/swift/api_docs/Functions#/s:10TensorFlow22differentiableFunction4fromq0_x_q_tcq0_5value_15CotangentVectorQz_AEQy_tAEQy0_c8pullbacktx_q_tc_tAA14DifferentiableRzAaJR_AaJR0_r1_lF), which is a shorthand for creating a differentiable function directly from a derivative function (also called a "vector-Jacobian products (VJP) function"). As we have seen before, the derivative function returns a tuple of the original function's result and a pullback closure. We return `original(x)` in `value:`, and call `pullback(at:in:)` on `original` to evaluate the original function again and get a pullback. ``` /// Given a differentiable function, returns the same differentiable function except when /// derivatives of this function are being computed. In that case, values in the original function needed /// for computing the derivatives will be recomputed, instead of being captured by the differential or pullback. /// /// - Parameter body: The body of the differentiable function. /// - Returns: The same differentiable function whose derivatives, when computed, will recompute /// some values from the original function. 
func makeRecomputedInGradient<T: Differentiable, U: Differentiable>( _ original: @escaping @differentiable (T) -> U ) -> @differentiable (T) -> U { return differentiableFunction { x in (value: original(x), pullback: { v in pullback(at: x, in: original)(v) }) } } ``` ### Verify it works ``` let input: Float = 10.0 print("Running original computation...") // Differentiable multiplication with checkpointing. let square = makeRecomputedInGradient { (x: Float) -> Float in print(" Computing square...") return x * x } // Differentiate `f(x) = (cos(x))^2`. let (output, backprop) = input.valueWithPullback { input -> Float in return square(cos(input)) } print("Running backpropagation...") let grad = backprop(1) print("Gradient = \(grad)") ``` ### Extend it to neural network modules In this example, we define a simple convolutional neural network. ```swift struct Model: Layer { var conv = Conv2D<Float>(filterShape: (5, 5, 3, 6)) var maxPool = MaxPool2D<Float>(poolSize: (2, 2), strides: (2, 2)) var flatten = Flatten<Float>() var dense = Dense<Float>(inputSize: 36 * 6, outputSize: 10) @differentiable func call(_ input: Tensor<Float>) -> Tensor<Float> { return input.sequenced(through: conv, maxPool, flatten, dense) } } ``` We want to make activations in the convolution layer (`conv`) be recomputed during backpropagation. However, using `makeRecomputedInGradient(_:)` could make the resulting code look cumbersome, especially when we want to apply layers sequentially using [`sequenced(in:through:_:_:_:_:)`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable#/s:10TensorFlow14DifferentiablePAAE9sequenced2in7through____6OutputQyd_3_AA7ContextC_qd__qd_0_qd_1_qd_2_qd_3_t5InputQyd__RszAA5LayerRd__AaMRd_0_AaMRd_1_AaMRd_2_AaMRd_3_AKQyd_0_AGRtd__AKQyd_1_AGRtd_0_AKQyd_2_AGRtd_1_AKQyd_3_AGRtd_2_r3_lF). 
```swift input.sequenced(in: context, through: conv, maxPool, flatten, dense) ``` So, why don't we define a **special layer type** that wraps a layer and makes its activations be recomputed during backpropagation? Let's do it. First, we define a `makeRecomputedInGradient(_:)` function that takes a binary function. ``` // Same as the previous `makeRecomputedInGradient(_:)`, except it's for binary functions. func makeRecomputedInGradient<T: Differentiable, U: Differentiable, V: Differentiable>( _ original: @escaping @differentiable (T, U) -> V ) -> @differentiable (T, U) -> V { return differentiableFunction { x, y in (value: original(x, y), pullback: { v in pullback(at: x, y, in: original)(v) }) } } ``` Then, we define a generic layer `ActivationDiscarding<Wrapped>`. ``` import TensorFlow /// A layer wrapper that makes the underlying layer's activations be discarded during application /// and recomputed during backpropagation. struct ActivationDiscarding<Wrapped: Layer>: Layer { /// The wrapped layer. var wrapped: Wrapped @differentiable func callAsFunction(_ input: Wrapped.Input) -> Wrapped.Output { let apply = makeRecomputedInGradient { (layer: Wrapped, input: Input) -> Wrapped.Output in print(" Applying \(Wrapped.self) layer...") return layer(input) } return apply(wrapped, input) } } ``` Finally, we can add a method on all layers that returns the same layer except its activations are discarded during application and recomputed during backpropagation. ``` extension Layer { func discardingActivations() -> ActivationDiscarding<Self> { return ActivationDiscarding(wrapped: self) } } ``` Back in the model, all we have to change is to wrap the convolution layer into the activation-recomputing layer. ```swift var conv = Conv2D<Float>(filterShape: (5, 5, 3, 6)).discardingActivations() ``` Now, simply use it in the model! 
``` struct Model: Layer { var conv = Conv2D<Float>(filterShape: (5, 5, 3, 6)).discardingActivations() var maxPool = MaxPool2D<Float>(poolSize: (2, 2), strides: (2, 2)) var flatten = Flatten<Float>() var dense = Dense<Float>(inputSize: 36 * 6, outputSize: 10) @differentiable func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> { return input.sequenced(through: conv, maxPool, flatten, dense) } } ``` When we run a training loop, we can see that the convolution layer's activations are computed twice: once during layer application, and once during backpropagation. ``` // Use random training data. let x = Tensor<Float>(randomNormal: [10, 16, 16, 3]) let y = Tensor<Int32>(rangeFrom: 0, to: 10, stride: 1) var model = Model() let opt = SGD(for: model) for i in 1...5 { print("Starting training step \(i)") print(" Running original computation...") let (logits, backprop) = model.appliedForBackpropagation(to: x) let (loss, dL_dŷ) = logits.valueWithGradient { logits in softmaxCrossEntropy(logits: logits, labels: y) } print(" Loss: \(loss)") print(" Running backpropagation...") let (dL_dθ, _) = backprop(dL_dŷ) opt.update(&model, along: dL_dθ) } ``` Just like that, it is super easy to define generic differentiable programming libraries for different domains.
github_jupyter
# Introduction You've built up your SQL skills enough that the remaining hands-on exercises will use different datasets than you see in the explanations. If you need to get to know a new dataset, you can run a couple of **SELECT** queries to extract and review the data you need. The next exercises are also more challenging than what you've done so far. Don't worry, you are ready for it! Run the code in the following cell to get everything set up: ``` # Set up feedback system from learntools.core import binder binder.bind(globals()) from learntools.sql.ex4 import * print("Setup Complete") ``` The World Bank has made tons of interesting education data available through BigQuery. Run the following cell to see the first few rows of the `international_education` table from the `world_bank_intl_education` dataset. ``` from google.cloud import bigquery # Create a "Client" object client = bigquery.Client() # Construct a reference to the "world_bank_intl_education" dataset dataset_ref = client.dataset("world_bank_intl_education", project="bigquery-public-data") # API request - fetch the dataset dataset = client.get_dataset(dataset_ref) # Construct a reference to the "international_education" table table_ref = dataset_ref.table("international_education") # API request - fetch the table table = client.get_table(table_ref) # Preview the first five lines of the "international_education" table client.list_rows(table, max_results=5).to_dataframe() ``` # Exercises The value in the `indicator_code` column describes what type of data is shown in a given row. One interesting indicator code is `SE.XPD.TOTL.GD.ZS`, which corresponds to "Government expenditure on education as % of GDP (%)". ### 1) Government expenditure on education Which countries spend the largest fraction of GDP on education? 
To answer this question, consider only the rows in the dataset corresponding to indicator code `SE.XPD.TOTL.GD.ZS`, and write a query that returns the average value in the `value` column for each country in the dataset between the years 2010-2017 (including 2010 and 2017 in the average). Requirements: - Your results should have the country name rather than the country code. You will have one row for each country. - The aggregate function for average is **AVG()**. Use the name `avg_ed_spending_pct` for the column created by this aggregation. - Order the results so the countries that spend the largest fraction of GDP on education show up first. In case it's useful to see a sample query, here's a query you saw in the tutorial (using a different dataset): ``` # Query to find out the number of accidents for each day of the week query = """ SELECT COUNT(consecutive_number) AS num_accidents, EXTRACT(DAYOFWEEK FROM timestamp_of_crash) AS day_of_week FROM `bigquery-public-data.nhtsa_traffic_fatalities.accident_2015` GROUP BY day_of_week ORDER BY num_accidents DESC """ ``` ``` # Your code goes here country_spend_pct_query = """ SELECT _____ FROM `bigquery-public-data.world_bank_intl_education.international_education` WHERE ____ GROUP BY ____ ORDER BY ____ """ # Set up the query (cancel the query if it would use too much of # your quota, with the limit set to 1 GB) safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=1e9) country_spend_pct_query_job = client.query(country_spend_pct_query, job_config=safe_config) # API request - run the query, and return a pandas DataFrame country_spending_results = country_spend_pct_query_job.to_dataframe() # View top few rows of results print(country_spending_results.head()) # Check your answer q_1.check() ``` For a hint or the solution, uncomment the appropriate line below. 
``` #q_1.hint() #q_1.solution() ``` ### 2) Identify interesting codes to explore The last question started by telling you to focus on rows with the code `SE.XPD.TOTL.GD.ZS`. But how would you find more interesting indicator codes to explore? There are 1000s of codes in the dataset, so it would be time consuming to review them all. But many codes are available for only a few countries. When browsing the options for different codes, you might restrict yourself to codes that are reported by many countries. Write a query below that selects the indicator code and indicator name for all codes with at least 175 rows in the year 2016. Requirements: - You should have one row for each indicator code. - The columns in your results should be called `indicator_code`, `indicator_name`, and `num_rows`. - Only select codes with 175 or more rows in the raw database (exactly 175 rows would be included). - To get both the `indicator_code` and `indicator_name` in your resulting DataFrame, you need to include both in your **SELECT** statement (in addition to a **COUNT()** aggregation). This requires you to include both in your **GROUP BY** clause. - Order from results most frequent to least frequent. ``` # Your code goes here code_count_query = """____""" # Set up the query code_count_query_job = client.query(code_count_query, job_config=safe_config) # API request - run the query, and return a pandas DataFrame code_count_results = code_count_query_job.to_dataframe() # View top few rows of results print(code_count_results.head()) # Check your answer q_2.check() ``` For a hint or the solution, uncomment the appropriate line below. ``` #q_2.hint() #q_2.solution() ``` # Keep Going **[Click here](#$NEXT_NOTEBOOK_URL$)** to learn how to use **AS** and **WITH** to clean up your code and help you construct more complex queries.
github_jupyter
# 4. Categorical Model Author: _Carlos Sevilla Salcedo (Updated: 18/07/2019)_ This notebook presents the categorical approach of the algorithm. For our model we understand that the view we are analysing is composed of one among several categories (The data given to the model must be an integer). To do so, we have to use the graphical model shown in the next image modifying the relation between the variables $X$ and $t$. <img src="Images/Graphic_Model_Categorical.png" style="max-width:100%; width: 70%"> where, in this case, variable $t$ is now a vector instead of a matrix. In order to have this relationship we have established a multinomial probit function as the connection between them, as proposed by _Girolami (2016)_. ## Synthetic data generation We can now generate data in a similar manner to the regression model to compare the performance of both approaches. In this case we are going to change the regression data to a categorical approach, to work with classes. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import math np.random.seed(0) N = 1000 # number of samples D0 = 55 # input features D1 = 3 # output features myKc = 20 K = 2 # common latent variables K0 = 3 # first view's latent variables K1 = 3 # second view's latent variables Kc=K+K0+K1 # latent variables # Generation of matrix W A0 = np.random.normal(0.0, 1, D0 * K).reshape(D0, K) A1 = np.random.normal(0.0, 1, D1 * K).reshape(D1, K) B0 = np.random.normal(0.0, 1, D0 * K0).reshape(D0, K0) B1 = np.random.normal(0.0, 1, D1 * K1).reshape(D1, K1) W0 = np.hstack((np.hstack((A0,B0)),np.zeros((D0,K1)))) W1 = np.hstack((np.hstack((A1,np.zeros((D1,K0)))),B1)) W_tot = np.vstack((W0,W1)) # Generation of matrix Z Z = np.random.normal(0.0, 1, Kc * N).reshape(N, Kc) # Generation of matrix X X0 = np.dot(Z,W0.T) + np.random.normal(0.0, 0.1, D0 * N).reshape(N, D0) X1 = np.dot(Z,W1.T) + np.random.normal(0.0, 0.1, D1 * N).reshape(N, D1) # Generation of matrix t t1 = np.argmax(X1,axis=1) ``` Once the 
data is generated we divide it into train and test in order to be able to test the performance of the model. After that, we can normalize the data. ``` from sklearn.model_selection import train_test_split X_tr, X_tst, Y_tr, Y_tst = train_test_split(X0, t1, test_size=0.3, random_state = 31) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_tr = scaler.fit_transform(X_tr) X_tst = scaler.transform(X_tst) ``` ## Training the model Once the data is prepared we just have to feed it to the model. As the model has so many possibilities we have decided to pass the data to the model following a particular structure so that we can know, for each view, whether the data corresponds to real, multilabel or categorical as well as knowing if we want to calculate the model with sparsity in the features. ``` import os os.sys.path.append('lib') import sshiba myKc = 20 # number of latent features max_it = int(5*1e4) # maximum number of iterations tol = 1e-6 # tolerance of the stopping condition (abs(1 - L[-2]/L[-1]) < tol) prune = 1 # whether to prune the irrelevant latent features myModel = sshiba.SSHIBA(myKc, prune) X0_tr = myModel.struct_data(X_tr, 0, 0) X1_tr = myModel.struct_data(Y_tr, 1, 0) X0_tst = myModel.struct_data(X_tst, 0, 0) X1_tst = myModel.struct_data(Y_tst, 1, 0) myModel.fit(X0_tr, X1_tr, max_iter = max_it, tol = tol, Y_tst = X1_tst, X_tst = X0_tst, AUC = 1) print('Final AUC %.3f' %(myModel.AUC[-1])) ``` ## Visualization of the results ### Lower Bound and MSE Now that the model is trained, we can plot the evolution of the lower bound throughout the iterations. This lower bound is calculated using the values of the variables the model is calculating and is the value we are maximizing. As we want to maximize this value it has to be always increasing with each iteration. At the same time, we are plotting now the evolution of the Mean Squared Error (MSE) with each update of the model. 
As we are not minimizing this curve, this doesn't necessarily have to be always decreasing and might need more iterations to reach a minimum. ``` def plot_AUC(AUC): fig, ax = plt.subplots(figsize=(10, 4)) ax.plot(AUC, linewidth=2, marker='s',markersize=5, label='SSHIBA', markerfacecolor='red') ax.grid() ax.set_xlabel('Iteration') ax.set_ylabel('Multiclass AUC') plt.legend() def plot_L(L): fig, ax = plt.subplots(figsize=(10, 4)) ax.plot(L, linewidth=2, marker='s',markersize=5, markerfacecolor='red') ax.grid() ax.set_xlabel('Iteration') ax.set_ylabel('L(Q)') plot_L(myModel.L) plt.title('Lower Bound') plot_AUC(myModel.AUC) plt.title('AUC test') plt.show() ``` ## LFW Dataset In order to improve the analysis of the results, we are showing in this section the results obtained using the _LFW_ database. This database is composed by different images of famous people and the goal is to identify what person each of them is. For the purpose of this example we have included the images of the people with more images, having that our data is now composed of 7 people or categories. First of all, we can prepare the data we want to work with. ``` import pickle resize = 0.4 my_dict = pickle.load( open('Databases/data_lfwa_'+str(resize)+'_7classes.pkl', "rb" ), encoding='latin1' ) X = my_dict['X'].astype(float) Y = (my_dict['Y_cat']).astype(int) h = my_dict['h'] w = my_dict['w'] target_names = my_dict['target'] from sklearn.model_selection import train_test_split X_tr, X_tst, Y_tr, Y_tst = train_test_split(X, Y.astype(int), test_size=0.3, random_state = 31) n_samples = X.shape[0] n_features = X.shape[1] n_classes = target_names.shape[0] print("Total dataset size:") print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) print("n_classes: %d" % n_classes) ``` Here we can see what the images we have downloaded look like. 
``` n_col, n_row = 6,3 plt.figure(figsize=(1.8 * n_col, 2.4 * n_row)) plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) for i in range(n_row * n_col): plt.subplot(n_row, n_col, i + 1) plt.imshow(X_tst[i,:].reshape((h, w)), cmap=plt.cm.gray) plt.xticks(()) plt.yticks(()) ``` At this point, the model can be trained with the train and test splits. ``` myKc = 50 # number of latent features max_it = int(5*1e4) # maximum number of iterations tol = 1e-7 # tolerance of the stopping condition (abs(1 - L[-2]/L[-1]) < tol) prune = 1 # whether to prune the irrelevant latent features myModel = sshiba.SSHIBA(myKc, prune) X0_tr = myModel.struct_data(X_tr, 0, 0) X1_tr = myModel.struct_data(Y_tr, 1, 0) X0_tst = myModel.struct_data(X_tst, 0, 0) X1_tst = myModel.struct_data(Y_tst, 1, 0) myModel.fit(X0_tr, X1_tr, max_iter = max_it, tol = tol, Y_tst = X1_tst, X_tst = X0_tst, AUC = 1) print('Final AUC %.3f' %(myModel.AUC[-1])) ``` Now the model is trained, we can visualize the results, seeing how the image look like as well as both the true and predicted label for each one of them. 
``` def plot_gallery(images, titles, h, w, n_row=3, n_col=6): """Helper function to plot a gallery of portraits""" plt.figure(figsize=(1.8 * n_col, 2.4 * n_row)) plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) for i in range(n_row * n_col): plt.subplot(n_row, n_col, i + 1) plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray) plt.title(titles[i], size=12) plt.xticks(()) plt.yticks(()) # plot the result of the prediction on a portion of the test set def title(y_pred, y_test, target_names, i): pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1] true_name = target_names[y_test[i]].rsplit(' ', 1)[-1] return 'predicted: %s\ntrue: %s' % (pred_name, true_name) def plot_W(W): plt.figure() plt.imshow((np.abs(W)), aspect=W.shape[1]/W.shape[0]) plt.colorbar() plt.title('W') plt.ylabel('features') plt.xlabel('K') y_pred = myModel.predict([0],1,0, X0_tst) prediction_titles = [title(y_pred, Y_tst, target_names, i) for i in range(y_pred.shape[0])] plot_gallery(X_tst, prediction_titles, h, w) ``` ## LFW Dataset with Sparsity Finally, we can use the sparse version of the method to make the model learn not only which latent features are relevant but also which features are the more relevant as well in order to learn the labels given. To do so, we just need to train the model as we did before, specifying which views are to be learned with the before mentioned sparsity. 
``` myKc = 50 # number of latent features max_it = int(5*1e4) # maximum number of iterations tol = 1e-7 # tolerance of the stopping condition (abs(1 - L[-2]/L[-1]) < tol) prune = 1 # whether to prune the irrelevant latent features myModel = sshiba.SSHIBA(myKc, prune) X0_tr = myModel.struct_data(X_tr, 0, 1) X1_tr = myModel.struct_data(Y_tr, 1, 0) X0_tst = myModel.struct_data(X_tst, 0, 1) X1_tst = myModel.struct_data(Y_tst, 1, 0) myModel.fit(X0_tr, X1_tr, max_iter = max_it, tol = tol, Y_tst = X1_tst, X_tst = X0_tst, AUC = 1) print('Final AUC %.3f' %(myModel.AUC[-1])) import pickle my_dict = {} my_dict['models'] = myModel filename = 'Models_categorical_sparse' with open(filename+'.pkl', 'wb') as output: pickle.dump(my_dict, output, pickle.HIGHEST_PROTOCOL) import pickle filename = 'Models_categorical_sparse' my_dict = pickle.load( open( filename+'.pkl', "rb" )) myModel = my_dict['models'] y_pred = myModel.predict([0],1,0, X0_tst) prediction_titles = [title(y_pred, Y_tst, target_names, i) for i in range(y_pred.shape[0])] plot_gallery(X_tst, prediction_titles, h, w) ``` ## Visualization of the results ### Vector $\gamma$ Once the model is trained, we can visualize the variable $\gamma$ to see which parts of the image are considered as relevant and which ones irrelevant. ``` q = myModel.q_dist gamma = q.gamma_mean(0) ax1 = plt.subplot(2, 1, 1) plt.title('Feature selection analysis') plt.hist(gamma,100) ax2 = plt.subplot(2, 1, 2) plt.plot(gamma,'.') plt.ylabel('gamma') plt.xlabel('feature') plt.show() ``` ### Matrix $W$ Now we can see as we did in the _sparse notebook_ how the model is learning matrix $W$ to trasnform $X$ to the latent space, $Z$. ``` pos_ord_var=np.argsort(gamma)[::-1] plot_W(q.W[0]['mean'][pos_ord_var,:]) ``` ### Vector $\gamma$ mask visualization Finally, as the data we are working with are images, we could visualize the values the variable $\gamma$ takes as an image to see the relevance each pixel has. 
In our case, we can see that the method is capable of finding the most relevant features to describe the different attributes we have as labels. ``` q = myModel.q_dist gamma = q.gamma_mean(0) plt.figure(figsize=(3, 5)) plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) plt.imshow(gamma.reshape((h, w)), cmap=plt.cm.gray) plt.xticks(()) plt.yticks(()) plt.title('Gamma mask') plt.show() ``` ### Matrix $W$ masks visualization Conversely, we can plot the projection matrix W to see the how the latent features are learning the different parts of face learning. ``` alpha = q.alpha_mean(0) pos_ord_var = np.argsort(alpha) W_0 = q.W[0]['mean'][:,pos_ord_var] Wface_titles = ["Latent feature %d" % i for i in range(W_0.shape[0])] n_col, n_row = 6,8 plt.figure(figsize=(1.8 * n_col, 2.4 * n_row)) plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) for i in range(n_row * n_col): plt.subplot(n_row, n_col, i + 1) plt.imshow(W_0[:,i].reshape((h, w)), cmap=plt.cm.gray) plt.title(Wface_titles[i], size=12) plt.xticks(()) plt.yticks(()) plt.show() ```
github_jupyter
# Single NFW profile Here we demonstrate most of the NFW functionality using a single NFW profile. ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np from profiley.nfw import NFW mass = 1e14 concentration = 4 redshift = 0.5 nfw = NFW(mass, concentration, redshift) print(nfw) ``` Note that the profile attributes are always arrays, even if scalars are passed to it. The first thing to look at is the 3-dimensional density profile. For all profiles we need to pass the distances at which these will be computed, as a 1d array, so let's define that first. These distances must be in Mpc. ``` R = np.logspace(-2, 1, 100) ``` With that, getting the density profile is as simple as ``` rho = nfw.profile(R) ``` That's it! ``` plt.loglog(R, rho) plt.xlabel('$r$ (Mpc)', fontsize=16) plt.ylabel(r'$\rho(r)$ (M$_\odot$/Mpc$^3$)', fontsize=16); ``` Similarly, we can obtain the projected surface density or the excess surface density (the weak lensing observable): ``` sigma = nfw.surface_density(R) esd = nfw.excess_surface_density(R) fig, axes = plt.subplots(figsize=(14,5), ncols=2) axes[0].plot(R, sigma) axes[0].set_ylabel(r'$\Sigma(R)$ (M$_\odot$/Mpc$^2$)', fontsize=16) axes[1].plot(R, esd) axes[1].set_ylabel(r'$\Delta\Sigma(R)$ (M$_\odot$/Mpc$^2$)', fontsize=16) for ax in axes: ax.set_xlabel('$R$ (Mpc)', fontsize=16) ax.set_xscale('log') ax.set_yscale('log') ``` The ESD can also be calculated "manually": ``` barsigma = nfw.enclosed_surface_density(R) esd_manual = barsigma - sigma np.allclose(esd, esd_manual) ``` We can also calculate the convergence profile for a given source redshift: ``` z_source = 1.0 kappa = nfw.convergence(R, z_source) plt.loglog(R, kappa) plt.xlabel('$R$ (Mpc)', fontsize=16) plt.ylabel(f'$\kappa(R)$ ($z_s={z_source}$)', fontsize=16); ``` Finally, we can also obtain offset profiles like so: ``` Roff = np.linspace(0.2, 1, 5) sigma_off = nfw.offset_surface_density(R, Roff) sigma_off.shape for Ri, sigma_i in zip(Roff, sigma_off): plt.loglog(R, 
sigma_i[0], label=rf'$R_\mathrm{{off}}={Ri:.1f}$ Mpc') plt.plot(R, sigma, 'k-') plt.legend() plt.xlabel('$R$ (Mpc)', fontsize=16) plt.ylabel(r'$\Sigma(R)$ (M$_\odot$/Mpc$^2$)', fontsize=16); ``` There is a similar `offset_excess_surface_density` method, as well as `offset_density` and `ofset_enclosed_density`, though these would not be used so often. The offset convergence has a different signature: ``` kappa_off = nfw.convergence(R, z_source, Roff=Roff) for Ri, kappa_i in zip(Roff, kappa_off): plt.loglog(R, kappa_i[0], label=rf'$R_\mathrm{{off}}={Ri:.1f}$ Mpc') plt.plot(R, kappa, 'k-') plt.legend() plt.xlabel('$R$ (Mpc)', fontsize=16) plt.ylabel(r'$\kappa(R)$', fontsize=16); ```
github_jupyter
# GraviPy - tutorial ## _Coordinates_ and _MetricTensor_ To start working with the gravipy package you must load the package and initialize a pretty-printing mode in Jupyter environment ``` from gravipy.tensorial import * # import GraviPy package from sympy import init_printing import inspect init_printing() ``` The next step is to choose coordinates and define a metric tensor of a particular space. Let's take, for example, the Schwarzschild metric - vacuum solution to the Einstein's field equations which describes the gravitational field of a spherical mass distribution. ``` # define some symbolic variables t, r, theta, phi, M = symbols('t, r, \\theta, \phi, M') # create a coordinate four-vector object instantiating # the Coordinates class x = Coordinates('\chi', [t, r, theta, phi]) # define a matrix of a metric tensor components Metric = diag(-(1-2*M/r), 1/(1-2*M/r), r**2, r**2*sin(theta)**2) # create a metric tensor object instantiating the MetricTensor class g = MetricTensor('g', x, Metric) ``` Each component of any tensor object, can be computed by calling the appropriate instance of the _GeneralTensor_ subclass with indices as arguments. The covariant indices take positive integer values (1, 2, ..., dim). The contravariant indices take negative values (-dim, ..., -2, -1). ``` x(-1) g(1, 1) x(1) ``` Matrix representation of a tensor can be obtained in the following way ``` x(-All) g(All, All) g(All, 4) ``` ## Predefined _Tensor_ Classes The GraviPy package contains a number of the _Tensor_ subclasses that can be used to calculate a tensor components. The _Tensor_ subclasses available in the current version of GraviPy package are ``` print([cls.__name__ for cls in vars()['Tensor'].__subclasses__()]) ``` ### The _Christoffel_ symbols The first one is the _Christoffel_ class that represents Christoffel symbols of the first and second kind. 
(Note that the Christoffel symbols are not tensors) Components of the _Christoffel_ objects are computed from the below formula $$ \Gamma_{\rho \mu \nu} = g_{\rho \sigma}\Gamma^{\sigma}_{\ \mu \nu} = \frac{1}{2}(g_{\rho \mu, \nu} + g_{\rho \nu, \mu} - g_{\mu \nu, \rho})$$ Let's create an instance of the _Christoffel_ class for the Schwarzschild metric g and compute some components of the object ``` Ga = Christoffel('Ga', g) Ga(1, 2, 1) ``` Each component of the _Tensor_ object is computed only once due to memoization procedure implemented in the _Tensor_ class. Computed value of a tensor component is stored in _components_ dictionary (attribute of a _Tensor_ instance) and returned by the next call to the instance. ``` Ga.components ``` The above dictionary consists of two elements because the symmetry of the Christoffel symbols is implemented in the _Christoffel_ class. If necessary, you can clear the _components_ dictionary ``` Ga.components = {} Ga.components ``` The _Matrix_ representation of the Christoffel symbols is the following ``` Ga(All, All, All) ``` You can get help on any of classes mentioned before by running the command ``` help(Christoffel) ``` Try also "_Christoffel?_" and "_Christoffel??_" ### The _Ricci_ tensor $$ R_{\mu \nu} = \frac{\partial \Gamma^{\sigma}_{\ \mu \nu}}{\partial x^{\sigma}} - \frac{\partial \Gamma^{\sigma}_{\ \mu \sigma}}{\partial x^{\nu}} + \Gamma^{\sigma}_{\ \mu \nu}\Gamma^{\rho}_{\ \sigma \rho} - \Gamma^{\rho}_{\ \mu \sigma}\Gamma^{\sigma}_{\ \nu \rho} $$ ``` Ri = Ricci('Ri', g) Ri(All, All) ``` Contraction of the _Ricci_ tensor $R = R_{\mu}^{\ \mu} = g^{\mu \nu}R_{\mu \nu}$ ``` Ri.scalar() ``` ### The _Riemann_ tensor $$ R_{\mu \nu \rho \sigma} = \frac{\partial \Gamma_{\mu \nu \sigma}}{\partial x^{\rho}} - \frac{\partial \Gamma_{\mu \nu \rho}}{\partial x^{\sigma}} + \Gamma^{\alpha}_{\ \nu \sigma}\Gamma_{\mu \rho \alpha} - \Gamma^{\alpha}_{\ \nu \rho}\Gamma_{\mu \sigma \alpha} - \frac{\partial g_{\mu \alpha}}{\partial 
x^{\rho}}\Gamma^{\alpha}_{\ \nu \sigma} + \frac{\partial g_{\mu \alpha}}{\partial x^{\sigma}}\Gamma^{\alpha}_{\ \nu \rho} $$ ``` Rm = Riemann('Rm', g) ``` Some nonzero components of the _Riemann_ tensor are ``` from IPython.display import display, Math from sympy import latex for i, j, k, l in list(variations(range(1, 5), 4, True)): if Rm(i, j, k, l) != 0 and k<l and i<j: display(Math('R_{'+str(i)+str(j)+str(k)+str(l)+'} = '+ latex(Rm(i, j, k, l)))) ``` You can also display the matrix representation of the tensor ``` # Rm(All, All, All, All) ``` Contraction of the _Riemann_ tensor $R_{\mu \nu} = R^{\rho}_{\ \mu \rho \nu} $ ``` ricci = sum([Rm(i, All, k, All)*g(-i, -k) for i, k in list(variations(range(1, 5), 2, True))], zeros(4)) ricci.simplify() ricci ``` ### The _Einstein_ tensor $$ G_{\mu \nu} = R_{\mu \nu} - \frac{1}{2}g_{\mu \nu}R $$ ``` G = Einstein('G', Ri) G(All, All) ``` ### _Geodesics_ $$ w_{\mu} = \frac{Du_{\mu}}{d\tau} = \frac{d^2x_{\mu}}{d\tau^2} - \frac{1}{2}g_{\rho \sigma, \mu} \frac{dx^{\rho}}{d\tau}\frac{dx^{\sigma}}{d\tau} $$ ``` tau = Symbol('\\tau') w = Geodesic('w', g, tau) w(All).transpose() ``` Please note that instantiation of a _Geodesic_ class for the metric $g$ automatically turns on a _Parametrization_ mode for the metric $g$. Then all coordinates are functions of a world line parameter $\tau$ ``` Parametrization.info() x(-All) g(All, All) ``` _Parametrization_ mode can be deactivated by typing ``` Parametrization.deactivate(x) Parametrization.info() x(-All) g(All, All) ``` ## Derivatives ### Partial derivative All instances of a _GeneralTensor_ subclasses inherits _partialD_ method which works exactly the same way as SymPy _diff_ method. 
``` T = Tensor('T', 2, g) T(1, 2) T.partialD(1, 2, 1, 3) # The first two indices belongs to second rank tensor T T(1, 2).diff(x(-1), x(-3)) ``` The only difference is that computed value of _partialD_ is saved in "_partial_derivative_components_" dictionary an then returned by the next call to the _partialD_ method. ``` T.partial_derivative_components ``` ### Covariant derivative Covariant derivative components of the tensor ___T___ can be computed by the covariantD method from the formula $$ \nabla_{\sigma} T_{\mu}^{\ \nu} = T_{\mu \ ;\sigma}^{\ \nu} = \frac{\partial T_{\mu}^{\ \nu}}{\partial x^{\sigma}} - \Gamma^{\rho}_{\ \mu \sigma}T_{\rho}^{\ \nu} + \Gamma^{\nu}_{\ \rho \sigma}T_{\mu}^{\ \rho}$$ Let's compute some covariant derivatives of a scalar field C ``` C = Tensor('C', 0, g) C() C.covariantD(1) C.covariantD(2, 3) ``` All _covariantD_ components of every _Tensor_ object are also memoized ``` for k in C.covariant_derivative_components: display(Math(str(k) + ': ' + latex(C.covariant_derivative_components[k]))) C.covariantD(1, 2, 3) ``` Proof that the covariant derivative of the metric tensor $g$ is zero ``` not any([g.covariantD(i, j, k).simplify() for i, j, k in list(variations(range(1, 5), 3, True))]) ``` Bianchi identity in the Schwarzschild spacetime $$ R_{\mu \nu \sigma \rho ;\gamma} + R_{\mu \nu \gamma \sigma ;\rho} + R_{\mu \nu \rho \gamma ;\sigma} = 0$$ ``` not any([(Rm.covariantD(i, j, k, l, m) + Rm.covariantD(i, j, m, k, l) + Rm.covariantD(i, j, l, m, k)).simplify() for i, j, k, l, m in list(variations(range(1, 5), 5, True))]) ``` ## User-defined tensors To define a new scalar/vector/tensor field in some space you should __extend__ the _Tensor_ class or __create an instance__ of the _Tensor_ class. 
### _Tensor_ class instantiation Let's create a third-rank tensor field living in the Schwarzshild spacetime as an instance of the _Tensor_ class ``` S = Tensor('S', 3, g) ``` Until you define (override) the _\_compute\_covariant\_component_ method of the __S__ object, all of $4^3$ components are arbitrary functions of coordinates ``` S(1, 2, 3) inspect.getsourcelines(T._compute_covariant_component) ``` Let's assume that tensor __S__ is the commutator of the covariant derivatives of some arbitrary vector field __V__ and create a new _\_compute\_covariant\_component_ method for the object __S__ ``` V = Tensor('V', 1, g) V(All) def S_new_method(idxs): # definition component = (V.covariantD(idxs[0], idxs[1], idxs[2]) - V.covariantD(idxs[0], idxs[2], idxs[1])).simplify() S.components.update({idxs: component}) # memoization return component S._compute_covariant_component = S_new_method # _compute_covariant_component method was overriden S(1, 1, 3) ``` One can check that the well known formula is correct $$ V_{\mu ;\nu \rho} - V_{\mu ;\rho \nu} = R^{\sigma}_{\ \mu \nu \rho}V_{\sigma} $$ ``` zeros = reduce(Matrix.add, [Rm(-i, All, All, All)*V(i) for i in range(1, 5)]) - S(All, All, All) zeros.simplify() zeros ``` Another way of tensor creation is to make an instance of the _Tensor_ class with components option. Tensor components stored in _Matrix_ object are writen to the _components_ dictionary of the instance by this method. ``` Z = Tensor('Z', 3, g, components=zeros, components_type=(1, 1, 1)) not any(Z.components.values()) ``` ### _Tensor_ class extension As an example of the _Tensor_ class extension you can get the source code of any of the predefined _Tensor_ subclasses ``` print([cls.__name__ for cls in vars()['Tensor'].__subclasses__()]) inspect.getsourcelines(Christoffel) ```
github_jupyter
``` import re import numpy as np import transformers as ppb #!python -m pip install transformers import torch import pickle import torch.nn as nn import torch.optim as optim from torch.autograd import Variable from torch.utils import data from torchsummary import summary import warnings warnings.filterwarnings('ignore') def preprocess_regex(text): # Applies preprocessing on text #remove leading & end white spaces and convert text to lowercase text = text.strip().lower() # remove HTML tags text = re.sub(r'<.*?>', '', text) # remove punctuation marks punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~''' for i in text: if i in punctuations: text = text.replace(i, "") # remove the characters [\], ['] and ["] text = re.sub(r"\\", "", text) text = re.sub(r"\'", "", text) text = re.sub(r"\"", "", text) #remove number text = re.sub(r"\d+", "", text) return text stop_words = ["from", "to", "subject", "title", "request", "looking", "look", "forward", "cheers", "regards", "thank", "thanks", "hi", "all", "since", "mentioned", "free", "ourselves", "hers", "between", "yourself", "but", "again", "there", "about", "once", "during", "out", "very", "having", "with", "they", "own", "an", "be", "some", "for", "do", "its", "yours", "such", "into", "of", "most", "itself", "other", "off", "is", "s", "am", "or", "who", "as", "from", "him", "each", "the", "themselves", "until", "below", "are", "we", "these", "your", "his", "through", "don", "nor", "me", "were", "her", "more", "himself", "this", "down", "should", "our", "their", "while", "above", "both", "up", "to", "ours", "had", "she", "all", "no", "when", "at", "any", "before", "them", "same", "and", "been", "have", "in", "will", "on", "does", "yourselves", "then", "that", "because", "what", "over", "why", "so", "can", "did", "not", "now", "under", "he", "you", "herself", "has", "just", "where", "too", "only", "myself", "which", "those", "i", "after", "few", "whom", "t", "being", "if", "theirs", "my", "against", "a", "by", "doing", "it", 
"how", "further", "was", "here", "than"] MAX_TOKENIZE_LEN = 512 def remove_stop_words(input_str): tokenized_words = input_str.split() filtered_words = [w for w in tokenized_words if not w in stop_words] output = " ".join(filtered_words) if len(output) > MAX_TOKENIZE_LEN: return output[0: MAX_TOKENIZE_LEN] return output #return as string print('Load index label') label_path = "labelclass.pickle" labelhandler = open(label_path, 'rb') labelhandler = pickle.load(labelhandler) # For DistilBERT: model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased') ## uncomment below for BERT instead of distilBERT #model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, 'bert-base-uncased') # Load pretrained model/tokenizer tokenizer = tokenizer_class.from_pretrained(pretrained_weights) model = model_class.from_pretrained(pretrained_weights) # Load model from keras.models import load_model classifier = load_model('bert-embeddings-keras-mlp.h5') # D:\\Users\\chiawei\\konduit\\Github\\newsgroup_data\\20news-bydate\\20news-bydate-test\\alt.atheism\\53257 alt.atheism # D:\\Users\\chiawei\\konduit\\Github\\newsgroup_data\\20news-bydate\\20news-bydate-test\\comp.sys.ibm.pc.hardware\\60817 comp.sys.ibm.pc.hardware test_file_input = 'D:\\Users\\chiawei\\konduit\\Github\\newsgroup_data\\20news-bydate\\20news-bydate-test\\comp.sys.ibm.pc.hardware\\60817' with open(test_file_input, "r") as file_iterator: raw_input = file_iterator.read() processed_input = remove_stop_words(preprocess_regex(raw_input)) tokenized_test_data = tokenizer.encode(processed_input, add_special_tokens=True) max_len = 512 max_len_add = max_len if len(tokenized_test_data) > max_len: max_len_add = len(tokenized_test_data) padded_test_data = np.array([tokenized_test_data + [0]*(max_len_add-len(tokenized_test_data))]) attention_test_data = np.where(padded_test_data != 0, 1, 0) input_test_ids = torch.tensor(padded_test_data) 
attention_test_mask = torch.tensor(attention_test_data) input_test_ids = torch.tensor(input_test_ids).to(torch.int64) with torch.no_grad(): last_hidden_states = model(input_test_ids, attention_mask=attention_test_mask) test_feature = last_hidden_states[0][:,0,:].numpy() test_output = classifier.predict(test_feature) local_index = int(np.argmax(test_output, 1)[0]) print("Class: {}".format(labelhandler[local_index])) print("Probabilities: {}".format(np.max(test_output))) import io import logging import time from konduit.load import client_from_file logging.basicConfig(level='DEBUG') logging.info("Test") client = client_from_file("config.yaml") responses = [] start = time.time() for i in range(10): response = client.predict({"default": test_feature}) responses.append(response) end = time.time() print("%f seconds elapsed for %d requests (%d RPS)" % (end - start, len(responses), (10.0 / (end - start)))) response = client.predict({"default": test_feature}) results = response["output"]["probabilities"] index = int(np.argmax(response['output']['probabilities'], 1)[0]) print("Class: {}".format(labelhandler[index])) print("Probabilities: {}".format(np.max(response['output']['probabilities']))) ```
github_jupyter
**Connect With Me in Linkedin :-** https://www.linkedin.com/in/dheerajkumar1997/ ## One Hot Encoding - variables with many categories We observed in the previous lecture that if a categorical variable contains multiple labels, then by re-encoding them using one hot encoding we will expand the feature space dramatically. See below: ``` import pandas as pd import numpy as np # let's load the mercedes benz dataset for demonstration, only the categorical variables data = pd.read_csv('mercedesbenz.csv', usecols=['X1', 'X2', 'X3', 'X4', 'X5', 'X6']) data.head() # let's have a look at how many labels each variable has for col in data.columns: print(col, ': ', len(data[col].unique()), ' labels') # let's examine how many columns we will obtain after one hot encoding these variables pd.get_dummies(data, drop_first=True).shape ``` We can see that from just 6 initial categorical variables, we end up with 117 new variables. These numbers are still not huge, and in practice we could work with them relatively easily. However, in business datasets and also other Kaggle or KDD datasets, it is not unusual to find several categorical variables with multiple labels. And if we use one hot encoding on them, we will end up with datasets with thousands of columns. What can we do instead? In the winning solution of the KDD 2009 cup: "Winning the KDD Cup Orange Challenge with Ensemble Selection" (http://www.mtome.com/Publications/CiML/CiML-v3-book.pdf), the authors limit one hot encoding to the 10 most frequent labels of the variable. This means that they would make one binary variable for each of the 10 most frequent labels only. This is equivalent to grouping all the other labels under a new category, that in this case will be dropped. Thus, the 10 new dummy variables indicate if one of the 10 most frequent labels is present (1) or not (0) for a particular observation. How can we do that in python? 
``` # let's find the top 10 most frequent categories for the variable X2 data.X2.value_counts().sort_values(ascending=False).head(10) # let's make a list with the most frequent categories of the variable top_10 = [x for x in data.X2.value_counts().sort_values(ascending=False).head(10).index] top_10 # and now we make the 10 binary variables for label in top_10: data[label] = np.where(data['X2']==label, 1, 0) data[['X2']+top_10].head(10) # get whole set of dummy variables, for all the categorical variables def one_hot_top_x(df, variable, top_x_labels): # function to create the dummy variables for the most frequent labels # we can vary the number of most frequent labels that we encode for label in top_x_labels: df[variable+'_'+label] = np.where(data[variable]==label, 1, 0) # read the data again data = pd.read_csv('mercedesbenz.csv', usecols=['X1', 'X2', 'X3', 'X4', 'X5', 'X6']) # encode X2 into the 10 most frequent categories one_hot_top_x(data, 'X2', top_10) data.head() # find the 10 most frequent categories for X1 top_10 = [x for x in data.X1.value_counts().sort_values(ascending=False).head(10).index] # now create the 10 most frequent dummy variables for X1 one_hot_top_x(data, 'X1', top_10) data.head() ``` ### One Hot encoding of top variables ### Advantages - Straightforward to implement - Does not require hrs of variable exploration - Does not expand massively the feature space (number of columns in the dataset) ### Disadvantages - Does not add any information that may make the variable more predictive - Does not keep the information of the ignored labels Because it is not unusual that categorical variables have a few dominating categories and the remaining labels add mostly noise, this is a quite simple and straightforward approach that may be useful on many occasions. It is worth noting that the top 10 variables is a totally arbitrary number. You could also choose the top 5, or top 20. This modelling was more than enough for the team to win the KDD 2009 cup. 
They did do some other powerful feature engineering as we will see in following lectures, that improved the performance of the variables dramatically. **Connect With Me in Linkedin :-** https://www.linkedin.com/in/dheerajkumar1997/
github_jupyter
# GSEA analysis on leukemia dataset ``` %load_ext autoreload %autoreload 2 from gsea import * import numpy as np %pylab %matplotlib inline ``` ## Load data ``` genes, D, C = read_expression_file("data/leukemia.txt") gene_sets, gene_set_names = read_genesets_file("data/pathways.txt", genes) gene_set_hash = {} for i in range(len(gene_sets)): gene_set_hash[gene_set_names[i][0]] = {'indexes':gene_sets[i],'desc':gene_set_names[i][1]} # verify that the dimensions make sense len(genes),D.shape,len(C) ``` ## Enrichment score calculations We graphically present the calculation of ES. ``` L,r = rank_genes(D,C) ``` See if the first genes in *L* are indeed correlated with *C* ``` scatter(D[L[1],:],C) scatter(D[L[-1],:],C) scatter(D[L[1000],:],C) ``` ## Graphical ilustration of ES calculations ``` p_exp = 1 def plot_es_calculations(name, L, r): S = gene_set_hash[name]['indexes'] N = len(L) S_mask = np.zeros(N) S_mask[S] = 1 # reorder gene set mask S_mask = S_mask[L] N_R = sum(abs(r*S_mask)**p_exp) P_hit = np.cumsum(abs(r*S_mask)**p_exp)/N_R if N_R!=0 else np.zeros_like(S_mask) N_H = len(S) P_mis = np.cumsum((1-S_mask))/(N-N_H) if N!=N_H else np.zeros_like(S_mask) idx = np.argmax(abs(P_hit - P_mis)) print("ES =", P_hit[idx]-P_mis[idx]) f, axarr = plt.subplots(3, sharex=True) axarr[0].plot(S_mask) axarr[0].set_title('gene set %s' % name) axarr[1].plot(r) axarr[1].set_title('correlation with phenotype') axarr[2].plot(P_hit-P_mis) axarr[2].set_title('random walk') L,r = rank_genes(D,C) plot_es_calculations('CBF_LEUKEMIA_DOWNING_AML', L, r) ``` ## Random phenotype labels Now let's assign phenotype labels randomly. Is the ES much different? 
``` N, k = D.shape pi = np.array([np.random.randint(0,2) for i in range(k)]) L, r = rank_genes(D,pi) print(pi) plot_es_calculations('CBF_LEUKEMIA_DOWNING_AML', L, r) ``` ## GSEA analysis ``` # use `n_jobs=-1` to use all cores %time order, NES, p_values = gsea(D, C, gene_sets, n_jobs=-1) from IPython.display import display, Markdown s = "| geneset | NES | p-value | number of genes in geneset |\n |-------|---|---|---|\n " for i in range(len(order)): s = s + "| **%s** | %.3f | %.7f | %d |\n" % (gene_set_names[order[i]][0], NES[i], p_values[i], len(gene_sets[order[i]])) display(Markdown(s)) ``` ## Multiple Hypotesis testing We present two example gene sets. One with a high *NES* and low *p-value* and one with a low *NES* and a high *p-value*. We plot the histograms of null distribution for ES. ``` name = 'DNA_DAMAGE_SIGNALLING' L,r = rank_genes(D,C) plot_es_calculations(name, L, r) n = 1000 S = gene_set_hash[name]['indexes'] L, r = rank_genes(D,C) ES = enrichment_score(L,r,S) ES_pi = np.zeros(n) for i in range(n): pi = np.array([np.random.randint(0,2) for i in range(k)]) L, r = rank_genes(D,pi) ES_pi[i] = enrichment_score(L,r,S) hist(ES_pi,bins=100) plot([ES,ES],[0,20],'r-',label="ES(S)") title("Histogram of ES vlues for random phenotype labels.\nRed line is ES for the selected gene set.") name = 'tcrPathway' L,r = rank_genes(D,C) plot_es_calculations(name, L, r) n = 1000 S = gene_set_hash[name]['indexes'] L, r = rank_genes(D,C) ES = enrichment_score(L,r,S) ES_pi = np.zeros(n) for i in range(n): pi = np.array([np.random.randint(0,2) for i in range(k)]) L, r = rank_genes(D,pi) ES_pi[i] = enrichment_score(L,r,S) hist(ES_pi,bins=100) plot([ES,ES],[0,20],'r-',label="ES(S)") title("Histogram of ES vlues for random phenotype labels.\nRed line is ES for the selected gene set.") ``` ## Performance optimizations ``` %timeit L,R = rank_genes(D,C) %timeit ES = enrichment_score(L,r,S) %prun order, NES, p_values = gsea(D, C, gene_sets) ```
github_jupyter
# Goal * Follow-up to `atomIncorp_taxaIncorp` simulation run. * Investigating factors that influenced accuracy * e.g., pre-fractionation abundance or G+C of fragments # Setting parameters ``` workDir = '/home/nick/notebook/SIPSim/dev/bac_genome1147/atomIncorp_taxaIncorp/' frag_info_file = '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags_kde_info.txt' ``` ## Init ``` import os import glob import itertools import nestly %load_ext rpy2.ipython %load_ext pushnote %%R library(ggplot2) library(dplyr) library(tidyr) library(gridExtra) ``` ### BD min/max ``` ## min G+C cutoff min_GC = 13.5 ## max G+C cutoff max_GC = 80 ## max G+C shift max_13C_shift_in_BD = 0.036 min_BD = min_GC/100.0 * 0.098 + 1.66 max_BD = max_GC/100.0 * 0.098 + 1.66 max_BD = max_BD + max_13C_shift_in_BD print 'Min BD: {}'.format(min_BD) print 'Max BD: {}'.format(max_BD) ``` # Reading in all necessary files # Comm files ``` F = os.path.join(workDir, '*', '*', '*', 'comm.txt') files = glob.glob(F) print len(files) %%R -i files df_comm = list() for (f in files){ df.tmp = read.delim(f, sep='\t') ff = strsplit(f, '/') %>% unlist df.tmp$percIncorp = ff[9] df.tmp$percTaxa = ff[10] df.tmp$sim_rep = ff[11] f_name = ff[12] df_comm[[f]] = df.tmp } df_comm = do.call(rbind, df_comm) rownames(df_comm) = 1:nrow(df_comm) df_comm %>% head(n=3) ``` ## Classification data ``` F = os.path.join(workDir, '*', '*', '*', '*_data.txt') files = glob.glob(F) print len(files) %%R -i files cols = c('library', 'taxon', 'min', 'q25', 'mean', 'median', 'q75', 'max', 'incorp.known', 'incorp.pred') df_data = list() for (f in files){ df.tmp = read.delim(f, sep='\t') df.tmp = df.tmp[,cols] ff = strsplit(f, '/') %>% unlist df.tmp$percIncorp = ff[9] df.tmp$percTaxa = ff[10] df.tmp$sim_rep = ff[11] df.tmp$method = gsub('-cMtx_data.txt', '', ff[12]) f_name = ff[12] df_data[[f]] = df.tmp } df_data = do.call(rbind, df_data) rownames(df_data) = 1:nrow(df_data) df_data %>% head(n=3) ``` ## Fragment GC & length info ``` %%R -i 
frag_info_file df_info = read.delim(frag_info_file, sep='\t') df_info %>% head(n=3) ``` # Formatting table ``` %%R clsfy = function(guess,known){ if(is.na(guess) | is.na(known)){ return(NA) } if(guess == TRUE){ if(guess == known){ return('True positive') } else { return('False positive') } } else if(guess == FALSE){ if(guess == known){ return('True negative') } else { return('False negative') } } else { stop('Error: true or false needed') } } %%R # comm & classificatino join.on = c( 'library' = 'library', 'taxon_name' = 'taxon', 'percIncorp' = 'percIncorp', 'percTaxa' = 'percTaxa', 'sim_rep' = 'sim_rep') df.j = inner_join(df_comm, df_data, join.on) %>% filter(library %in% c(2,4,6)) %>% mutate(cls = mapply(clsfy, incorp.pred, incorp.known)) # frag info df.j = inner_join(df.j, df_info, c('taxon_name'='taxon_ID')) df.j %>% head(n=3) %%R # renaming method rename = data.frame(method = c('DESeq2', 'heavy', 'qSIP'), method_new = c('HR-SIP', 'Heavy-SIP', 'qSIP')) df.j = inner_join(df.j, rename, c('method'='method')) %>% select(-method) %>% rename('method' = method_new) # reorder as.Num = function(x) x %>% as.character %>% as.numeric df.j$percTaxa = reorder(df.j$percTaxa, df.j$percTaxa %>% as.Num) df.j$percIncorp = reorder(df.j$percIncorp, df.j$percIncorp %>% as.Num) df.j %>% head(n=3) ``` ## accuracy ~ abundance ``` %%R -w 800 -h 600 df.j.f = df.j %>% filter(KDE_ID == 1, cls != 'True negative') ggplot(df.j.f, aes(cls, rel_abund_perc, fill=method)) + geom_boxplot() + facet_grid(percTaxa ~ percIncorp) + scale_y_log10() + labs(y='Pre-fractionation\nrelative abundance (%)') + theme_bw() + theme( text = element_text(size=16), axis.text.x = element_text(angle=45, hjust=1), axis.title.x = element_blank() ) ``` ## accuracy ~ fragment BD ``` %%R -w 800 -h 600 ggplot(df.j.f, aes(cls, median.y, fill=method)) + geom_boxplot() + facet_grid(percTaxa ~ percIncorp) + labs(y='Median fragment BD (g ml^-1)') + theme_bw() + theme( text = element_text(size=16), axis.text.x = 
element_text(angle=45, hjust=1), axis.title.x = element_blank() ) ```
github_jupyter
``` # Зависимости import pandas as pd import numpy as np import matplotlib.pyplot as plt import random import os from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler, OneHotEncoder from sklearn.compose import ColumnTransformer from sklearn.linear_model import LogisticRegression from sklearn.metrics import f1_score import tensorflow as tf from keras.models import Sequential from keras.layers import Dense, Dropout # Инициализируем все известные генераторы случаынйх чисел / Setting all known random seeds my_code = "Margaryan" seed_limit = 2 ** 32 my_seed = int.from_bytes(my_code.encode(), "little") % seed_limit os.environ['PYTHONHASHSEED']=str(my_seed) random.seed(my_seed) np.random.seed(my_seed) tf.compat.v1.set_random_seed(my_seed) session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf) tf.compat.v1.keras.backend.set_session(sess) # Читаем данные из файла train_data = pd.read_csv("../datasets/iris_train.csv") train_data.head() # Определим размер валидационной выборки val_size = round(0.2*len(train_data)) print(val_size) # Создадим обучающую и валидационную выборки random_state = my_seed train, val = train_test_split(train_data, test_size=val_size, random_state=random_state) print(len(train), len(val)) # Значения в числовых столбцах преобразуем к отрезку [0,1]. # Для настройки скалировщика используем только обучающую выборку. 
num_columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width'] ord_columns = ['species'] ct = ColumnTransformer(transformers=[('numerical', MinMaxScaler(), num_columns)], remainder='passthrough') ct.fit(train) # Преобразуем значения, тип данных приводим к DataFrame sc_train = pd.DataFrame(ct.transform(train)) sc_val = pd.DataFrame(ct.transform(val)) # Устанавливаем названия столбцов column_names = num_columns + ord_columns sc_train.columns = column_names sc_val.columns = column_names sc_train # Отберем необходимые параметры x_train = sc_train[num_columns] x_val = sc_val[num_columns] y_train = (sc_train[ord_columns].values).flatten() y_val = (sc_val[ord_columns].values).flatten() # Создадим простую модель логистической регрессии model = LogisticRegression() # Обучим модель model.fit(x_train, y_train) # Проверим работу обученной нейронной сети на валидационной выборке pred_val = model.predict(x_val) f1 = f1_score(y_val, pred_val, average='weighted') print(f1) test = pd.read_csv("../datasets/iris_test.csv") test['species'] = '' test.head() sc_test = pd.DataFrame(ct.transform(test)) sc_test.columns = column_names x_test = sc_test[num_columns] test['species'] = model.predict(x_test) test.head() test.to_csv('margaryan.csv', index=False) ```
github_jupyter
# 15-minutes Realized Variance Notebook This notebook analyzes the best subfrequency for computing the 15-minutes Realized Variance by creating a variance signature plot. ``` # Required libraries # Required libraries from IPython.core.display import display, HTML display(HTML("<style>.container { width:80% !important; }</style>")) from pathlib import Path import sys import os import pandas as pd import numpy as np from itertools import chain import matplotlib.pyplot as plt import datetime import zipfile from timeit import default_timer as timer import sqlalchemy as db import matplotlib.pylab as pylab # Paths sys.path.append(os.path.join(Path(os.getcwd()).parent)) data_path = os.path.join(os.path.join(Path(os.getcwd()).parent), 'data') data_per_day_path = os.path.join(os.path.join(Path(os.getcwd()).parent), 'data','data_per_day') results_path = os.path.join(os.path.join(Path(os.getcwd()).parent), 'results') # create connection to sqlite database db_path = os.path.join(data_path, 'database.db') db_engine = db.create_engine('sqlite:///' + db_path) params = { 'axes.labelsize': 'x-large', 'axes.titlesize':'x-large', 'xtick.labelsize':'x-large', 'ytick.labelsize':'x-large'} pylab.rcParams.update(params) # get the data folders file now data_folders = [f for f in os.listdir(data_per_day_path) if not os.path.isfile(os.path.join(data_per_day_path, f))] data_folders = [file for file in data_folders if '.' 
not in file] data_folders = [os.path.join(data_per_day_path, x) for x in data_folders] # get the csv file now data_folder = data_folders[1] table_name = data_folder[-3:] csv_files = [f for f in os.listdir(data_folder) if os.path.isfile(os.path.join(data_folder, f))] csv_files = [file for file in csv_files if '.csv' in file and '201912' in file] csv_files = np.sort([os.path.join(data_folder, x) for x in csv_files]) def compute_second_data(csv_file): data_df = pd.read_csv(csv_file) data_df.DT = pd.to_datetime(data_df.DT) data_df.sort_values(by=['DT'], inplace=True) data_df.index = data_df.DT data_df.drop(columns=['DT'],inplace=True) data_df = data_df.between_time('9:30', '16:00') data_df.reset_index(drop=False, inplace=True) # non zero quotes data_df = data_df.loc[(data_df.BID>0) & (data_df.BIDSIZ>0) & (data_df.ASK>0) & (data_df.ASKSIZ>0)] # autoselect exchange data_df['total_size'] = data_df.BIDSIZ + data_df.ASKSIZ #data_df = data_df.loc[data_df.EX == data_df.groupby(['EX']).sum().total_size.idxmax()] # delete negative spreads data_df = data_df.loc[data_df.ASK > data_df.BID] # mergeQuotesSameTimestamp ex = data_df.EX.values[0] sym_root = data_df.SYM_ROOT.values[0] data_df.drop(columns=['SYM_SUFFIX', 'total_size'], inplace=True) data_df = data_df.groupby(['DT']).median() data_df['EX'] = ex data_df['SYM_ROOT'] = sym_root data_df.reset_index(drop=False, inplace=True) # remove entries with spread > 50 * daily median spread data_df['SPREAD'] = data_df.ASK - data_df.BID data_df = data_df.loc[data_df['SPREAD'] < 50 * data_df['SPREAD'].median()] # remove outliers using the centered rolling window approach def compute_diff(x): return x.values[window] - np.median(np.delete(x.values,window)) window = 25 data_df.sort_values(by=['DT'], inplace=True) data_df['SPREAD_DIFF'] = data_df.SPREAD.rolling(2*window+1, min_periods=2*window+1, center=True).apply(compute_diff) data_df = data_df.loc[(data_df['SPREAD_DIFF'] < 10 * data_df['SPREAD_DIFF'].mean()) | 
(data_df['SPREAD_DIFF'].isna())] data_df = data_df.reset_index(drop=True) # resample data to 15 seconds level data_df.set_index(['DT'], inplace=True) data_df["MID"] = data_df.apply(lambda x: (x.ASK * x.ASKSIZ + x.BID * x.BIDSIZ) / (x.ASKSIZ + x.BIDSIZ), axis=1) data_df = data_df[['MID', 'SYM_ROOT']] df_resampled = data_df.resample('1s').ffill() df_resampled = df_resampled.append(pd.DataFrame(data_df[-1:].values, index=[df_resampled.index[-1] + datetime.timedelta(seconds=1)],columns=data_df.columns)) # get last observation that is not added by ffill # set new index and forward fill the price data first_date = datetime.datetime(year=2019,month=12,day=int(csv_file[-6:-4]),hour=9,minute=45,second=0) df_resampled = df_resampled.iloc[1:,:] # observation at 9:30 is going to be NA new_index = pd.date_range(start=first_date, periods=22501, freq='1s') # index from 9:45 until 16:00 df_resampled = df_resampled.reindex(new_index, method='ffill') df_resampled.reset_index(drop=False, inplace=True) df_resampled.rename(columns={'index': 'DT'}, inplace = True) return df_resampled %%time from joblib import Parallel, delayed df_data_all_days_SPY = Parallel(n_jobs=14)(delayed(compute_second_data)(i) for i in csv_files) %%time from joblib import Parallel, delayed df_data_all_days_EEM = Parallel(n_jobs=14)(delayed(compute_second_data)(i) for i in csv_files) %%time from joblib import Parallel, delayed df_data_all_days_EZU = Parallel(n_jobs=14)(delayed(compute_second_data)(i) for i in csv_files) ``` # Analysis best sampling for 15min realized variance The result indicates that 1min is more than enough ``` def compute_rv(df, sampling): df.index = df.DT df_resampled = df.resample(sampling).ffill() df_resampled['RET'] = df_resampled.MID.pct_change().apply(np.vectorize(lambda x: np.log(1+x))) df_resampled = df_resampled.iloc[1:,:] # first return is NA df_resampled.reset_index(drop=True, inplace=True) df_resampled['RET2'] = df_resampled['RET'].apply(lambda x: x ** 2) df_resampled.iloc[-1,0] = 
df_resampled.iloc[-1,0] - datetime.timedelta(seconds=1) df_resampled.index = df_resampled.DT df_resampled = df_resampled.resample('15min').sum() df_resampled.reset_index(drop=False, inplace=True) df_resampled.DT = df_resampled.DT + datetime.timedelta(minutes=15) return list(df_resampled['RET2'].values) samplings = ['1s', '2s', '5s', '10s', '20s', '30s', '40s', '50s', '1min','3min', '5min'] rv_plot = [] for sampling in samplings: rv_sample = [] for df in df_data_all_days_SPY: rv_sample +=compute_rv(df, sampling) rv_plot.append(np.mean(rv_sample)) fig,ax = plt.subplots(1,1,figsize=(20,15)) plt.plot(samplings, rv_plot) plt.savefig(os.path.join(results_path, 'rv_15_signature_plot.png'), dpi=400, facecolor='aliceblue',edgecolor='k',bbox_inches='tight') plt.show() df_test = pd.DataFrame(columns=['varEEM', 'varSPY', 'varEZU', 'cov(EEM,SPY)', 'cov(EEM, EZU)', 'cov(SPY, EZU)']) for day in range(len(df_data_all_days_SPY)): df_SPY = df_data_all_days_SPY[day] df_SPY.index = df_SPY.DT df_SPY = df_SPY.resample('1min').ffill() df_SPY['RET'] = df_SPY.MID.pct_change().apply(np.vectorize(lambda x: np.log(1+x))) df_SPY = df_SPY[1:] df_EEM = df_data_all_days_EEM[day] df_EEM.index = df_EEM.DT df_EEM = df_EEM.resample('1min').ffill() df_EEM['RET'] = df_EEM.MID.pct_change().apply(np.vectorize(lambda x: np.log(1+x))) df_EEM = df_EEM[1:] df_EZU = df_data_all_days_EZU[day] df_EZU.index = df_EZU.DT df_EZU = df_EZU.resample('1min').ffill() df_EZU['RET'] = df_EZU.MID.pct_change().apply(np.vectorize(lambda x: np.log(1+x))) df_EZU = df_EZU[1:] master_df = pd.DataFrame(index = df_SPY.index, columns=['varEEM', 'varSPY', 'varEZU', 'cov(EEM,SPY)', 'cov(EEM, EZU)', 'cov(SPY, EZU)']) master_df['varEEM'] = df_EEM.RET.apply(lambda x: x**2) master_df['varSPY'] = df_SPY.RET.apply(lambda x: x**2) master_df['varEZU'] = df_EZU.RET.apply(lambda x: x**2) master_df['cov(EEM,SPY)'] = np.multiply(df_EEM.RET.values, df_SPY.RET.values) master_df['cov(EEM, EZU)'] = np.multiply(df_EEM.RET.values, df_EZU.RET.values) 
master_df['cov(SPY, EZU)'] = np.multiply(df_SPY.RET.values, df_EZU.RET.values) master_df.reset_index(drop=False, inplace=True) master_df.iloc[-1,0] = master_df.iloc[-1,0] - datetime.timedelta(seconds=1) master_df.index = master_df.DT master_df = master_df.resample('15min').sum() master_df.reset_index(drop=False, inplace=True) master_df.DT = master_df.DT + datetime.timedelta(minutes=15) df_test = pd.concat([df_test, master_df]) df_test.to_excel(os.path.join(data_path, 'RV15min.xlsx')) ```
github_jupyter
# Custom Distributions You might want to model input uncertanty with a distribution not currenlty available in Golem. In this case you can create your own class implementing such distribution. Here, we will reimplement a uniform distribution as a toy example. ``` from golem import * import numpy as np import pandas as pd from matplotlib import pyplot as plt import matplotlib %matplotlib inline import seaborn as sns sns.set(context='talk', style='ticks') ``` To create your own distribution class to be used in Golem, you need to create a class that: (1) Inherits from the ``BaseDist`` class; (2) Implements a ``cdf`` method that returns the cumulative density for the distribution of interest. The ``cdf`` method needs to to take in two arguments, ``x`` and ``loc``. ``loc`` is the location of the distribution, e.g. the mean for a Gaussian, and ``x`` is where the CDF needs to be evaluated at. In addition, even though this is not required for the code to run, the ``__init__`` method should allow to define the scale of the distribution. In the example below, we allow the user to define the range of the uniform. For a Gaussian distribution this would be the standard deviation, and so on. ``` # Here is a custom, user-implemented, uniform distribution class class MyDistribution(BaseDist): def __init__(self, urange): self.urange = urange def cdf(self, x, loc): """Cumulative density function. Parameters ---------- x : float The point where to evaluate the cdf. loc : float The location of the Uniform distribution. Returns ------- cdf : float Cumulative density evaluated at ``x``. """ a = loc - 0.5 * self.urange b = loc + 0.5 * self.urange # calc cdf if x < a: return 0. elif x > b: return 1. else: return (x - a) / (b - a) ``` To demonstrate how this can be used, we use a simple objective function and we will compute its robust counterpart using the ``Uniform`` class available in Golem as well as the above, user-defined equivalent ``MyDistribution``. 
``` # a sample 1d objective function def objective(x): def sigmoid(x, l, k, x0): return l / (1 + np.exp(-k*(x-x0))) sigs = [sigmoid(x, 1, 100, 0.1), sigmoid(x, -1, 100, 0.2), sigmoid(x, 0.7, 80, 0.5), sigmoid(x, -0.7, 80, 0.9) ] return np.sum(sigs, axis=0) ``` First, using the ``Golem.Uniform`` class... ``` # take 1000 samples in x x = np.linspace(0, 1, 1000) # compute objective y = objective(x) # compute robust objective with Golem golem = Golem(goal='max', random_state=42, nproc=1) golem.fit(X=x.reshape(-1,1), y=y) # use the Golem.Uniform class here dists = [Uniform(0.2)] y_robust = golem.predict(X=x.reshape(-1,1), distributions=dists) # plot results plt.plot(x, y, linewidth=5, label='Objective') plt.plot(x, y_robust, linewidth=5, label='Robust Objective') _ = plt.legend(loc='lower center', ncol=2, bbox_to_anchor=(0.5 ,1.), frameon=False) _ = plt.xlabel('$x$') _ = plt.ylabel('$f(x)$') ``` ...then with our new custom ``MyDistribution`` class: ``` # use MyDistribution for the prediction/convolution dists = [MyDistribution(0.2)] y_robust = golem.predict(X=x.reshape(-1,1), distributions=dists) # plot the results plt.plot(x, y, linewidth=5, label='Objective') plt.plot(x, y_robust, linewidth=5, label='Robust Objective') _ = plt.legend(loc='lower center', ncol=2, bbox_to_anchor=(0.5 ,1.), frameon=False) _ = plt.xlabel('$x$') _ = plt.ylabel('$f(x)$') ``` As you can see, the result above (orange line) obtained with the user-defined uniform is the same to that obtained with ``Golem.Uniform`` as expected. However, note that while with ``Golem.Uniform`` the 1000 samples were processed in less than 10 ms, with ``MyDistribution`` it took almost 300 ms (~30 times slower). This is because the method ``cdf`` is called many times (about 1 million times in this example) and ``Golem.Uniform`` is implemented in Cython rather than Python. 
Therefore, if the execution time of the ``predict`` method in Golem with your custom distribution is too slow, you should consider a Cython implementation.
github_jupyter