text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` import numpy as np import matplotlib.pyplot as plt # Prepare 100 evenly spaced numbers from 0 to 200 x = np.linspace(0, 200, 100) y = x * 2 plt.figure() plt.plot(x,y) # Plot an outlier point plt.plot([100], [100000], 'o') plt.xlabel('x') plt.ylabel('y') plt.show() plt.figure() plt.plot(x,y) # Plot an outlier point plt.plot([100], [100000], 'o') plt.xlabel('x') plt.ylabel('y') #Change y-axis to log10 scale plt.yscale('log') plt.show() plt.figure() plt.plot(x,y) # Plot an outlier point plt.plot([100], [100000], 'o') plt.xlabel('x') plt.ylabel('y') #Change y-axis to log2 scale plt.yscale('log', basey=2) plt.show() import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter def ticks(y, pos): """ Method that formats the axis labels to natural log scale """ # Use Python string formatter to limit the number of decimal places for float values. # The precision of the formatted float value can be specified using a full stop, followed # by the number of decimal places you would like to include. # # Latex notation is denoted by $$ in Matplotlib. # The outer double curly braces {{ }} will be escaped by Python to {}, while the innermost # curly braces contains the actual Python float format string. As an example, 7.389 will # be converted to $e^{2}$ in latex notation. return r'$e^{{{:.0f}}}$'.format(np.log(y)) x = np.linspace(0, 200, 100) y = x * 2 plt.figure() # The two keyword arguments, basex and basey, can be specified together in plt.loglog(). 
# Note: natural log scale is used on the y axis by supplying numpy.e to basey plt.loglog(x,y, basex=2, basey=np.e) # Plot an outlier point plt.plot([100], [100000], 'o') # Get the current axes in this plot ax = plt.gca() # Apply the tick formatter to the y-axis ax.yaxis.set_major_formatter(FuncFormatter(ticks)) plt.xlabel('x') plt.ylabel('y') plt.show() np.log(0) # Output: -inf np.log(-1) # Output: nan import numpy as np import matplotlib.pyplot as plt # Prepare 100 evenly spaced numbers from -200 to 200 x = np.linspace(-200, 200, 100) y = x * 2 # Setup subplot with 3 rows and 2 columns, with shared x-axis. # More details about subplots will be discussed in Chapter 3. f, axarr = plt.subplots(nrows=3, ncols=2, figsize=(8,10), sharex=True) # Linear scale axarr[0, 0].plot(x, y) axarr[0, 0].plot([-200,200], [10,10]) # Horizontal line (y=10) axarr[0, 0].set_title('Linear scale') # Log scale, mask non-positive numbers axarr[0, 1].plot(x,y) axarr[0, 1].plot([-200,200], [10,10]) axarr[0, 1].set_title('Log scale, nonposy=mask') axarr[0, 1].set_yscale('log', nonposy='mask') # Note: axes object requires set_yscale instead of yscale method. # Log scale, clip non-positive numbers axarr[1, 0].plot(x,y) axarr[1, 0].plot([-200,200], [10,10]) axarr[1, 0].set_title('Log scale, nonposy=clip') axarr[1, 0].set_yscale('log', nonposy='clip') # Symlog scale axarr[1, 1].plot(x,y) axarr[1, 1].plot([-200,200], [10,10]) axarr[1, 1].set_title('Symlog scale') axarr[1, 1].set_yscale('symlog') # Symlog scale, expand the linear range to -50,50 (default=None) axarr[2, 0].plot(x,y) axarr[2, 0].plot([-200,200], [10,10]) axarr[2, 0].set_title('Symlog scale, linthreshy=50') axarr[2, 0].set_yscale('symlog', linthreshy=50) # Symlog scale, expand the linear scale to 3 (default=1) # The linear region is expanded, while the log region is compressed. 
axarr[2, 1].plot(x,y) axarr[2, 1].plot([-200,200], [10,10]) axarr[2, 1].set_title('Symlog scale, linscaley=3') axarr[2, 1].set_yscale('symlog', linscaley=3) plt.show() import numpy as np import matplotlib.pyplot as plt # Prepare 1000 sorted random numbers from the normal distribution np.random.seed(1) y = np.random.normal(0.5, 0.1, 1000) y.sort() x = np.arange(len(y)) plt.figure() plt.plot(x,y) # Transform x-axis to logit scale plt.yscale('logit') plt.show() import numpy as np import matplotlib.pyplot as plt plt.figure() time = 100 acceleration = 5 velocity = acceleration * time # Constant acceleration plt.plot([0,time], [acceleration,acceleration], 'b') # Get current plotting axes ax1 = plt.gca() ax1.set_xlabel('time (s)') # Make the y-axis label, ticks and tick labels match the line color. ax1.set_ylabel('$acceleration (ms^{−2})$', color='b') ax1.tick_params('y', colors='b') # Plot velocity in another axes ax2 = plt.twinx() ax2.plot([0,time], [0,velocity], 'r') ax2.set_ylabel('$velocity (ms^{−1})$', color='r') ax2.tick_params('y', colors='r') plt.show() ```
github_jupyter
``` import numpy as np from numpy import random import matplotlib.pyplot as plt ``` # Uniform distribution ``` # get a single random number between 0 and 100 x = random.uniform(0, 100) print(x) # get 10 random numbers x = random.uniform(0, 100, size=10) print(x) # improve readability by writing all parameter names x = random.uniform(low=0, high=100, size=10000) print(x) plt.hist(x, bins=100) plt.show() # make a 2 dimensional distribution of random numbers and plot it x = random.uniform(low=0, high=100, size=100) y = random.uniform(low=0, high=100, size=100) plt.plot(x, y, ".") plt.show() ``` # Gaussian / Normal distribution ``` import math def normal_distribution(x, mean, standard_deviation): return math.exp(-0.5 * pow( (x - mean) / standard_deviation, 2)) / standard_deviation / math.sqrt(2 * math.pi) mean = 0 standard_deviation = 1 x_array = np.arange(-4, 4, 0.1) y_array = [] for x in x_array: y = normal_distribution(x, mean, standard_deviation) y_array = y_array + [y] fig, ax = plt.subplots() ax.plot(x_array, y_array, "-") ax.set_xlabel("x") ax.set_ylabel("probability") plt.show() # generate random numbers following a normal distribution x = random.normal(loc=0, scale=2, size=10000) print(x) plt.hist(x, bins=100) plt.show() # make a 2 dimensional distribution of random numbers and plot it x = random.normal(loc=0, scale=2, size=1000) y = random.normal(loc=0, scale=2, size=1000) plt.plot(x, y, ".") plt.show() x = random.normal(loc=0, scale=2, size=1000) y = random.normal(loc=0, scale=2, size=1000) data = [x, y] fig1, ax1 = plt.subplots() ax1.set_title('Box Plot') ax1.boxplot(data) plt.show() ``` # Biomodal distribution ``` # generate random numbers following a bi-modal distribution a = random.normal(loc=0, scale=2, size=10000) b = random.normal(loc=8, scale=2, size=10000) x = np.concatenate([a, b]) print(x) plt.hist(x, bins=100) plt.show() ``` # Paired/related samples ``` number_of_samples = 100 x = random.uniform(low=0, high=100, size=number_of_samples) x1 = x + 
random.normal(loc=0, scale=2, size=number_of_samples) x2 = x + random.normal(loc=0, scale=2, size=number_of_samples) plt.plot(x1, x2, ".") plt.show() ``` ## Recap: Descriptive statistics ``` # we setup an array of normal distributed values and # measure their mean and standard deviation. x = random.normal(loc=0, scale=2, size=1000000) # <-- increase and decrease # the size here! mean = np.mean(x) standard_deviation = np.std(x) print("Mean: " + str(mean)) print("standard_deviation: " + str(standard_deviation)) ``` # Central limit theorem ``` def normal_random_plots(num_random_numbers): x = random.normal(loc=0, scale=1, size=num_random_numbers) data = [x] fig1, ax1 = plt.subplots() ax1.set_title('Probability distribution of ' + str(num_random_numbers) + ' normal distributed random numbers') ax1.set_xlabel("x"); ax1.set_ylabel("probability"); ax1.hist(data) plt.show() for i in [1, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]: normal_random_plots(i) def normal_random_box_plots(num_random_numbers): x = random.normal(loc=0, scale=1, size=num_random_numbers) y = random.normal(loc=0, scale=1, size=num_random_numbers) data = [x, y] fig1, ax1 = plt.subplots() ax1.set_title('Box Plot of ' + str(num_random_numbers) + ' normal distributed random numbers') ax1.boxplot(data) plt.show() for i in [1, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]: normal_random_box_plots(i) def uniform_random_box_plots(num_random_numbers): x = random.uniform(low=0, high=10, size=num_random_numbers) y = random.uniform(low=0, high=10, size=num_random_numbers) data = [x, y] fig1, ax1 = plt.subplots() ax1.set_title('Box Plot of ' + str(num_random_numbers) + ' normal distributed random numbers') ax1.boxplot(data) plt.show() for i in [1, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]: uniform_random_box_plots(i) ``` # Students grades ``` from numpy import random import matplotlib.pyplot as plt student_count = 60 grades = random.normal(loc=3, scale=1, size=student_count) fig1, 
ax1 = plt.subplots() ax1.set_title('Probability distribution grades') ax1.set_xlabel("grade"); ax1.set_ylabel("count"); ax1.hist(grades, range=(1,6), bins=6) plt.show() student_count = 60 grades = random.normal(loc=2.5, scale=0.8, size=student_count) fig1, ax1 = plt.subplots() ax1.set_title('Probability distribution grades') ax1.set_xlabel("grade"); ax1.set_ylabel("likelihood"); ax1.hist(grades, range=(1,6), bins=6, density=True) plt.show() student_count = 10000 grades = random.normal(loc=3, scale=1, size=student_count) fig1, ax1 = plt.subplots() ax1.set_title('Probability distribution grades') ax1.set_xlabel("grade"); ax1.set_ylabel("probability"); ax1.hist(grades, range=(1,6), bins=6, density=True) plt.show() ```
github_jupyter
``` import numpy as np l = [1, 2, 4,3] l2 = [[1,2], [3,4]] a = np.array(l2, dtype=np.float) print(type(a[1,1])) a.shape a = np.arange(24) print(a.ndim) b = a.reshape(2,4,3) print(b.ndim) print(b.shape) print(b.itemsize) print(b.dtype) print(b.size) print(b.flags) a = np.linspace(10, 20, 5, endpoint=False) print(a) a = np.array([[1,2,3], [3,4,5], [4,5,6]]) print(a) print('从数组索引 a[1:] 处开始切割') print(a[1:,:]) print('从数组索引 a[1,:] 处开始切割') print(a[1,:]) print('从数组索引 a[1,...] 处开始切割') print(a[1,...]) import matplotlib.pylab as plt plt.figure(figsize=(8,5), dpi=60) ax = plt.subplot(111) ax = plt.gca() ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.spines['bottom'].set_position(('data',0)) ax.yaxis.set_ticks_position('left') ax.spines['left'].set_position(('data',0)) X = np.linspace(-np.pi, np.pi, 256,endpoint=True) C,S = np.cos(X), np.sin(X) plt.plot(X, C, color="blue", linewidth=2.5, linestyle="-", label="cosine", zorder=-1) plt.plot(X, S, color="red", linewidth=2.5, linestyle="-", label="sine", zorder=-2) plt.xlim(X.min()*1.1, X.max()*1.1) plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$']) plt.ylim(C.min()*1.1,C.max()*1.1) plt.yticks([-1, 0, +1], [r'$-1$', r'$0$', r'$+1$']) plt.legend(loc='upper left') t = 2*np.pi/3 plt.plot([t, t], [0, np.cos(t)], color ='blue', linewidth=2.5, linestyle="--") plt.scatter([t,],[np.cos(t),], 50, color ='blue') plt.annotate(r'$\sin(\frac{2\pi}{3})=\frac{\sqrt{3}}{2}$', xy=(t, np.sin(t)), xycoords='data', xytext=(+10, +30), textcoords='offset points', fontsize=16, arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2")) plt.plot([t,t], [0,np.sin(t)], color ='red', linewidth=2.5, linestyle="--") plt.scatter([t,],[np.sin(t),], 50, color ='red') plt.annotate(r'$\cos(\frac{2\pi}{3})=-\frac{1}{2}$', xy=(t, np.cos(t)), xycoords='data', xytext=(-90, -50), textcoords='offset points', fontsize=16, 
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2")) for label in ax.get_xticklabels() + ax.get_yticklabels(): label.set_fontsize(16) label.set_bbox(dict(facecolor='white', edgecolor='None', alpha=0.65 )) plt.show() from pylab import * subplot(2,1,1) xticks([]), yticks([]) text(0.5,0.5, 'subplot(2,1,1)',ha='center',va='center',size=24,alpha=.5) subplot(2,1,2) xticks([]), yticks([]) text(0.5,0.5, 'subplot(2,1,2)',ha='center',va='center',size=24,alpha=.5) plt.savefig('../snapshots/subplot-horizontal.png', dpi=64) show() from pylab import * subplot(1,2,1) xticks([]), yticks([]) text(0.5,0.5, 'subplot(1,2,1)',ha='center',va='center',size=20,alpha=.5) subplot(1,2,2) xticks([]), yticks([]) text(0.5,0.5, 'subplot(1,2,2)',ha='center',va='center',size=20,alpha=.5) plt.savefig('../snapshots/subplot-vertical.png', dpi=64) show() from pylab import * subplot(2,2,1) xticks([]), yticks([]) text(0.5,0.5, 'subplot(2,2,1)',ha='center',va='center',size=20,alpha=.5) subplot(2,2,2) xticks([]), yticks([]) text(0.5,0.5, 'subplot(2,2,2)',ha='center',va='center',size=20,alpha=.5) subplot(2,2,3) xticks([]), yticks([]) text(0.5,0.5, 'subplot(2,2,3)',ha='center',va='center',size=20,alpha=.5) subplot(2,2,4) xticks([]), yticks([]) text(0.5,0.5, 'subplot(2,2,4)',ha='center',va='center',size=20,alpha=.5) savefig('../snapshots/subplot-grid.png', dpi=64) show() from pylab import * import matplotlib.gridspec as gridspec G = gridspec.GridSpec(4,4) axes_1 = subplot(G[0, :]) xticks([]), yticks([]) text(0.5,0.5, 'Axes 1',ha='center',va='center',size=20,alpha=.5) axes_2 = subplot(G[1, :-1]) xticks([]), yticks([]) text(0.5,0.5, 'Axes 2',ha='center',va='center',size=20,alpha=.5) axes_3 = subplot(G[2, :-1]) xticks([]), yticks([]) text(0.5,0.5, 'Axes 3',ha='center',va='center',size=20,alpha=.5) axes_4 = subplot(G[1:, -1]) xticks([]), yticks([]) text(0.5,0.5, 'Axes 4',ha='center',va='center',size=20,alpha=.5) axes_5 = subplot(G[-1, 0]) xticks([]), yticks([]) text(0.5,0.5, 'Axes 
5',ha='center',va='center',size=20,alpha=.5) axes_6 = subplot(G[-1, 1:-1]) xticks([]), yticks([]) text(0.5,0.5, 'Axes 6',ha='center',va='center',size=20,alpha=.5) show() from pylab import * axes([0.1,0.1,.8,.8]) xticks([]), yticks([]) text(0.6,0.6, 'axes([0.1,0.1,.8,.8])',ha='center',va='center',size=20,alpha=.5) axes([0.2,0.2,.3,.3]) xticks([]), yticks([]) text(0.5,0.5, 'axes([0.2,0.2,.3,.3])',ha='center',va='center',size=12,alpha=.5) plt.savefig("../snapshots/axes.png",dpi=64) show() from pylab import * axes([0.1,0.1,.5,.5]) xticks([]), yticks([]) text(0.1,0.1, 'axes([0.1,0.1,.5,.5])',ha='left',va='center',size=16,alpha=.5) axes([0.2,0.2,.5,.5]) xticks([]), yticks([]) text(0.1,0.1, 'axes([0.2,0.2,.5,.5])',ha='left',va='center',size=16,alpha=.5) axes([0.3,0.3,.5,.5]) xticks([]), yticks([]) text(0.1,0.1, 'axes([0.3,0.3,.5,.5])',ha='left',va='center',size=16,alpha=.5) axes([0.4,0.4,.5,.5]) xticks([]), yticks([]) text(0.1,0.1, 'axes([0.4,0.4,.5,.5])',ha='left',va='center',size=16,alpha=.5) plt.savefig("../snapshots/axes-2.png",dpi=64) ticklabel_format() show() import numpy as np import matplotlib.pyplot as plt n = 256 X = np.linspace(-np.pi,np.pi,n,endpoint=True) Y = np.sin(2*X) plt.axes([0.025,0.025,0.95,0.95]) plt.plot (X, Y+1, color='blue', alpha=1.00) plt.fill_between(X, 1, Y+1, color='blue', alpha=.25) plt.plot (X, Y-1, color='blue', alpha=1.00) plt.fill_between(X, -1, Y-1, (Y-1) > -1, color='blue', alpha=.25) plt.fill_between(X, -1, Y-1, (Y-1) < -1, color='red', alpha=.25) plt.xlim(-np.pi,np.pi) plt.ylim(-2.5,2.5) savefig('../snapshots/plot_ex.png',dpi=48) plt.show() import numpy as np import numpy.matlib as matlib import matplotlib.pyplot as plt n = 1024 X = np.random.normal(0,1,n) Y = np.random.normal(0,1,n) print(X.shape) T = np.arctan2(Y,X) plt.axes([0.025,0.025,0.95,0.95]) plt.scatter(X,Y, s=35, c=T, alpha=.5) plt.xlim(-1.5,1.5), plt.xticks([]) plt.ylim(-1.5,1.5), plt.yticks([]) savefig('../snapshots/scatter_ex.png',dpi=48) plt.show() import numpy as np 
import matplotlib.pyplot as plt n = 12 X = np.arange(n) Y1 = (1-X/float(n)) * np.random.uniform(0.5,1.0,n) Y2 = (1-X/float(n)) * np.random.uniform(0.5,1.0,n) plt.axes([0.025,0.025,0.95,0.95]) plt.bar(X, +Y1, facecolor='#9999ff', edgecolor='white') plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white') for x,y in zip(X,Y1): plt.text(x, y+0.05, '%.2f' % y, ha='center', va= 'bottom') for x,y in zip(X,Y2): plt.text(x, -y-0.05, '%.2f' % y, ha='center', va= 'top') plt.xlim(-.5,n), plt.xticks([]) plt.ylim(-1.25,+1.25), plt.yticks([]) plt.savefig('../snapshots/bar_ex.png', dpi=48) plt.show() import numpy as np import matplotlib.pyplot as plt def f(x,y): return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2) n = 256 x = np.linspace(-3,3,n) y = np.linspace(-3,3,n) X,Y = np.meshgrid(x,y) plt.axes([0.025,0.025,0.95,0.95]) plt.contourf(X, Y, f(X,Y), 8, alpha=.75, cmap=plt.cm.hot) C = plt.contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5) plt.clabel(C, inline=1, fontsize=10) plt.xticks([]), plt.yticks([]) plt.savefig('../snapshots/contour_ex.png',dpi=48) plt.show() import numpy as np import matplotlib.pyplot as plt def f(x,y): return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2) n = 10 x = np.linspace(-3,3,3.5*n) y = np.linspace(-3,3,3.0*n) X,Y = np.meshgrid(x,y) Z = f(X,Y) plt.axes([0.025,0.025,0.95,0.95]) plt.imshow(Z,interpolation='bicubic', cmap='bone', origin='lower') plt.colorbar(shrink=.92) plt.xticks([]), plt.yticks([]) plt.savefig('../snapshots/imshow_ex.png', dpi=48) plt.show() import numpy as np import matplotlib.pyplot as plt n = 20 Z = np.ones(n) Z[-1] *= 2 plt.axes([0.025, 0.025, 0.95, 0.95]) plt.pie(Z, explode=Z*.05, colors=['%f' % (i/float(n)) for i in range(n)], wedgeprops={"linewidth": 1, "edgecolor": "black"}) plt.gca().set_aspect('equal') plt.xticks([]), plt.yticks([]) plt.savefig('../snapshots/pie_ex.png',dpi=48) plt.show() import numpy as np import matplotlib.pyplot as plt n = 8 X,Y = np.mgrid[0:n,0:n] T = np.arctan2(Y-n/2.0, X-n/2.0) R = 
10+np.sqrt((Y-n/2.0)**2+(X-n/2.0)**2) U,V = R*np.cos(T), R*np.sin(T) plt.axes([0.025,0.025,0.95,0.95]) plt.quiver(X,Y,U,V,R, alpha=.5) plt.quiver(X,Y,U,V, edgecolor='k', facecolor='None', linewidth=.5) plt.xlim(-1,n), plt.xticks([]) plt.ylim(-1,n), plt.yticks([]) plt.savefig('../snapshots/quiver_ex.png',dpi=48) plt.show() import numpy as np import matplotlib.pyplot as plt ax = plt.axes([0.025,0.025,0.95,0.95]) ax.set_xlim(0,4) ax.set_ylim(0,3) ax.xaxis.set_major_locator(plt.MultipleLocator(1.0)) ax.xaxis.set_minor_locator(plt.MultipleLocator(0.1)) ax.yaxis.set_major_locator(plt.MultipleLocator(1.0)) ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1)) ax.grid(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75') ax.grid(which='minor', axis='x', linewidth=0.25, linestyle='-', color='0.75') ax.grid(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75') ax.grid(which='minor', axis='y', linewidth=0.25, linestyle='-', color='0.75') ax.set_xticklabels([]) ax.set_yticklabels([]) plt.savefig('../snapshots/grid_ex.png',dpi=48) plt.show() import numpy as np import matplotlib.pyplot as plt fig = plt.figure() fig.subplots_adjust(bottom=0.025, left=0.025, top = 0.975, right=0.975) plt.subplot(2,1,1) plt.xticks([]), plt.yticks([]) plt.subplot(2,3,4) plt.xticks([]), plt.yticks([]) plt.subplot(2,3,5) plt.xticks([]), plt.yticks([]) plt.subplot(2,3,6) plt.xticks([]), plt.yticks([]) plt.savefig('../snapshots/multiplot_ex.png',dpi=48) plt.show() import numpy as np import matplotlib.pyplot as plt ax = plt.axes([0.025,0.025,0.95,0.95], polar=True) N = 20 theta = np.arange(0.0, 2*np.pi, 2*np.pi/N) radii = 10*np.random.rand(N) width = np.pi/4*np.random.rand(N) bars = plt.bar(theta, radii, width=width, bottom=0.0) for r,bar in zip(radii, bars): bar.set_facecolor( plt.cm.jet(r/10.)) bar.set_alpha(0.5) ax.set_xticklabels([]) ax.set_yticklabels([]) plt.savefig('../snapshots/polar_ex.png',dpi=48) plt.show() import numpy as np import matplotlib.pyplot as plt from 
mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = Axes3D(fig) X = np.arange(-4, 4, 0.25) Y = np.arange(-4, 4, 0.25) X, Y = np.meshgrid(X, Y) R = np.sqrt(X**2 + Y**2) Z = np.sin(R) ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.cm.hot) ax.contourf(X, Y, Z, zdir='z', offset=-2, cmap=plt.cm.hot) ax.set_zlim(-2,2) plt.savefig('../snapshots/plot3d_ex.png',dpi=48) plt.show() import numpy as np import matplotlib.pyplot as plt eqs = [] eqs.append((r"$W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = U^{3\beta}_{\delta_1 \rho_1} + \frac{1}{8 \pi 2} \int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2 \left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - \alpha^\prime_2U^{1\beta}_{\rho_1 \sigma_2} }{U^{0\beta}_{\rho_1 \sigma_2}}\right]$")) eqs.append((r"$\frac{d\rho}{d t} + \rho \vec{v}\cdot\nabla\vec{v} = -\nabla p + \mu\nabla^2 \vec{v} + \rho \vec{g}$")) eqs.append((r"$\int_{-\infty}^\infty e^{-x^2}dx=\sqrt{\pi}$")) eqs.append((r"$E = mc^2 = \sqrt{{m_0}^2c^4 + p^2c^2}$")) eqs.append((r"$F_G = G\frac{m_1m_2}{r^2}$")) plt.axes([0.025,0.025,0.95,0.95]) for i in range(24): index = np.random.randint(0,len(eqs)) eq = eqs[index] size = np.random.uniform(12,32) x,y = np.random.uniform(0,1,2) alpha = np.random.uniform(0.25,.75) plt.text(x, y, eq, ha='center', va='center', color="#11557c", alpha=alpha, transform=plt.gca().transAxes, fontsize=size, clip_on=True) plt.xticks([]), plt.yticks([]) plt.savefig('../snapshots/text_ex.png',dpi=48) plt.show() x= np.arange(32).reshape(8, 4) print(x) print (x[np.ix_([1,5,7,2], [0,3,1,2])]) from matplotlib import pyplot as plt x = np.arange(1,11) y = 2 * x + 5 plt.title("Matplotlib demo", fontsize=14) plt.xlabel("x axis caption", fontsize=14) plt.ylabel("y axis caption", fontsize=14) # 设置刻度标记的大小-labelsize plt.tick_params(axis='both', labelsize=14) # 绘制坐标轴 plt.axis([0, 15, 0, 30]) plt.plot(x, y, 'go', label='line 1', linewidth = 5) plt.show() import matplotlib # fname 为 你下载的字体库路径,注意 SimHei.ttf 字体的路径 zhfont1 = 
matplotlib.font_manager.FontProperties(fname="/home/magic/work/pkgs/SimHei.ttf") x = np.arange(1,11) y = 2 * x + 5 plt.title("matplot 中文字体测试", fontproperties=zhfont1) # fontproperties 设置中文显示,fontsize 设置字体大小 plt.xlabel("x 轴", fontproperties=zhfont1) plt.ylabel("y 轴", fontproperties=zhfont1) plt.plot(x,y, marker='H') plt.show() print(help(plt.scatter)) ```
github_jupyter
<h1 align=center><font size = 5> Logistic Regression with Python</font></h1> In this notebook, you will learn Logistic Regression, and then, you'll create a model for a telecommunication company, to predict when its customers will leave for a competitor, so that they can take some action to retain the customers. <a id="ref1"></a> ## What is different between Linear and Logistic Regression? While Linear Regression is suited for estimating continuous values (e.g. estimating house price), it is not the best tool for predicting the class of an observed data point. In order to estimate the class of a data point, we need some sort of guidance on what would be the **most probable class** for that data point. For this, we use **Logistic Regression**. <div class="alert alert-success alertsuccess" style="margin-top: 20px"> <font size = 3><strong>Recall linear regression:</strong></font> <br> <br> As you know, __Linear regression__ finds a function that relates a continuous dependent variable, _y_, to some predictors (independent variables _x1_, _x2_, etc.). For example, Simple linear regression assumes a function of the form: <br><br> $$ y = B_0 + B_1 * x1 + B_2 * x2 +... $$ <br> and finds the values of parameters _B0_, _B1_, _B2_, etc, where the term _B0_ is the "intercept". It can be generally shown as: <br><br> $$ ℎ_θ(𝑥) = 𝜃^TX $$ <p></p> </div> Logistic Regression is a variation of Linear Regression, useful when the observed dependent variable, _y_, is categorical. It produces a formula that predicts the probability of the class label as a function of the independent variables. 
Logistic regression fits a special s-shaped curve by taking the linear regression and transforming the numeric estimate into a probability with the following function, which is called sigmoid function 𝜎: $$ ℎ_θ(𝑥) = 𝜎({θ^TX}) = \frac {e^{(B0 + B1 * x1 + B2 * x2 +...)}}{1 + e^{(B0 + B1 * x1 + B2 * x2 +...)}} $$ Or: $$ ProbabilityOfaClass_1 = P(Y=1|X) = 𝜎({θ^TX}) = \frac{e^{θ^TX}}{1+e^{θ^TX}} $$ In this equation, ${θ^TX}$ is the regression result (the sum of the variables weighted by the coefficients), `exp` is the exponential function and $𝜎(θ^TX)$ is the sigmoid or [logistic function](http://en.wikipedia.org/wiki/Logistic_function), also called logistic curve. It is a common "S" shape (sigmoid curve). So, briefly, Logistic Regression passes the input through the logistic/sigmoid but then treats the result as a probability: <img src="https://ibm.box.com/shared/static/kgv9alcghmjcv97op4d6onkyxevk23b1.png" width = "1024" align = "center"> The objective of __Logistic Regression__ algorithm, is to find the best parameters θ, for $ℎ_θ(𝑥) = 𝜎({θ^TX})$, in such a way that the model best predicts the class of each case. ### Customer churn with Logistic Regression A telecommunications company is concerned about the number of customers leaving their land-line business for cable competitors. They need to understand who is leaving. Imagine that you’re an analyst at this company and you have to find out who is leaving and why. Lets first import required libraries: ``` import pandas as pd import numpy as np import scipy.optimize as opt from sklearn import preprocessing %matplotlib inline import matplotlib.pyplot as plt ``` ### About dataset We’ll use a telecommunications data for predicting customer churn. This is a historical customer data where each row represents one customer. The data is relatively easy to understand, and you may uncover insights you can use immediately. 
Typically it’s less expensive to keep customers than acquire new ones, so the focus of this analysis is to predict the customers who will stay with the company. This data set provides info to help you predict behavior to retain customers. You can analyze all relevant customer data and develop focused customer retention programs.

The data set includes information about:

- Customers who left within the last month – the column is called Churn
- Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
- Customer account information – how long they’ve been a customer, contract, payment method, paperless billing, monthly charges, and total charges
- Demographic info about customers – gender, age range, and if they have partners and dependents

### Load the Telco Churn data

Telco Churn is a hypothetical data file that concerns a telecommunications company's efforts to reduce turnover in its customer base. Each case corresponds to a separate customer and it records various demographic and service usage information. Before you can work with the data, you must use the URL to get the ChurnData.csv.

### Load Data From CSV File

```
churn_df = pd.read_csv("Datasets/ChurnData.csv")
churn_df.head()
```

## Data pre-processing and selection

Let's select some features for the modeling. Also we change the target data type to be integer, as it is a requirement of the scikit-learn algorithm:

```
churn_df.columns
churn_df = churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip', 'callcard', 'wireless','churn']]
churn_df.head()
churn_df['churn'] = churn_df['churn'].astype('int')
churn_df
churn_df.isna().any()
```

## Practice

How many rows and columns are in this dataset in total? What are the names of the columns?
``` # write your code here ``` Lets define X, and y for our dataset: ``` X = np.asarray(churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip']]) X[0:5] y = np.asarray(churn_df['churn']) y [0:5] ``` Also, we normalize the dataset: ``` from sklearn import preprocessing X = preprocessing.StandardScaler().fit(X).transform(X) X[0:5] ``` ## Train/Test dataset Okay, we split our dataset into train and test set: ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4) print ('Train set:', X_train.shape, y_train.shape) print ('Test set:', X_test.shape, y_test.shape) ``` # Modeling (Logistic Regression with Scikit-learn) Lets build our model using __LogisticRegression__ from Scikit-learn package. This function implements logistic regression and can use different numerical optimizers to find parameters, including ‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’ solvers. You can find extensive information about the pros and cons of these optimizers if you search it in internet. The version of Logistic Regression in Scikit-learn, support regularization. Regularization is a technique used to solve the overfitting problem in machine learning models. __C__ parameter indicates __inverse of regularization strength__ which must be a positive float. Smaller values specify stronger regularization. Now lets fit our model with train set: ``` from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix,classification_report LR = LogisticRegression(C=0.0001, solver='liblinear').fit(X_train,y_train) LR ``` Now we can predict using our test set: ``` yhat = LR.predict(X_test) yhat y_test ``` __predict_proba__ returns estimates for all classes, ordered by the label of classes. 
So, the first column is the probability of class 0, P(Y=0|X), and the second column is the probability of class 1, P(Y=1|X) — scikit-learn orders the columns of `predict_proba` by ascending class label:

```
yhat_prob = LR.predict_proba(X_test)
yhat_prob
set(churn_df['churn'])
```

## Evaluation

### What is the Jaccard Index?

The Jaccard similarity index (sometimes called the Jaccard similarity coefficient) compares members of two sets to see which members are shared and which are distinct. It’s a measure of similarity for the two sets of data, with a range from 0% to 100%. The higher the percentage, the more similar the two populations. Although it’s easy to interpret, it is extremely sensitive to small sample sizes and may give erroneous results, especially with very small samples or data sets with missing observations.

**How to Calculate the Jaccard Index**

The formula to find the Index is:

$Jaccard Index = (the number in both sets) / (the number in either set) * 100$

The same formula in notation is:

$J(X,Y) = |X∩Y| / |X∪Y|$

In steps, that’s:

1. Count the number of members which are shared between both sets.
2. Count the total number of members in both sets (shared and un-shared).
3. Divide the number of shared members (1) by the total number of members (2).
4. Multiply the number you found in (3) by 100.

**This percentage tells you how similar the two sets are.** Two sets that share all members would be 100% similar. The closer to 100%, the more similarity (e.g. 90% is more similar than 89%). If they share no members, they are 0% similar. The midway point — 50% — means that the two sets share half of the members.

Examples

A simple example using set notation: How similar are these two sets?

A = {0,1,2,5,6}
B = {0,2,3,4,5,7,9}

**Solution: J(A,B)** = |A∩B| / |A∪B| = |{0,2,5}| / |{0,1,2,3,4,5,6,7,9}| = 3/9 = 0.33.

**Notes:** The cardinality of A, denoted |A|, is a count of the number of elements in set A. Although it’s customary to leave the answer in decimal form if you’re using set notation, you could multiply by 100 to get a similarity of 33.33%.
Lets try jaccard index for accuracy evaluation. we can define jaccard as the size of the intersection divided by the size of the union of two label sets. If the entire set of predicted labels for a sample strictly match with the true set of labels, then the subset accuracy is 1.0; otherwise it is 0.0. ``` from sklearn.metrics import jaccard_score,jaccard_similarity_score jaccard_score(y_test, yhat) jaccard_similarity_score(y_test, yhat) ``` ### confusion matrix Another way of looking at accuracy of classifier is to look at __confusion matrix__. ``` from sklearn.metrics import classification_report, confusion_matrix import itertools def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="green" if cm[i, j] > thresh else "red") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') print(confusion_matrix(y_test, yhat, labels=[1,0])) # Compute confusion matrix cnf_matrix = confusion_matrix(y_test, yhat, labels=[1,0]) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=['churn=1','churn=0'],normalize= False, title='Confusion matrix') ``` Look at first row. The firsr row is for customers whose actual churn value in test set is 1. 
As you can calculate, out of 40 customers, the churn value of 15 of them is 1. And out of these 15, the classifier correctly predicted 6 of them as 1, and 9 of them as 0. It means that for 6 customers, the actual churn value was 1 in the test set, and the classifier also correctly predicted those as 1. However, while the actual label of 9 customers was 1, the classifier predicted those as 0, which is not very good. We can consider it as the error of the model for the first row. What about the customers with churn value 0? Let's look at the second row. It looks like there were 25 customers whose churn value was 0. The classifier correctly predicted 24 of them as 0, and one of them wrongly as 1. So, it has done a good job in predicting the customers with churn value 0. A good thing about the confusion matrix is that it shows the model's ability to correctly predict or separate the classes. In the specific case of a binary classifier, such as this example, we can interpret these numbers as the count of true positives, false positives, true negatives, and false negatives. ``` print (classification_report(y_test, yhat)) ``` Based on the count of each section, we can calculate the precision and recall of each label: - __Precision__ is a measure of the accuracy provided that a class label has been predicted. It is defined by: precision = TP / (TP + FP) - __Recall__ is the true positive rate. It is defined as: Recall = TP / (TP + FN) So, we can calculate the precision and recall of each class. __F1 score:__ Now we are in the position to calculate the F1 scores for each label based on the precision and recall of that label. The F1 score is the harmonic average of the precision and recall, where an F1 score reaches its best value at 1 (perfect precision and recall) and worst at 0. It is a good way to show that a classifier has a good value for both recall and precision. And finally, we can tell the average accuracy for this classifier is the average of the f1-score for both labels, which is 0.72 in our case.
### log loss Now, let's try __log loss__ for evaluation. In logistic regression, the output can be the probability of customer churn being yes (or equal to 1). This probability is a value between 0 and 1. Log loss (Logarithmic loss) measures the performance of a classifier where the predicted output is a probability value between 0 and 1. ``` from sklearn.metrics import log_loss log_loss(y_test, yhat_prob) ``` ## Practice Try to build the Logistic Regression model again for the same dataset, but this time use different __solver__ and __regularization__ values. What is the new __logLoss__ value? ``` # write your code here LR2 = LogisticRegression(C=0.01, solver='sag').fit(X_train,y_train) yhat_prob2 = LR2.predict_proba(X_test) print ("LogLoss: %.2f" % log_loss(y_test, yhat_prob2)) # write your code here LR2 = LogisticRegression(C=0.01, solver='newton-cg').fit(X_train,y_train) yhat_prob2 = LR2.predict_proba(X_test) print ("LogLoss: %.2f" % log_loss(y_test, yhat_prob2)) # Compare every supported solver. Collect the log loss of each run inside the # loop so the summary table gets one value per solver; building the DataFrame # from a single scalar after the loop would repeat only the last solver's score. Solver = ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga'] losses = [] for i in Solver: LR2 = LogisticRegression(C=0.01, solver=i).fit(X_train,y_train) yhat_prob2 = LR2.predict_proba(X_test) losses.append(log_loss(y_test, yhat_prob2)) print ("LogLoss: %.2f" % losses[-1]) Data = pd.DataFrame({'Solver_method':Solver, 'LogLoss':losses}) print(Data) ```
github_jupyter
``` import pandas as pd import numpy as np import os import git import sys repo = git.Repo("./", search_parent_directories=True) homedir = repo.working_dir #Key to give state & county name value for each FIPS code Key = pd.read_csv('Key.csv', index_col=0).sort_values(by=['FIPS']) Key.head() #County Covid Cases/Deaths #NaN cleaning here set things to 0 Consecutive_CD = pd.read_csv('USAFacts_CDConsecutive.csv', index_col=0).sort_values(by=['FIPS']) Nonconsecutive_CD = pd.read_csv('USAFacts_CDNonconsecutive.csv', index_col=0).sort_values(by=['FIPS']) Consecutive_CD.head() #County Health Info County_Beds = pd.read_csv('County_Beds.csv', index_col=0).sort_values(by=['FIPS']) County_ICU = pd.read_csv('County_ICU.csv', index_col=0).sort_values(by=['FIPS']) County_Health = pd.read_csv('County_Health.csv', index_col=0).sort_values(by=['FIPS']) County_Beds.head() County_ICU.head() County_Health.head() #County Mobility Info (Need to be cleaned for NaNs) Consecutive_M = pd.read_csv('Mobility_County_Consecutive.csv', index_col=0).sort_values(by=['FIPS']) Nonconsecutive_M = pd.read_csv('Mobility_County_Nonconsecutive.csv', index_col=0).sort_values(by=['FIPS']) #making NaN=0 Consecutive_M = Consecutive_M.fillna(0) Nonconsecutive_M = Nonconsecutive_M.fillna(0) ####################################################### #Google Mobility Info #This needs to be cleaned for NaNs google_county_Consecutive = pd.read_csv('google_county_Consecutive.csv', index_col=0).sort_values(by=['FIPS']) google_county_Nonconsecutive = pd.read_csv('google_county_Nonconsecutive.csv', index_col=0).sort_values(by=['FIPS']) Consecutive_M.head() google_county_Consecutive.head() #County Policy/Transit Info #This needs to be cleaned for NaNs Policies_County = pd.read_csv('Policies_County.csv', index_col=0).sort_values(by=['FIPS']) Transit = pd.read_csv('Transit.csv', index_col=0).sort_values(by=['FIPS']) Policies_County.head() Transit.head() #County Demographic Info Votes = pd.read_csv('Votes.csv', 
index_col=0).sort_values(by=['FIPS']) Age_Race = pd.read_csv('Age_Race.csv', index_col=0).sort_values(by=['FIPS']) Educ_County = pd.read_csv('Educ_County.csv', index_col=0).sort_values(by=['FIPS']) Density = pd.read_csv('Density.csv', index_col=0).sort_values(by=['FIPS']) Unemp = pd.read_csv('Unemp.csv', index_col=0).sort_values(by=['FIPS']) Poverty = pd.read_csv('Poverty.csv', index_col=0).sort_values(by=['FIPS']) Pop_60 = pd.read_csv('Pop_60.csv', index_col=0).sort_values(by=['FIPS']) Votes.head() Age_Race.head() Educ_County.head() Density.head() Unemp.head() Poverty.head() Pop_60.head() #County Air Quality Info #This needs to be cleaned for NaNs Air_Qual = pd.read_csv('Air_Qual.csv', index_col=0).sort_values(by=['FIPS','ValidDate']) Air_Qual = Air_Qual.set_index('FIPS') #Individual breakdown of Air Quality, needs to be cleaned for NaNs Ozone_AQI = pd.read_csv('Ozone_AQI.csv', index_col=0).sort_values(by=['FIPS']) PM10_AQI = pd.read_csv('PM10_AQI.csv', index_col=0).sort_values(by=['FIPS']) PM25_AQI = pd.read_csv('PM25_AQI.csv', index_col=0).sort_values(by=['FIPS']) NO2_AQI = pd.read_csv('NO2_AQI.csv', index_col=0).sort_values(by=['FIPS']) CO_PPB = pd.read_csv('CO_PPB.csv', index_col=0).sort_values(by=['FIPS']) SO2_PPB = pd.read_csv('SO2_PPB.csv', index_col=0).sort_values(by=['FIPS']) Air_Qual.head() NO2_AQI.head() CO_PPB.head() ```
github_jupyter
# Symbulate Lab 7 - Stochastic Processes This Jupyter notebook provides a template for you to fill in. Read the notebook from start to finish, completing the parts as indicated. To run a cell, make sure the cell is highlighted by clicking on it, then press SHIFT + ENTER on your keyboard. (Alternatively, you can click the "play" button in the toolbar above.) In this lab you will use the Symbulate package. Many of the new commands are discussed in the [Random processes](https://dlsun.github.io/symbulate/process.html) section of the [Symbulate documentation](https://dlsun.github.io/symbulate/index.html). **You should use Symbulate commands whenever possible.** If you find yourself writing long blocks of Python code, you are probably doing something wrong. For example, you should not need to write any *long* `for` loops (though you will need to write a simple `for` loop in Problem 1 part a). There are 2 parts, and at the end of each part there are some reflection questions. There is no need to type a response to the reflection questions, but you should think about them and discuss them with your partner to try to make sense of your simulation results. **Warning:** You may notice that many of the cells in this notebook are not editable. This is intentional and for your own safety. We have made these cells read-only so that you don't accidentally modify or delete them. However, you should still be able to execute the code in these cells. ``` from symbulate import * %matplotlib inline ``` ## Problem 1. Here is one example of a discrete time, continuous state process. Suppose that $X_0 = 0$ and for $n = 0, 1, 2, \ldots$, $$ X_{n+1} = 0.5 X_n + Z_{n} $$ where $Z_1, Z_2, \ldots$ are i.i.d. $N(0,1)$. Such a process is called an *autoregressive* process (of order 1). ### a) Define in Symbulate the $X$ process, for time steps $n= 0, 1,2, \ldots, 20$. Hint: [this example](https://dlsun.github.io/symbulate/process.html#rw) should be very helpful. 
- Define a probability space `P` corresponding to an infinite sequence of i.i.d. $N(0, 1)$ values. (Hint: [bottom of this page](https://dlsun.github.io/symbulate/probspace.html#indep).) - Define an `RV` `Z` on the probability space `P`; each component of `Z` can be indexed with brackets `[]` , e.g. `Z[0]`, `Z[1]`, etc. - Define a [`RandomProcess`](https://dlsun.github.io/symbulate/process.html#time) `X` on `P`. Write a simple `for` loop to define the value of `X[n+1]` based `X[n]` and `Z[n]`. ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### b) Simulate and plot a single sample path for n= 0, ..., 20. (Hint: [see the plots here](https://dlsun.github.io/symbulate/process.html#Xt). You might need to change `alpha = ` if the plot is too light.) ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### c) Simulate and plot 100 sample paths. ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### d) Simulate and plot the distribution of $X_5$, and approximate its mean and variance. ([Hint](https://dlsun.github.io/symbulate/process.html#value).) ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### e) Simulate and plot the distribution of $X_{10}$, and approximate its mean and variance. **Reflection question:** How does the distribution of $X_{10}$ compare to that of $X_5$? ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### f) Simulate and make a histogram or density plot of the joint distribution of $X_5$ and $X_{10}$, and approximate its correlation and covariance. 
``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### g) Simulate and make a histogram or density plot of the joint distribution of $X_5$ and $X_6$, and approximate its correlation and covariance. **Reflection question:** How does the joint distribution of $X_5$ and $X_6$ compare to that of $X_5$ and $X_{10}$? ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### h) Simulate and make a histogram of density plot of the joint distribution of $X_{10}$ and $X_{15}$, and approximate its correlation and covariance. **Reflection question:** How does the joint distribution of $X_{10}$ and $X_{15}$ compare to that of $X_{5}$ and $X_{10}$? ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### i) Simulate and make a histogram or density plot of the joint distribution of $X_{10}$ and $X_{11}$, and approximate its correlation and covariance. **Reflection question:** How does the joint distribution of $X_{10}$ and $X_{11}$ compare to that of $X_{5}$ and $X_{6}$? ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### j) More reflection questions Does the $X$ process appear to be stationary? Note: The process starts with $X_0=0$ so it is technically not stationary. But aside from the first few times, does it appear that the process is stationary? ## Problem 2) Consider a random signal with both a random amplitude and a random "phase shift". $$ X(t) = A\cos(2\pi t + \Theta) $$ where $A$ and $\Theta$ are independent, $A$ is equally likely to be 0.5, 1, or 2, and $\Theta$ has a Uniform(0,$2\pi$) distribution. 
Note: to define $\cos(2\pi t)$ in Python, use `cos(2 * pi * t)` ### a) Define in Symbulate the $X$ process. - Hint: use `ContinuousTimeFunction` to define the deterministic process $f(t)=t$. - However, to do some of the parts below, you'll need to explicitly define `RV` for $A$ and $\Theta$ like [here](https://dlsun.github.io/symbulate/joint.html#unpack). - Define the probability space `P` for $A$ and $\Theta$ and then define `RV` on that space and also `RandomProcess` on that space. ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### b) Simulate and plot a single sample path for $0\le t \le 3$. (You might need to change `alpha = ` if the plot is too light.) ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### c) Simulate and plot 100 sample paths. ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### d) Simulate and plot the conditional distribution of $X(1)$ given $A = 1$, and approximate its mean and variance. (Remember [this](https://dlsun.github.io/symbulate/conditioning.html#conditioning).) **Reflection question:** You might suspect that since $\Theta$ has a uniform distribution then $X(1)$ has a uniform distribution when $A=1$. But is that true? ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### e) Simulate and plot the distribution of $X(1)$, and approximate its mean and variance. **Reflection question:** Can you explain the shape? ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. 
``` ### f) Simulate and plot the joint distribution of $X(1)$ and $X(2)$ and approximate the covariance and correlation. **Reflection question:** Can you explain why the scatterplot looks the way it does?) ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### g) Simulate and plot the joint distribution of $X(1)$ and $X(1.5)$ and approximate the covariance and correlation. **Reflection question:** Can you explain why the scatterplot looks the way it does? ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ### h) Simulate and plot the joint distribution of $X(1)$ and $X(1.25)$ and approximate the covariance and correlation. **Reflection question:** Can you explain why the scatterplot looks the way it does? ``` # Type all of your code for this problem in this cell. # Feel free to add additional cells for scratch work, but they will not be graded. ``` ## Submission Instructions Before you submit this notebook, click the "Kernel" drop-down menu at the top of this page and select "Restart & Run All". This will ensure that all of the code in your notebook executes properly. Please fix any errors, and repeat the process until the entire notebook executes without any errors.
github_jupyter
# Encodings creation notebook This notebook was used to get the encodings of the images using a VGG19 ``` import json, os import random from matplotlib.image import imread import numpy as np import tensorflow as tf from PIL import Image ``` Reading dataset json files ``` img_h = 224 img_w = 224 cwd = os.getcwd() datasetName = '../input/ann-and-dl-vqa/dataset_vqa' jsonFiles = '../input/json-files' trainJsonName = 'train_data.json' validJsonName = 'valid_data.json' imagesPath = os.path.join(datasetName, 'train') trainJsonPath = os.path.join(jsonFiles, trainJsonName) validJsonPath = os.path.join(jsonFiles, validJsonName) with open(trainJsonPath,'r') as json_file_train, open (validJsonPath, 'r') as json_file_valid: data_train = json.load(json_file_train).get('questions') data_valid = json.load(json_file_valid).get('questions') json_file_train.close() json_file_valid.close() print(data_train[0]) ``` Instantiating the encoder structure ``` fe = tf.keras.applications.VGG19(include_top = False, pooling = 'avg', input_shape = (img_h,img_w,3), weights = 'imagenet') for l in fe.layers: l.trainable = False model = tf.keras.Sequential() model.add(fe) model.summary() ``` ### Get the encoding of the training and validation dataset ``` train_images = set([el['image_filename'] for el in data_train]) valid_images = set([el['image_filename'] for el in data_valid]) len_train = len(train_images) len_valid = len(valid_images) print('taken the ' + str(len(valid_images)/len(train_images))+' of validation') train_tensor_map = {} i = 0 for imagename in train_images: print('{:3.2f} %'.format(i/len_train * 100), end = '\r') image = Image.open(os.path.join(imagesPath, imagename)).resize((img_w, img_h)).convert('RGB') img = np.array(image).astype(np.float32) / 255 res = model.predict(x = np.expand_dims(img,0)) train_tensor_map[str(imagename)] = res.tolist() i = i + 1 json.dump(train_tensor_map, open("train_tensors_VGG19_GAP.json","w"), indent=2) valid_tensor_map = {} i = 0 for imagename in 
valid_images: print('{:3.2f} %'.format(i/len_valid * 100), end = '\r') image = Image.open(os.path.join(imagesPath, imagename)).resize((img_w, img_h)).convert('RGB') img = np.array(image).astype(np.float32) / 255 res = model.predict(x = np.expand_dims(img,0)) valid_tensor_map[str(imagename)] = res.tolist() i = i + 1 json.dump(valid_tensor_map, open("valid_tensors_VGG19_GAP.json","w"), indent=2) ``` ### Get the encoding of the test data ``` testJsonName = 'test_data.json' imagesPathTest = os.path.join(datasetName,'test') testJsonPath = os.path.join(datasetName, testJsonName) with open (testJsonPath, 'r') as json_file_test: data_test = json.load(json_file_test).get('questions') json_file_test.close() test_images = set([el['image_filename'] for el in data_test]) len_test = len(test_images) test_tensor_map = {} i = 0 for imagename in test_images: print('{:3.2f} %'.format(i/len_test * 100),end = '\r') image = Image.open(os.path.join(imagesPathTest, imagename)).resize((img_w, img_h)).convert('RGB') img = np.array(image).astype(np.float32) / 255 res = model.predict(x = np.expand_dims(img,0)) test_tensor_map[str(imagename)] = res.tolist() i = i + 1 json.dump(test_tensor_map, open("test_tensors_VGG19_GAP.json","w"), indent=2) ```
github_jupyter
<a href="https://colab.research.google.com/github/yohanesnuwara/pyreservoir/blob/master/notebooks/reservoir_pressure_analytical_solution_notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Reservoir Pressure Analytical Solution (John W. Lee's Approximation) This notebook solves for pressure distribution in the reservoir (across some radial distance from the centre of wellbore) due to wellbore production, using an approximation method proposed by John W. Lee. Find Chapter 6-4 (Constant Rate Solutions, Infinite Reservoir with Line-Source Well) in Brian F. Towler's book. This solution is used for the assumption of NO FLOW B.C. at outer boundary and INFINITE-ACTING flow conditions (means reservoir is very large). ``` !git clone https://github.com/yohanesnuwara/pyreservoir import numpy as np import matplotlib.pyplot as plt import sys sys.path.append("/content/pyreservoir/fluid_flow") from flow_solutions import * plt.style.use("dark_background") ``` Example given here is taken from Example 6-3 of Brian F. Towler's book. Given the following reservoir conditions: * Porosity 17.5% * Permeability 0.12 micro-m2 * Oil viscosity 2.5 cp * Wellbore radius 8 cm * Reservoir extent 1,000 m * Initial reservoir pressure 7,900 kPa * Total compressibility 2x10^-6 kPa^-1 * Wellbore flowing rate 40 m3/day * Oil FVF 1.405 RB/STB First, calculate the pressure at a point located 1 m away from wellbore after 24 hours. 
``` # known poro = 0.175 k = 0.12 # in micro.m2 h = 10 # in m mu_oil = 2.5 # in cP rw = 8 # in cm pi = 7900 # in kPa re = 1000 # in m ct = 2E-06 # in kPa^-1 q = 40 # in std m3/d Bo = 1.405 # in res m3/std m3 # conversion k = k / 9.869233E-4 # 1 mD = 9.869233E-4 micro.m2 h = h / 3.048E-01 # 1 m = 3.048E-01 ft rw = rw / (1E+02 * 3.048E-01) # from cm to ft pi = pi / 6.894757 # 1 psi = 6.894757 kPa re = re / 3.048E-01 # 1 m = 3.048E-01 ft ct = ct / (1 / 6.894757) # from kPa^-1 to psi^-1 q = q / (1.589873E-01) # from m3 to barrel time, distance = 24, 1*3.281-rw print("Time : {} hours".format(time)) print("Distance from wellbore : {:.2f} ft".format(distance)) t_finite_acting = time_finite_acting(re, rw, poro, mu_oil, ct, k) td, Pd, Pwf = constant_terminal_rate(time, distance, re, rw, pi, q, poro, ct, k, h, mu_oil, Bo) print("Finite acting time : {:.2f} hours \n".format(t_finite_acting)) print("Calculation result at time {} hour and radius {:.2f} ft from wellbore \n".format(time, distance)) print("Wellbore flowing pressure (pwf) : {:.2f} psia".format(Pwf)) print("Dimensionless time : {:.2f}".format(td)) print("Dimensionless pressure : {:.2f}".format(Pd)) ``` The finite acting time is 506 hours, means that until 24 hours flow is INFINITE-ACTING. Therefore, this method is still VALID. Plot pressure distribution from 0 to 5 days (120 hours) at distance from 0 to 1 km away from the wellbore. 
``` time = np.linspace(0*24, 5*24, 100) # r = np.arange(1, 1001, 1) r = np.linspace(1, 1000, 100) r = r * 3.281 # convert distance from m to ft distance = [i-rw for i in r] # Solve for Pwf M, N = len(time), len(distance) Pwf = np.empty((M, N)) for i in range(len(time)): for j in range(len(distance)): Pwf[i,j] = constant_terminal_rate(time[i], distance[j], re, rw, pi, q, poro, ct, k, h, mu_oil, Bo)[2] # Plot solution from mpl_toolkits.mplot3d import Axes3D tt, rr = np.meshgrid(time, r) fig = plt.figure() #adjust 3D Figure ax = Axes3D(fig) ax.plot_surface(tt,rr,Pwf.T, cmap="jet", linewidth=0) ax.set_title('Spatio-temporal Plot of Reservoir Pressure due to Wellbore Production', size=16, pad=15) ax.set_xlabel('Time [hour]', labelpad=10) ax.set_ylabel('Radial Distance [ft]', labelpad=10) ax.set_zlabel('Reservoir Pressure [psia]', labelpad=10) # Colorbar handler import matplotlib.cm as cm m = cm.ScalarMappable(cmap=cm.jet) m.set_array(Pwf) plt.colorbar(m) plt.show() ```
github_jupyter
# <center>LECTURE OVERVIEW</center> --- ## By the end of the day you'll be able to: - filter container elements using a combination of a `for` loop and an `if` statement - update values in a nested `list` using double `for` loops (aka nested loop) - use `if` statements with `list` and `dictionary` comprehensions - sort a `list` of numbers using a nested loop and an `if` statement # <center>CONTROL FLOW</center> --- Control flow is the 'grammar' of programing languages. Today we combine for loops and if statements to do more complex operations # Recap - `if` statement ```python if var1_bool: # by default python check for True condition # do something elif var2_bool: # do something else ... else: # if none of the conditions above were true # do something else ``` # Recap - `for` loop ```python for item in container: # do something with the item ``` # Filter Container Elements ## <font color='LIGHTGRAY'>By the end of the day you'll be able to:</font> - **filter container elements using a combination of a `for` loop and an `if` statement** - <font color='LIGHTGRAY'>update values in a nested list using double for loops (aka nested loop)</font> - <font color='LIGHTGRAY'>use if statements with list and dictionary comprehensions</font> - <font color='LIGHTGRAY'>sort a list of numbers using a nested loop and an if statement</font> ``` my_lst = ['ambivert', 24, 4567, 582, 78, 'calcspar', 'deaness', 12, 675, 'entrete', 'gades'] str_lst = [] for item in my_lst: if type(item) == str: # indented once str_lst.append(item) # indented twice print(item) # indented once so this line is outside the if statement print(str_lst) # no indent, this line is performed once the for loop is done people_dict = {} people_dict['Andras'] = {'age': 36, 'singer': False} people_dict['Rihanna'] = {'age': 28, 'singer': True} people_dict['Madonna'] = {'age': 62, 'singer': True} people_dict['Ashley'] = {'age': 30, 'singer': False} people_dict['Shawn'] = {'age': 22, 'singer': True} singers_lst = [] for 
key, val in people_dict.items(): print(key, val) if val['singer']: singers_lst.append(key) print(singers_lst) ``` ### **<font color='GREEN'> Exercise</font>** Collect all numbers from `my_lst` into `nums_lst`. ``` my_lst = ['ambivert', 24, 4567, 582, 78, 'calcspar', 'deaness', 12, 675, 'entrete', 'gades'] # TODO: insert solution here ``` ### **<font color='GREEN'> Exercise</font>** Collect everyone's name who is older than 30 from `people_dict` into `oldies_lst`. :) ``` # TODO: insert solution here ``` # Nested `for` Loops ## <font color='LIGHTGRAY'>By the end of the day you'll be able to:</font> - <font color='LIGHTGRAY'>filter container elements using a combination of a for loop and an if statement</font> - **update values in a nested `list` using double `for` loops (aka nested loop)** - <font color='LIGHTGRAY'>use if statements with list and dictionary comprehensions</font> - <font color='LIGHTGRAY'>sort a list of numbers using a nested loop and an if statement</font> ``` # each item in the list contains the birth years of people living in the same household birthyears_lst = [ [1976, 1956, 2013], [1989, 2002], [1954, 1978, 1928, 2009, 1938], [2001], [1978, 2000, 2015, 1981, 1995] ] # let's print out each number for item in birthyears_lst: print('current item:', item) for num in item: print(' num in item:', num) num += 1 print(' num + 1:', num) print('item after update:', item) print(birthyears_lst) ``` If you want to modify a list element, you need to reference them by index. 
``` i = 2 j = 4 print(birthyears_lst[i][j]) # just a reminder how to index nested lists for i in range(len(birthyears_lst)): print('current i:', i, birthyears_lst[i]) for j in range(len(birthyears_lst[i])): print(' before i:', i, 'j:', j, 'element:', birthyears_lst[i][j]) birthyears_lst[i][j] = birthyears_lst[i][j] + 1 print(' after i:', i, 'j:', j, 'element:', birthyears_lst[i][j]) print('after update i:', i, birthyears_lst[i]) print(birthyears_lst) ``` Let's create a list that contains everyone's age and has the same shape as `birthyears_lst`. ``` ages_lst = [] current_year = 2021 for i in range(len(birthyears_lst)): print('sublist before:', birthyears_lst[i]) ages_lst.append([]) for j in range(len(birthyears_lst[i])): print(' before:', birthyears_lst[i][j]) ages_lst[i].append(current_year - birthyears_lst[i][j]) print(' after:', ages_lst[i][j]) print('sublist after:', ages_lst[i]) print(ages_lst) ``` ### **<font color='GREEN'> Exercise</font>** Create a `boolean` nested list of the same shape as `ages_lst` and make the items `True` if someone is 18 or older and `False` otherwise. Call this list `can_vote_lst`. 
``` # TODO: insert solution here ``` # Conditional Statements and Comprehensions ## <font color='LIGHTGRAY'>By the end of the day you'll be able to:</font> - <font color='LIGHTGRAY'>filter container elements using a combination of a for loop and an if statement</font> - <font color='LIGHTGRAY'>update values in a nested list using double for loops (aka nested loop)</font> - **use `if` statements with `list` and `dictionary` comprehensions** - <font color='LIGHTGRAY'>sort a list of numbers using a nested loop and an if statement</font> Only `if` conditions: ```python [f(item) for item in container if condition] ``` ```python {f(key): g(val) for key, val in dictionary.items() if condition} ``` ``` nums_lst = [5, 4, 90, 60] print([num for num in nums_lst if num > 50]) nums_dict = {'five': 5, 'four': 4, 'ninety': 90, 'sixty': 60} print({name: num for name, num in nums_dict.items() if num > 50}) ``` `if` and `else` conditions: ```python [f(item) if condition else g(item) for item in container] ``` ```python { (f(key) if condition else g(key)):(h(val) if condition else k(val)) for key, val in dictionary.items() } ``` ``` nums_lst = [5, 4, 90, 60] print( [ True if num > 50 else False for num in nums_lst ] ) nums_dict = {'five': 5, 'four': 4, 'ninety': 90, 'sixty': 60} print( { name: True if num > 50 else False for name, num in nums_dict.items() } ) ``` # Sorting ## <font color='LIGHTGRAY'>By the end of the day you'll be able to:</font> - <font color='LIGHTGRAY'>filter container elements using a combination of a for loop and an if statement</font> - <font color='LIGHTGRAY'>update values in a nested list using double for loops (aka nested loop)</font> - <font color='LIGHTGRAY'>use if statements with list and dictionary comprehensions</font> - **sort a `list` of numbers using a nested loop and an `if` statement** Given a list of numbers, rearrange the elements such that they are in increasing order. 
[Bubble sort](https://en.wikipedia.org/wiki/Bubble_sort) is a simple sorting algorithm that steps through the list, compares adjacent elements and swaps them if they are in the wrong order. After each iteration, one less element is needed to be compared until there are no more elements left to be compared. ![](data/Bubble-sort-example.gif) Although bubble sort is inefficient, it is a good algorithm to learn about nesting loops and conditional statements. ``` nums_lst = [34, 7, 1335, 8] print("starting list:", str(nums_lst)) for i in range(len(nums_lst)): for j in range(i): i_str = 'nums_lst[i=' + str(i) + '] = ' + str(nums_lst[i]) j_str = 'nums_lst[j=' + str(j) + '] = ' + str(nums_lst[j]) print( 'is', i_str, '<', j_str, '?' ) if nums_lst[i] < nums_lst[j]: print(' swap') temp = nums_lst[i] nums_lst[i] = nums_lst[j] nums_lst[j] = temp else: print(' no swap') print(' current list:', nums_lst) print("sorted list:", nums_lst) ``` ### **<font color='GREEN'> Exercise</font>** Rewrite the bubble sort algorithm above to order strings in a list ascii-betically. Test your algorithm on `str_lst`. ``` str_lst = [ 'thank', 'library', 'knot', 'bath' ] ``` Hint 1: You can compare strings with greater than (>) and smaller than (<). For example 'ba' < 'bb' is True, try it. Hint 2: The smaller than operation will tell you if two items in a list needs to be swapped. ``` # TODO: insert solution here # >>> ['bath', 'knot', 'library', 'thank'] ``` # Conclusion ## You are now able to: - filter container elements using a combination of a `for` loop and an `if` statement - update values in a nested `list` using double `for` loops (aka nested loop) - use `if` statements with `list` and `dictionary` comprehensions - sort a `list` of numbers using a nested loop and an `if` statement
github_jupyter
## 1. Loading the NIPS papers <p>The NIPS conference (Neural Information Processing Systems) is one of the most prestigious yearly events in the machine learning community. At each NIPS conference, a large number of research papers are published. Over 50,000 PDF files were automatically downloaded and processed to obtain a dataset on various machine learning techniques. These NIPS papers are stored in <code>datasets/papers.csv</code>. The CSV file contains information on the different NIPS papers that were published from 1987 until 2017 (30 years!). These papers discuss a wide variety of topics in machine learning, from neural networks to optimization methods and many more. <img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_158/img/nips_logo.png" alt="The logo of NIPS (Neural Information Processing Systems)"></p> <p>First, we will explore the CSV file to determine what type of data we can use for the analysis and how it is structured. A research paper typically consists of a title, an abstract and the main text. Other data such as figures and tables were not extracted from the PDF files. Each paper discusses a novel technique or improvement. In this analysis, we will focus on analyzing these papers with natural language processing methods.</p> ``` # Importing modules # -- YOUR CODE HERE -- import pandas as pd # Read datasets/papers.csv into papers papers = pd.read_csv('datasets/papers.csv') # Print out the first rows of papers # -- YOUR CODE HERE -- papers.head(5) ``` ## 2. Preparing the data for analysis <p>For the analysis of the papers, we are only interested in the text data associated with the paper as well as the year the paper was published in.</p> <p>We will analyze this text data using natural language processing. 
Since the file contains some metadata such as id's and filenames, it is necessary to remove all the columns that do not contain useful text information.</p> ``` # Remove the columns # -- YOUR CODE HERE -- papers.drop(['id', 'event_type', 'pdf_name'], axis=1, inplace=True) # Print out the first rows of papers # -- YOUR CODE HERE -- papers.head(5) ``` ## 3. Plotting how machine learning has evolved over time <p>In order to understand how the machine learning field has recently exploded in popularity, we will begin by visualizing the number of publications per year. </p> <p>By looking at the number of published papers per year, we can understand the extent of the machine learning 'revolution'! Typically, this significant increase in popularity is attributed to the large amounts of compute power, data and improvements in algorithms.</p> ``` # Group the papers by year groups = papers.groupby('year') # Determine the size of each group counts = groups.size() # Visualise the counts as a bar plot import matplotlib.pyplot %matplotlib inline # -- YOUR CODE HERE -- counts.plot(kind='bar') ``` ## 4. Preprocessing the text data <p>Let's now analyze the titles of the different papers to identify machine learning trends. First, we will perform some simple preprocessing on the titles in order to make them more amenable for analysis. We will use a regular expression to remove any punctuation in the title. Then we will perform lowercasing. We'll then print the titles of the first rows before and after applying the modification.</p> ``` # Load the regular expression library # -- YOUR CODE HERE -- import re # Print the titles of the first rows print(papers['title'].head()) # Remove punctuation papers['title_processed'] = papers['title'].map(lambda x: re.sub('[,\.!?]', '', x)) # Convert the titles to lowercase papers['title_processed'] = papers['title_processed'].str.lower() # Print the processed titles of the first rows # -- YOUR CODE HERE -- papers.head(5) ``` ## 5. 
A word cloud to visualize the preprocessed text data <p>In order to verify whether the preprocessing happened correctly, we can make a word cloud of the titles of the research papers. This will give us a visual representation of the most common words. Visualisation is key to understanding whether we are still on the right track! In addition, it allows us to verify whether we need additional preprocessing before further analyzing the text data.</p> <p>Python has a massive number of open libraries! Instead of trying to develop a method to create word clouds ourselves, we'll use Andreas Mueller's <a href="http://amueller.github.io/word_cloud/">wordcloud library</a>.</p> ``` # Import the wordcloud library # -- YOUR CODE HERE -- import wordcloud # Join the different processed titles together. s = " " long_string = s.join(papers['title_processed']) # Create a WordCloud object wc = wordcloud.WordCloud() # Generate a word cloud # -- YOUR CODE HERE -- wc.generate(long_string) # Visualize the word cloud wc.to_image() ``` ## 6. Prepare the text for LDA analysis <p>The main text analysis method that we will use is latent Dirichlet allocation (LDA). LDA is able to perform topic detection on large document sets, determining what the main 'topics' are in a large unlabeled set of texts. A 'topic' is a collection of words that tend to co-occur often. The hypothesis is that LDA might be able to clarify what the different topics in the research titles are. These topics can then be used as a starting point for further analysis.</p> <p>LDA does not work directly on text data. First, it is necessary to convert the documents into a simple vector representation. This representation will then be used by LDA to determine the topics. Each entry of a 'document vector' will correspond with the number of times a word occurred in the document. In conclusion, we will convert a list of titles into a list of vectors, all with length equal to the vocabulary. 
For example, <em>'Analyzing machine learning trends with neural networks.'</em> would be transformed into <code>[1, 0, 1, ..., 1, 0]</code>.</p> <p>We'll then plot the 10 most common words based on the outcome of this operation (the list of document vectors). As a check, these words should also occur in the word cloud.</p> ``` # Load the library with the CountVectorizer method from sklearn.feature_extraction.text import CountVectorizer import numpy as np import matplotlib.pyplot as plt # Helper function def plot_10_most_common_words(count_data, count_vectorizer): words = count_vectorizer.get_feature_names() total_counts = np.zeros(len(words)) for t in count_data: total_counts+=t.toarray()[0] count_dict = (zip(words, total_counts)) count_dict = sorted(count_dict, key=lambda x:x[1], reverse=True)[0:10] words = [w[0] for w in count_dict] counts = [w[1] for w in count_dict] x_pos = np.arange(len(words)) plt.bar(x_pos, counts,align='center') plt.xticks(x_pos, words, rotation=90) plt.xlabel('words') plt.ylabel('counts') plt.title('10 most common words') plt.show() # Initialise the count vectorizer with the English stop words count_vectorizer = CountVectorizer(stop_words = 'english') # Fit and transform the processed titles count_data = count_vectorizer.fit_transform(papers['title']) # Visualise the 10 most common words # -- YOUR CODE HERE -- ``` ## 7. Analysing trends with LDA <p>Finally, the research titles will be analyzed using LDA. Note that in order to process a new set of documents (e.g. news articles), a similar set of steps will be required to preprocess the data. The flow that was constructed here can thus easily be exported for a new text dataset.</p> <p>The only parameter we will tweak is the number of topics in the LDA algorithm. Typically, one would calculate the 'perplexity' metric to determine which number of topics is best and iterate over different amounts of topics until the lowest 'perplexity' is found. 
For now, let's play around with a different number of topics. From there, we can distinguish what each topic is about ('neural networks', 'reinforcement learning', 'kernel methods', 'gaussian processes', etc.).</p> ``` import warnings warnings.simplefilter("ignore", DeprecationWarning) # Load the LDA model from sk-learn from sklearn.decomposition import LatentDirichletAllocation as LDA # Helper function def print_topics(model, count_vectorizer, n_top_words): words = count_vectorizer.get_feature_names() for topic_idx, topic in enumerate(model.components_): print("\nTopic #%d:" % topic_idx) print(" ".join([words[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) # Tweak the two parameters below (use int values below 15) number_topics = 10 number_words = 10 # Create and fit the LDA model lda = LDA(n_components=number_topics) lda.fit(count_data) # Print the topics found by the LDA model print("Topics found via LDA:") print_topics(lda, count_vectorizer, number_words) ``` ## 8. The future of machine learning <p>Machine learning has become increasingly popular over the past years. The number of NIPS conference papers has risen exponentially, and people are continuously looking for ways on how they can incorporate machine learning into their products and services.</p> <p>Although this analysis focused on analyzing machine learning trends in research, a lot of these techniques are rapidly being adopted in industry. Following the latest machine learning trends is a critical skill for a data scientist, and it is recommended to continuously keep learning by going through blogs, tutorials, and courses.</p> ``` # The historical data indicates that: more_papers_published_in_2018 = True ```
github_jupyter
# Scheduling Machine Learning Pipelines using Apache Airflow In this workshop, you will use Airflow to schedule a basic machine learning pipeline. The workshop consists of 3 assignments. 1. Schedule a basic 'hello world' example on Airflow 2. Schedule a machine learning pipeline on Airflow 3. Improve the pipeline by creating your own custom Airflow operator Along the way, you will come across these blockquotes (as shown below). In there, we provide supplemental information related to the assignments you are doing. This information is not necessary to understand the assignment, but offers additional reading material for Airflow enthusiasts. > This is a blockquote with supplemental information! You are provided with a random username for this workshop. This username is used to interact with the resources on AWS. Your user can be retrieved from the environment variable `WORKSHOP_USER`. Execute the cell below to find out your user. ``` import os os.environ['WORKSHOP_USER'] ``` ## Assignment 1: Hello World In this assignment, we are going to schedule a simple workflow on Airflow to get used to the concepts. Airflow uses the following concepts for workflows: - DAG ([directed acyclic graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph)): a description of the order in which work should take place - Operator: a class that acts as a template for carrying out some work - Task: a parameterized instance of an operator - Task Instance: a task that 1. has been assigned to a DAG and 2. has a state associated with a specific run of the DAG In the image below, you can see an example of an Airflow DAG. Each box in the image represents a task. Arrows between the boxes indicate a dependency. For this DAG, task "run_after_loop" has to wait for "runme_0", "runme_1", and "runme_2", and "run_this_last" has to wait for "also_run_this" and "run_after_loop". 
![](https://images.ctfassets.net/be04ylp8y0qc/5Q0bUH9SmNXdyiWpug1ArJ/040a27277e334849930ece4c60f2058a/1_tctnw7FxqwmcwWS1bdKHpw.png?fm=jpg) The code below defines a DAG in Airflow. The DAG has the following properties: - The DAG starts on 2019-11-27 and runs every day at midnight - This DAG covers 2 tasks. 1 of them is an instance of the [`PythonOperator`](https://github.com/apache/airflow/blob/1.10.4/airflow/operators/python_operator.py), and 1 is an instance of the [`BashOperator`](https://github.com/apache/airflow/blob/1.10.4/airflow/operators/bash_operator.py). Both tasks will simply print a word to the logs. - Task 'print_hello' runs first. Then, task 'print_world' will run Inspect the code and try to understand how the properties above are defined: ``` import datetime from airflow.models import DAG from airflow.operators.python_operator import PythonOperator from airflow.operators.bash_operator import BashOperator with DAG( dag_id='hello_world', schedule_interval='@daily', start_date=datetime.datetime(2019, 11, 27) ) as dag: print_hello_operator = PythonOperator( task_id='print_hello', python_callable=print, op_args=['hello'] ) print_world_operator = BashOperator( task_id='print_world', bash_command='echo world' ) print_hello_operator >> print_world_operator ``` > In this DAG, we use the `'@daily'` schedule interval, which means that the DAG will run every day at midnight. For more complex scheduling, a [cron expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) can be used. > The bitshift operator `>>` is overloaded for operators in Airflow. It is used to define dependencies between tasks in a DAG. For more information about defining relationships between tasks, check out the [bitshift composition](https://airflow.apache.org/concepts.html#bitshift-composition) and [relationship helper](https://airflow.apache.org/concepts.html#relationship-helper) in the Airflow documentation. 
This is everything we need to do to define a DAG that can be scheduled on Airflow. Because DAGs are defined using Python, we have a lot of freedom in how we want to design our DAG. We could, for example, dynamically create tasks by looping over lists. Furthermore, defining your DAGs as code makes it easy to keep track of their version in a source code management system. The next step is to actually run this example on Airflow. The Airflow scheduler periodically scans a folder, called 'the DagBag', for files that define DAGs. There is a folder in this jupyter notebook server, called `dags`, which is also present at the airflow scheduler via a network file system. Any python file containing a DAG definition that we put there, will be picked up by the scheduler. - Copy the code snippet above. Go to the dags folder in the file explorer, press the 'New' button in the upper right corner, and create a **Text File**. Name it `hello_world.py`. The name of the file is flexible, but should end with `.py`. Paste the copied code inside. - Go to the airflow UI, which is located at `/<your username>/airflow` at the address you are currently on. You should see the DAG view, which looks similar to the screen below (with fewer DAGs shown). Refresh the page regularly, as this does not happen automatically. Your DAG should appear within a few seconds. ![](https://airflow.apache.org/_images/dags.png) - The DAG is turned off at first. Turn it on and frequently refresh the page to see what happens. In the 'Recent Tasks' and 'DAG Runs' columns, you will see the circles changing. Hover over them to see what they mean. After a while, only the leftmost, dark green circles are filled. They show that all tasks and DAGs have completed successfully. The start date of our DAG was 27-11-2019 (2 days ago), and we see 2 completed DAG runs. Airflow automatically processes historical DAG runs if a DAG's start date lies in the past. 
This is a very useful feature for processing historical data for example. - Click on 'hello_world' in the 'DAG' column. You are brought to the tree view, showing all DAG runs and task instances and their status. They are all dark green, meaning they all completed successfully. The tree view looks as follows. ![](https://airflow.apache.org/_images/tree.png) - Press the lower left, dark green square, which is the 'print_hello' task instance of the DAG run of 27-11-2019. The task instance context menu with various actions that can be performed on the task instance will pop up. The popup is shown below. Press 'View Log'. This brings you to the logs produced by the task instance. We can also see that it has printed 'hello' somewhere in the logs, as we expected from this task instance. ![](https://airflow.apache.org/_images/context.png) - Click on 'Graph View'. Here we can see the graphical representation of our DAG. We can see that 'print_hello' is followed by 'print_world', as we defined this in the DAG definition above. Below you can see an example graph view of a DAG. ![](https://airflow.apache.org/_images/graph.png) Congratulations, you have scheduled your first DAG on Airflow, and learned the basic features of the Airflow UI. This DAG does not do anything useful yet though. In the next assignment, we will create a DAG that will process actual data. ## Assignment 2: Machine Learning Pipeline In this assignment, we will create an Airflow DAG to schedule a basic machine learning pipeline. The pipeline will use the famous Iris dataset and consists of 2 steps: 1. Preprocess the dataset by adding some new features 2. Train a predictive model on the dataset The goal is to schedule this training pipeline on a regular interval. This makes sure your model gets updated frequently with the latest data. > In this example, we will use the same dataset for every run, but in reality you would like to use a new dataset every time the pipeline is run. 
This can be done using Airflow's [templating](https://airflow.apache.org/macros.html) mechanism. You are provided with 2 scripts located in the folder `transform_scripts`. Each script transforms an input file and stores the transformed data in an output file. The locations of the input and output files are provided as arguments to the scripts. The first script, `preprocess.py`, takes a CSV with raw training data, and outputs a CSV with preprocessed training data. The second script, `train.py`, takes a CSV with preprocessed training data, and outputs a pickled machine learning model. Also, you are provided with an [S3 bucket](https://aws.amazon.com/s3/) containing our raw training data. S3 is a simple storage service available on Amazon AWS, allowing one to store and retrieve files in a scalable way. Let's use the [boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html?id=docs_gateway) python package to list the resources contained in our S3 bucket: ``` import boto3 import os def list_s3(): user = os.environ['WORKSHOP_USER'] bucket = f'pydata-eindhoven-2019-airflow-{user}' client = boto3.client('s3', region_name='eu-west-1') return [o['Key'] for o in client.list_objects(Bucket=bucket)['Contents']] list_s3() ``` We can see that the raw training data is already present in our bucket, along with some raw unlabeled data. Since this DAG should train a machine learning model, we are going to ignore the unlabeled data for now. Our DAG should do the following: 1. Retrieve the raw training data from S3, apply the `preprocess.py` transform script to it, and send the preprocessed CSV back to S3 2. Retrieve the preprocessed training data from S3, apply the `train.py` transform script to it, and send the pickled model to S3 Airflow's built-in [`S3FileTransformOperator`](https://github.com/apache/airflow/blob/1.10.4/airflow/operators/s3_file_transform_operator.py) will do exactly what we want. The operator does the following things: 1. 
Download a file from S3 to a temporary file on the Airflow worker machine 2. Apply a transform script to the file, producing a new, temporary file 3. Upload the output file to S3 Inspect the DAG below. It uses the `S3FileTransformOperator` to first preprocess the data, and then train the model. We see that the connection ID's to S3 are set to `'s3'`. The Airflow [connections](https://airflow.apache.org/howto/connection/index.html) feature allows one to securely store connection information to external systems in the Airflow database. The connection to your bucket is already setup for you, and has the name 's3'. This name is specified in the operators, to let Airflow know which connection to use. Your own S3 bucket for this workshop is already configured in the DAG definition. As in assignment 1, copy the contents of the cell below. In the `dags` folder, create a new file called `ml_pipeline.py`, and paste the copied content inside. This DAG also uses the transform scripts. Therefore, we also need to copy the `transform_scripts` folder into the `dags` folder. This can be done by executing the cell below. > Usually, you want to avoid adding a lot of files that do not define DAGs to your DAG folder. Airflow scans this folder regularly to load new DAGs. The more files are in there, the bigger the performance hit will be. Consider storing files like transform scripts outside of the DAGs folder, or use the [`.airflowignore`](https://airflow.apache.org/concepts.html#airflowignore) file, which works similarly to a `.gitignore` file. You could even install your python dependencies on the Airflow host using pip. 
``` !cp -R transform_scripts dags/transform_scripts import datetime from airflow.models import DAG from airflow.operators.s3_file_transform_operator import S3FileTransformOperator import os user = os.environ['WORKSHOP_USER'] bucket = f'pydata-eindhoven-2019-airflow-{user}' dag_folder = os.path.dirname(os.path.abspath(__file__)) with DAG( dag_id='ml_pipeline', schedule_interval='@daily', start_date=datetime.datetime(2019, 11, 27) ) as dag: preprocess_operator = S3FileTransformOperator( task_id='preprocess', transform_script=f'{dag_folder}/transform_scripts/preprocess.py', source_s3_key=f's3://{bucket}/raw_training_data.csv', dest_s3_key=f's3://{bucket}/preprocessed_training_data.csv', source_aws_conn_id='s3', dest_aws_conn_id='s3', replace=True ) train_operator = S3FileTransformOperator( task_id='train', transform_script=f'{dag_folder}/transform_scripts/train.py', source_s3_key=f's3://{bucket}/preprocessed_training_data.csv', dest_s3_key=f's3://{bucket}/trained_model.pkl', source_aws_conn_id='s3', dest_aws_conn_id='s3', replace=True ) preprocess_operator >> train_operator ``` In the Airflow UI, on port 8080, you should see your second DAG appear very soon. As in the first assignment, turn it on and refresh regularly to see the DAG runs succeed. > We ran this pipeline on a single machine for this workshop. For large datasets or a large number of jobs, this does not scale well. Since Airflow is not a big data processing tool, a recommendation is to push computation to external systems as much as possible. For this example, we could decide to run our workload for example [AWS Sagemaker](https://aws.amazon.com/sagemaker/). > > Also, Airflow provides various options for making it scalable. An example is the Kubernetes executor, which will spawn Kubernetes pods for every task instances ran. This allows Airflow to schedule a large number of parallel tasks, with as much resources as required for the task. 
To get started with this, check out [this blog](https://towardsdatascience.com/kubernetesexecutor-for-airflow-e2155e0f909c) by Brecht de Vlieger. Now that the DAG runs are finished, we can check which files are generated on S3. We can use the previously created `list_s3` function to do so: ``` list_s3() ``` As we can see, the raw data files are there, as well as the newly created preprocessed data and pickled model files. Since we kept the file names constant, the second DAG run has overwritten the files of the first DAG run. In this exercise, we have scheduled a machine learning pipeline, which transforms raw training data into a trained model. This model could now be served somewhere (e.g. using a web service), or used to make batch predictions (e.g. with Airflow). The next assignment is a bonus assignment, where you will improve the pipeline above by creating your own Airflow operator. ## \[BONUS\] Assignment 3: Custom Operator In the previous assignment, we have used the `S3FileTransformOperator` to transform our raw training data into a trained model. A logical next step would be to use the trained model to make some batch predictions. In order to achieve this, we need to add 2 tasks to our previous DAG: 1. Preprocess the raw unlabeled data (to add the new features to it) 2. Use the trained model to make predictions on the preprocessed unlabeled data The first task looks a lot like the preprocessing step of our training data, and should be trivial to add. For the second task, we need to download 2 files from S3 (the unlabeled data and the model), and need to use the model to make a prediction (using the scikit-learn estimator's `.predict` method). Both of these steps are new, and cannot easily be achieved by using the `S3FileTransformOperator` again. Luckily, Airflow provides an easy way to create custom operators. Create your own operator for the prediction task. Let's take the `S3FileTransformOperator` code as inspiration. 
The code can be found [here](https://github.com/apache/airflow/blob/1.10.4/airflow/operators/s3_file_transform_operator.py). An Airflow operator has a few properties: - It inherits from `BaseOperator`. - It has an `__init__` method, where all inputs are defined. - It overrides the `execute` method of `BaseOperator`, which is the method that gets executed when the task instance is triggered. - It often uses one or more hooks to communicate with external services. In our case, this is the [`S3Hook`](https://github.com/apache/airflow/blob/1.10.4/airflow/hooks/S3_hook.py). Once you created the operator, extend the DAG from assignment 2 with the 2 tasks specified above. Like before, copy the DAG into a text file in the `dags` folder, and inspect the result in the Airflow UI. A backbone for your DAG is provided below. After the DAG has finished running, check the results using the `list_s3` function. In case you get stuck, an example answer is stored in the `answers` folder. ``` import datetime from airflow.models import DAG, BaseOperator from airflow.operators.s3_file_transform_operator import S3FileTransformOperator from airflow.exceptions import AirflowException from airflow.hooks.S3_hook import S3Hook from airflow.utils.decorators import apply_defaults import sys import os from tempfile import NamedTemporaryFile import pandas as pd import pickle class S3PredictionOperator(BaseOperator): template_fields = ('source_s3_key', 'dest_s3_key') template_ext = () ui_color = '#f9c915' @apply_defaults def __init__( self, source_s3_key, model_s3_key, dest_s3_key, aws_conn_id='aws_default', verify=None, replace=False, *args, **kwargs): super().__init__(*args, **kwargs) self.source_s3_key = source_s3_key self.model_s3_key = model_s3_key self.dest_s3_key = dest_s3_key self.aws_conn_id = aws_conn_id self.verify = verify self.replace = replace self.output_encoding = sys.getdefaultencoding() def execute(self, context): # TODO: create your execute function. 
You can ignore the context parameter user = os.environ['WORKSHOP_USER'] bucket = f'pydata-eindhoven-2019-airflow-{user}' dag_folder = os.path.dirname(os.path.abspath(__file__)) with DAG( dag_id='custom_operator', schedule_interval='@daily', start_date=datetime.datetime(2019, 11, 27) ) as dag: preprocess_train_operator = S3FileTransformOperator( task_id='preprocess_train', transform_script=f'{dag_folder}/transform_scripts/preprocess.py', source_s3_key=f's3://{bucket}/raw_training_data.csv', dest_s3_key=f's3://{bucket}/preprocessed_training_data.csv', source_aws_conn_id='s3', dest_aws_conn_id='s3', replace=True ) train_operator = S3FileTransformOperator( task_id='train', transform_script=f'{dag_folder}/transform_scripts/train.py', source_s3_key=f's3://{bucket}/preprocessed_training_data.csv', dest_s3_key=f's3://{bucket}/trained_model.pkl', source_aws_conn_id='s3', dest_aws_conn_id='s3', replace=True ) # TODO preprocess your unlabeled data # TODO make predictions on your unlabeled data preprocess_train_operator >> train_operator # TODO create the correct dependencies for your newly created operators ``` In this assignment you created your own Airflow operator. The only thing you needed to do was to inherit from `BaseOperator` and override the `execute` method. By using this approach, we created an operator that makes predictions for us on unlabeled data. Additionally, we extended the DAG from assignment 2 with more tasks, resulting in a DAG with more complex dependencies. ## Summary In this workshop, you learned what Airflow is, how to schedule your own Workflows with it, and how to use it for training machine learning models. As a bonus, you learned how to create your own operators that hold custom logic for the tasks you need to execute. This was just the tip of the iceberg, as the Airflow ecosystem offers lots of other features for creating production-grade data processing pipelines. 
This notebook contains various resources for further reading, in case you are interested. For questions, feel free to reach out to us. Axel Goblet (axel.goblet@bigdatarepublic.nl) Dick Abma (dick.abma@bigdatarepublic.nl) https://www.bigdatarepublic.nl/ ![](https://www.bigdatarepublic.nl/wp-content/uploads/2019/05/BDR_Logo_RGB_no_whitespace.jpg)
github_jupyter
<a href="https://colab.research.google.com/github/oughtinc/ergo/blob/notebooks-readme/covid-19-average-lockdown.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Setup ``` !pip install --quiet poetry # Fixes https://github.com/python-poetry/poetry/issues/532 !pip install --quiet git+https://github.com/oughtinc/ergo.git !pip install --quiet pendulum requests !pip install --quiet torch %load_ext google.colab.data_table import ergo import pendulum import requests import torch import pandas as pd from typing import List from pendulum import Date, Duration def get_questions_for_cat(cat): r = requests.get(f"https://pandemic.metaculus.com/api2/questions/?search=cat:{cat}") return r.json() def get_question_ids_for_cat(cat): qs = get_questions_for_cat(cat) return [q["id"] for q in qs["results"]] def show_related_questions(related_question_ids): related_questions = [] for id in related_question_ids: # try-except there b/c some "questions" aren't really questions and throw erros w/ metaculus # like https://pandemic.metaculus.com/questions/3957/lockdown-series-when-will-life-return-to-normal-ish/ try: rq = metaculus.get_question(id) related_questions.append(rq) except: pass def needs_attention(related_question): if not question.my_predictions: return True else: period = pendulum.from_timestamp( related_question.prediction_timeseries[-1]["t"]) - \ pendulum.from_timestamp(question.my_predictions["predictions"][-1]["t"] ) return period.in_seconds() > 1 def direction(q): i = 1 have_gone_far_back_enough = True while (have_gone_far_back_enough): try: period = pendulum.from_timestamp( q.prediction_timeseries[-i]["t"]) - \ pendulum.from_timestamp(question.my_predictions["predictions"][-1]["t"] ) except: i = i-1 break if (period.in_seconds() < 1): have_gone_far_back_enough = False else: i = i+1 if (isinstance(q.prediction_timeseries[-i]["community_prediction"], float)): old = 
q.prediction_timeseries[-i]["community_prediction"] new = q.prediction_timeseries[-1]["community_prediction"] return old - new # if (q.prediction_timeseries[-2]["community_prediction"] > q.prediction_timeseries[-1]["community_prediction"]): # return new - old # elif (q.prediction_timeseries[-2]["community_prediction"] > q.prediction_timeseries[-1]["community_prediction"]): # return "+" # else: # return "=" else: old = q.prediction_timeseries[-i]["community_prediction"]["q2"] new = q.prediction_timeseries[-1]["community_prediction"]["q2"] return old - new # if (q.prediction_timeseries[-2]["community_prediction"]["q2"] > q.prediction_timeseries[-1]["community_prediction"]["q2"]): # return "-" # elif (q.prediction_timeseries[-2]["community_prediction"]["q2"] > q.prediction_timeseries[-1]["community_prediction"]["q2"]): # return "+" # else: # return "=" related_questions_data = [ [ related_question.id, needs_attention(related_question), float(direction(related_question)), related_question.data["possibilities"]["scale"]["min"], related_question.data["possibilities"]["scale"]["max"], (pendulum.period(pendulum.parse(related_question.data["possibilities"]["scale"]["min"]), pendulum.parse(related_question.data["possibilities"]["scale"]["max"])) * float(direction(related_question))).in_hours(), related_question.title, f"https://pandemic.metaculus.com{related_question.page_url}"] for related_question in related_questions ] print(related_questions[0].data["possibilities"]["scale"]["max"]) print(related_questions[0].data["possibilities"]["scale"]["min"]) df = pd.DataFrame(related_questions_data, columns=["id", "changed", "degree of change", "min", "max", "period", "title", "link"]) return df.set_index("id") def relate_questions_in_cat(cat): related_question_ids = get_question_ids_for_cat(cat) return show_related_questions(related_question_ids) ``` # Questions Here is the question we want to forecast: ``` question_data = { "id": 3925, "name": "How many days will the average 
American spend under lockdown between 2020-03-25 and 2020-04-24", } metaculus = ergo.Metaculus(username="ought", password="", api_domain="pandemic") # metaculus = ergo.Metaculus(username="oughttest", password="6vCo39Mz^rrb", api_domain="pandemic") question = metaculus.get_question(question_data["id"], name=question_data["name"]) df = pd.DataFrame([[question.id, question.name]], columns=["id", "name"]) df.set_index("id") # p = pendulum.instance(question.last_activity_time) - pendulum.from_timestamp(question.prediction_timeseries[-1]["t"]) # p.in_seconds() < 1 ``` # Data Data: https://www.nytimes.com/interactive/2020/us/coronavirus-stay-at-home-order.html Manually copied in on 2020-04-05 Updated on 2020-04-08: * Added three regions to Oklahoma: Claremore, Moore, and Sallisaw. * Added South Carolina state-wide lockdown ``` M = 1000000 USA_data = { "start": False, "pop": 327.2 * M, "regions": { "Alabama": { "start": pendulum.Date(2020, 4, 4), "pop": 4.9 * M }, "Alaska": { "start": pendulum.Date(2020, 3, 31), "pop": 0.737 * M }, "Arizona": { "start": pendulum.Date(2020, 3, 31), "pop": 7.2 * M }, "California": { "start": pendulum.Date(2020, 3, 19), "pop": 39.6 * M }, "Colorado": { "start": pendulum.Date(2020, 3, 26), "pop": 5.7 * M }, "Connecticut": { "start": pendulum.Date(2020, 3, 23), "pop": 3.6 * M }, "Delaware": { "start": pendulum.Date(2020, 3, 24), "pop": 0.973 * M }, "District of Columbia": { "start": pendulum.Date(2020, 4, 1), "pop": 0.702 * M }, "Florida": { "start": pendulum.Date(2020, 4, 3), "pop": 21.5 * M }, "Georgia": { "start": pendulum.Date(2020, 4, 3), "pop": 10.6 * M }, "Hawaii": { "start": pendulum.Date(2020, 3, 25), "pop": 1.4 * M }, "Idaho": { "start": pendulum.Date(2020, 3, 25), "pop": 1.8 * M }, "Illinois": { "start": pendulum.Date(2020, 3, 21), "pop": 12.7 * M }, "Indiana": { "start": pendulum.Date(2020, 3, 24), "pop": 6.7 * M }, "Kansas": { "start": pendulum.Date(2020, 3, 30), "pop": 2.9 * M }, "Kentucky": { "start": pendulum.Date(2020, 3, 26), 
"pop": 4.5 * M }, "Louisiana": { "start": pendulum.Date(2020, 3, 23), "pop": 4.6 * M }, "Maine": { "start": pendulum.Date(2020, 4, 2), "pop": 1.3 * M }, "Maryland": { "start": pendulum.Date(2020, 3, 30), "pop": 6 * M }, "Massachusetts": { "start": pendulum.Date(2020, 3, 24), "pop": 6.9 * M }, "Michigan": { "start": pendulum.Date(2020, 3, 24), "pop": 10 * M }, "Minnesota": { "start": pendulum.Date(2020, 3, 27), "pop": 5.6 * M }, "Mississippi": { "start": pendulum.Date(2020, 4, 3), "pop": 3 * M }, "Missouri": { "start": pendulum.Date(2020, 4, 6), "pop": 6.1 * M }, "Montana": { "start": pendulum.Date(2020, 3, 28), "pop": 1.1 * M }, "Nevada": { "start": pendulum.Date(2020, 4, 1), "pop": 3.1 * M }, "New Hampshire": { "start": pendulum.Date(2020, 3, 27), "pop": 1.4 * M }, "New Jersey": { "start": pendulum.Date(2020, 3, 21), "pop": 8.9 * M }, "New Mexico": { "start": pendulum.Date(2020, 3, 24), "pop": 2.1 * M }, "New York": { "start": pendulum.Date(2020, 3, 22), "pop": 19.5 * M }, "North Carolina": { "start": pendulum.Date(2020, 3, 30), "pop": 10.4 * M }, "Ohio": { "start": pendulum.Date(2020, 3, 23), "pop": 11.7 * M }, "Oklahoma": { "start": False, "pop": 3.9 * M, "regions": { "Claremore": { "start": pendulum.Date(2020, 4, 6), "pop": 0.019 * M }, "Edmond": { "start": pendulum.Date(2020, 3, 30), "pop": 0.093 * M }, "Moore": { "start": pendulum.Date(2020, 4, 4), "pop": 0.062 * M }, "Norman": { "start": pendulum.Date(2020, 3, 25), "pop": 0.123 * M }, "Oklahoma City": { "start": pendulum.Date(2020, 3, 28), "pop": 0.649 * M }, "Sallisaw": { "start": pendulum.Date(2020, 4, 4), "pop": 0.009 * M }, "Stillwater": { "start": pendulum.Date(2020, 3, 30), "pop": 0.05 * M }, "Tulsa": { "start": pendulum.Date(2020, 3, 28), "pop": 0.401 * M } } }, "Oregon": { "start": pendulum.Date(2020, 3, 23), "pop": 4.2 * M }, "Pennsylvania": { "start": pendulum.Date(2020, 4, 1), "pop": 12.8 * M }, "Puerto Rico": { "start": pendulum.Date(2020, 3, 15), "pop": 3.2 * M }, "Rhode Island": { "start": 
pendulum.Date(2020, 3, 28), "pop": 1.1 * M }, "South Carolina": { "start": pendulum.Date(2020, 4, 7), "pop": 5.1 * M, "regions": { "Charleston": { "start": pendulum.Date(2020, 3, 26), "pop": 0.136 * M }, "Columbia": { "start": pendulum.Date(2020, 3, 29), "pop": 0.133 * M } } }, "Tennessee": { "start": pendulum.Date(2020, 3, 31), "pop": 6.8 * M }, "Texas": { "start": pendulum.Date(2020, 4, 2), "pop": 29 * M }, "Utah": { "start": False, "pop": 3.2 * M, "regions": { "Davis County": { "start": pendulum.Date(2020, 4, 1), "pop": 0.352 * M }, "Salt Lake County": { "start": pendulum.Date(2020, 3, 30), "pop": 1.2 * M }, "Summit County": { "start": pendulum.Date(2020, 3, 27), "pop": 0.042 * M } } }, "Vermont": { "start": pendulum.Date(2020, 3, 25), "pop": 0.626 * M }, "Virginia": { "start": pendulum.Date(2020, 3, 30), "pop": 8.5 * M }, "Washington": { "start": pendulum.Date(2020, 3, 23), "pop": 7.5 * M }, "West Virginia": { "start": pendulum.Date(2020, 3, 24), "pop": 1.8 * M }, "Wisconsin": { "start": pendulum.Date(2020, 3, 25), "pop": 5.8 * M }, "Wyoming": { "start": False, "pop": 0.578 * M, "regions": { "Jackson": { "start": pendulum.Date(2020, 3, 28), "pop": 0.01 * M } } } } } ``` # Assumptions * Every place that is currently on lockdown had no subregion on lockdown prior to implementing the region-wide lockdown. We know this is false, but it keeps the model simple. * Every place that is currently on lockdown will remain on lockdown through at least April 25. * Any state that isn't fully locked down will have a 1% chance of entering a lockdown every day. 
``` chance_of_full_lockdown_transition = 0.01 ``` # Related Questions ``` relate_questions_in_cat("internal--lockdown-series") ``` # Model ``` def get_pop_in_lockdown_on_date(region, date): is_in_future = date > pendulum.now().date() if (region["start"] and region["start"] < date): return region["pop"] elif (is_in_future and ergo.flip(chance_of_full_lockdown_transition)): region["start"] = pendulum.now().date() return region["pop"] elif ("regions" in region): pop_in_lockdown = 0 for region_key in region["regions"]: sub_region = region["regions"][region_key] pop_in_lockdown += get_pop_in_lockdown_on_date(sub_region, date) return pop_in_lockdown else: return 0 import numpy as np def get_avg_pop_in_lockdown_over_range(region, start_date, end_date): period = pendulum.period(start_date, end_date) arr = np.array([]) for dt in period.range("days"): arr = np.append(arr, get_pop_in_lockdown_on_date(region, dt)) return np.mean(arr) def get_avg_proportion_in_lockdown_over_range(region, start_date, end_date): return get_avg_pop_in_lockdown_over_range(region, start_date, end_date) / region["pop"] ``` We need to make a deep copy of the region data for each model so that model-specific changes don't carry over to the next run. ``` import copy def model(): usa_region = copy.deepcopy(USA_data) avg = get_avg_proportion_in_lockdown_over_range(usa_region, pendulum.Date(2020, 3, 25), pendulum.Date(2020, 4, 25)) ergo.tag(torch.Tensor([avg*32]), question.name) # 3/25 to 4/25 inclusive is 32 days samples = ergo.run(lambda: model(), num_samples=10) samples ``` # Analysis Histogram: ``` samples.hist(column=question.name) ``` Summary stats: ``` samples.describe() ``` # Submit predictions Convert samples to Metaculus distributions and visualize: ``` if question.name in samples: question.show_submission(samples[question.name]) else: print(f"No predictions for {question.name}") print("\n\n") ``` If everything looks good, submit the predictions! 
``` # def submit_all(): # for question in questions: # if question.name in samples: # try: # params = question.submit_from_samples(samples[question.name]) # print(f"Submitted for {question.name}") # print(f"https://pandemic.metaculus.com{question.page_url}") # except requests.exceptions.HTTPError as e: # print(f"Couldn't make prediction for {question.name} -- maybe this question is now closed? See error below.") # print(e) # else: # print(f"No predictions for {question.name}") # submit_all() ``` # To do - Add to dos
github_jupyter
## Demonstrates some common TensorFlow errors This notebook demonstrates some common TensorFlow errors, how to find them, and how to fix them. ``` import tensorflow as tf print(tf.__version__) ``` # Shape error ``` def some_method(data): a = data[:,0:2] c = data[:,1] s = (a + c) return tf.sqrt(tf.matmul(s, tf.transpose(s))) with tf.Session() as sess: fake_data = tf.constant([ [5.0, 3.0, 7.1], [2.3, 4.1, 4.8], [2.8, 4.2, 5.6], [2.9, 8.3, 7.3] ]) print(sess.run(some_method(fake_data))) def some_method(data): a = data[:,0:2] print(a.get_shape()) c = data[:,1] print(c.get_shape()) s = (a + c) return tf.sqrt(tf.matmul(s, tf.transpose(s))) with tf.Session() as sess: fake_data = tf.constant([ [5.0, 3.0, 7.1], [2.3, 4.1, 4.8], [2.8, 4.2, 5.6], [2.9, 8.3, 7.3] ]) print(sess.run(some_method(fake_data))) def some_method(data): a = data[:,0:2] print(a.get_shape()) c = data[:,1:3] print(c.get_shape()) s = (a + c) return tf.sqrt(tf.matmul(s, tf.transpose(s))) with tf.Session() as sess: fake_data = tf.constant([ [5.0, 3.0, 7.1], [2.3, 4.1, 4.8], [2.8, 4.2, 5.6], [2.9, 8.3, 7.3] ]) print(sess.run(some_method(fake_data))) import tensorflow as tf x = tf.constant([[3, 2], [4, 5], [6, 7]]) print("x.shape", x.shape) expanded = tf.expand_dims(x, 1) print("expanded.shape", expanded.shape) sliced = tf.slice(x, [0, 1], [2, 1]) print("sliced.shape", sliced.shape) with tf.Session() as sess: print("expanded: ", expanded.eval()) print("sliced: ", sliced.eval()) ``` # Vector vs scalar ``` def some_method(data): print(data.get_shape()) a = data[:,0:2] print(a.get_shape()) c = data[:,1:3] print(c.get_shape()) s = (a + c) return tf.sqrt(tf.matmul(s, tf.transpose(s))) with tf.Session() as sess: fake_data = tf.constant([5.0, 3.0, 7.1]) print(sess.run(some_method(fake_data))) def some_method(data): print(data.get_shape()) a = data[:,0:2] print(a.get_shape()) c = data[:,1:3] print(c.get_shape()) s = (a + c) return tf.sqrt(tf.matmul(s, tf.transpose(s))) with tf.Session() as sess: fake_data = 
tf.constant([5.0, 3.0, 7.1]) fake_data = tf.expand_dims(fake_data, 0) print(sess.run(some_method(fake_data))) ``` # Type error ``` def some_method(a, b): s = (a + b) return tf.sqrt(tf.matmul(s, tf.transpose(s))) with tf.Session() as sess: fake_a = tf.constant([ [5.0, 3.0, 7.1], [2.3, 4.1, 4.8], ]) fake_b = tf.constant([ [2, 4, 5], [2, 8, 7] ]) print(sess.run(some_method(fake_a, fake_b))) def some_method(a, b): b = tf.cast(b, tf.float32) s = (a + b) return tf.sqrt(tf.matmul(s, tf.transpose(s))) with tf.Session() as sess: fake_a = tf.constant([ [5.0, 3.0, 7.1], [2.3, 4.1, 4.8], ]) fake_b = tf.constant([ [2, 4, 5], [2, 8, 7] ]) print(sess.run(some_method(fake_a, fake_b))) ``` # TensorFlow debugger Wrap your normal Session object with tf_debug.LocalCLIDebugWrapperSession ``` import tensorflow as tf from tensorflow.python import debug as tf_debug def some_method(a, b): b = tf.cast(b, tf.float32) s = (a / b) s2 = tf.matmul(s, tf.transpose(s)) return tf.sqrt(s2) with tf.Session() as sess: fake_a = [ [5.0, 3.0, 7.1], [2.3, 4.1, 4.8], ] fake_b = [ [2, 0, 5], [2, 8, 7] ] a = tf.placeholder(tf.float32, shape=[2, 3]) b = tf.placeholder(tf.int32, shape=[2, 3]) k = some_method(a, b) # Note: won't work without the ui_type="readline" argument because # Datalab is not an interactive terminal and doesn't support the default "curses" ui_type. # If you are running this a standalone program, omit the ui_type parameter and add --debug # when invoking the TensorFlow program # --debug (e.g: python debugger.py --debug ) sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type="readline") sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan) print(sess.run(k, feed_dict = {a: fake_a, b: fake_b})) ``` In the tfdbg> window that comes up, try the following: * run -f has_inf_or_nan * Notice that several tensors are dumped once the filter criterion is met * List the inputs to a specific tensor: * li transpose:0 * Print the value of a tensor * pt transpose:0 * Where is the inf? 
Visit https://www.tensorflow.org/programmers_guide/debugger for usage details of tfdbg ## tf.Print() Create a python script named debugger.py with the contents shown below. ``` %%writefile debugger.py import tensorflow as tf def some_method(a, b): b = tf.cast(b, tf.float32) s = (a / b) print_ab = tf.Print(s, [a, b]) s = tf.where(tf.is_nan(s), print_ab, s) return tf.sqrt(tf.matmul(s, tf.transpose(s))) with tf.Session() as sess: fake_a = tf.constant([ [5.0, 3.0, 7.1], [2.3, 4.1, 4.8], ]) fake_b = tf.constant([ [2, 0, 5], [2, 8, 7] ]) print(sess.run(some_method(fake_a, fake_b))) ``` ### Execute the python script ``` %%bash python debugger.py ```
github_jupyter
``` import os import sys import numpy as np import pandas as pd import matplotlib.pyplot as plt from mpl_toolkits import mplot3d %matplotlib inline from sklearn import linear_model from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error module_path = os.path.abspath(os.path.join('../../py-conjugated/')) if module_path not in sys.path: sys.path.append(module_path) import network_utils as nuts ``` ### Load in all of the .csv files containing the measurements of morphological domains ``` csv_file_path = '/Volumes/Tatum_SSD-1/Grad_School/m2py/Morphology_labels/OPV_morph_maps/3_component/' all_files = os.listdir(csv_file_path) # print (all_files) files = [fl for fl in all_files if fl[-1] == 'v'] print (len(files)) print (files) df = pd.read_csv(csv_file_path+files[0]) df.head() ``` ### For each time-temperature point: - Sort domains by their phase - Calculate average and standard deviation of domain descriptors - Append those values to a phase-specific DataFrame ## Need to make sure domain also has time, temp, substrate, and device tags!! 
``` # Define dataframes to hold statistics for each phase final_df = pd.DataFrame(columns = ['Anneal_time', 'Anneal_temp', 'Substrate', 'Device', 'p1_fraction', 'p2_fraction', 'p3_fraction', 'p1_area_avg', 'p1_area_stdev', 'p1_filled_area_avg', 'p1_filled_area_stdev', 'p1_extent_avg', 'p1_extent_stdev', 'p1_MajorAL_avg', 'p1_MajorAL_stdev', 'p1_MinorAL_avg', 'p1_MinorAL_stdev', 'p1_Ecc_avg', 'p1_Ecc_stdev','p1_Orient_avg', 'p1_Orient_stdev', 'p1_Perim_avg', 'p1_Perim_stdev', 'p2_area_avg', 'p2_area_stdev', 'p2_filled_area_avg', 'p2_filled_area_stdev', 'p2_extent_avg', 'p2_extent_stdev', 'p2_MajorAL_avg', 'p2_MajorAL_stdev', 'p2_MinorAL_avg', 'p2_MinorAL_stdev', 'p2_Ecc_avg', 'p2_Ecc_stdev','p2_Orient_avg', 'p2_Orient_stdev', 'p2_Perim_avg', 'p2_Perim_stdev', 'p3_area_avg', 'p3_area_stdev', 'p3_filled_area_avg', 'p3_filled_area_stdev', 'p3_extent_avg', 'p3_extent_stdev', 'p3_MajorAL_avg', 'p3_MajorAL_stdev', 'p3_MinorAL_avg', 'p3_MinorAL_stdev', 'p3_Ecc_avg', 'p3_Ecc_stdev','p3_Orient_avg', 'p3_Orient_stdev', 'p3_Perim_avg', 'p3_Perim_stdev']) #For each file, sort by GMM label, compute avg and stdev of each column, append values to phase summary dataframes for fl in files: df = pd.read_csv(csv_file_path+fl) df.rename(columns = {'Unnamed: 0': 'Domain_label', 'label': 'GMM_label'}, inplace = True) # Obtain anneal time and temperature from filename if 'NOANNEAL' in fl: anl_time = 0 anl_temp = 0 else: temp_stop_indx = fl.index('C') anl_temp = int(fl[:temp_stop_indx]) time_start_indx = temp_stop_indx+2 time_stop_indx = fl.index('m') time_stop_indx = time_stop_indx anl_time = fl[time_start_indx:time_stop_indx] anl_time = int(anl_time) # Obtain substrate and device numnbers from filename sub = 0 dev = 0 if fl.find('Sub') != -1: sub_indeces = fl.index('Sub') sub_index = sub_indeces+3 sub = fl[sub_index] if fl.find('postexam') != -1: dev = 3 else: dev_indeces = fl.index('Dev') dev_index = dev_indeces+3 dev = fl[dev_index] elif fl.find('Sub') == -1: sub_indeces = 
fl.index('S') sub_index = sub_indeces+1 sub = fl[sub_index] dev_indeces = fl.index('D') dev_index = dev_indeces+1 dev = fl[dev_index] else: print ('woops...substrate and device location went screwy') rel_areas = nuts.relative_areas(df) temp_p1 = df[df['GMM_label'] == 1] temp_p2 = df[df['GMM_label'] == 2] temp_p3 = df[df['GMM_label'] == 3] avg1 = temp_p1['area'].mean() std1 = temp_p1['area'].std() avg2 = temp_p1['filled_area'].mean() std2 = temp_p1['filled_area'].std() avg3 = temp_p1['extent'].mean() std3 = temp_p1['extent'].std() avg4 = temp_p1['major_axis_length'].mean() std4 = temp_p1['major_axis_length'].std() avg5 = temp_p1['minor_axis_length'].mean() std5 = temp_p1['minor_axis_length'].std() avg6 = temp_p1['eccentricity'].mean() std6 = temp_p1['eccentricity'].std() avg7 = temp_p1['orientation'].mean() std7 = temp_p1['orientation'].std() avg8 = temp_p1['perimeter'].mean() std8 = temp_p1['perimeter'].std() avg9 = temp_p2['area'].mean() std9 = temp_p2['area'].std() avg10 = temp_p2['filled_area'].mean() std10 = temp_p2['filled_area'].std() avg11 = temp_p2['extent'].mean() std11 = temp_p2['extent'].std() avg12 = temp_p2['major_axis_length'].mean() std12 = temp_p2['major_axis_length'].std() avg13 = temp_p2['minor_axis_length'].mean() std13 = temp_p2['minor_axis_length'].std() avg14 = temp_p2['eccentricity'].mean() std14 = temp_p2['eccentricity'].std() avg15 = temp_p2['orientation'].mean() std15 = temp_p2['orientation'].std() avg16 = temp_p2['perimeter'].mean() std16 = temp_p2['perimeter'].std() avg17 = temp_p3['area'].mean() std17 = temp_p3['area'].std() avg18 = temp_p3['filled_area'].mean() std18 = temp_p3['filled_area'].std() avg19 = temp_p3['extent'].mean() std19 = temp_p3['extent'].std() avg20 = temp_p3['major_axis_length'].mean() std20 = temp_p3['major_axis_length'].std() avg21 = temp_p3['minor_axis_length'].mean() std21 = temp_p3['minor_axis_length'].std() avg22 = temp_p3['eccentricity'].mean() std22 = temp_p3['eccentricity'].std() avg23 = 
temp_p3['orientation'].mean() std23 = temp_p3['orientation'].std() avg24 = temp_p3['perimeter'].mean() std24 = temp_p3['perimeter'].std() final_df = final_df.append(pd.Series({'Anneal_time':anl_time, 'Anneal_temp':anl_temp, 'Substrate':sub, 'Device':dev, 'p1_fraction':rel_areas[0], 'p2_fraction':rel_areas[1], 'p3_fraction':rel_areas[2], 'p1_area_avg':avg1, 'p1_area_stdev':std1, 'p1_filled_area_avg':avg2, 'p1_filled_area_stdev':std2, 'p1_extent_avg':avg3, 'p1_extent_stdev':std3, 'p1_MajorAL_avg':avg4, 'p1_MajorAL_stdev':std4, 'p1_MinorAL_avg':avg5, 'p1_MinorAL_stdev':std5, 'p1_Ecc_avg':avg6, 'p1_Ecc_stdev':std6,'p1_Orient_avg':avg7, 'p1_Orient_stdev':std7, 'p1_Perim_avg':avg8, 'p1_Perim_stdev':std8, 'p2_area_avg':avg9, 'p2_area_stdev':std9, 'p2_filled_area_avg':avg10, 'p2_filled_area_stdev':std10, 'p2_extent_avg':avg11, 'p2_extent_stdev':std11, 'p2_MajorAL_avg':avg12, 'p2_MajorAL_stdev':std12, 'p2_MinorAL_avg':avg13, 'p2_MinorAL_stdev':std13, 'p2_Ecc_avg':avg14, 'p2_Ecc_stdev':std14, 'p2_Orient_avg':avg15, 'p2_Orient_stdev':std15, 'p2_Perim_avg':avg16, 'p2_Perim_stdev':std16, 'p3_area_avg':avg17, 'p3_area_stdev':std17, 'p3_filled_area_avg':avg18, 'p3_filled_area_stdev':std18, 'p3_extent_avg':avg19, 'p3_extent_stdev':std19, 'p3_MajorAL_avg':avg20, 'p3_MajorAL_stdev':std20, 'p3_MinorAL_avg':avg21, 'p3_MinorAL_stdev':std21, 'p3_Ecc_avg':avg22, 'p3_Ecc_stdev':std22, 'p3_Orient_avg':avg23, 'p3_Orient_stdev':std23, 'p3_Perim_avg':avg24, 'p3_Perim_stdev':std24}), ignore_index = True) print('x') ``` ## Need to add all areas together and calculate normalized phase ratios for each sample ``` final_df.head() x1 = final_df['Anneal_time'] y1 = final_df['Anneal_temp'] z1 = final_df['p1_MajorAL_avg'] fig1 = plt.figure(figsize = (10,8)) ax1 = plt.axes(projection = '3d') plt.xlabel('Time (min)') plt.ylabel('Temperature (º)') plt.title('Phase 1 Major Axis Length') ax1.scatter3D(x1,y1,z1) ax1.view_init(25, 255) plt.show() x2 = final_df['Anneal_time'] y2 = final_df['Anneal_temp'] z2 = 
final_df['p2_MajorAL_avg'] fig2 = plt.figure(figsize = (10,8)) ax2 = plt.axes(projection = '3d') plt.xlabel('Time (min)') plt.ylabel('Temperature (º)') plt.title('Phase 2 Major Axis Length') ax2.scatter3D(x2,y2,z2) ax2.view_init(25, 255) plt.show() x3 = final_df['Anneal_time'] y3 = final_df['Anneal_temp'] z3 = final_df['p3_MajorAL_avg'] fig3 = plt.figure(figsize = (10,8)) ax3 = plt.axes(projection = '3d') plt.xlabel('Time (min)') plt.ylabel('Temperature (º)') plt.title('Phase 3 Major Axis Length') ax3.scatter3D(x3,y3,z3) ax3.view_init(25, 255) plt.show() ``` # Now we need to read in the device data and associate it with its morphology descriptors # This cell was for initially converting everything to pandas. A new, formatted & filtered spreadsheet now exists ``` device_df = pd.read_excel('/Users/wesleytatum/Desktop/py-conjugated/data/OPV_device.xlsx') device_df = device_df.fillna(method = 'ffill') # B/c of excel formatting, NaN values arise from merged cells. Use previous value to fill NaN's device_df device_df['Substrate'] = 0 for i in range(len(device_df['File Name'])): fl = device_df['File Name'][i] sub_indeces = 0 sub_index = 0 sub = 0 print (fl) if fl.find('Sub') != -1: sub_indeces = fl.index('Sub') sub_index = sub_indeces+3 sub = fl[sub_index] # print (sub) elif fl.find('SUB') != -1: sub_indeces = fl.index('SUB') sub_index = sub_indeces+3 sub = fl[sub_index] # print (sub) elif fl.find('NA') != -1: device_df['Time (min)'] = 0 devie_df['Temp (C)'] = 0 else: print ('woops...substrate and device location went screwy') device_df['Substrate'][i] = int(sub) ``` # This cell utilizes the formatted & filtered spreadsheet ``` device_df = pd.read_excel('/Users/wesleytatum/Desktop/py-conjugated/data/OPV_device_df.xlsx') print(device_df.shape) device_df.head() # device_df.to_excel('/Users/wesleytatum/Desktop/OPV_device_df.xlsx') x1 = device_df['Time (min)'] y1 = device_df['Temp (C)'] z1 = device_df['PCE'] fig1 = plt.figure(figsize = (10,8)) ax1 = plt.axes(projection = 
'3d') plt.xlabel('Time (min)') plt.ylabel('Temperature (º)') plt.title('PCE vs. Annealing Conditions') ax1.scatter3D(x1,y1,z1) ax1.view_init(25, 245) plt.show() x1 = device_df['Time (min)'] y1 = device_df['Temp (C)'] z1 = device_df['VocL'] fig1 = plt.figure(figsize = (10,8)) ax1 = plt.axes(projection = '3d') plt.xlabel('Time (min)') plt.ylabel('Temperature (º)') plt.title('Voc vs. Annealing Conditions') ax1.scatter3D(x1,y1,z1) ax1.view_init(25, 245) plt.show() x1 = device_df['Time (min)'] y1 = device_df['Temp (C)'] z1 = device_df['Jsc'] fig1 = plt.figure(figsize = (10,8)) ax1 = plt.axes(projection = '3d') plt.xlabel('Time (min)') plt.ylabel('Temperature (º)') plt.title('Jsc vs. Annealing Conditions') ax1.scatter3D(x1,y1,z1) ax1.view_init(25, 245) plt.show() x1 = device_df['Time (min)'] y1 = device_df['Temp (C)'] z1 = device_df['FF'] fig1 = plt.figure(figsize = (10,8)) ax1 = plt.axes(projection = '3d') plt.xlabel('Time (min)') plt.ylabel('Temperature (º)') plt.title('FF vs. Annealing Conditions') ax1.scatter3D(x1,y1,z1) ax1.view_init(25, 245) plt.show() # device_df.to_excel('~/Desktop/OPV_device_df.xlsx') ``` # Now that the data is read in plottable, we need to use all of these parameters to predict device performance - Create a new df with only columns needed for models - Make sure device data is properly matched with morphology data (via time, temp, substrate, device) - Want to fit a function that predicts PCE, Jsc, Voc, and FF from morphology and processing conditions $$ PCE, Jsc, Voc, FF = f(anneal time, anneal temp, major axis, minor axis, perimenter, orientation, eccentricity) $$ The problem is that there are way more devices tested than AFM images taken and processed. So, for now, the dataframe will only contain device data for those devices with morphology data too. In the full model (_i.e._ the neural network model), I'll have to find a way to use all data in training. 
``` # total_df = Phase1 + Phase2 + Phase3 + device_df Phase1['GMM_label'] = 1 Phase2['GMM_label'] = 2 Phase3['GMM_label'] = 3 total_df = Phase1.copy() print (total_df.shape) print (total_df.head()) total_df = total_df.append(Phase2) print (total_df.shape) print (total_df.head()) total_df = total_df.append(Phase3) print (total_df.shape) print (total_df.head()) total_df['PCE'] = 0 total_df['VocL'] = 0 total_df['Jsc'] = 0 total_df['FF'] = 0 total_df.head() # for each row in total_df, find row in device_df with same time, temp, sub, and dev. then append PCE, Jsc, Voc, and FF values to total_df final_df['PCE'] = 0 final_df['VocL'] = 0 final_df['Jsc'] = 0 final_df['FF'] = 0 NA_count = 0 # NA = no anneal, whose filenames have different schemes for i in range(len(final_df['Anneal_time'])): print (i) pce = 0 voc = 0 jsc = 0 ff = 0 time = final_df['Anneal_time'].iloc[i] temp = final_df['Anneal_temp'].iloc[i] sub = final_df['Substrate'].iloc[i] sub = int(sub) dev = final_df['Device'].iloc[i] dev = int(dev) time_df = device_df[device_df['Time (min)'] == time] temp_df = time_df[time_df['Temp (C)'] == temp] sub_df = temp_df[temp_df['Substrate'] == sub] dev_row = sub_df[sub_df['Device'] == dev] if dev_row.shape[0] != 0: pce = dev_row['PCE'].iloc[-1] voc = dev_row['VocL'].iloc[-1] jsc = dev_row['Jsc'].iloc[-1] ff = dev_row['FF'].iloc[-1] elif dev_row.shape[0] == 0: if time == 0: NA_df = device_df[device_df['Time (min)'] == time] pce = NA_df['PCE'].iloc[NA_count] voc = NA_df['VocL'].iloc[NA_count] jsc = NA_df['Jsc'].iloc[NA_count] ff = NA_df['FF'].iloc[NA_count] NA_count += 1 else: pass else: pass final_df['PCE'].iloc[i] = pce final_df['VocL'].iloc[i] = voc final_df['Jsc'].iloc[i] = jsc final_df['FF'].iloc[i] = ff final_df.head() final_df.to_excel('/Users/wesleytatum/Desktop/py-conjugated/data/OPV_total_df.xlsx') ``` # We have all our device and morphology data in total_df. Now we can use it to train regression models for in-depth plotting!! 
``` X = final_df[['Anneal_time', 'Anneal_temp', 'Substrate', 'Device', 'p1_fraction', 'p2_fraction', 'p3_fraction', 'p1_area_avg', 'p1_area_stdev', 'p1_filled_area_avg', 'p1_filled_area_stdev', 'p1_extent_avg', 'p1_extent_stdev', 'p1_MajorAL_avg', 'p1_MajorAL_stdev', 'p1_MinorAL_avg', 'p1_MinorAL_stdev', 'p1_Ecc_avg', 'p1_Ecc_stdev','p1_Orient_avg', 'p1_Orient_stdev', 'p1_Perim_avg', 'p1_Perim_stdev', 'p2_area_avg', 'p2_area_stdev', 'p2_filled_area_avg', 'p2_filled_area_stdev', 'p2_extent_avg', 'p2_extent_stdev', 'p2_MajorAL_avg', 'p2_MajorAL_stdev', 'p2_MinorAL_avg', 'p2_MinorAL_stdev', 'p2_Ecc_avg', 'p2_Ecc_stdev','p2_Orient_avg', 'p2_Orient_stdev', 'p2_Perim_avg', 'p2_Perim_stdev', 'p3_area_avg', 'p3_area_stdev', 'p3_filled_area_avg', 'p3_filled_area_stdev', 'p3_extent_avg', 'p3_extent_stdev', 'p3_MajorAL_avg', 'p3_MajorAL_stdev', 'p3_MinorAL_avg', 'p3_MinorAL_stdev', 'p3_Ecc_avg', 'p3_Ecc_stdev','p3_Orient_avg', 'p3_Orient_stdev', 'p3_Perim_avg', 'p3_Perim_stdev']] # all of the different features, including annealing conditions Y = final_df[['PCE', 'VocL', 'Jsc', 'FF']] # device performance x_train, x_test, y_train, y_test = train_test_split(X, Y) alphas = np.logspace(-6, 1, 200) coeffs = {} train_errors = [] test_errors = [] for i, a in enumerate(alphas): lasso = linear_model.Lasso(alpha = a) lasso.fit(x_train, y_train) y_train_pred = lasso.predict(x_train) y_test_pred = lasso.predict(x_test) train_mse = mean_squared_error(y_train, y_train_pred) test_mse = mean_squared_error(y_test, y_test_pred) coeffs[i]=lasso.coef_ train_errors.append(train_mse) test_errors.append(test_mse) pce_co1 = [] pce_co2 = [] pce_co3 = [] pce_co4 = [] pce_co5 = [] pce_co6 = [] pce_co7 = [] for k in coeffs.keys(): pce_co1.append(coeffs[k][0][0]) pce_co2.append(coeffs[k][0][1]) pce_co3.append(coeffs[k][0][2]) pce_co4.append(coeffs[k][0][3]) pce_co5.append(coeffs[k][0][4]) pce_co6.append(coeffs[k][0][5]) pce_co7.append(coeffs[k][0][6]) fig = plt.Figure(figsize = (30,20)) plt.plot(alphas, 
pce_co1, c = 'k', label = 'Anneal_time') plt.plot(alphas, pce_co2, c = 'r', label = 'Anneal_temp') plt.plot(alphas, pce_co3, c = 'b', label = 'MajorAL_avg') plt.plot(alphas, pce_co4, c = 'g', label = 'MinorAL_avg') plt.plot(alphas, pce_co5, c = 'cyan', label = 'Ecc_avg') plt.plot(alphas, pce_co6, c = 'y', label = 'Orient_avg') plt.plot(alphas, pce_co6, c = 'pink', label = 'Perim_avg') plt.legend() ax = plt.gca() ax.set_xscale('log') plt.title('PCE and parameter coefficients') plt.show() voc_co1 = [] voc_co2 = [] voc_co3 = [] voc_co4 = [] voc_co5 = [] voc_co6 = [] voc_co7 = [] for k in coeffs.keys(): voc_co1.append(coeffs[k][1][0]) voc_co2.append(coeffs[k][1][1]) voc_co3.append(coeffs[k][1][2]) voc_co4.append(coeffs[k][1][3]) voc_co5.append(coeffs[k][1][4]) voc_co6.append(coeffs[k][1][5]) voc_co7.append(coeffs[k][1][6]) fig = plt.Figure(figsize = (30,20)) plt.plot(alphas, voc_co1, c = 'k', label = 'Anneal_time') plt.plot(alphas, voc_co2, c = 'r', label = 'Anneal_temp') plt.plot(alphas, voc_co3, c = 'b', label = 'MajorAL_avg') plt.plot(alphas, voc_co4, c = 'g', label = 'MinorAL_avg') plt.plot(alphas, voc_co5, c = 'cyan', label = 'Ecc_avg') plt.plot(alphas, voc_co6, c = 'y', label = 'Orient_avg') plt.plot(alphas, voc_co6, c = 'pink', label = 'Perim_avg') plt.legend() ax = plt.gca() ax.set_xscale('log') plt.title('Voc and parameter coefficients') plt.show() jsc_co1 = [] jsc_co2 = [] jsc_co3 = [] jsc_co4 = [] jsc_co5 = [] jsc_co6 = [] jsc_co7 = [] for k in coeffs.keys(): jsc_co1.append(coeffs[k][2][0]) jsc_co2.append(coeffs[k][2][1]) jsc_co3.append(coeffs[k][2][2]) jsc_co4.append(coeffs[k][2][3]) jsc_co5.append(coeffs[k][2][4]) jsc_co6.append(coeffs[k][2][5]) jsc_co7.append(coeffs[k][2][6]) fig = plt.Figure(figsize = (30,20)) plt.plot(alphas, jsc_co1, c = 'k', label = 'Anneal_time') plt.plot(alphas, jsc_co2, c = 'r', label = 'Anneal_temp') plt.plot(alphas, jsc_co3, c = 'b', label = 'MajorAL_avg') plt.plot(alphas, jsc_co4, c = 'g', label = 'MinorAL_avg') plt.plot(alphas, 
jsc_co5, c = 'cyan', label = 'Ecc_avg') plt.plot(alphas, jsc_co6, c = 'y', label = 'Orient_avg') plt.plot(alphas, jsc_co6, c = 'pink', label = 'Perim_avg') plt.legend() ax = plt.gca() ax.set_xscale('log') plt.title('Jsc and parameter coefficients') plt.show() ff_co1 = [] ff_co2 = [] ff_co3 = [] ff_co4 = [] ff_co5 = [] ff_co6 = [] ff_co7 = [] for k in coeffs.keys(): ff_co1.append(coeffs[k][3][0]) ff_co2.append(coeffs[k][3][1]) ff_co3.append(coeffs[k][3][2]) ff_co4.append(coeffs[k][3][3]) ff_co5.append(coeffs[k][3][4]) ff_co6.append(coeffs[k][3][5]) ff_co7.append(coeffs[k][3][6]) fig = plt.Figure(figsize = (30,20)) plt.plot(alphas, ff_co1, c = 'k', label = 'Anneal_time') plt.plot(alphas, ff_co2, c = 'r', label = 'Anneal_temp') plt.plot(alphas, ff_co3, c = 'b', label = 'MajorAL_avg') plt.plot(alphas, ff_co4, c = 'g', label = 'MinorAL_avg') plt.plot(alphas, ff_co5, c = 'cyan', label = 'Ecc_avg') plt.plot(alphas, ff_co6, c = 'y', label = 'Orient_avg') plt.plot(alphas, ff_co6, c = 'pink', label = 'Perim_avg') plt.legend() ax = plt.gca() ax.set_xscale('log') plt.title('FF and parameter coefficients') plt.show() fig, ax = plt.subplots(figsize = (8,6)) plt.plot(alphas, train_errors, c = 'k', label = 'training loss') plt.plot(alphas, test_errors, c = 'r', label = 'testing loss') ax.set_xscale('log') plt.legend() plt.show() ``` # Generate a LASSO fit with the best alpha, determined from graph above. Then use it to predict each target feature and visualize ``` lasso = linear_model.Lasso(alpha = 0.00001) lasso.fit(x_train, y_train) y_train_pred = lasso.predict(x_train) y_test_pred = lasso.predict(x_test) fig1 = plt.Figure(figsize=(25,25)) plt.subplot(221) plt.scatter(x_test['Anneal_time'], y_test['PCE'], c = 'k', label = 'Ground Truth', alpha = 0.7) plt.scatter(x_test['Anneal_time'], y_test_pred[:,0], c = 'r', label = 'Predictions', alpha = 0.7) plt.legend(loc = 'lower center') plt.title('Anneal_time vs. 
PCE') plt.subplot(222) plt.scatter(x_test['Anneal_time'], y_test['VocL'], c = 'k', label = 'Ground Truth', alpha = 0.7) plt.scatter(x_test['Anneal_time'], y_test_pred[:,1], c = 'r', label = 'Predictions', alpha = 0.7) plt.legend(loc = 'lower center') plt.title('Anneal_time vs. Voc') plt.subplot(223) plt.scatter(x_test['Anneal_time'], y_test['Jsc'], c = 'k', label = 'Ground Truth', alpha = 0.7) plt.scatter(x_test['Anneal_time'], y_test_pred[:,2], c = 'r', label = 'Predictions', alpha = 0.7) plt.legend(loc = 'lower center') plt.title('Anneal_time vs. Jsc') plt.subplot(224) plt.scatter(x_test['Anneal_time'], y_test['FF'], c = 'k', label = 'Ground Truth', alpha = 0.7) plt.scatter(x_test['Anneal_time'], y_test_pred[:,3], c = 'r', label = 'Predictions', alpha = 0.7) plt.legend(loc = 'lower center') plt.title('Anneal_time vs. FF') plt.tight_layout() plt.show() print(y_test_pred) ``` ______________________________________________ ______________________________________________ # Troubleshooting and developing below here ______________________________________________ ______________________________________________ ``` for fl in files: print (fl) if fl.find('Sub') != -1: sub_indeces = fl.index('Sub') sub_index = sub_indeces+3 sub = fl[sub_index] print (sub) if fl.find('postexam') != -1: dev = 3 print (dev) else: dev_indeces = fl.index('Dev') dev_index = dev_indeces+3 dev = fl[dev_index] print (dev) elif fl.find('Sub') == -1: sub_indeces = fl.index('S') sub_index = sub_indeces+1 sub = fl[sub_index] print (sub) dev_indeces = fl.index('D') dev_index = dev_indeces+1 dev = fl[dev_index] print (dev) else: print ('woops') 100 == 100.0 time = 0 temp = 0 sub = 4 dev = 2 test = device_df[device_df['Time (min)'] == time] print (test.shape) test1 = test[test['Temp (C)'] == temp] print (test1.shape) test2 = test1[test1['Substrate'] == sub] print (test2.shape) test3 = test2[test2['Device'] == dev] print (test3.shape) test3 test1 time = 0 temp = 0 sub = 4 dev = 2 test = 
total_df[total_df['Anneal_time'] == time] print (test.shape) test1 = test[test['Anneal_temp'] == temp] print (test1.shape) test2 = test1[test1['Substrate'] == sub] print (test2.shape) test3 = test2[test2['Device'] == dev] print (test3.shape) test ```
github_jupyter
# Milestone Project 2 - Complete Walkthrough Solution This notebook walks through a proposed solution to the Blackjack Game milestone project. The approach to solving and the specific code used are only suggestions - there are many different ways to code this out, and yours is likely to be different! ## Game Play To play a hand of Blackjack the following steps must be followed: 1. Create a deck of 52 cards 2. Shuffle the deck 3. Ask the Player for their bet 4. Make sure that the Player's bet does not exceed their available chips 5. Deal two cards to the Dealer and two cards to the Player 6. Show only one of the Dealer's cards, the other remains hidden 7. Show both of the Player's cards 8. Ask the Player if they wish to Hit, and take another card 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again. 10. If a Player Stands, play the Dealer's hand. The dealer will always Hit until the Dealer's value meets or exceeds 17 11. Determine the winner and adjust the Player's chips accordingly 12. Ask the Player if they'd like to play again ## Playing Cards A standard deck of playing cards has four suits (Hearts, Diamonds, Spades and Clubs) and thirteen ranks (2 through 10, then the face cards Jack, Queen, King and Ace) for a total of 52 cards per deck. Jacks, Queens and Kings all have a rank of 10. Aces have a rank of either 11 or 1 as needed to reach 21 without busting. As a starting point in your program, you may want to assign variables to store a list of suits, ranks, and then use a dictionary to map ranks to values. ## The Game ### Imports and Global Variables ** Step 1: Import the random module. This will be used to shuffle the deck prior to dealing. Then, declare variables to store suits, ranks and values. You can develop your own system, or copy ours below. Finally, declare a Boolean value to be used to control <code>while</code> loops. 
This is a common practice used to control the flow of the game.** suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs') ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace') values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10, 'Queen':10, 'King':10, 'Ace':11} ``` import random suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs') ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace') values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10, 'Queen':10, 'King':10, 'Ace':11} playing = True ``` ### Class Definitions Consider making a Card class where each Card object has a suit and a rank, then a Deck class to hold all 52 Card objects, and can be shuffled, and finally a Hand class that holds those Cards that have been dealt to each player from the Deck. **Step 2: Create a Card Class**<br> A Card object really only needs two attributes: suit and rank. You might add an attribute for "value" - we chose to handle value later when developing our Hand class.<br>In addition to the Card's \_\_init\_\_ method, consider adding a \_\_str\_\_ method that, when asked to print a Card, returns a string in the form "Two of Hearts" ``` class Card: def __init__(self,suit,rank): self.suit = suit self.rank = rank def __str__(self): return self.rank + ' of ' + self.suit ``` **Step 3: Create a Deck Class**<br> Here we might store 52 card objects in a list that can later be shuffled. First, though, we need to *instantiate* all 52 unique card objects and add them to our list. So long as the Card class definition appears in our code, we can build Card objects inside our Deck \_\_init\_\_ method. Consider iterating over sequences of suits and ranks to build out each card. 
This might appear inside a Deck class \_\_init\_\_ method: for suit in suits: for rank in ranks: In addition to an \_\_init\_\_ method we'll want to add methods to shuffle our deck, and to deal out cards during gameplay.<br><br> OPTIONAL: We may never need to print the contents of the deck during gameplay, but having the ability to see the cards inside it may help troubleshoot any problems that occur during development. With this in mind, consider adding a \_\_str\_\_ method to the class definition. ``` class Deck: def __init__(self): self.deck = [] # start with an empty list for suit in suits: for rank in ranks: self.deck.append(Card(suit,rank)) # build Card objects and add them to the list def __str__(self): deck_comp = '' # start with an empty string for card in self.deck: deck_comp += '\n '+card.__str__() # add each Card object's print string return 'The deck has:' + deck_comp def shuffle(self): random.shuffle(self.deck) def deal(self): single_card = self.deck.pop() return single_card ``` TESTING: Just to see that everything works so far, let's see what our Deck looks like! ``` test_deck = Deck() print(test_deck) ``` Great! Now let's move on to our Hand class. **Step 4: Create a Hand Class**<br> In addition to holding Card objects dealt from the Deck, the Hand class may be used to calculate the value of those cards using the values dictionary defined above. It may also need to adjust for the value of Aces when appropriate. 
``` class Hand: def __init__(self): self.cards = [] # start with an empty list as we did in the Deck class self.value = 0 # start with zero value self.aces = 0 # add an attribute to keep track of aces def add_card(self,card): self.cards.append(card) self.value += values[card.rank] def adjust_for_ace(self): pass ``` TESTING: Before we tackle the issue of changing Aces, let's make sure we can add two cards to a player's hand and obtain their value: ``` test_deck = Deck() test_deck.shuffle() test_player = Hand() test_player.add_card(test_deck.deal()) test_player.add_card(test_deck.deal()) test_player.value ``` Let's see what these two cards are: ``` for card in test_player.cards: print(card) ``` Great! Now let's tackle the Aces issue. If a hand's value exceeds 21 but it contains an Ace, we can reduce the Ace's value from 11 to 1 and continue playing. ``` class Hand: def __init__(self): self.cards = [] # start with an empty list as we did in the Deck class self.value = 0 # start with zero value self.aces = 0 # add an attribute to keep track of aces def add_card(self,card): self.cards.append(card) self.value += values[card.rank] if card.rank == 'Ace': self.aces += 1 # add to self.aces def adjust_for_ace(self): while self.value > 21 and self.aces: self.value -= 10 self.aces -= 1 ``` We added code to the add_card method to bump self.aces whenever an ace is brought into the hand, and added code to the adjust_for_aces method that decreases the number of aces any time we make an adjustment to stay under 21. **Step 5: Create a Chips Class**<br> In addition to decks of cards and hands, we need to keep track of a Player's starting chips, bets, and ongoing winnings. This could be done using global variables, but in the spirit of object oriented programming, let's make a Chips class instead! 
``` class Chips: def __init__(self): self.total = 100 # This can be set to a default value or supplied by a user input self.bet = 0 def win_bet(self): self.total += self.bet def lose_bet(self): self.total -= self.bet ``` A NOTE ABOUT OUR DEFAULT TOTAL VALUE:<br> Alternatively, we could have passed a default total value as an parameter in the \_\_init\_\_. This would have let us pass in an override value at the time the object was created rather than wait until later to change it. The code would have looked like this: def __init__(self,total=100): self.total = total self.bet = 0 Either technique is fine, it only depends on how you plan to start your game parameters. ### Function Defintions A lot of steps are going to be repetitive. That's where functions come in! The following steps are guidelines - add or remove functions as needed in your own program. **Step 6: Write a function for taking bets**<br> Since we're asking the user for an integer value, this would be a good place to use <code>try</code>/<code>except</code>. Remember to check that a Player's bet can be covered by their available chips. ``` def take_bet(chips): while True: try: chips.bet = int(input('How many chips would you like to bet? ')) except ValueError: print('Sorry, a bet must be an integer!') else: if chips.bet > chips.total: print("Sorry, your bet can't exceed",chips.total) else: break ``` We used a <code>while</code> loop here to continually prompt the user for input until we received an integer value that was within the Player's betting limit. A QUICK NOTE ABOUT FUNCTIONS:<br> If we knew in advance what we were going to call our Player's Chips object, we could have written the above function like this: def take_bet(): while True: try: player_chips.bet = int(input('How many chips would you like to bet? 
')) except ValueError: print('Sorry, a bet must be an integer!') else: if player_chips.bet > player_chips.total: print("Sorry, your bet can't exceed",player_chips.total) else: break and then we could call the function without passing any arguments. This is generally not a good idea! It's better to have functions be self-contained, able to accept any incoming value than depend on some future naming convention. Also, this makes it easier to add players in future versions of our program! **Step 7: Write a function for taking hits**<br> Either player can take hits until they bust. This function will be called during gameplay anytime a Player requests a hit, or a Dealer's hand is less than 17. It should take in Deck and Hand objects as arguments, and deal one card off the deck and add it to the Hand. You may want it to check for aces in the event that a player's hand exceeds 21. ``` def hit(deck,hand): hand.add_card(deck.deal()) hand.adjust_for_ace() ``` **Step 8: Write a function prompting the Player to Hit or Stand**<br> This function should accept the deck and the player's hand as arguments, and assign playing as a global variable.<br> If the Player Hits, employ the hit() function above. If the Player Stands, set the playing variable to False - this will control the behavior of a <code>while</code> loop later on in our code. ``` def hit_or_stand(deck,hand): global playing # to control an upcoming while loop while True: x = input("Would you like to Hit or Stand? Enter 'h' or 's' ") if x[0].lower() == 'h': hit(deck,hand) # hit() function defined above elif x[0].lower() == 's': print("Player stands. Dealer is playing.") playing = False else: print("Sorry, please try again.") continue break ``` **Step 9: Write functions to display cards**<br> When the game starts, and after each time Player takes a card, the dealer's first card is hidden and all of Player's cards are visible. At the end of the hand all cards are shown, and you may want to show each hand's total value. 
Write a function for each of these scenarios. ``` def show_some(player,dealer): print("\nDealer's Hand:") print(" <card hidden>") print('',dealer.cards[1]) print("\nPlayer's Hand:", *player.cards, sep='\n ') def show_all(player,dealer): print("\nDealer's Hand:", *dealer.cards, sep='\n ') print("Dealer's Hand =",dealer.value) print("\nPlayer's Hand:", *player.cards, sep='\n ') print("Player's Hand =",player.value) ``` QUICK NOTES ABOUT PRINT STATEMENTS:<br> * The asterisk <code>*</code> symbol is used to print every item in a collection, and the <code>sep='\n '</code> argument prints each item on a separate line. * In the fourth line where we have print('',dealer.cards[1]) the empty string and comma are there just to add a space. - Here we used commas to separate the objects being printed in each line. If you want to concatenate strings using the <code>+</code> symbol, then you have to call each Card object's \_\_str\_\_ method explicitly, as with print(' ' + dealer.cards[1].__str__()) **Step 10: Write functions to handle end of game scenarios**<br> Remember to pass player's hand, dealer's hand and chips as needed. ``` def player_busts(player,dealer,chips): print("Player busts!") chips.lose_bet() def player_wins(player,dealer,chips): print("Player wins!") chips.win_bet() def dealer_busts(player,dealer,chips): print("Dealer busts!") chips.win_bet() def dealer_wins(player,dealer,chips): print("Dealer wins!") chips.lose_bet() def push(player,dealer): print("Dealer and Player tie! It's a push.") ``` ### And now on to the game!! ``` while True: # Print an opening statement print('Welcome to BlackJack! Get as close to 21 as you can without going over!\n\ Dealer hits until she reaches 17. 
Aces count as 1 or 11.') # Create & shuffle the deck, deal two cards to each player deck = Deck() deck.shuffle() player_hand = Hand() player_hand.add_card(deck.deal()) player_hand.add_card(deck.deal()) dealer_hand = Hand() dealer_hand.add_card(deck.deal()) dealer_hand.add_card(deck.deal()) # Set up the Player's chips player_chips = Chips() # remember the default value is 100 # Prompt the Player for their bet take_bet(player_chips) # Show cards (but keep one dealer card hidden) show_some(player_hand,dealer_hand) while playing: # recall this variable from our hit_or_stand function # Prompt for Player to Hit or Stand hit_or_stand(deck,player_hand) # Show cards (but keep one dealer card hidden) show_some(player_hand,dealer_hand) # If player's hand exceeds 21, run player_busts() and break out of loop if player_hand.value > 21: player_busts(player_hand,dealer_hand,player_chips) break # If Player hasn't busted, play Dealer's hand until Dealer reaches 17 if player_hand.value <= 21: while dealer_hand.value < 17: hit(deck,dealer_hand) # Show all cards show_all(player_hand,dealer_hand) # Run different winning scenarios if dealer_hand.value > 21: dealer_busts(player_hand,dealer_hand,player_chips) elif dealer_hand.value > player_hand.value: dealer_wins(player_hand,dealer_hand,player_chips) elif dealer_hand.value < player_hand.value: player_wins(player_hand,dealer_hand,player_chips) else: push(player_hand,dealer_hand) # Inform Player of their chips total print("\nPlayer's winnings stand at",player_chips.total) # Ask to play again new_game = input("Would you like to play another hand? Enter 'y' or 'n' ") if new_game[0].lower()=='y': playing=True continue else: print("Thanks for playing!") break ``` And that's it! Remember, these steps may differ significantly from your own solution. That's OK! Keep working on different sections of your program until you get the desired results. It takes a lot of time and patience! 
As always, feel free to post questions and comments to the QA Forums. # Good job!
github_jupyter
# Rust Programming Tutorial https://www.youtube.com/watch?v=vOMJlQ5B-M0&list=PLVvjrrRCBy2JSHf9tGxGKJ-bYAN_uDCUL # Hello World ``` pub fn main() { println!("Hello World!"); } main() ``` # Variables, Mutable ``` pub fn main() { let x = 45; println!("The value of x is {}", x); // x = 42; // <= error: cannot assign twice to immutable variable `x` // println!("The value of x is {}", x); let mut x_mut = 42; println!("The value of x_mut is {}", x_mut); x_mut = 45; println!("The value of x_mut is {}", x_mut); } main() ``` # Variable Data Types ``` pub fn main() { let x_i64: i64 = 45; // 64 bits integer println!("The value of x_i64 is {}", x_i64); let mut x_u64: u64 = 42; // 64 bits unsigned integer println!("The value of x_u64 is {}", x_u64); // x_u64 = -5; // error: cannot apply unary operator `-` to type `u64` let x_f32: f32 = 3.14159; // 32 bits float println!("The value of x_f32 is {}", x_f32); let b: bool = true; println!("The value of b is {}", b); } main() ``` # If Else Statements ``` pub fn main() { let n = 42; if n < 42 { println!("n is under 42!") } else { println!("n is equal or greater than 42!") } } main() ``` # Infinite loop ``` pub fn main() { let mut n = 0; loop { n += 1; // skip n == 6 if n == 5 { continue; } // break/exit when n > 10 (== 11) if n > 10 { break; } println!("The value of n is {}", n); } } main() ``` # While loop ``` pub fn main() { let mut n = 0; while n <= 34 { n += 1; if n % 5 == 0 { println!("Fizz"); } if n % 7 == 0 { println!("Buzz"); continue; } println!("The value of n is {}", n); } } main() ``` # For loop ``` pub fn main() { // not inclusive => [1, 4] for i in 1..5 { println!("The number is {}", i); } // range let numbers = 1..5; for i in numbers { println!("The number is {}", i); } // vector let animals = vec!["Rabbit", "Dog", "Chicken"]; for (i, animal) in animals.iter().enumerate() { println!("The index is {} and the animal name is {}", i, animal); } } main() ``` # Enum type ``` pub enum Season { Winter, Spring, Summer, Autumn } 
pub fn main() { let current_season: Season = Season::Winter; match current_season { Season::Winter => println!("Winter is coming!"), Season::Spring => println!("Spring break!"), Season::Summer => println!("Summer time!"), Season::Autumn => println!("Autumn leaves!") } } main() ``` # Constants ``` pub const PI: f64 = 3.14159; pub fn main() { println!("PI={}", PI); // PI = 3.1; // error: invalid left-hand side expression } main() ``` # Tuples ``` pub fn main() { // tuple declaration let tup1 = (20, 25, 30, 35); // tuple access println!("{}", tup1.2); // => 30 let tup2 = (20, "Hello", 3.14159, true); println!("{}", tup2.1); // => "Hello" // nested tuples let tup3 = (20, "Hello", 3.14159, (1, 4, 7)); println!("{}", (tup3.3).2); // => 7 // destructing assigmnents let tup4 = (1, 4, 7); let (a, b, c) = tup4; println!("a={}, b={}, c={}", a, b, c); } main() ``` # Functions ``` pub fn main() { print_numbers_to(10); } pub fn print_numbers_to(num: u32) { for i in 1..num { if is_odd(i) { println!("{} is odd", i); } else { println!("{} is even", i); } } } pub fn is_odd(num: u32) -> bool { return (num % 2) == 1; } main() ``` # Code Blocks + Shadowing ``` pub fn main() { let x = 5; println!("x={}", x); { let x = 6; println!("x={}", x); } println!("x={}", x); let x = "salut"; println!("x={}", x); let x = true; println!("x={}", x); } main() ``` # References https://doc.rust-lang.org/1.8.0/book/references-and-borrowing.html > The Rules > - one or more references (&T) to a resource, > - exactly one mutable reference (&mut T). 
``` pub fn main() { let mut x = 10; println!("x = {}", x); // immutable reference { let xref = &x; println!("x = {} - xref = {}", x, xref); } // mutable reference need to be in a block { let xref_mut = &mut x; *xref_mut += 1; } println!("x = {}", x); } main() ``` # Struct, Tuple Struct ``` pub struct Color { red: u8, green: u8, blue: u8, } pub fn main() { bg_imut: Color { red=355, green=70, blue=15}; println!("{}, {}, {}", bg_imut.red, bg_imut.green, bg_imut.blue); } main() ```
github_jupyter
``` # run this code to login to https://okpy.org/ and setup the assignment for submission from ist256 import okclient ok = okclient.Lab() ``` # Class Coding Lab: Iterations The goals of this lab are to help you to understand: - How loops work. - The difference between definite and indefinite loops, and when to use each. - How to build an indefinite loop with complex exit conditions. - How to create a program from a complex idea. # Understanding Iterations Iterations permit us to repeat code until a Boolean expression is `False`. Iterations or **loops** allow us to write succinct, compact code. Here's an example, which counts to 3 before [Blitzing the Quarterback in backyard American Football](https://www.quora.com/What-is-the-significance-of-counting-one-Mississippi-two-Mississippi-and-so-on): ``` i = 1 while i <= 3: print(i,"Mississippi...") i=i+1 print("Blitz!") ``` ## Breaking it down... The `while` statement on line 2 starts the loop. The code indented beneath the `while` (lines 3-4) will repeat, in a linear fashion until the Boolean expression on line 2 `i <= 3` is `False`, at which time the program continues with line 5. ### Some Terminology We call `i <=3` the loop's **exit condition**. The variable `i` inside the exit condition is the only thing that we can change to make the exit condition `False`, therefore it is the **loop control variable**. On line 4 we change the loop control variable by adding one to it, this is called an **increment**. Furthermore, we know how many times this loop will execute before it actually runs: 3. Even if we allowed the user to enter a number, and looped that many times, we would still know. We call this a **definite loop**. Whenever we iterate over a fixed number of values, regardless of whether those values are determined at run-time or not, we're using a definite loop. If the loop control variable never forces the exit condition to be `False`, we have an **infinite loop**. 
As the name implies, an Infinite loop never ends and typically causes our computer to crash or lock up. ``` ## WARNING!!! INFINITE LOOP AHEAD ## IF YOU RUN THIS CODE YOU WILL NEED TO KILL YOUR BROWSER AND SHUT DOWN JUPYTER NOTEBOOK i = 1 while i <= 3: print(i,"Mississippi...") # i=i+1 print("Blitz!") ``` ### For loops To prevent an infinite loop when the loop is definite, we use the `for` statement. Here's the same program using `for`: ``` for i in range(1,4): print(i,"Mississippi...") print("Blitz!") ``` One confusing aspect of this loop is `range(1,4)` why does this loop from 1 to 3? Why not 1 to 4? Well it has to do with the fact that computers start counting at zero. The easier way to understand it is if you subtract the two numbers you get the number of times it will loop. So for example, 4-1 == 3. ### Now Try It In the space below, Re-Write the above program to count from 10 to 15. Note: How many times will that loop? ``` # TODO Write code here ``` ## Indefinite loops With **indefinite loops** we do not know how many times the program will execute. This is typically based on user action, and therefore our loop is subject to the whims of whoever interacts with it. Most applications like spreadsheets, photo editors, and games use indefinite loops. They'll run on your computer, seemingly forever, until you choose to quit the application. The classic indefinite loop pattern involves getting input from the user inside the loop. We then inspect the input and based on that input we might exit the loop. Here's an example: ``` name = "" while name != 'mike': name = input("Say my name! : ") print("Nope, my name is not %s! " %(name)) ``` The classic problem with indefinite loops is that its really difficult to get the application's logic to line up with the exit condition. For example we need to set `name = ""` in line 1 so that line 2 starts out as `True`. 
Also we have this wonky logic where when we say `'mike'` it still prints `Nope, my name is not mike!` before exiting. ### Break statement The solution to this problem is to use the break statement. **break** tells Python to exit the loop immediately. We then re-structure all of our indefinite loops to look like this: ``` while True: if exit-condition: break ``` Here's our program we-written with the break statement. This is the recommended way to write indefinite loops in this course. NOTE: We always check for the setinal value immediately after the `input()` function. ``` while True: name = input("Say my name!: ") if name == 'mike': break print("Nope, my name is not %s!" %(name)) ``` ### Multiple exit conditions This indefinite loop pattern makes it easy to add additional exit conditions. For example, here's the program again, but it now stops when you say my name or type in 3 wrong names. Make sure to run this program a couple of times. First enter mike to exit the program, next enter the wrong name 3 times. ``` times = 0 while True: name = input("Say my name!: ") times = times + 1 if name == 'mike': print("You got it!") break if times == 3: print("Game over. Too many tries!") break print("Nope, my name is not %s!" %(name)) ``` # Number sums Let's conclude the lab with you writing your own program which uses an indefinite loop. We'll provide the to-do list, you write the code. This program should ask for floating point numbers as input and stops looping when **the total of the numbers entered is over 100**, or **5 numbers have been entered**. Those are your two exit conditions. After the loop stops print out the total of the numbers entered and the count of numbers entered. ``` ## TO-DO List #1 count = 0 #2 total = 0 #3 loop Indefinitely #4. 
input a number #5 increment count #6 add number to total #7 if count equals 5 stop looping #8 if total greater than 100 stop looping #9 print total and count # Write Code here: ``` ## Metacognition Please answer the following questions. This should be a personal narrative, in your own voice. Answer the questions by double clicking on the question and placing your answer next to the Answer: prompt. 1. Record any questions you have about this lab that you would like to ask in recitation. It is expected you will have questions if you did not complete the code sections correctly. Learning how to articulate what you do not understand is an important skill of critical thinking. Answer: 2. What was the most difficult aspect of completing this lab? Least difficult? Answer: 3. What aspects of this lab do you find most valuable? Least valuable? Answer: 4. Rate your comfort level with this week's material so far. 1 ==> I can do this on my own and explain how to do it. 2 ==> I can do this on my own without any help. 3 ==> I can do this with help or guidance from others. If you choose this level please list those who helped you. 4 ==> I don't understand this at all yet and need extra help. If you choose this please try to articulate that which you do not understand. Answer: ``` # to save and turn in your work, execute this cell. Your latest submission will be graded. ok.submit() ```
github_jupyter
``` !pip install -U ../../../tm/SDGym import sdgym from sdgym import load_dataset from sdgym import benchmark from sdgym import load_dataset from timeit import default_timer as timer from functools import partial import numpy as np import pandas as pd import matplotlib.pyplot as plt import networkx as nx from synthsonic.models.kde_utils import kde_smooth_peaks_1dim, kde_smooth_peaks from sklearn.model_selection import train_test_split import pgmpy from pgmpy.models import BayesianModel from pgmpy.estimators import TreeSearch from pgmpy.estimators import HillClimbSearch, BicScore, ExhaustiveSearch from pgmpy.estimators import BayesianEstimator from pgmpy.sampling import BayesianModelSampling import xgboost as xgb from random import choices from xgboost import XGBClassifier from sklearn.neural_network import MLPClassifier import xgboost as xgb from sklearn.svm import SVC from sklearn.isotonic import IsotonicRegression from scipy import interpolate %matplotlib inline import logging logging.basicConfig(level=logging.INFO) data, categorical_columns, ordinal_columns = load_dataset('intrusion_categorical') data.shape data df = pd.DataFrame(data) df.columns = [str(i) for i in df.columns] # learn graph structure (preferred - fast) est = TreeSearch(df, root_node=df.columns[0]) dag = est.estimate(estimator_type="tan", class_node='1') # alternative graph structure if False: est2 = TreeSearch(df, root_node=df.columns[0]) dag2 = est2.estimate(estimator_type="chow-liu") # alternative graph structure (slow) if False: est = HillClimbSearch(df) best_model = est.estimate() # start_dag=dag) nx.draw(best_model, with_labels=True, arrowsize=30, node_size=800, alpha=0.3, font_weight='bold') plt.show() edges = best_model.edges() edges # there are many choices of parametrization, here is one example model = BayesianModel(best_model.edges()) model.fit(df, estimator=BayesianEstimator, prior_type='dirichlet', pseudo_counts=0.1) print(model.get_cpds('2')) # set up train-test sample. 
# the test sample is used to calibrate the output of the classifier random_state = 0 X1_train, X1_test, y1_train, y1_test = train_test_split(data, np.ones(data.shape[0]), test_size=0.35, random_state=random_state) X1_train.shape %%script false --no-raise-error clf = MLPClassifier(random_state=0, max_iter=1000, early_stopping=True) clf = xgb.XGBClassifier( n_estimators=250, reg_lambda=1, gamma=0, max_depth=9 ) n_one = len(X1_train) n_zero = n_one np.random.seed(seed = 0) # sample data from BN inference = BayesianModelSampling(model) df_data = inference.forward_sample(size=n_zero, return_type='dataframe') df_data.columns = [int(c) for c in df_data.columns] X0_train = df_data[sorted(df_data.columns)].values zeros = np.zeros(n_zero) ones = np.ones(n_one) yy = np.concatenate([zeros, ones], axis = 0) XX = np.concatenate([X0_train, X1_train], axis = 0) clf = clf.fit(XX, yy) # calibrate the probabilities, using the test sample and a new null sample np.random.seed(10) df_data = inference.forward_sample(size=250000, return_type='dataframe') df_data.columns = [int(c) for c in df_data.columns] X0_test = df_data[sorted(df_data.columns)].values p0 = clf.predict_proba(X0_test)[:, 1] p1 = clf.predict_proba(X1_test)[:, 1] nbins = 50 plt.figure(figsize=(12,7)) plt.hist(p0, bins=nbins, range=(0,1), alpha=0.5, log=True, density=True); plt.hist(p1, bins=nbins, range=(0,1), alpha=0.5, log=True, density=True); binning = np.linspace(0, 1, nbins+1) hist_p0, bin_edges = np.histogram(p0, binning) hist_p1, bin_edges = np.histogram(p1, binning) def poisson_uncertainty(n): sigman = np.sqrt(n) # correct poisson counts of zero. sigman[sigman == 0] = 1. 
return sigman def fraction_and_uncertainty(a, b, sigma_a, sigma_b): absum = a+b frac_a = a / absum frac_b = b / absum spo = np.power(absum, 2) sigma_fa2 = np.power(frac_b * sigma_a, 2) / spo + \ np.power(frac_a * sigma_b, 2) / spo return frac_a, np.sqrt(sigma_fa2) rest_p0 = np.sum(hist_p0) - hist_p0 rest_p1 = np.sum(hist_p1) - hist_p1 sigma_bin0 = poisson_uncertainty(hist_p0) sigma_rest0 = poisson_uncertainty(rest_p0) sigma_bin1 = poisson_uncertainty(hist_p1) sigma_rest1 = poisson_uncertainty(rest_p1) frac0, sigma_frac0 = fraction_and_uncertainty(hist_p0, rest_p0, sigma_bin0, sigma_rest0) frac1, sigma_frac1 = fraction_and_uncertainty(hist_p1, rest_p1, sigma_bin1, sigma_rest1) p1calib, sigma_p1calib = fraction_and_uncertainty(frac1, frac0, sigma_frac1, sigma_frac0) sample_weight = 1 / (sigma_p1calib * sigma_p1calib) sample_weight /= min(sample_weight) #sample_weight # we recalibrate per probability bin. NO interpolation (not valid in highest bin) #hist_p0, bin_edges = np.histogram(p0, bins=nbins, range=(0, 1)) #hist_p1, bin_edges = np.histogram(p2, bins=nbins, range=(0, 1)) #### !!!! p2 bin_centers = bin_edges[:-1] + 0.5/nbins hnorm_p0 = hist_p0 / sum(hist_p0) hnorm_p1 = hist_p1 / sum(hist_p1) hnorm_sum = hnorm_p0 + hnorm_p1 p1cb = np.divide(hnorm_p1, hnorm_sum, out=np.zeros_like(hnorm_p1), where=hnorm_sum != 0) # self.p1cb = p1cb, bin_centers # use isotonic regression to smooth out potential fluctuations in the p1 values # isotonic regression assumes that p1 can only be a rising function. # I’m assuming that if a classifier predicts a higher probability, the calibrated probability # will also be higher. This may not always be right, but I think generally it is a safe one. 
iso_reg = IsotonicRegression(y_min=0, y_max=1).fit(bin_centers, p1calib, sample_weight) p1pred = iso_reg.predict(bin_centers) # calibrated probabilities p1f_ = interpolate.interp1d( bin_edges[:-1], p1pred, kind='previous', bounds_error=False, fill_value="extrapolate" ) p1pred = p1f_(bin_centers) p1lin = p1f_(bin_centers) plt.figure(figsize=(12,7)) plt.plot(bin_centers, p1cb, label='p1cb') plt.plot(bin_centers, p1pred, label='p1pred') plt.plot(bin_centers, bin_centers, label='bin_centers') plt.plot(bin_centers, p1lin, label='p1lin') plt.legend(); maxp1 = p1f_(0.995) max_weight = maxp1 / (1. - maxp1) max_weight # validation - part 1: check if reweighting works okay from pgmpy.sampling import BayesianModelSampling np.random.seed(1) # sample data from BN inference = BayesianModelSampling(model) df_data = inference.forward_sample(size=250000, return_type='dataframe') df_data.columns = [int(c) for c in df_data.columns] X_test = df_data[sorted(df_data.columns)].values p0 = clf.predict_proba(X_test)[:, 1] nominator = p1f_(p0) denominator = 1 - nominator weight = np.divide(nominator, denominator, out=np.ones_like(nominator), where=denominator != 0) len(X_test), sum(weight) %%script false --no-raise-error keep = weight == max_weight same = weight != max_weight ratio = (250000 - np.sum(weight[same])) / np.sum(weight[keep]) np.sum(weight[same]), np.sum(weight[keep]) plt.hist(weight, bins=nbins, log=True); #data, sample_weights = self._sample_no_transform(n_samples, random_state) pop = np.asarray(range(X_test.shape[0])) probs = weight/np.sum(weight) sample = choices(pop, probs, k=X_test.shape[0]) Xtrans = X_test[sample] p0 = clf.predict_proba(Xtrans)[:, 1] p1 = clf.predict_proba(X1_test)[:, 1] plt.figure(figsize=(12,7)) plt.hist(p0, bins=nbins, range=(0,1), alpha=0.5, density=True); #, weights=weight)#, log=True) plt.hist(p1, bins=nbins, range=(0,1), alpha=0.5, density=True); # validation - part 2: plot distributions i = 1 plt.figure(figsize=(12,7)) plt.hist(X_test[:, i], 
bins=nbins, range=(0,1), alpha=0.5, density=True);#, log=True) plt.hist(X1_test[:, i], bins=nbins, range=(0,1), alpha=0.5, density=True); # validation part 3: check number of duplicates np.random.seed(2) df_data = inference.forward_sample(size=500000, return_type='dataframe') df_data.columns = [int(c) for c in df_data.columns] X10k = df_data[sorted(df_data.columns)].values p0 = clf.predict_proba(X10k)[:, 1] nominator = p1f_(p0) denominator = 1 - nominator weight = np.divide(nominator, denominator, out=np.ones_like(nominator), where=denominator != 0) sum(weight) pop = np.asarray(range(X10k.shape[0])) probs = weight/np.sum(weight) sample = choices(pop, probs, k=X10k.shape[0]) Xtrans = X10k[sample] u, c = np.unique(Xtrans, axis=0, return_counts=True) counts = np.sort(c)[::-1] / 50 counts u, c = np.unique(data, axis=0, return_counts=True) c2 = np.sort(c)[::-1] plt.figure(figsize=(12,7)) plt.bar(list(range(40)), c2[:40], alpha=0.5) plt.bar(list(range(40)), counts[:40], alpha=0.5) ``` # run sdgym ``` df = pd.DataFrame(Xtrans) df.to_csv('intrusion_categorical_test.csv', index=False) def KDECopulaNNPdf_RoundCategorical(real_data, categorical_columns, ordinal_columns, times=None): df = pd.read_csv('intrusion_categorical_test.csv') data = df.values[:real_data.shape[0]] return data from sdgym.synthesizers import ( CLBNSynthesizer, CTGANSynthesizer, IdentitySynthesizer, IndependentSynthesizer, MedganSynthesizer, PrivBNSynthesizer, TableganSynthesizer, TVAESynthesizer, UniformSynthesizer, VEEGANSynthesizer) all_synthesizers = [ IdentitySynthesizer, IndependentSynthesizer, # PrivBNSynthesizer, KDECopulaNNPdf_RoundCategorical, ] import sdgym scores = sdgym.run(synthesizers=all_synthesizers, datasets=['intrusion_categorical']) scores scores.tail(3) ```
---
<!-- notebook boundary (export artifact; original marker: "github_jupyter") -->
``` import sqlite3 import pandas as pd import numpy as np import matplotlib.pyplot as plt import plotly.plotly as py import plotly.graph_objs as go from sklearn.cross_validation import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score from sklearn.cross_validation import cross_val_score from collections import Counter from sklearn.metrics import accuracy_score from sklearn import cross_validation from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics import confusion_matrix from sklearn import metrics from sklearn.metrics import roc_curve, auc from nltk.stem.porter import PorterStemmer from sklearn.decomposition import TruncatedSVD from sklearn.model_selection import TimeSeriesSplit from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression ``` # Loading the data ``` final_data = pd.read_csv("final.csv") final_data = final_data.drop(["Text"], axis = 1) final_data = final_data.drop(final_data.columns[0], axis = 1) ``` # Sorting and train/test split ``` labels = final_data.Score final_data = final_data.sort_values("Time") final_data.shape n = final_data.shape[0] train_size = 0.7 train_set = final_data.iloc[:int(n*train_size)] test_set =final_data.iloc[int(n*train_size):] X_train = train_set.CleanedText y_train = train_set.Score X_test = test_set.CleanedText y_test= test_set.Score ``` # Cleaning the data ``` import nltk nltk.download('stopwords') import re # Tutorial about Python regular expressions: https://pymotw.com/2/re/ import string from nltk.corpus import stopwords from nltk.stem import PorterStemmer from nltk.stem.wordnet import WordNetLemmatizer stop = set(stopwords.words('english')) #set of stopwords sno = nltk.stem.SnowballStemmer('english') #initialising the snowball stemmer def cleanhtml(sentence): #function 
to clean the word of any html-tags cleanr = re.compile('<.*?>') cleantext = re.sub(cleanr, ' ', sentence) return cleantext def cleanpunc(sentence): #function to clean the word of any punctuation or special characters cleaned = re.sub(r'[?|!|\'|"|#]',r'',sentence) cleaned = re.sub(r'[.|,|)|(|\|/]',r' ',cleaned) return cleaned print(stop) print('************************************') print(sno.stem('tasty')) ``` - - - # Creating Word2vec model ``` import gensim i=0 train_sent=[] for sent in X_train: filtered_sentence=[] sent=cleanhtml(sent) for w in sent.split(): for cleaned_words in cleanpunc(w).split(): if(cleaned_words.isalpha()): filtered_sentence.append(cleaned_words.lower()) else: continue train_sent.append(filtered_sentence) test_sent=[] for sent in X_test: filtered_sentence=[] sent=cleanhtml(sent) for w in sent.split(): for cleaned_words in cleanpunc(w).split(): if(cleaned_words.isalpha()): filtered_sentence.append(cleaned_words.lower()) else: continue test_sent.append(filtered_sentence) from gensim.models import Word2Vec from gensim.models import KeyedVectors w2v_model = gensim.models.Word2Vec(train_sent,min_count=5,size=50, workers=4) ``` - - - ## Average Word2Vec ``` #AVG-W2V sent_vectors = []; # the avg-w2v for each sentence/review is stored in this list for sent in train_sent: # for each review/sentence sent_vec = np.zeros(50) # as word vectors are of zero length cnt_words =0; # num of words with a valid vector in the sentence/review for word in sent: # for each word in a review/sentence try: vec = w2v_model.wv[word] sent_vec += vec cnt_words += 1 except: cnt_words = 1 pass sent_vec /= cnt_words sent_vectors.append(sent_vec) print(len(sent_vectors)) print(len(sent_vectors[0])) sent_vectors2 = []; # the avg-w2v for each sentence/review is stored in this list for sent in test_sent: # for each review/sentence sent_vec = np.zeros(50) # as word vectors are of zero length cnt_words =0; # num of words with a valid vector in the sentence/review for word in sent: 
# for each word in a review/sentence try: vec = w2v_model.wv[word] sent_vec += vec cnt_words += 1 except: cnt_words = 1 pass sent_vec /= cnt_words sent_vectors2.append(sent_vec) print(len(sent_vectors2)) print(len(sent_vectors2[0])) X_train1 = sent_vectors X_test1 = sent_vectors2 ``` # Applying DecisionTree Classifier ``` from sklearn.tree import DecisionTreeClassifier depth = list(range(2,10)) # empty list that will hold cv scores cv_scores = [] my_cv = [(train,test) for train, test in TimeSeriesSplit(n_splits=10).split(X_train1)] # perform 10-fold cross validation for d in depth: dt = DecisionTreeClassifier(max_depth = d, min_samples_split = 1000, min_samples_leaf = 500) scores = cross_val_score(dt, X_train1, y_train, cv = my_cv, scoring='accuracy') cv_scores.append(scores.mean()) # changing to misclassification error MSE = [1 - x for x in cv_scores] # determining best k optimal_d = depth[MSE.index(min(MSE))] print('\nThe optimal depth of the tree is %d.' % optimal_d) # plot misclassification error vs k plt.plot(depth, MSE) for xy in zip(depth, np.round(MSE,3)): plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data') plt.xlabel('Tree depth') plt.ylabel('Misclassification Error') plt.show() dt = DecisionTreeClassifier(max_depth = 8, min_samples_split = 1000, min_samples_leaf = 500) dt.fit(X_train1,y_train) pred = dt.predict(X_test1) acc = accuracy_score(y_test, pred, normalize=True) * float(100) x = dt.predict(X_train1) tr_acc = accuracy_score(y_train, x, normalize=True) * float(100) print('\n****Train accuracy for k = {} is {:.2f}'.format(8,tr_acc)) print('\n****Test accuracy for k = {} is {:.2f}'.format(8,acc)) ``` - - - # Word2Vec-Tfidf ### Note : Performed with sampled 50k datapoints ``` from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import TfidfVectorizer tf_idf_vect = TfidfVectorizer() final_tf_idf = tf_idf_vect.fit_transform(X_train) tfidf_feat = tf_idf_vect.get_feature_names() # tfidf words/col-names # 
final_tf_idf is the sparse matrix with row= sentence, col=word and cell_val = tfidf train_vectors = []; # the tfidf-w2v for each sentence/review is stored in this list row=0; for sent in train_sent: # for each review/sentence sent_vec = np.zeros(50) # as word vectors are of zero length weight_sum = 0; # num of words with a valid vector in the sentence/review for word in sent: # for each word in a review/sentence try: vec = w2v_model.wv[word] # obtain the tf_idf of a word in a sentence/review tfidf = final_tf_idf[row, tfidf_feat.index(word)] sent_vec += (vec * tfidf) weight_sum += tfidf except: weight_sum = 1 pass sent_vec /= weight_sum #print(np.isnan(np.sum(sent_vec))) train_vectors.append(sent_vec) row += 1 print(len(train_vectors)) print(len(train_vectors[0])) final_tf_idf = tf_idf_vect.fit_transform(X_test) tfidf_feat = tf_idf_vect.get_feature_names() # tfidf words/col-names # final_tf_idf is the sparse matrix with row= sentence, col=word and cell_val = tfidf test_vectors = []; # the tfidf-w2v for each sentence/review is stored in this list row=0; for sent in test_sent: # for each review/sentence sent_vec = np.zeros(50) # as word vectors are of zero length weight_sum = 0; # num of words with a valid vector in the sentence/review for word in sent: # for each word in a review/sentence try: vec = w2v_model2.wv[word] # obtain the tf_idf of a word in a sentence/review tfidf = final_tf_idf[row, tfidf_feat.index(word)] sent_vec += (vec * tfidf) weight_sum += tfidf except: weight_sum = 1 pass sent_vec /= weight_sum #print(np.isnan(np.sum(sent_vec))) test_vectors.append(sent_vec) row += 1 print(len(test_vectors)) print(len(test_vectors[0])) X_train2 = train_vectors X_test2 = test_vectors ``` # Applying DecisionTree Classifier ``` from sklearn.tree import DecisionTreeClassifier depth = list(range(2,15)) # empty list that will hold cv scores cv_scores = [] my_cv = [(train,test) for train, test in TimeSeriesSplit(n_splits=10).split(X_train2)] # perform 10-fold cross 
validation for d in depth: dt = DecisionTreeClassifier(max_depth = d, min_samples_split = 500, min_samples_leaf = 100) scores = cross_val_score(dt, X_train2, y_train, cv = my_cv, scoring='accuracy') cv_scores.append(scores.mean()) # changing to misclassification error MSE = [1 - x for x in cv_scores] # determining best k optimal_d = depth[MSE.index(min(MSE))] print('\nThe optimal depth of the tree is %d.' % optimal_d) # plot misclassification error vs k plt.plot(depth, MSE) for xy in zip(depth, np.round(MSE,3)): plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data') plt.xlabel('Tree depth') plt.ylabel('Misclassification Error') plt.show() dt = DecisionTreeClassifier(max_depth = 6, min_samples_split = 500, min_samples_leaf = 100) dt.fit(X_train2,y_train) pred = dt.predict(X_test2) acc = accuracy_score(y_test, pred, normalize=True) * float(100) x = dt.predict(X_train2) tr_acc = accuracy_score(y_train, x, normalize=True) * float(100) print('\n****Train accuracy for k = {} is {:.2f}'.format(6,tr_acc)) print('\n****Test accuracy for k = {} is {:.2f}'.format(6,acc)) ``` - - - # Conclusion - ### Average word2vec gives a training accuracy of 87.3 % and testing accuracy of 85.3 % with best depth of tree being '8'. - ### Tf-idf word2vec gives a training accuracy of 86 % and testing accuracy of 83.1 % with best depth of tree being '6'.
---
<!-- notebook boundary (export artifact; original marker: "github_jupyter") -->
## Predict No Show For Hospital Appointments ### Data From IM Insurence #### Why: No show causes profit loss and resources wasted #### Key Findings: 1. Show/Noshow (SNS), Right figure 2. Randomforest Classifier outperforms other classification methods; final selected model has robust AUC 0.72 for both training and testing set #### Recommendation: 1. Focus on efforts on patients with longer delay time 2. Survey on patients with no show #### Plan Forward: 1. Improve feature engineering 2. Investigate interaction of features 3. Model Hperparameter tuning 4. power BI deployment #### Worksteps ***step1:*** conda create -n xxxx python=3.8 \ ***step2:*** activate carprediction \ Cd /Users/xxxx/15_Mentoring_Videos/01_EndtoEnd_Project1 \ ***step3:***ML modeling \ ***step4:*** create requirement.txt in python \ pip freeze >requirements.txt \ Pip Install flask \ ***step5:*** Run "myapp.py" in flask \ 1.Need template; html file for a simple template interface\ 2.Need pkl file from ML model \ ***Step6:*** Predict function using the ML model\ Resources:https://medium.com/techcrush/how-to-deploy-your-ml-model-in-jupyter-notebook-to-your-flask-app-d1c4933b29b5 https://www.youtube.com/watch?v=p_tpQSY1aTs https://stackoverflow.com/questions/48205495/python-how-to-run-multiple-flask-apps-from-same-client-machine ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline reference=pd.read_csv('Data_ref.csv') reference ``` Future Steps? Recommendation? 
ACtion items: Randomeforest variable influence plot -Xu ren Extract Month, interaction terms of features Chi-Square test ## Overview ``` df=pd.read_csv('Medical_No_Shows.csv') df.shape df.head() ##check missing values df.isnull().sum() ``` #### Basic Data check ``` df.describe() df.columns df.info() ``` #### Check categorical data ``` print(df['Gender'].unique()) print(df['LocationID'].unique()) print(df['MedicaidIND'].unique()) print(df['Hypertension'].unique()) print(df['Diabetes'].unique()) print(df['Alcoholism'].unique()) print(df['Disability'].unique()) print(df['SMS_received'].unique()) print(df['No-show'].unique()) ``` ### Time Series ``` #convert time into timestamp df.ScheduledDay=pd.to_datetime(df.ScheduledDay) df.AppointmentDay=pd.to_datetime(df.AppointmentDay) # time of the day print(min(df.AppointmentDay), max(df.AppointmentDay)) print(min(df.ScheduledDay),max(df.ScheduledDay)) def Noshowind(x): if x=='No': return 0 else: return 1 ``` ### Convert Show/Noshow into integers for sklearn ``` Appt_No_show=df['No-show'].apply(lambda x:Noshowind(x)) Appt_No_show.index = df['AppointmentDay'] Appt_No_show.plot() ``` # EDA ``` # final["appt_wd"].value_counts() # create feature of week day final=pd.DataFrame() Appt_weekday = df['AppointmentDay'].dt.day_name() Scheduled_weekday=df['ScheduledDay'].dt.day_name() #prepare countplot final = pd.DataFrame(list(zip(Appt_weekday, Scheduled_weekday,Appt_No_show)), columns =['appt_wd', 'schedule_wd','Noshow']) final['no_show']=df[['No-show']] Scheduled_Month=df['ScheduledDay'].dt.month final['Month']=Scheduled_Month final.head(2) ax=sns.countplot(x="appt_wd", order=['Monday','Tuesday','Wednesday','Thursday','Friday'], data=final,palette='rainbow') for p, label in zip(ax.patches, final["appt_wd"].value_counts()): ax.annotate(label, (p.get_x()+0.2, p.get_height()+0.15)) ``` #### No clear pattern of SNS counts vs workday ``` ax=sns.countplot(x="appt_wd", order=['Monday','Tuesday','Wednesday','Thursday','Friday'], 
data=final,palette='rainbow') for p, label in zip(ax.patches, round(final["appt_wd"].value_counts()/len(final),2)): ax.annotate(label, (p.get_x()+0.2, p.get_height()+0.15)) ``` #### No clear pattern of SNS percentage vs workday ``` pltpct=pd.DataFrame(final["appt_wd"].value_counts() / len(final)) # pltpct.index=['a','b''c','d','e','r'] 'Monday','Tuesday','Wednesday','Thursday','Friday' final["appt_wd"].unique() final.describe() ``` ### Create feature of delay time or time_diff_days= appintmentday-scheduledday ``` # difference of the appointment-schedule day ScheduledDay=pd.to_datetime(df.ScheduledDay) AppointmentDay=pd.to_datetime(df.AppointmentDay) df['time_diff_days']=abs(AppointmentDay-ScheduledDay).dt.days final['ApptDaysAfterSchedule']=df[['time_diff_days']] ax=sns.violinplot(x='no_show', y='ApptDaysAfterSchedule', data=final, palette='rainbow') ax.set(ylim=(0, 75)) plt.figure(figsize=(100,200)) ``` #### Very interesting plots shows the noshow population is significantly different from showed population ***Figure:*** The violinplot show the longer the waiting time, higher chance for no show ``` #add month to final_dataset or df df['Month']=final['Month'] # final.columns # df.columns ``` ### Additional EDA Analysis #### Histogram plots with different hue, change on percentage ``` final_dataset=df[['PatientID','AppointmentID', 'Gender', 'ScheduledDay', 'AppointmentDay', 'Age', 'LocationID', 'MedicaidIND', 'Hypertension', 'Diabetes', 'Alcoholism', 'Disability', 'SMS_received', 'No-show', 'time_diff_days','Month']] plt.figure(figsize=(5,2)) sns.countplot(x='Disability',hue='No-show',data=final_dataset,palette='Set1') final_dataset.head(3) ``` ### EDA and Feature Engineering ``` def ConvNoshow(x): if x=='Yes': return 1 else: return 0 final_dataset['NoShow']=final_dataset['No-show'].apply(ConvNoshow) final_dataset.drop(['No-show'],axis=1,inplace=True) def ConvGender(x): if x=='M': return 1 else: return 0 
final_dataset['GenderC']=final_dataset['Gender'].apply(ConvGender) final_dataset.drop(['Gender'],axis=1,inplace=True) final_dataset.head(3) # final_dataset['appt_wd']=final['appt_wd'] # final_dataset['schedule_wd']=final['schedule_wd'] final_dataset['appt_wd']= df['AppointmentDay'].dt.weekday final_dataset['schedule_wd']=df['ScheduledDay'].dt.weekday final_dataset['PatientIDLength']=df['PatientID'].apply(lambda x:len(x)) final_dataset.info() ``` #### group features with integer data type ``` final=final_dataset.select_dtypes('int') final.columns ``` ### Feature Selection ``` # insert column using insert(position,column_name,first_column) function final=final[['AppointmentID', 'Age', 'LocationID', 'MedicaidIND', 'Hypertension', 'Diabetes', 'Alcoholism', 'Disability', 'SMS_received', 'time_diff_days', 'Month', 'GenderC', 'appt_wd', 'schedule_wd', 'PatientIDLength','NoShow']] final.corr() # sns.pairplot(final_dataset) corrmat=final.corr() top_corr_features = corrmat.index #get correlations of each features in dataset plt.figure(figsize=(20,20)) #plot heat map g=sns.heatmap(final.corr(),annot=True,cmap="RdYlGn") ``` ***Figure:*** Correlation coefficient shows SMS recieved, time_diff_days,Appointment ID are highly correlated. 
However AppointmentID and time_diff_days are autocorrelated and has no physical meaning, should be removed frmo trainning set ``` final X=final.iloc[:,:15] y=final.iloc[:,15:] # remove negative value from Age X['Age']=X['Age'].apply(lambda x: x if x>0 else 0) plt.hist(X['Age'],histtype='bar') ``` Figure:Remove Negative Age from age column ## Feature Selection Method 1: Chi Square Test ``` from sklearn.feature_selection import chi2 f_p_values=chi2(X,y) f_p_values import pandas as pd p_values=pd.Series(f_p_values[1]) p_values.index=X.columns p_values axes = (1-p_values).sort_values(ascending=False).plot.bar(figsize=(20, 4)) axes.set_ylim([0,1.2]) axes.set_ylabel('1-Pvalue') plt.axhline(y = 0.95, color = 'r', linestyle = '-') ``` #### Conclusion: Chi Square Test indicates the features are different between group of show-no-show should be incldue into model ## Feature Selection Method 2: Information Gain #### Split Training and Testing sets ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=101) y_train.value_counts() ``` ** Train and fit a logistic regression model on the training set.** ###Evaluate feature importance from information gain https://github.com/krishnaik06/Complete-Feature-Selection/blob/master/3-%20Information%20gain%20-%20mutual%20information%20In%20Classification.ipynb ``` from sklearn.feature_selection import mutual_info_classif # determine the mutual information mutual_info = mutual_info_classif(X_train, y_train.values.ravel()) mutual_info mutual_info = pd.Series(mutual_info) mutual_info.index = X_train.columns mutual_info.sort_values(ascending=False) mutual_info.sort_values(ascending=False).plot.bar(figsize=(20, 4)) from sklearn.feature_selection import SelectKBest sel_five_cols = SelectKBest(mutual_info_classif, k=6) sel_five_cols.fit(X_train, y_train.values.ravel()) X_train.columns[sel_five_cols.get_support()] X_train.columns ``` ### Feature Selection 
Method 3: Random Forest Classification for feature importance ``` from sklearn import svm from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.naive_bayes import MultinomialNB from sklearn.tree import DecisionTreeClassifier rfcmodel = RandomForestClassifier(n_estimators=100, criterion='gini', min_samples_split=5, min_samples_leaf=2, max_features='sqrt', bootstrap=True, n_jobs=-1, random_state=42) rfcmodel.fit(X_train,y_train.values.ravel()) y_pred_test = rfcmodel.predict(X_test) from sklearn.metrics import confusion_matrix print(confusion_matrix(y_test, y_pred_test)) from sklearn import metrics print(metrics.accuracy_score(y_test, y_pred_test)) importances = rfcmodel.feature_importances_ feature_names=X_train.columns std = np.std([ tree.feature_importances_ for tree in rfcmodel.estimators_], axis=0) # elapsed_time = time.time() - start_time # print(f"Elapsed time to compute the importances: " # f"{elapsed_time:.3f} seconds") from sklearn.inspection import permutation_importance forest_importances = pd.Series(importances, index=feature_names) fig, ax = plt.subplots() forest_importances.sort_values(ascending=False).plot.bar(yerr=std, ax=ax) ax.set_title("Feature importances using MDI") ax.set_ylabel("Mean decrease in impurity") fig.tight_layout() fig.set_size_inches(18.5, 10.5) ``` #### Conclustion: Comparing with Cycle 1; the new features could improve the prediction. It looks promising... 
# Modeling ### Random Forest Model ``` from sklearn.ensemble import RandomForestClassifier from sklearn.datasets import make_classification from sklearn import model_selection # random forest model creation # https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74 rfc = RandomForestClassifier() rfc.fit(X_train,y_train.values.ravel()) # predictions rfc_train_predict = rfc.predict(X_train) rfc_test_predict = rfc.predict(X_test) ``` ### Metrics ``` from sklearn.model_selection import cross_val_score from sklearn.metrics import classification_report, confusion_matrix rfc_cv_train_score = cross_val_score(rfc, X_train,y_train.values.ravel(), cv=10, scoring='roc_auc') print("=== Confusion Matrix train ===") print(confusion_matrix(y_train, rfc_train_predict)) print('\n') print("=== Classification Report ===") print(classification_report(y_train, rfc_train_predict)) print('\n') print("=== All AUC Scores ===") print(rfc_cv_train_score) print('\n') print("=== Mean AUC Score ===") print("Mean AUC Score - Random Forest: ", rfc_cv_train_score.mean()) rfc_cv_test_score = cross_val_score(rfc, X_test,y_test.values.ravel(), cv=10, scoring='roc_auc') print("=== Confusion Matrix test===") print(confusion_matrix(y_test, rfc_test_predict)) print('\n') print("=== Classification Report test ===") print(classification_report(y_test, rfc_test_predict)) print('\n') print("=== All AUC Scores test ===") print(rfc_cv_test_score) print('\n') print("=== Mean AUC Score test===") print("Mean AUC Score - Random Forest: ", rfc_cv_test_score.mean()) ``` #### Although significant improvement from previous cycle, it is clearly overfitting still room to improve #### trade off between precision and recall could be discussed for threshold decisions etc ``` #https://blog.dataiku.com/narrowing-the-search-which-hyperparameters-really-matter # https://github.com/codebasics/py/blob/master/ML/15_gridsearch/Exercise/15_grid_search_cv_exercise.ipynb ``` ## ROC
AUC ``` from sklearn.metrics import roc_auc_score from sklearn import metrics from sklearn.svm import SVC train_roc=metrics.plot_roc_curve(rfc, X_train, y_train, name='train') test_roc=metrics.plot_roc_curve(rfc, X_test, y_test, ax=train_roc.ax_, name='test') test_roc.figure_.suptitle("ROC curve comparison") ``` ### Model Selection ``` from sklearn.metrics import roc_auc_score from sklearn import metrics from IPython.display import Javascript display(Javascript('IPython.notebook.execute_cells_below()')) model_params = { # 'svm': { # 'model': svm.SVC(gamma='auto'), # 'params' : { # 'C': [10], #1,10,20 # 'kernel': ['rbf'] #'rbf','linear' # } # }, 'random_forest': { 'model': RandomForestClassifier(), 'params' : { 'n_estimators': [100, 500, 1000], # # Number of features to consider at every split 'max_features' : ['auto'], # Minimum number of samples required at each leaf node 'min_samples_leaf' : [2] } }, 'logistic_regression' : { 'model': LogisticRegression(solver='liblinear',multi_class='auto'), 'params': { 'C': [1,5,10] } }, 'naive_bayes_gaussian': { 'model': GaussianNB(), 'params': {} }, 'decision_tree': { 'model': DecisionTreeClassifier(), 'params': { 'criterion': ['gini','entropy'], } } } from sklearn.model_selection import GridSearchCV import pandas as pd scores = [] for model_name, mp in model_params.items(): clf = GridSearchCV(mp['model'], mp['params'], cv=3, return_train_score=True) clf.fit(X_train, y_train.values.ravel()) scores.append({ 'model': model_name, 'best_score': clf.best_score_, 'best_params': clf.best_params_ }) df = pd.DataFrame(scores,columns=['model','best_score','best_params']) df ``` #### RandomForest outperform other models in our preliminary tests ### HyperParameters Fine Tuning for RandomForest ``` df.loc[0,'best_params'] rfcmodel = RandomForestClassifier(n_estimators=1000, criterion='gini', min_samples_split=5, min_samples_leaf=2, max_features='auto', bootstrap=True, n_jobs=-1, random_state=42) 
rfcmodel.fit(X_train,y_train.values.ravel()) y_pred_test = rfcmodel.predict(X_test) from sklearn.metrics import confusion_matrix print(confusion_matrix(y_test, y_pred_test)) from sklearn import metrics print(metrics.accuracy_score(y_test, y_pred_test)) y_pred_train = rfcmodel.predict(X_train) from sklearn.metrics import confusion_matrix print(confusion_matrix(y_train, y_pred_train)) from sklearn import metrics print(metrics.accuracy_score(y_train, y_pred_train)) importances = rfcmodel.feature_importances_ feature_names=X_train.columns std = np.std([ tree.feature_importances_ for tree in rfcmodel.estimators_], axis=0) # elapsed_time = time.time() - start_time # print(f"Elapsed time to compute the importances: " # f"{elapsed_time:.3f} seconds") from sklearn.inspection import permutation_importance forest_importances = pd.Series(importances, index=feature_names) fig, ax = plt.subplots() forest_importances.sort_values(ascending=False).plot.bar(yerr=std, ax=ax) ax.set_title("Feature importances using MDI") ax.set_ylabel("Mean decrease in impurity") fig.tight_layout() fig.set_size_inches(18.5, 10.5) train_roc=metrics.plot_roc_curve(rfcmodel, X_train, y_train, name='train') test_roc=metrics.plot_roc_curve(rfcmodel, X_test, y_test, ax=train_roc.ax_, name='test') test_roc.figure_.suptitle("ROC curve comparison") ``` ### Better, still overfitting. could improve by further hyperparameter tunning, recommendation? ### Prepare pickle dump file for app deployment ``` import pickle # open a file, where you ant to store the data file = open('random_forest_classification_model.pkl', 'wb') # dump information to that file pickle.dump(rfcmodel, file) ``` ### Decide to implement model into PowerBI # Summary ``` ''' 1. features are good indicator of Hospital Show/Noshow (SNS) are delay time more than 8 days months is 4 or less (spring) age is 47 or less SMS is recieved 2. Randomforest Classifier outperforms other classification methods 3. 
final selected model has robust AUC 0.72 for both training and testing set 4. However there is still overfitting, more hyperparameter tunning could improve the model repeatability 5. More feature engineering could improve model predictability ''' ```
---
<!-- notebook boundary (export artifact; original marker: "github_jupyter") -->
# YNet - Dataset 7: Data from Experiment (2), Mitochondria = Cit1-mCherry ### Importing utilities: ``` %matplotlib inline %reload_ext autoreload %autoreload 2 import os from pathlib import Path import skimage.external.tifffile as tiff from common import Statistics, dataset_source from resources.conv_learner import * from resources.plots import * from pprint import pprint import matplotlib.pyplot as plt %matplotlib inline ``` #### Setting up variables ``` PATH = "../datasets/yeast_v7/" data_path = Path(PATH) CLASSES = ('WT', 'mfb1KO', 'mfb1KO_mmr1KO', 'mmr1KO', 'mmm1KO', 'num1KO', 'mfb1KO_num1KO') NUM_CLASSES = len(CLASSES) BATCH_SIZE = 64 SIZE = 200 ``` #### Calculating normalization statistics ``` stats_name = "yeast_v7_per_class.dict" classes = Statistics.source_class(data_path) train_val = zip(classes['train'], classes['val']) # Xtest = zip(classes['test']) main_stats = Statistics.per_class(train_val, save_name=stats_name) # test_stats = Statistics.per_class(Xtest, save_name=stats_name) for keys in main_stats.keys(): print(f"{keys}: \t \t \t {main_stats[keys]}") for keys in test_stats.keys(): print(f"{keys}: \t \t \t {test_stats[keys]}") ``` ## Defining datasets: ``` def tfms_for_test(stats, sz): test_norm = Normalize(stats) test_denorm = Denormalize(stats) val_crop = CropType.NO test_tfms = image_gen(test_norm, test_denorm,sz, crop_type=val_crop) return test_tfms def get_data(path: str, sz, bs): create, lbl2index = ImageClassifierData.prepare_from_path(path, val_name='val', bs=bs) main_stats_X = {lbl2index[key]: val for key, val in main_stats.items()} # test_stats_X= {lbl2index[key]: val for key, val in test_stats.items()} tfms = tfms_from_stats(main_stats_X, sz, aug_tfms=[RandomDihedral()], pad=sz//8) # test_tfms = tfms_for_test(test_stats_X,sz) # tfms += (test_tfms, ) print('\n class to index mapping:\n',lbl2index) return create(tfms) data = get_data(PATH,SIZE, BATCH_SIZE) x, y = next(iter(data.trn_dl)) ``` ### Inspect loaded data: ``` # specify which 
image-index idx = 60 # loading it from GPU to CPU xx = x[idx].cpu().numpy().copy() yy = y[idx] # showing the image # #sp.axis('Off') #sp.set_title("Norm", fontsize=11) figure, _ ,_ = tiff.imshow(np.sum(xx, axis=0)) figure.set_size_inches(6,6) figure.add_subplot(111) figure2, _, _ = tiff.imshow(np.sum(data.trn_ds.denorm(xx,yy).squeeze() * 65536, axis=2)) figure2.set_size_inches(6,6) ``` # Training setup ``` torch.cuda.is_available() ``` ## ResNet_with_Batchnorm ``` class BnLayer(nn.Module): def __init__(self, ni, nf, stride=2, kernel_size=3): super().__init__() self.conv = nn.Conv2d(ni, nf, kernel_size=kernel_size, stride=stride, bias=False, padding=1) self.a = nn.Parameter(torch.zeros(nf,1,1)) self.m = nn.Parameter(torch.ones(nf,1,1)) def forward(self, x): x = F.relu(self.conv(x)) x_chan = x.transpose(0,1).contiguous().view(x.size(1), -1) if self.training: self.means = x_chan.mean(1)[:,None,None] self.stds = x_chan.std (1)[:,None,None] return (x-self.means) / self.stds *self.m + self.a class ResnetLayer(BnLayer): def forward(self, x): return x + super().forward(x) class Resnet(nn.Module): def __init__(self, layers, c): super().__init__() self.conv1 = nn.Conv2d(2, 10, kernel_size=5, stride=1, padding=2) self.layers = nn.ModuleList([BnLayer(layers[i], layers[i+1]) for i in range(len(layers) - 1)]) self.layers2 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1) for i in range(len(layers) - 1)]) self.layers3 = nn.ModuleList([ResnetLayer(layers[i+1], layers[i + 1], 1) for i in range(len(layers) - 1)]) self.out = nn.Linear(layers[-1], c) def forward(self, x): x = self.conv1(x) for l,l2,l3 in zip(self.layers, self.layers2, self.layers3): x = l3(l2(l(x))) x = F.adaptive_max_pool2d(x, 1) x = x.view(x.size(0), -1) return F.log_softmax(self.out(x), dim=-1) wd=1e-5 learn = ConvLearner.from_model_data(Resnet([10, 20, 40, 80, 160], 7), data) learn.summary() %time learn.fit(1e-2, 8, cycle_len=1, wds=wd) %time learn.fit(1e-2, 8, wds=wd, cycle_len=10, use_clr=(20,8, 0.95, 
0.85)) learn.save('ResNet_v7_79_1') %time learn.fit(1e-3, 2, wds=wd, cycle_len=20, use_clr=(20,8, 0.95, 0.85)) learn.save('ResNet_v7_83_2') learn.load('ResNet_v7_best_3_1') %time learn.fit(1e-5, 2, wds=wd, cycle_len=20, use_clr=(20,8, 0.95, 0.85), best_save_name='ResNet_v7_best_4') ``` ### Show loss over time ``` f = plt.figure() x = learn.sched.plot_loss() plt.ylabel('Loss') plt.xlabel('Iterations') plt.show() ``` ## Analysis ### ...after training ``` ## Load model: learn.load('Objective_A_Resnet_per_class_2') %time learn.fit(1e-10, 1, wds=wd, cycle_len=1) log_preds, y = learn.TTA() # run predictions with TTA ``` ### Confusion matrix ``` # Plot confusion matrix log_preds_mean = np.mean(log_preds, axis=0) preds = np.argmax(log_preds_mean, axis=1) cm = confusion_matrix(preds,y) plot_confusion_matrix(cm, data.classes) ``` ### Analyse images #### Show random correct/incorrectly classified images: ``` log_preds_mean = np.mean(log_preds, axis=0) # averages predictions on original + 4 TTA images preds = np.argmax(log_preds_mean, axis=1) # converts into 0 or 1 # probs = np.exp(log_preds_mean[:,0]) # prediction(WT) probs = np.exp(log_preds_mean) # predictions def rand_by_mask(mask): return np.random.choice(np.where(mask)[0], 4, replace=False) def rand_by_correct(is_correct): return rand_by_mask((preds == data.val_y)==is_correct) def plots(ims, channel, figsize=(12,6), rows=1, titles=None): f = plt.figure(figsize=figsize) for i in range(len(ims)): sp = f.add_subplot(rows, len(ims)//rows, i+1) sp.axis('Off') if titles is not None: sp.set_title(titles[i], fontsize=11) if channel is not None: plt.imshow(ims[i,channel,:,:]) else: plt.imshow(np.sum(ims, axis=1)[i,:,:]) def plot_val_with_title_from_ds_no_denorm(idxs, title, channel=None): imgs = np.stack(data.val_ds[x][0] for x in idxs) # get images by idx corr_lbl = np.stack(data.val_ds[x][1] for x in idxs) # get correct label from data.val_ds by idx pred_lbl = np.stack(preds[x] for x in idxs) # get predicted label from preds by 
idx p_max = [np.amax(probs[x,:]) for x in idxs] # get highes probability from probs by idx title_fin = [f"true = {corr_lbl[x]}\n predicted: {pred_lbl[x]}\n p = {p_max[x]}" for x in corr_lbl] print(title) return plots(imgs, channel, rows=1, titles=title_fin, figsize=(16,8)) ``` ### Plot images according to predictions ``` # load from ds - not denormalized! plot_val_with_title_from_ds_no_denorm(rand_by_correct(True), "Correctly classified") #optionally pass channel arg. to select single channel plot_val_with_title_from_ds_no_denorm(rand_by_correct(False), "Incorrectly classified") ``` #### Show most correct/incorrectly classified images per class: ``` def most_by_mask(mask, y, mult): idxs = np.where(mask)[0] return idxs[np.argsort(mult * probs[:,y][idxs])[:4]] def most_by_correct(y, is_correct): mult = -1 if is_correct else 1 return most_by_mask(((preds == data.val_y)==is_correct) & (data.val_y == y), y, mult) plot_val_with_title_from_ds_no_denorm(most_by_correct(0, True), "Most correctly classified WT") plot_val_with_title_from_ds_no_denorm(most_by_correct(0, False), "Most incorrectly classified WT") # logic? plot_val_with_title_from_ds_no_denorm(most_by_correct(1, True), "Most correctly classified mfb1KO") plot_val_with_title_from_ds_no_denorm(most_by_correct(1, False), "Most incorrectly classified mfb1KO") plot_val_with_title_from_ds_no_denorm(most_by_correct(2, True), "Most correctly classified mfb1KO-mmr1KO") plot_val_with_title_from_ds_no_denorm(most_by_correct(3, True), "Most correctly classified mmr1KO") # etc. ``` #### Show (most) uncertain images ``` most_uncertain = t = np.argsort(np.amax(probs, axis = 1))[:6] # get best "guess" per image and list the least confident ones plot_val_with_title_from_ds_no_denorm(most_uncertain, "Most uncertain predictions") ``` # DOES THE MODEL GENERALIZE????? 
``` def get_test_data(path: str, sz, bs): create, lbl2index = ImageClassifierData.prepare_from_path(path, val_name='val', test_name='test', test_with_labels=True, bs=bs) main_stats_X = {lbl2index[key]: val for key, val in main_stats.items()} test_stats_X= {lbl2index[key]: val for key, val in test_stats.items()} tfms = tfms_from_stats(main_stats_X, sz, aug_tfms=[RandomDihedral()], pad=sz//8) #even without transformations and padding -> failure test_tfms = tfms_for_test(test_stats_X,sz) tfms += (tfms[0], ) print('\n class to index mapping:\n',lbl2index) return create(tfms) test_data = get_test_data(PATH,SIZE, BATCH_SIZE) test_learn = ConvLearner.from_model_data(Resnet([10, 20, 40, 80, 160], 4), test_data) test_learn.load('Objective_A_Resnet_per_class_2') test_learn.warm_up(1e-14) test_log_preds, targs = test_learn.predict_with_targs(is_test=True) testprobs = np.exp(test_log_preds) preds = np.argmax(testprobs, axis=1) print(preds) print(targs) # Print Accuracy print(f"accuracy: [{sum(preds == targs) / 70:4.4}]") test_log_preds ``` ### Troubleshooting: ``` test_x , test_y = next(iter(test_data.test_dl)) # specify which image-index idx = 2 # loading it from GPU to CPU test_xx = test_x[idx].cpu().numpy().copy() test_yy = test_y[idx] figure, _ ,_ = tiff.imshow(np.sum(test_xx, axis=0)) figure.set_size_inches(6,6) figure.add_subplot(111) figure2, _, _ = tiff.imshow(np.sum(xx, axis=0)) figure2.set_size_inches(6,6) ``` ## Caclulating normalization statistics separately ``` for keys in main_stats.keys(): print(f"{keys}: \t \t \t {main_stats[keys]}") for keys in test_stats.keys(): print(f"{keys}: \t \t \t {test_stats[keys]}") ## Load model: learn.load('Objective_A_Resnet_per_class_2') %time learn.fit(1e-10, 1, wds=wd, cycle_len=1) test_log_preds, targs = learn.predict_with_targs(is_test=True) testprobs = np.exp(test_log_preds) preds = np.argmax(testprobs, axis=1) print(preds) print(targs) # Print Accuracy print(f"accuracy: [{sum(preds == targs) / 140:4.4}]") test_log_preds ```
github_jupyter
# PROJECT 2 : TEAM 11 Members: Talia Tandler, SeungU Lyu ``` # Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * import math ``` http://www.worldometers.info/world-population/us-population/ US pop in 2017 = 324,459,463 https://wwwnc.cdc.gov/travel/yellowbook/2018/infectious-diseases-related-to-travel/measles-rubeola Measles incubation period 11 days average, infectious period 2-4 days before rash to after rash. https://www.cdc.gov/vaccines/imz-managers/coverage/childvaxview/data-reports/mmr/trend/index.html MMR immunization rate in 2017 = 90.7% ``` #population pop = 999 #initial immunity of the US population init_im = 0.907 #assumed contact rate beta = 0.9 #US recovery rate from measles gamma = 1/7 #US rate from exposure period of 11 days to infected sigma = 0.091; ``` ## Question ### What is the result of lowering the measles immunity rate in a small community during a outbreak? Measles is a highly infectious disease that can infect about 90% of people that come into contact with the patient. However, the disease is not common these days because of the MMR vaccination, which can effectively prevent people getting the disease. Due to the high vaccination rate, the United States was declared free of circulating measles in 2000. However there were 911 cases of measles between 2001 and 2011. These occurences arose due to individuals from other countries entering the U.S. with measles. Because of the disease's high infectious rate upon contact, herd immunity is considered very important for measles. In 2015, a measles outbreak occured at Disney World causing more than 159 people to be infected during a single outbreak. Only 50~86% people exposed to this outbreak were vaccinated, causing an even bigger outbreak. 
This vaccination was lower than it should have been due to Anti-Vaccination Movements in the U.S. These lower rates lowered the population immunity rate and caused the herd immunity to not function as expected. The starter of this movement, Andrew Wakefield, stated that the MMR vaccination can cause autism in newborn children because of the mercury content inside the specific vaccine. Due to this false research, many parents became concerned with the side effects of the vaccination and opted to not vaccinate their children with MMR. As a result, there was a decently sized generation of children susceptible to measles because they did not receive the vaccination at birth. This simulation utilizes an SEIR model to understand how varying the measles immunity rate in a community effects herd immunity. ## Methodology In order to create this model, we: 1. Did background research on the MMR vaccination and the measles diseases and found a set of constants we would implement in our model. 2. Put the variables into a state function. 3. Set the total population to 1000, with initial infection number as one person infected with measles. 4. Ran the simulation based on the number measles infections every day. 5. Set a condition where the measles outbreak ends when the number infected people is less than one person. 6. Created graphs to visually represent our results. 
```
def make_system (pop, init_im, beta, gamma, sigma):
    """Make a system object for the SEIR model.

    pop: total community population (people)
    init_im: initial fraction of the population that is immune (0..1)
    beta: effective contact rate for an infectious person (per day)
    gamma: recovery rate for infected people (1 / infectious period, per day)
    sigma: rate at which the exposed (incubating) group becomes infectious (per day)

    return: System object
    """
    #S: susceptible, E: exposed period, I: infected, R: recovered(immune to disease)
    init = State(S = int(pop*(1 - init_im)), E = 0, I = 1, R = int(pop*init_im))
    # Normalize so the State holds population *fractions*, not head counts;
    # downstream code (e.g. the sweeps) multiplies by pop to recover counts.
    init /= np.sum(init)

    t0 = 0
    #number of days in 1 year
    t_end = 365

    return System(init = init, beta = beta, gamma = gamma, sigma = sigma,
                  t0 = t0, t_end = t_end, init_im = init_im)
```

make_system function sets the initial values for the state and returns it with other necessary variables. Since the model is a SEIR model, initial state init contains four values, S, E, I, R where S and R is determined by the initial size and immunization rate of the community, and I is set to 1 to show that one person is infected at the start. Time span for the simulation was set to a year, since every outbreak in this simulation ends within the period.

```
def update_func(state, time, system):
    """Advance the SEIR model by one time step (one day).

    state: State with S, E, I, R population fractions
    time: current time step (not used directly; kept for the
          run_simulation(update_func) calling convention)
    system: System carrying the beta, gamma and sigma rates

    returns: new State with updated S, E, I, R
    """
    # modsim's unpack() makes the System's attributes (beta, gamma, sigma, ...)
    # available as local names below.
    unpack(system)
    s,e,i,r = state

    #current population
    total_pop = s+e+i+r

    #change rate for each status
    #change in number of people susceptible
    ds = (-beta*s*i)/total_pop
    #change in number of people moving to exposed period
    de = ((beta*s*i)/total_pop) - sigma*e
    #change in people moving to infectious period
    di = sigma*e - gamma*i
    #change in people recovered
    dr = gamma*i

    s += ds
    e += de
    i += di
    r += dr

    return State(S=s, E=e, I=i, R=r)
```

update_func function updates the state with four different differential equations. System object was unpacked at the beginning of the code to make it easy to read.
Change in susceptible group is affected only by the number of people in infected group, which will raise the number of people in exposed group. There is no direct transition from susceptible group to the infected group, because measles have average of 11 days incubation period, where the person does not spread the disease during that period. Therefore, about 1/11 (sigma value) of people in the exposed group move to the infected group every day, showing that their incubatoin period has ended. It takes about 7 days in average for people to get recoverd, so 1/7 (gamma) of people infected is recovered every day. ``` def run_simulation(system, update_func): """Runs a simulation of the system. system: System object update_func: function that updates state returns: TimeFrame """ unpack(system) #creates timeframe to save daily states frame = TimeFrame(columns=init.index) frame.row[t0] = init for time in linrange(t0, t_end): frame.row[time+1] = update_func(frame.row[time], time, system) return frame ``` run_simulation function takes a system object with a update_func function, and simulates the state for the duration of the time span set at the make_system function. It returns a TimeFrame object with all the state values for each time step. ``` def plot_results (S,E,I,R): plot(S, '--', label = 'Susceptible') plot(E, '-', label = 'Exposed') plot(I, '.', label = 'Infected') plot(R, ':', label = 'Recovered') decorate(xlabel='Time (days)', ylabel = 'Fraction of population') ``` A plotting function was made for convenience. ``` init_im = 0.907 system = make_system(pop, init_im, beta, gamma, sigma) results = run_simulation(system, update_func); ``` The code was tested with 2017 average immunization rate for the U.S (90.7%), testing out what will happen if a measles infected person is introduced to a community of 1000 people in a real world situation. 
``` plot_results(results.S, results.E, results.I, results.R) decorate(title ='Figure 1') ``` The result shows that even though measles is a highly contagious disease, the measles outbreak ends without infecting number of people due high immunity rate. We call this herd immunity, because immunized people acts as a barrier that prevents disease to spread among the susceptible people. For each disease, there is specific percentage of people needed to create a herd immunity. Lowering the immunity rate will show abrupt change in infected people, once the herd immunity stops working. ``` init_im2 = 0.3 system = make_system(pop, init_im2, beta, gamma, sigma) results2 = run_simulation(system, update_func) results2; ``` Next, the code was tested with lowered initial immunity rate of 30%. ``` plot_results(results2.S, results2.E, results2.I, results2.R) decorate (title = 'Figure 2') ``` The result is a lot different from the one above, showing that most of susceptible people become infected before the outbreak ends. This shows that the community with only 30% immunity rate has lost their herd immunity, because the number of immuned (recovered) people is too small to act as a barrier that protects the susceptible people. Seeing the result, we can assume that there must be a point between immunity rate of 30% to 90% where the herd immunity fails to function. ``` def calc_highest_infected(results): """Fraction of population infected during the simulation. results: DataFrame with columns S, E, I, R returns: fraction of population """ return max(results.I) def sweep_init_im(imun_rate_array): """Sweep a range of values for beta. 
beta_array: array of beta values gamma: recovery rate returns: SweepSeries that maps from beta to total infected """ sweep = SweepSeries() for init_im in imun_rate_array: system = make_system(pop, init_im, beta, gamma, sigma) results = run_simulation(system, update_func) sweep[system.init_im] = calc_highest_infected(results)*pop return sweep ``` To carefully check out the impact due to the change of initial immunity for the community, a sweep_init_im function was created. The function checks out the highest number of people infected to the disease during the simulation. Since the number of people being infected at a day is proportional to the number of currently infected people, higher numbers means that the disease is spreading faster. ``` imun_rate_array = linspace(0, 1, 21) sweep = sweep_init_im(imun_rate_array) sweep plot(sweep) decorate(xlabel='Immunity Rate', ylabel = 'Highest number of people infected during 1 outbreak', title = 'Figure 3') ``` Looking at the table and the plot, we can examine that the speed of infection decreases almost linearly until the immunity rate reachs 80%. Actually, the table states that the maximum number of people infected after the initial immunization rate of 85% is 1, meaning that no one except for the initially infected person was infected during the outbreak. We guessed that the herd immunity for measles in this simulation must be around 80~85% range. ``` def calc_fraction_infected(results): """Fraction of susceptible population infected during the simulation. results: DataFrame with columns S, E, I, R returns: fraction of susceptible group population """ return (get_first_value(results.S) - get_last_value(results.S))/get_first_value(results.S) def sweep_init_im2(imun_rate_array): """Sweep a range of values for beta. 
beta_array: array of beta values gamma: recovery rate returns: SweepSeries that maps from beta to total infected """ sweep = SweepSeries() for init_im in imun_rate_array: system = make_system(pop, init_im, beta, gamma, sigma) results = run_simulation(system, update_func) sweep[system.init_im] = calc_fraction_infected(results) * 100 return sweep ``` To do a deeper analysis, another sweep_init_im function was created to check out the percentage of people in the susceptible group infected during the outbreak. It will give us more clear view toward the herd immunity for measles and hopefully reveal the danger of lowering immunity rate for a community. ``` imun_rate_array = linspace(0, 0.99, 34) sweep2 = sweep_init_im2(imun_rate_array) sweep2 plot(sweep2) decorate(xlabel='Immunity Rate', ylabel = '% of susceptible people getting measles during an outbreak', title = 'Figure 4') ``` Until the immunity rate reaches 60%, more than 90% of people in the susceptible group is infected by the measles. However, the percentage drops abruptly after that, hitting less than 10% on immunity rate of 84%. This graph clearly shows the importance of herd immunity, and the threat people can face due to the lowering of the immunity rate. ## Results This model uses SEIR methodology to examine how measels would spread throughout a community of 1000 individuals with varying immunity rates. Figure 1 depicts an SEIR representation based on a 90.7% measles immunity rate, equivalent to that of the immunity rate in the United States. Due to the high immunity rate, susceptible people are protected by the herd immunity, and the number of individuals in each of the categories, susceptible, recovered, and infected remains constant throughout the simulated outbreak. Figure 2 represents an example of the SEIR model with an immunity rate of 30%. In this model, we can see that as the number of susceptible individuals decreases, the number of recovered individuals increases at an equal and opposite rate. 
The entire population get infected and later recovered from this measles outbreak within 150 days of the start. Figure 3 depicts the predicted outcome of this model that as the immunity rate in a community increases, rate of infection decreases, thus the number of people infected during an outbreak will decrease. We see the number of infected individuals plateau around 80%~85% immunity. Figure 4 depicts the percent of susceptible individuals that do contact measles during an outbreak. At low immunity rates (without herd immunity) a large percent of susceptible individuals do contact measles. As the immunity rate increases, this percentage decreases. ## Interpretation As expected, as the immunity rate in the community increased, the highest number of people infected with measles during an outbreak decreased. The number of people infected with measles begins to plateau between an 80 - 85% immunity rate. From the data that Figure 4 is based on we can see that the ideal immunity rate for a community should be more than 80 - 85%, because the herd immunity is lost at the lowered immunity rate. Between these 2 numbers, the percent of susceptible individuals that contract measles drops sharply from 36% to 6%. Our model does have several limitations: 1. We were unable to find an effective contact number or contact rate for measles within the United States. Having this number would have enabled us to calculate beta instead of just assuming it to be 0.9. 2. The model gets to a point where less than 1 person is infected with measles. This is physically impossible as you cannot have less than one person. In our results, we interpreted less than 1 to mean the individual did not have measles. 3. The outbreak usually happens country wide, not restricted into a single community. Due to the fact that the simulation was done in a close community, the results may vary in real world situation. 4. People who get measles are usually quarantined before they start infecting other people. 
One special feature about measles is the rash, which usually appears 14 days after exposure.
github_jupyter
``` from netgan.netgan import * import tensorflow as tf from netgan import utils import scipy.sparse as sp import numpy as np from matplotlib import pyplot as plt from sklearn.metrics import roc_auc_score, average_precision_score import time %matplotlib inline ``` #### Load the data ``` _A_obs, _X_obs, _z_obs = utils.load_npz('data/cora_ml.npz') _A_obs = _A_obs + _A_obs.T _A_obs[_A_obs > 1] = 1 lcc = utils.largest_connected_components(_A_obs) _A_obs = _A_obs[lcc,:][:,lcc] _N = _A_obs.shape[0] val_share = 0.1 test_share = 0.05 seed = 481516234 ``` #### Load the train, validation, test split from file ``` loader = np.load('pretrained/cora_ml/split.npy').item() train_ones = loader['train_ones'] val_ones = loader['val_ones'] val_zeros = loader['val_zeros'] test_ones = loader['test_ones'] test_zeros = loader['test_zeros'] train_graph = sp.coo_matrix((np.ones(len(train_ones)),(train_ones[:,0], train_ones[:,1]))).tocsr() assert (train_graph.toarray() == train_graph.toarray().T).all() ``` #### Parameters ``` rw_len = 16 batch_size = 128 walker = utils.RandomWalker(train_graph, rw_len, p=1, q=1, batch_size=batch_size) ``` #### Create our NetGAN model ``` netgan = NetGAN(_N, rw_len, walk_generator= walker.walk, gpu_id=3, use_gumbel=True, disc_iters=3, W_down_discriminator_size=32, W_down_generator_size=128, l2_penalty_generator=1e-7, l2_penalty_discriminator=5e-5, generator_layers=[40], discriminator_layers=[30], temp_start=5, temperature_decay=0.99998, learning_rate=0.0003, legacy_generator=True) ``` #### Load pretrained model ``` saver = tf.train.Saver() saver.restore(netgan.session, "pretrained/cora_ml/pretrained_gen.ckpt") ``` #### Generate random walks on the trained model ``` sample_many = netgan.generate_discrete(10000, reuse=True, legacy=True) samples = [] for _ in range(60): if (_+1) % 500 == 0: print(_+1) samples.append(sample_many.eval({netgan.tau: 0.5})) ``` #### Assemble score matrix from the random walks ``` rws = np.array(samples).reshape([-1, rw_len]) 
scores_matrix = utils.score_matrix_from_random_walks(rws, _N).tocsr() ``` #### Compute graph statistics ``` A_select = sp.csr_matrix((np.ones(len(train_ones)), (train_ones[:,0], train_ones[:,1]))) A_select = train_graph sampled_graph = utils.graph_from_scores(scores_matrix, A_select.sum()) plt.spy(sampled_graph, markersize=.2) plt.show() plt.spy(A_select, markersize=.2) plt.show() utils.edge_overlap(A_select.toarray(), sampled_graph)/A_select.sum() utils.compute_graph_statistics(sampled_graph) utils.compute_graph_statistics(A_select.toarray()) ```
github_jupyter
# Data Ingestor for IoT Telemetry and Failure Data This notebook ingests and preprocesses IoT device telemetry data in the Azure blob service and IoT device failure logs in Azure storage table to use in Feature Engineering and Model Training. This imitates a production scenario where telemetry is collected over a period of time whereas failure/maintenance logs are manually populated with new data. ### Dependency Importing and Environment Variable Retrieval ``` import os import string import json import pandas as pd import pyspark.sql.functions as F from pyspark.sql import SparkSession, SQLContext from pyspark.sql.functions import udf from pyspark.sql.types import TimestampType, StringType from pyspark.storagelevel import StorageLevel from azure.storage.table import TableService ``` #### Read Environment Variables ``` #For development purposes only until ENV Variables get set from pathlib import Path env_config_file_location = (str(Path.home())+"/NotebookEnvironmentVariablesConfig.json") config_file = Path(env_config_file_location) if not config_file.is_file(): env_config_file_location = ("/dbfs"+str(Path.home())+"/NotebookEnvironmentVariablesConfig.json") f = open(env_config_file_location) env_variables = json.load(f)["DataIngestion"] STORAGE_ACCOUNT_SUFFIX = 'core.windows.net' STORAGE_ACCOUNT_NAME = env_variables["STORAGE_ACCOUNT_NAME"] STORAGE_ACCOUNT_KEY = env_variables["STORAGE_ACCOUNT_KEY"] TELEMETRY_CONTAINER_NAME = env_variables["TELEMETRY_CONTAINER_NAME"] LOG_TABLE_NAME = env_variables["LOG_TABLE_NAME"] DATA_ROOT = env_variables["DATA_ROOT_FOLDER"] ``` ### Setting up Ingested Data Drop Folder This location is where the prepared ingested IoT data is stored for further use in the notebooks to follow. 
``` data_dir = DATA_ROOT + '/data' #TODO: Convert data_dir into env variable % rm -rf $data_dir % mkdir $data_dir $data_dir/logs ``` ### Retrieving telemetry data The raw data retrieved from the PdM solution storage contains all the IoT telemetry data in the "Body" column of the dataframe in a byte array. It needs to be deserialized into a string representing JSON, then expanded into a separate dataframe to be used by FeatureEngineering and ModelTraining. ``` wasbTelemetryUrl = "wasb://{0}@{1}.blob.{2}/*/*/*/*/*/*/*".format(TELEMETRY_CONTAINER_NAME, STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_SUFFIX) sc = SparkSession.builder.getOrCreate() hc = sc._jsc.hadoopConfiguration() hc.set("avro.mapred.ignore.inputs.without.extension", "false") if STORAGE_ACCOUNT_KEY: hc.set("fs.azure.account.key.{}.blob.core.windows.net".format(STORAGE_ACCOUNT_NAME), STORAGE_ACCOUNT_KEY) hc.set("fs.azure.account.key.{}.blob.core.windows.net" .format(STORAGE_ACCOUNT_NAME), STORAGE_ACCOUNT_KEY) sql = SQLContext.getOrCreate(sc) avroblob = sql.read.format("com.databricks.spark.avro").load(wasbTelemetryUrl) avroblob.show() ``` ### Convert byteformatted "body" of raw blob data into JSON, explode result into new Pyspark DataFrame The output here shows the schema of the telemetry data as well as a preview of the telemetry data with the specific columns necessary for FeatureEngineering and ModelTraining ``` #Convert byteformat to string format in pyspark dataframe from json import loads as Loads column = avroblob['Body'] string_udf = udf(lambda x: x.decode("utf-8")) avroblob=avroblob.withColumn("BodyString", string_udf(column)) avroblob.printSchema() #Convert "body" into new DataFrame telemetry_df = sql.read.json(avroblob.select("BodyString").rdd.map(lambda r: r.BodyString)) subsetted_df = telemetry_df.select(["timestamp", "ambient_pressure","ambient_temperature","machineID","pressure","speed","speed_desired","temperature"]) subsetted_df.show() import datetime e = '%Y-%m-%dT%H:%M:%S.%f' 
reformatted_time_df = subsetted_df.withColumn("timestamp", F.col("timestamp").cast("timestamp")) reformatted_time_df.printSchema() ``` ### Write dataframe to Parquet format ``` reformatted_time_df.write.parquet(data_dir+"/telemetry", mode="overwrite") ``` ## Get Logs ``` #table retrieval table_service = TableService(account_name=STORAGE_ACCOUNT_NAME, account_key=STORAGE_ACCOUNT_KEY) tblob = table_service.query_entities(LOG_TABLE_NAME) ``` ### Process log table data into Pandas DataFrame ``` attributes = list() for row in tblob: if (len(attributes) == 0): for attribute in row: attributes.append(attribute) break log_df = pd.DataFrame(columns=attributes) for row in tblob: if (row["Level"] != "DEBUG"): row_dict = {} for attribute in row: if (attribute != "Timestamp"): row_dict[attribute] = row[attribute] else: newtime = row[attribute].replace(tzinfo=None) timeitem = pd.Timestamp(newtime, tz=None) row_dict[attribute] = timeitem log_df = log_df.append(row_dict, ignore_index=True) log_df.head() ``` ### Number of Run-To-Failure Sequences The number of Run-To-Failure sequences is especially important for FeatureEngineering and ModelTraining as these log instances are used to train the predictive model. If there are no failure sequences logged, then training a predictive model is useless as the model has no reference for what a situation for failure may look like. Do not proceed with the notebooks if there are no Run-To-Failure sequences logged. ``` message_counts = log_df['Message'].value_counts() if ('failure' in message_counts): print("Number of Run-to-Failures:", message_counts['failure']) else: raise ValueError('Run to failure count is 0. 
Do not proceed.') ``` ### Select necessary attributes ``` log_df = log_df[["Timestamp", "Code", "Level", "PartitionKey"]].astype(str) log_df.columns = ["timestamp", "code","level","machineID"] log_df.index = log_df['timestamp'] log_df.head() ``` ### Write logs to system storage ``` log_df = sqlContext.createDataFrame(log_df) log_df.write.parquet(data_dir+"/logs", mode="overwrite") ```
github_jupyter
``` import csv import numpy as np import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator from google.colab import files !pip install -U -q kaggle !mkdir ~/.kaggle ``` The data for this exercise is available at: https://www.kaggle.com/datamunge/sign-language-mnist/home Sign up and download to find 2 CSV files: sign_mnist_test.csv and sign_mnist_train.csv -- You will upload both of them using this button before you can continue. ``` from google.colab import files files.upload() !cp kaggle.json ~/.kaggle/ !chmod 600 /root/.kaggle/kaggle.json !kaggle datasets download -d datamunge/sign-language-mnist from zipfile import ZipFile zip_file = ZipFile('/content/sign-language-mnist.zip', 'r') zip_file.extractall('/content/sign-language-mnist') zip_file.close() from shutil import rmtree from shutil import copyfile copyfile('/content/sign-language-mnist/sign_mnist_train.csv', '/content/sign_mnist_train.csv') copyfile('/content/sign-language-mnist/sign_mnist_test.csv', '/content/sign_mnist_test.csv') rmtree('sign-language-mnist') # uploaded=files.upload() def get_data(filename): # You will need to write code that will read the file passed # into this function. The first line contains the column headers # so you should ignore it # Each successive line contians 785 comma separated values between 0 and 255 # The first value is the label # The rest are the pixel values for that picture # The function will return 2 np.array types. 
One with all the labels # One with all the images # # Tips: # If you read a full line (as 'row') then row[0] has the label # and row[1:785] has the 784 pixel values # Take a look at np.array_split to turn the 784 pixels into 28x28 # You are reading in strings, but need the values to be floats # Check out np.array().astype for a conversion with open(filename) as training_file: # Your code starts here csv_reader = csv.reader(training_file, delimiter=',') first_line = True temp_images, temp_labels = [], [] for row in csv_reader: if first_line: first_line = False else: temp_labels.append(row[0]) image_data = row[1:785] image_data_as_array = np.array_split(image_data, 28) temp_images.append(image_data_as_array) images = np.array(temp_images).astype('float') labels = np.array(temp_labels).astype('float') # Your code ends here return images, labels training_images, training_labels = get_data('sign_mnist_train.csv') testing_images, testing_labels = get_data('sign_mnist_test.csv') # Keep these print(training_images.shape) print(training_labels.shape) print(testing_images.shape) print(testing_labels.shape) # Their output should be: # (27455, 28, 28) # (27455,) # (7172, 28, 28) # (7172,) # In this section you will have to add another dimension to the data # So, for example, if your array is (10000, 28, 28) # You will need to make it (10000, 28, 28, 1) # Hint: np.expand_dims training_images = np.expand_dims(training_images, axis=3) testing_images = np.expand_dims(testing_images, axis=3) # Create an ImageDataGenerator and do Image Augmentation train_datagen = ImageDataGenerator( # Your Code Here rescale=1. 
/ 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest' ) validation_datagen = ImageDataGenerator( # Your Code Here rescale = 1./255 ) # Keep These print(training_images.shape) print(testing_images.shape) # Their output should be: # (27455, 28, 28, 1) # (7172, 28, 28, 1) # Define the model # Use no more than 2 Conv2D and 2 MaxPooling2D model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation=tf.nn.relu), tf.keras.layers.Dense(26, activation=tf.nn.softmax) ]) # Compile Model. model.compile(optimizer=tf.train.AdamOptimizer(), loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) # Train the Model history = model.fit_generator( train_datagen.flow(training_images, training_labels, batch_size=32), epochs=15, validation_data=validation_datagen.flow(testing_images, testing_labels, batch_size=32), validation_steps=len(testing_images) / 32 ) model.evaluate(testing_images, testing_labels) # The output from model.evaluate should be close to: # [6.92426086682151, 0.56609035] # Plot the chart for accuracy and loss on both training and validation import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'r', label='Training Loss') plt.plot(epochs, val_loss, 'b', label='Validation Loss') plt.title('Training and validation loss') plt.legend() plt.show() ```
github_jupyter
<a href="https://colab.research.google.com/github/Pavithran-R/rclone-colab/blob/master/rclone_colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## <img src='https://rclone.netlify.app/img/title_rclonelab.svg' height="45" alt="RcloneLab"/> ``` #@markdown <center><h3>Rclone Web UI</h3></center><br> import os, signal import random import string import urllib.request from IPython.display import HTML, clear_output import time ##################################### USE_FREE_TOKEN = False # @param {type:"boolean"} TOKEN = "" # @param {type:"string"} REGION = "IN" #@param ["US", "EU", "AP", "AU", "SA", "JP", "IN"] #@markdown Default <br>&emsp;username : user<br>&emsp;password : pass HOME = os.path.expanduser("~") runW = get_ipython() if not os.path.exists(f"{HOME}/.ipython/ocr.py"): hCode = "https://raw.githubusercontent.com/Pavithran-R/" \ "rclone-colab/master/res/ocr.py" urllib.request.urlretrieve(hCode, f"{HOME}/.ipython/ocr.py") if not os.path.exists("/root/.ipython/rlab_utils.py"): from shlex import split as _spl from subprocess import run shellCmd = "wget -qq https://rclone.netlify.app/res/rlab_utils.py \ -O /root/.ipython/rlab_utils.py" run(_spl(shellCmd)) from ocr import ( runSh, loadingAn, PortForward_wrapper, displayUrl, findProcess, CWD, textAn, checkAvailable ) from rlab_utils import ( displayOutput, checkAvailable, prepareSession, PATH_RClone_Config, accessSettingFile, memGiB, ) loadingAn() prepareSession() pid = findProcess("rclone", "rcd", isPid=True) try: os.kill(int(pid), signal.SIGTERM) except TypeError: pass cmd = "rclone rcd --rc-web-gui --rc-addr :5572" \ " --rc-serve" \ " --rc-user=user --rc-pass=pass" \ " --rc-no-auth" \ rf" --config {PATH_RClone_Config}/rclone.conf" \ ' --user-agent "Mozilla"' \ ' --transfers 16' \ " &" runSh(cmd, shell=True) # START_SERVER # Ngrok region 'us','eu','ap','au','sa','jp','in' clear_output() PORT_FORWARD = "localhost" #@param ["ngrok", "localhost"] 
# Expose the daemon (port 5572) through the chosen tunnel and show the URL.
Server = PortForward_wrapper(
    PORT_FORWARD, TOKEN, USE_FREE_TOKEN,
    [['rclone', 5572, 'http'], ['filebrowser', 4000, 'http']],
    REGION.lower(), [f"{HOME}/.ngrok2/filebrowserRclone.yml", 4099]
).start('rclone', displayB=False)
# output
clear_output()
displayUrl(Server, pNamU='Rclone Web UI : ',
           ExUrl=fr"https://user:pass@{Server['url'][7:]}")

# ---- Cell: create/edit the rclone config interactively ----
#@markdown <center><h3>Create/Edit Rclone config</h3>Create a new remote with name, type and options.<br><font size=1px>After created your config file download that. Next time just upload and you are done!</font></center>
import os, urllib.request
from IPython.display import HTML
USE_FREE_TOKEN = False # @param {type:"boolean"}
TOKEN = "" # @param {type:"string"}
REGION = "IN" #@param ["US", "EU", "AP", "AU", "SA", "JP", "IN"]
HOME = os.path.expanduser("~")
# NOTE(review): runW is never used in this cell.
runW = get_ipython()
if not os.path.exists(f"{HOME}/.ipython/ocr.py"):
    hCode = "https://raw.githubusercontent.com/Pavithran-R/" \
            "rclone-colab/master/res/ocr.py"
    urllib.request.urlretrieve(hCode, f"{HOME}/.ipython/ocr.py")
#####################################
if not os.path.exists("/root/.ipython/rlab_utils.py"):
    from shlex import split as _spl
    from subprocess import run
    shellCmd = "wget -qq https://rclone.netlify.app/res/rlab_utils.py \
-O /root/.ipython/rlab_utils.py"
    run(_spl(shellCmd))
from rlab_utils import (
    prepareSession, PATH_RClone_Config, runSh
)
from ocr import (
    PortForward_wrapper
)
###################################
# The code below is a vendored copy of google.colab's %shell magic internals
# (PTY + epoll plumbing), patched in _poll_process to rewrite rclone's OAuth
# callback URL to the tunnelled Server URL as output streams by.
import codecs
import contextlib
import locale
import os
import pty
import select
import signal
import subprocess
import sys
import termios
import time

from IPython.utils import text
import six

from google.colab import _ipython
from google.colab import _message
from google.colab.output import _tags

# Linux read(2) limits to 0x7ffff000 so stay under that for clarity.
_PTY_READ_MAX_BYTES_FOR_TEST = 2**20  # 1MB

_ENCODING = 'UTF-8'


class ShellResult(object):
  """Result of an invocation of the shell magic.

  Note: This is intended to mimic subprocess.CompletedProcess, but has
  slightly different characteristics, including:
    * CompletedProcess has separate stdout/stderr properties. A ShellResult
      has a single property containing the merged stdout/stderr stream,
      providing compatibility with the existing "!" shell magic (which this
      is intended to provide an alternative to).
    * A custom __repr__ method that returns output. When the magic is invoked
      as the only statement in the cell, Python prints the string
      representation by default. The existing "!" shell magic also returns
      output.
  """

  def __init__(self, args, returncode, command_output):
    self.args = args
    self.returncode = returncode
    self.output = command_output

  def check_returncode(self):
    # Mirrors subprocess.CompletedProcess.check_returncode: raise on nonzero.
    if self.returncode:
      raise subprocess.CalledProcessError(
          returncode=self.returncode, cmd=self.args, output=self.output)

  def _repr_pretty_(self, p, cycle):  # pylint:disable=unused-argument
    # Note: When invoking the magic and not assigning the result
    # (e.g. %shell echo "foo"), Python's default semantics will be used and
    # print the string representation of the object. By default, this will
    # display the __repr__ of ShellResult. Suppress this representation since
    # the output of the command has already been displayed to the output
    # window.
    if cycle:
      raise NotImplementedError


def _configure_term_settings(pty_fd):
  # Adjust terminal flags on the PTY so streamed output is not mangled.
  term_settings = termios.tcgetattr(pty_fd)
  # ONLCR transforms NL to CR-NL, which is undesirable. Ensure this is
  # disabled.
  # http://man7.org/linux/man-pages/man3/termios.3.html
  term_settings[1] &= ~termios.ONLCR

  # ECHOCTL echoes control characters, which is undesirable.
  term_settings[3] &= ~termios.ECHOCTL

  termios.tcsetattr(pty_fd, termios.TCSANOW, term_settings)


def _run_command(cmd, clear_streamed_output):
  """Calls the shell command, forwarding input received on the stdin_socket."""
  locale_encoding = locale.getpreferredencoding()
  if locale_encoding != _ENCODING:
    raise NotImplementedError(
        'A UTF-8 locale is required. Got {}'.format(locale_encoding))

  parent_pty, child_pty = pty.openpty()
  _configure_term_settings(child_pty)

  epoll = select.epoll()
  epoll.register(
      parent_pty,
      (select.EPOLLIN | select.EPOLLOUT | select.EPOLLHUP | select.EPOLLERR))

  try:
    temporary_clearer = _tags.temporary if clear_streamed_output else _no_op

    with temporary_clearer(), _display_stdin_widget(
        delay_millis=500) as update_stdin_widget:
      # TODO(b/115531839): Ensure that subprocesses are terminated upon
      # interrupt.
      p = subprocess.Popen(
          cmd,
          shell=True,
          executable='/bin/bash',
          stdout=child_pty,
          stdin=child_pty,
          stderr=child_pty,
          close_fds=True)
      # The child PTY is only needed by the spawned process.
      os.close(child_pty)

      return _monitor_process(parent_pty, epoll, p, cmd, update_stdin_widget)
  finally:
    epoll.close()
    os.close(parent_pty)


class _MonitorProcessState(object):
  # Mutable bag shared between _monitor_process and _poll_process.

  def __init__(self):
    self.process_output = six.StringIO()
    self.is_pty_still_connected = True


def _monitor_process(parent_pty, epoll, p, cmd, update_stdin_widget):
  """Monitors the given subprocess until it terminates."""
  state = _MonitorProcessState()

  # A single UTF-8 character can span multiple bytes. os.read returns bytes
  # and could return a partial byte sequence for a UTF-8 character. Using an
  # incremental decoder is incrementally fed input bytes and emits UTF-8
  # characters.
  decoder = codecs.getincrementaldecoder(_ENCODING)()

  num_interrupts = 0
  echo_status = None
  while True:
    try:
      result = _poll_process(parent_pty, epoll, p, cmd, decoder, state)
      if result is not None:
        return result
      # Propagate the subprocess's echo setting to the stdin widget.
      term_settings = termios.tcgetattr(parent_pty)
      new_echo_status = bool(term_settings[3] & termios.ECHO)
      if echo_status != new_echo_status:
        update_stdin_widget(new_echo_status)
        echo_status = new_echo_status
    except KeyboardInterrupt:
      try:
        num_interrupts += 1
        if num_interrupts == 1:
          p.send_signal(signal.SIGINT)
        elif num_interrupts == 2:
          # Process isn't responding to SIGINT and user requested another
          # interrupt. Attempt to send SIGTERM followed by a SIGKILL if the
          # process doesn't respond.
          p.send_signal(signal.SIGTERM)
          time.sleep(0.5)
          if p.poll() is None:
            p.send_signal(signal.SIGKILL)
      except KeyboardInterrupt:
        # Any interrupts that occur during shutdown should not propagate.
        pass

      if num_interrupts > 2:
        # In practice, this shouldn't be possible since SIGKILL is quite
        # effective.
        raise


def _poll_process(parent_pty, epoll, p, cmd, decoder, state):
  """Polls the process and captures / forwards input and output."""

  terminated = p.poll() is not None
  if terminated:
    termios.tcdrain(parent_pty)
    # We're no longer interested in write events and only want to consume any
    # remaining output from the terminated process. Continuing to watch write
    # events may cause early termination of the loop if no output was
    # available but the pty was ready for writing.
    epoll.modify(parent_pty,
                 (select.EPOLLIN | select.EPOLLHUP | select.EPOLLERR))

  output_available = False

  events = epoll.poll()
  input_events = []
  for _, event in events:
    if event & select.EPOLLIN:
      output_available = True
      raw_contents = os.read(parent_pty, _PTY_READ_MAX_BYTES_FOR_TEST)
      # Patch (not in upstream colab code): rewrite rclone's local OAuth
      # callback address in the streamed output to the tunnelled URL so the
      # user can open it from outside the VM.
      import re
      decoded_contents = re.sub(r"http:\/\/127.0.0.1:53682", Server["url"],
                                decoder.decode(raw_contents))
      sys.stdout.write(decoded_contents)
      state.process_output.write(decoded_contents)

    if event & select.EPOLLOUT:
      # Queue polling for inputs behind processing output events.
      input_events.append(event)

    # PTY was disconnected or encountered a connection error. In either case,
    # no new output should be made available.
    if (event & select.EPOLLHUP) or (event & select.EPOLLERR):
      state.is_pty_still_connected = False

  for event in input_events:
    # Check to see if there is any input on the stdin socket.
    # pylint: disable=protected-access
    input_line = _message._read_stdin_message()
    # pylint: enable=protected-access
    if input_line is not None:
      # If a very large input or sequence of inputs is available, it's
      # possible that the PTY buffer could be filled and this write call
      # would block. To work around this, non-blocking writes and keeping
      # a list of to-be-written inputs could be used. Empirically, the
      # buffer limit is ~12K, which shouldn't be a problem in most
      # scenarios. As such, optimizing for simplicity.
      input_bytes = bytes(input_line.encode(_ENCODING))
      os.write(parent_pty, input_bytes)

  # Once the process is terminated, there still may be output to be read from
  # the PTY. Wait until the PTY has been disconnected and no more data is
  # available for read. Simply waiting for disconnect may be insufficient if
  # there is more data made available on the PTY than we consume in a single
  # read call.
  if terminated and not state.is_pty_still_connected and not output_available:
    sys.stdout.flush()
    command_output = state.process_output.getvalue()
    return ShellResult(cmd, p.returncode, command_output)

  if not output_available:
    # The PTY is almost continuously available for reading input to provide
    # to the underlying subprocess. This means that the polling loop could
    # effectively become a tight loop and use a large amount of CPU. Add a
    # slight delay to give resources back to the system while monitoring the
    # process.
    # Skip this delay if we read output in the previous loop so that a
    # partial read doesn't unnecessarily sleep before reading more output.
    # TODO(b/115527726): Rather than sleep, poll for incoming messages from
    # the frontend in the same poll as for the output.
    time.sleep(0.1)


@contextlib.contextmanager
def _display_stdin_widget(delay_millis=0):
  """Context manager that displays a stdin UI widget and hides it upon exit.

  Args:
    delay_millis: Duration (in milliseconds) to delay showing the widget
      within the UI.

  Yields:
    A callback that can be invoked with a single argument indicating whether
    echo is enabled.
  """
  shell = _ipython.get_ipython()
  display_args = ['cell_display_stdin', {'delayMillis': delay_millis}]
  _message.blocking_request(*display_args, parent=shell.parent_header)

  def echo_updater(new_echo_status):
    # Note: Updating the echo status uses colab_request / colab_reply on the
    # stdin socket. Input provided by the user also sends messages on this
    # socket. If user input is provided while the blocking_request call is
    # still waiting for a colab_reply, the input will be dropped per
    # https://github.com/googlecolab/colabtools/blob/56e4dbec7c4fa09fad51b60feb5c786c69d688c6/google/colab/_message.py#L100.
    update_args = ['cell_update_stdin', {'echo': new_echo_status}]
    _message.blocking_request(*update_args, parent=shell.parent_header)

  yield echo_updater

  hide_args = ['cell_remove_stdin', {}]
  _message.blocking_request(*hide_args, parent=shell.parent_header)


@contextlib.contextmanager
def _no_op():
  # Stand-in for _tags.temporary when streamed output should not be cleared.
  yield

###################################
# Open a tunnel to rclone's OAuth callback port (53682) and run
# `rclone config` through the patched shell runner above.
prepareSession()
PORT_FORWARD = "localhost" #@param ["ngrok", "localhost"]
Server = PortForward_wrapper(
    PORT_FORWARD, TOKEN, USE_FREE_TOKEN,
    [['rcloneConfig', 53682, 'http'], ['pyload', 8000, 'http']],
    REGION.lower(), [f"{HOME}/.ngrok2/rclonePyload.yml", 4074]
).start('rcloneConfig', displayB=False, v=False)

# NOTE(review): this message says http://127.0.0.0:53682 but the substitution
# in _poll_process targets http://127.0.0.1:53682 (rclone's actual callback
# address) — the "127.0.0.0" here looks like a typo; confirm and align.
printData = """Copy this URL, It's needed for authentication purposes.
After completing your account select, you redirect to a website, after back you need to change http://127.0.0.0:53682 to {}""".format(Server['url'])
print(printData)
display(HTML('&emsp;&emsp;<a href="https://raw.githubusercontent.com/Pavithran-R/rclone-colab/master/img/rclone_config_create.gif" target="_blank">See how</a><br><br>'))
print(f"{Server['url']}", end="\n\n")
_run_command(f"rclone config --config {PATH_RClone_Config}/rclone.conf", False)

# ---- Cell: mount / unmount a configured remote via FUSE ----
#@markdown <center><h3>Rclone MOUNT / UNMOUNT</h3>Mount the remote as file system on a mountpoint.</center>
Cache_Directory = "DISK" #@param ["RAM", "DISK"]
import os
from IPython.display import HTML, clear_output
import uuid
import ipywidgets as widgets
from google.colab import output
import re
#####################################
if not os.path.exists("/root/.ipython/rlab_utils.py"):
    from shlex import split as _spl
    from subprocess import run
    shellCmd = "wget -qq https://rclone.netlify.app/res/rlab_utils.py \
-O /root/.ipython/rlab_utils.py"
    run(_spl(shellCmd))
from rlab_utils import (
    runSh,
    prepareSession,
    PATH_RClone_Config,
)


class MakeButton(object):
    # Renders an HTML button wired to a Python callback through
    # google.colab.output.register_callback.

    def __init__(self, title, callback, style):
        self._title = title
        self._callback = callback
        self._style = style

    def _repr_html_(self):
        # A fresh UUID per render keeps callback ids unique across displays.
        callback_id = 'button-' + str(uuid.uuid4())
        output.register_callback(callback_id, self._callback)
        if self._style != "":
            style_html = "p-Widget jupyter-widgets jupyter-button widget-button mod-" + self._style
        else:
            style_html = "p-Widget jupyter-widgets jupyter-button widget-button"
        template = """<button class="{style_html}" id="{callback_id}">{title}</button>
        <script>
          document.querySelector("#{callback_id}").onclick = (e) => {{
            google.colab.kernel.invokeFunction('{callback_id}', [], {{}})
            e.preventDefault();
          }};
        </script>"""
        html = template.format(title=self._title, callback_id=callback_id,
                               style_html=style_html)
        return html


def ShowAC():
    # Re-draws the remote-selection dropdown plus Mount/Unmount buttons.
    clear_output(wait=True)
    display(
        widgets.HBox(
            [widgets.VBox(
                [widgets.HTML(
                    '''<h3
                    style="font-family:Trebuchet MS;color:#4f8bd6;margin-top:0px;"> Rclone available config...</h3>
                    '''
                ),
                mountNam]
            )
            ]
        )
    )
    display(HTML("<br>"), MakeButton("Mount", MountCMD, "primary"),
            MakeButton("Unmount", unmountCMD, "danger"))


prepareSession()
# Parse remote names ([section] headers) out of the rclone config file.
content = open(f"{PATH_RClone_Config}/rclone.conf").read()
avCon = re.findall(r"^\[(.+)\]$", content, re.M)
mountNam = widgets.Dropdown(options=avCon)
# RAM-backed cache uses tmpfs at /dev/shm; DISK uses /tmp.
if Cache_Directory == 'RAM':
    cache_path = '/dev/shm'
elif Cache_Directory == 'DISK':
    os.makedirs('/tmp', exist_ok=True)
    cache_path = '/tmp'


def MountCMD():
    # Mount the selected remote under /content/drives/<name> as a daemon.
    mPoint = f"/content/drives/{mountNam.value}"
    os.makedirs(mPoint, exist_ok=True)
    cmd = rf"rclone mount {mountNam.value}: {mPoint}" \
          rf" --config {PATH_RClone_Config}/rclone.conf" \
          ' --user-agent "Mozilla"' \
          ' --buffer-size 256M' \
          ' --transfers 10' \
          ' --vfs-cache-mode full' \
          ' --vfs-cache-max-age 0h0m1s' \
          ' --vfs-cache-poll-interval 0m1s' \
          f' --cache-dir {cache_path}' \
          ' --allow-other' \
          ' --daemon'
    if runSh(cmd, shell=True) == 0:
        print(f"Mount success! - \t{mPoint}")
    else:
        print(f"Mount failed! - \t{mPoint}")


def unmountCMD():
    # Lazy-unmount the selected mountpoint; on success remove the directory,
    # on failure re-run with output shown for diagnosis.
    mPoint = f"/content/drives/{mountNam.value}"
    if os.system(f"fusermount -uz {mPoint}") == 0:
        runSh(f"rm -r {mPoint}")
        print(f"Unmounted success! - \t{mPoint}")
    else:
        runSh(f"fusermount -uz {mPoint}", output=True)


ShowAC()

# ---- Cell: upload a config file (or helper utils) from the local machine ----
# ============================= FORM ============================= #
# @markdown #### ⬅️ Upload and Execute config file
MODE = "RCONFIG"
REMOTE = "mnc"
QUERY_PATTERN = ""
# @markdown #### <font size=1px>For not able to upload local file : https://stackoverflow.com/a/58661947</font>
# ================================================================ #
from os import path as _p
if not _p.exists("/root/.ipython/rlab_utils.py"):
    from shlex import split as _spl
    from subprocess import run  # nosec
    shellCmd = "wget -qq https://rclone.netlify.app/res/rlab_utils.py \
-O /root/.ipython/rlab_utils.py"
    run(_spl(shellCmd))  # nosec
import importlib, rlab_utils
from google.colab import files  # pylint: disable=import-error #nosec
from rlab_utils import checkAvailable, runSh, PATH_RClone_Config, prepareSession


def generateUploadList():
    # List remote files (one level deep) into /content/upload.txt.
    prepareSession()
    if checkAvailable("/content/upload.txt"):
        runSh("rm -f upload.txt")
    runSh(
        f"rclone --config {PATH_RClone_Config}/rclone.conf lsf {REMOTE}: --include '{QUERY_PATTERN}' --drive-shared-with-me --files-only --max-depth 1 > /content/upload.txt",
        shell=True,  # nosec
    )


def uploadLocalFiles():
    # Receive a single file via the Colab upload widget and install it as the
    # rclone config (or, in UTILS mode, as the helper module).
    prepareSession()
    if MODE == "UTILS":
        filePath = "/root/.ipython/rlab_utils.py"
    elif MODE in ("RCONFIG", "RCONFIG_append"):
        filePath = f"{PATH_RClone_Config}/rclone.conf"
    else:
        pass
    try:
        if checkAvailable(filePath):
            runSh(f"rm -f {filePath}")
        print("Select file from your computer.\n")
        uploadedFile = files.upload()
        fileNameDictKeys = uploadedFile.keys()
        fileNo = len(fileNameDictKeys)
        if fileNo > 1:
            for fn in fileNameDictKeys:
                runSh(f'rm -f "/content/{fn}"')
            return print("\nPlease only upload a single config file.")
        elif fileNo == 0:
            return print("\nFile upload cancelled.")
        elif fileNo == 1:
            for fn in fileNameDictKeys:
                if checkAvailable(f"/content/{fn}"):
                    if MODE == "RCONFIG_append":
                        # Append mode: seed the target config, then append the
                        # uploaded file's contents to it.
                        # NOTE(review): bare `import urllib` relies on
                        # urllib.request having been imported by an earlier
                        # cell — confirm, or import urllib.request here.
                        import urllib
                        urllib.request.urlretrieve("https://rclone.netlify.app/res/rclonelab/rclone.conf",
                                                   "/usr/local/sessionSettings/rclone.conf")
                        with open(f"/content/{fn}", 'r+') as r:
                            new_data = r.read()
                        runSh(f'rm -f "/content/{fn}"')
                        with open(filePath, 'r+') as f:
                            old_data = f.read()
                            f.seek(0)
                            f.truncate(0)
                            f.write(old_data + new_data)
                        print("\nUpdate completed.")
                    else:
                        runSh(f'mv -f "/content/{fn}" {filePath}')
                        runSh(f"chmod 666 {filePath}")
                        runSh(f'rm -f "/content/{fn}"')
                        importlib.reload(rlab_utils)
                        print("\nUpload completed.")
                    return
        else:
            print("\nNo file")
            return
    # NOTE(review): bare except silently swallows all errors (including
    # KeyboardInterrupt); narrowing to Exception and logging would be safer.
    except:
        return print("\nUpload process Error.")


if MODE == "GENERATELIST":
    generateUploadList()
else:
    uploadLocalFiles()

# ---- Cell: download the current config file (or helper utils) ----
# ============================= FORM ============================= #
# @markdown #### ⬅️ Download config file
MODE = "RCONFIG"
# ================================================================ #
from google.colab import files


def downloadFile():
    # Push the selected file to the browser; missing file is reported, not
    # raised.
    if MODE == "UTILS":
        filePath = "/root/.ipython/rlab_utils.py"
    elif MODE == "RCONFIG":
        filePath = f"{PATH_RClone_Config}/rclone.conf"
    else:
        pass
    try:
        files.download(filePath)
    except FileNotFoundError:
        print("File not found!")


if __name__ == "__main__":
    downloadFile()
```

## Crash Colab to clear all RAM

```
#@title ⬅️ ឵Run this cell to crash your current Runtime if you're low on memory
#@markdown <i>After crashing, you'll have access to all the preoccupied storage. Then it'll help you for Rclone cache file storage increase (Recommend for a first run this cell) </i>
# Deliberately over-allocate (~5 TB string) so the runtime OOM-crashes and
# restarts with freed resources.
some_str = ' ' * 5120000000000
```
github_jupyter
```
#!pip install git+https://github.com/JoaquinAmatRodrigo/skforecast#master --upgrade
%load_ext autoreload
%autoreload 2
import sys
#sys.path.insert(1, '/home/ximo/Documents/GitHub/skforecast')

# Libraries
# ==============================================================================
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from skforecast.model_selection import grid_search_forecaster
from skforecast.model_selection import backtesting_forecaster

%config Completer.use_jedi = False
import session_info
session_info.show(html=False, write_req_file=False)
```

# Data

```
# Download data
# ==============================================================================
url = ('https://raw.githubusercontent.com/JoaquinAmatRodrigo/skforecast/master/data/h2o_exog.csv')
data = pd.read_csv(url, sep=',')

# data preprocessing
# ==============================================================================
# Parse dates, index by date, and declare an explicit monthly-start frequency.
data['fecha'] = pd.to_datetime(data['fecha'], format='%Y/%m/%d')
data = data.set_index('fecha')
data = data.rename(columns={'x': 'y'})
data = data.asfreq('MS')
data = data.sort_index()

# Plot
# ==============================================================================
fig, ax=plt.subplots(figsize=(9, 4))
data.plot(ax=ax);

# Split train-test
# ==============================================================================
# Hold out the last 36 observations (3 years of monthly data) for testing.
steps = 36
data_train = data.iloc[:-steps, :]
data_test = data.iloc[-steps:, :]
```

# ForecasterAutoregCustom without exogenous variables

```
def create_predictors(y):
    '''
    Create first 10 lags of a time series.
    Calculate moving average with window 20.
    '''
    # Last 10 values, most recent first, plus the mean of the last 20 —
    # hence the forecaster below needs window_size = 20.
    lags = y[-1:-11:-1]
    mean = np.mean(y[-20:])
    predictors = np.hstack([lags, mean])
    return predictors


# Create and fit forecaster
# ==============================================================================
from sklearn.pipeline import make_pipeline
forecaster = ForecasterAutoregCustom(
                    regressor      = make_pipeline(StandardScaler(), Ridge()),
                    fun_predictors = create_predictors,
                    window_size    = 20
             )

forecaster.fit(y=data_train.y)
forecaster

# Predict
# ==============================================================================
predictions = forecaster.predict(steps)

# Prediction error
# ==============================================================================
error_mse = mean_squared_error(
                y_true = data_test.y,
                y_pred = predictions
            )
print(f"Test error (mse): {error_mse}")

# Plot
# ==============================================================================
fig, ax=plt.subplots(figsize=(9, 4))
data_train.y.plot(ax=ax, label='train')
data_test.y.plot(ax=ax, label='test')
predictions.plot(ax=ax, label='predictions')
ax.legend();

# Grid search hiperparameters
# ==============================================================================
forecaster = ForecasterAutoregCustom(
                    regressor      = RandomForestRegressor(random_state=123),
                    fun_predictors = create_predictors,
                    window_size    = 20
             )

# Regressor hiperparameters
param_grid = {'n_estimators': [50, 100],
              'max_depth': [5, 10]}

results_grid = grid_search_forecaster(
                        forecaster         = forecaster,
                        y                  = data_train.y,
                        param_grid         = param_grid,
                        steps              = 10,
                        metric             = 'mean_squared_error',
                        refit              = False,
                        initial_train_size = int(len(data_train)*0.5),
                        return_best        = True,
                        verbose            = False
               )

# Results grid search
# ==============================================================================
results_grid

# Predictors importance
# ==============================================================================
forecaster.get_feature_importance()

# Backtesting
# ==============================================================================
steps = 36
n_backtest = 36 * 3 + 1
data_train = data[:-n_backtest]
data_test = data[-n_backtest:]

forecaster = ForecasterAutoregCustom(
                    regressor      = LinearRegression(),
                    fun_predictors = create_predictors,
                    window_size    = 20
             )

metrica, predicciones_backtest = backtesting_forecaster(
                                    forecaster         = forecaster,
                                    y                  = data.y,
                                    initial_train_size = len(data_train),
                                    steps              = steps,
                                    metric             = 'mean_squared_error',
                                    verbose            = True
                                 )
print(metrica)

# Gráfico
# ==============================================================================
fig, ax = plt.subplots(figsize=(9, 4))
data_train.y.plot(ax=ax, label='train')
data_test.y.plot(ax=ax, label='test')
predicciones_backtest.plot(ax=ax)
ax.legend();

predicciones_backtest

# Verify backtesting predictions by reproducing each fold manually: fit once
# on the train split, then predict each window feeding the true values of the
# previous window as last_window.
forecaster.fit(y=data_train.y)
predictions_1 = forecaster.predict(steps=steps)
predictions_2 = forecaster.predict(steps=steps, last_window=data_test.y[:steps])
predictions_3 = forecaster.predict(steps=steps, last_window=data_test.y[steps:steps*2])
predictions_4 = forecaster.predict(steps=1, last_window=data_test.y[steps*2:steps*3])
np.allclose(predicciones_backtest.pred,
            np.concatenate([predictions_1, predictions_2, predictions_3, predictions_4]))
```

# ForecasterAutoregCustom with 1 exogenous variables

```
# Split train-test
# ==============================================================================
steps = 36
data_train = data.iloc[:-steps, :]
data_test = data.iloc[-steps:, :]

forecaster = ForecasterAutoregCustom(
                    regressor      = LinearRegression(),
                    fun_predictors = create_predictors,
                    window_size    = 20
             )
forecaster

# Create and fit forecaster
# ==============================================================================
forecaster = ForecasterAutoregCustom(
                    regressor      = LinearRegression(),
                    fun_predictors = create_predictors,
                    window_size    = 20
             )

forecaster.fit(y=data_train.y, exog=data_train.exog_1)

# Predict
# ==============================================================================
# Prediction with exogenous values must supply the future exog window too.
steps = 36
predictions = forecaster.predict(steps=steps, exog=data_test.exog_1)

# Plot
# ==============================================================================
fig, ax=plt.subplots(figsize=(9, 4))
data_train.y.plot(ax=ax, label='train')
data_test.y.plot(ax=ax, label='test')
predictions.plot(ax=ax, label='predictions')
ax.legend();

# Error prediction
# ==============================================================================
error_mse = mean_squared_error(
                y_true = data_test.y,
                y_pred = predictions
            )
print(f"Test error (mse): {error_mse}")

# Grid search hiperparameters and lags
# ==============================================================================
forecaster = ForecasterAutoregCustom(
                    regressor      = RandomForestRegressor(random_state=123),
                    fun_predictors = create_predictors,
                    window_size    = 20
             )

# Regressor hiperparameters
param_grid = {'n_estimators': [50, 100],
              'max_depth': [5, 10]}

results_grid = grid_search_forecaster(
                        forecaster         = forecaster,
                        y                  = data_train.y,
                        exog               = data_train.exog_1,
                        param_grid         = param_grid,
                        steps              = 10,
                        metric             = 'mean_squared_error',
                        refit              = False,
                        initial_train_size = int(len(data_train)*0.5),
                        return_best        = True,
                        verbose            = False
               )

# Results grid Search
# ==============================================================================
results_grid.head(4)

# Backtesting
# ==============================================================================
steps = 36
n_backtest = 36 * 3 + 1
data_train = data[:-n_backtest]
data_test = data[-n_backtest:]

forecaster = ForecasterAutoregCustom(
                    regressor      = LinearRegression(),
                    fun_predictors = create_predictors,
                    window_size    = 20
             )

metrica, predicciones_backtest = backtesting_forecaster(
                                    forecaster         = forecaster,
                                    y                  = data.y,
                                    exog               = data.exog_1,
                                    initial_train_size = len(data_train),
                                    steps              = steps,
                                    metric             = 'mean_squared_error',
                                    verbose            = True
                                 )
print(metrica)

# Verificar predicciones de backtesting
# (verify backtesting predictions fold-by-fold, as in the non-exog section)
forecaster.fit(y=data_train.y, exog=data_train.exog_1)
predictions_1 = forecaster.predict(steps=steps, exog=data_test.exog_1[:steps])
predictions_2 = forecaster.predict(steps=steps, last_window=data_test.y[:steps],
                                   exog=data_test.exog_1[steps:steps*2])
predictions_3 = forecaster.predict(steps=steps, last_window=data_test.y[steps:steps*2],
                                   exog=data_test.exog_1[steps*2:steps*3])
predictions_4 = forecaster.predict(steps=1, last_window=data_test.y[steps*2:steps*3],
                                   exog=data_test.exog_1[steps*3:steps*4])
np.allclose(predicciones_backtest.pred,
            np.concatenate([predictions_1, predictions_2, predictions_3, predictions_4]))
```

# ForecasterAutoregCustom with multiple exogenous variables

```
# Split train-test
# ==============================================================================
steps = 36
data_train = data.iloc[:-steps, :]
data_test = data.iloc[-steps:, :]

# Create and fit forecaster
# ==============================================================================
forecaster = ForecasterAutoregCustom(
                    regressor      = RandomForestRegressor(random_state=123),
                    fun_predictors = create_predictors,
                    window_size    = 20
             )

forecaster.fit(y=data_train.y, exog=data_train[['exog_1', 'exog_2']])

# Predict
# ==============================================================================
steps = 36
predictions = forecaster.predict(steps=steps, exog=data_test[['exog_1', 'exog_2']])

# Plot
# ==============================================================================
fig, ax=plt.subplots(figsize=(9, 4))
data_train.y.plot(ax=ax, label='train')
data_test.y.plot(ax=ax, label='test')
predictions.plot(ax=ax, label='predictions')
ax.legend();

# Error
# ==============================================================================
error_mse = mean_squared_error(
                y_true = data_test.y,
                y_pred = predictions
            )
print(f"Test error (mse): {error_mse}")

# Grid search hiperparameters and lags
# ==============================================================================
forecaster = ForecasterAutoregCustom(
                    regressor      = RandomForestRegressor(random_state=123),
                    fun_predictors = create_predictors,
                    window_size    = 20
             )

# Regressor hiperparameters
param_grid = {'n_estimators': [50, 100],
              'max_depth': [5, 10]}

# Lags used as predictors
# NOTE(review): ForecasterAutoregCustom derives its predictors from
# fun_predictors, so lags_grid presumably has no effect here — confirm
# against the skforecast grid_search_forecaster documentation.
lags_grid = [3, 10, [1,2,3,20]]

results_grid = grid_search_forecaster(
                        forecaster         = forecaster,
                        y                  = data_train['y'],
                        exog               = data_train[['exog_1', 'exog_2']],
                        param_grid         = param_grid,
                        lags_grid          = lags_grid,
                        steps              = 10,
                        metric             = 'mean_squared_error',
                        refit              = False,
                        initial_train_size = int(len(data_train)*0.5),
                        return_best        = True,
                        verbose            = False
               )

# Results grid Search
# ==============================================================================
results_grid
```

# Unit testing

```
# Unit test create_train_X_y
# ==============================================================================
import pytest
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from sklearn.linear_model import LinearRegression


def create_predictors(y):
    '''
    Create first 5 lags of a time series.
    '''
    # Last 5 values, most recent first (lag 1 ... lag 5).
    lags = y[-1:-6:-1]
    return lags


def test_create_train_X_y_output_when_y_is_series_10_and_exog_is_None():
    '''
    Test the output of create_train_X_y when y=pd.Series(np.arange(10)) and
    exog is None.
    '''
    forecaster = ForecasterAutoregCustom(
                        regressor      = LinearRegression(),
                        fun_predictors = create_predictors,
                        window_size    = 5
                 )
    results = forecaster.create_train_X_y(y=pd.Series(np.arange(10)))
    # Expected: 5 training rows (indices 5..9), each with the 5 previous
    # values in reverse order; target is the value at that index.
    expected = (pd.DataFrame(
                    data = np.array([[4, 3, 2, 1, 0],
                                     [5, 4, 3, 2, 1],
                                     [6, 5, 4, 3, 2],
                                     [7, 6, 5, 4, 3],
                                     [8, 7, 6, 5, 4]]),
                    index   = np.array([5, 6, 7, 8, 9]),
                    columns = ['custom_predictor_0', 'custom_predictor_1',
                               'custom_predictor_2', 'custom_predictor_3',
                               'custom_predictor_4']
                ),
                pd.Series(
                    np.array([5, 6, 7, 8, 9]),
                    index = np.array([5, 6, 7, 8, 9]))
               )
    assert (results[0] == expected[0]).all().all()
    assert (results[1] == expected[1]).all()


def test_create_train_X_y_output_when_y_is_series_10_and_exog_is_series():
    '''
    Test the output of create_train_X_y when y=pd.Series(np.arange(10)) and
    exog is a pandas series
    '''
    forecaster = ForecasterAutoregCustom(
                        regressor      = LinearRegression(),
                        fun_predictors = create_predictors,
                        window_size    = 5
                 )
    results = forecaster.create_train_X_y(
                  y    = pd.Series(np.arange(10)),
                  exog = pd.Series(np.arange(100, 110), name='exog')
              )
    # The exog value is appended as an extra column, aligned with the target
    # row (exog[5]=105 pairs with target index 5).
    expected = (pd.DataFrame(
                    data = np.array([[4, 3, 2, 1, 0, 105],
                                     [5, 4, 3, 2, 1, 106],
                                     [6, 5, 4, 3, 2, 107],
                                     [7, 6, 5, 4, 3, 108],
                                     [8, 7, 6, 5, 4, 109]]),
                    index   = np.array([5, 6, 7, 8, 9]),
                    columns = ['custom_predictor_0', 'custom_predictor_1',
                               'custom_predictor_2', 'custom_predictor_3',
                               'custom_predictor_4', 'exog']
                ),
                pd.Series(
                    np.array([5, 6, 7, 8, 9]),
                    index = np.array([5, 6, 7, 8, 9]))
               )
    assert (results[0] == expected[0]).all().all()
    assert (results[1] == expected[1]).all()


def test_create_train_X_y_output_when_y_is_series_10_and_exog_is_dataframe():
    '''
    Test the output of create_train_X_y when y=pd.Series(np.arange(10)) and
    exog is a pandas dataframe with two columns.
    '''
    forecaster = ForecasterAutoregCustom(
                        regressor      = LinearRegression(),
                        fun_predictors = create_predictors,
                        window_size    = 5
                 )
    results = forecaster.create_train_X_y(
                  y    = pd.Series(np.arange(10)),
                  exog = pd.DataFrame({
                             'exog_1' : np.arange(100, 110),
                             'exog_2' : np.arange(1000, 1010)
                         })
              )
    # Each exog column becomes its own appended predictor column.
    expected = (pd.DataFrame(
                    data = np.array([[4, 3, 2, 1, 0, 105, 1005],
                                     [5, 4, 3, 2, 1, 106, 1006],
                                     [6, 5, 4, 3, 2, 107, 1007],
                                     [7, 6, 5, 4, 3, 108, 1008],
                                     [8, 7, 6, 5, 4, 109, 1009]]),
                    index   = np.array([5, 6, 7, 8, 9]),
                    columns = ['custom_predictor_0', 'custom_predictor_1',
                               'custom_predictor_2', 'custom_predictor_3',
                               'custom_predictor_4', 'exog_1', 'exog_2']
                ),
                pd.Series(
                    np.array([5, 6, 7, 8, 9]),
                    index = np.array([5, 6, 7, 8, 9])
                )
               )
    assert (results[0] == expected[0]).all().all()
    assert (results[1] == expected[1]).all()


def test_create_train_X_y_exception_when_y_and_exog_have_different_length():
    '''
    Test exception is raised when length of y and length of exog are
    different.
    '''
    forecaster = ForecasterAutoregCustom(
                        regressor      = LinearRegression(),
                        fun_predictors = create_predictors,
                        window_size    = 5
                 )
    with pytest.raises(Exception):
        forecaster.fit(y=pd.Series(np.arange(50)), exog=pd.Series(np.arange(10)))
    with pytest.raises(Exception):
        forecaster.fit(y=pd.Series(np.arange(10)), exog=pd.Series(np.arange(50)))
    with pytest.raises(Exception):
        forecaster.fit(
            y=pd.Series(np.arange(10)),
            exog=pd.DataFrame(np.arange(50).reshape(25,2))
        )


def test_create_train_X_y_exception_when_y_and_exog_have_different_index():
    '''
    Test exception is raised when y and exog have different index.
    '''
    forecaster = ForecasterAutoregCustom(
                        regressor      = LinearRegression(),
                        fun_predictors = create_predictors,
                        window_size    = 5
                 )
    with pytest.raises(Exception):
        forecaster.fit(
            y=pd.Series(np.arange(50)),
            exog=pd.Series(np.arange(10), index=np.arange(100, 110))
        )


# Run the tests defined above (notebook-style, without a pytest runner).
test_create_train_X_y_output_when_y_is_series_10_and_exog_is_None()
test_create_train_X_y_output_when_y_is_series_10_and_exog_is_series()
test_create_train_X_y_output_when_y_is_series_10_and_exog_is_dataframe()
test_create_train_X_y_exception_when_y_and_exog_have_different_length()
test_create_train_X_y_exception_when_y_and_exog_have_different_index()

from pytest import approx
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from sklearn.linear_model import LinearRegression


def create_predictors(y):
    '''
    Create first 5 lags of a time series.
    '''
    lags = y[-1:-6:-1]
    return lags


def test_estimate_boot_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_True():
    '''
    Test output of _estimate_boot_interval when regressor is LinearRegression
    and 1 step is predicted using in-sample residuals.
    '''
    forecaster = ForecasterAutoregCustom(
                        regressor      = LinearRegression(),
                        fun_predictors = create_predictors,
                        window_size    = 5
                 )
    forecaster.fit(y=pd.Series(np.arange(10)))
    # Force all residuals to a constant 10 so the bootstrap interval becomes
    # deterministic: point forecast (10) + residual (10) on both bounds.
    forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals,
                                                  fill_value=10)
    expected = np.array([[20., 20.]])
    results = forecaster._estimate_boot_interval(steps=1,
                                                 in_sample_residuals=True,
                                                 n_boot=2)
    assert results == approx(expected)


test_estimate_boot_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_True()

import numpy as np
import pandas as pd
from pytest import approx
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from sklearn.linear_model import LinearRegression


def create_predictors(y):
    '''
    Create first 5 lags of a time series.
    '''
    lags = y[-1:-6:-1]
    return lags


def test_fit_last_window_stored():
    '''
    Test that values of last window are stored after fitting.
    '''
    forecaster = ForecasterAutoregCustom(
                        regressor      = LinearRegression(),
                        fun_predictors = create_predictors,
                        window_size    = 5
                 )
    forecaster.fit(y=pd.Series(np.arange(50)))
    # last_window must hold the final window_size (5) observations.
    expected = pd.Series(np.array([45, 46, 47, 48, 49]),
                         index=[45, 46, 47, 48, 49])
    assert (forecaster.last_window == expected).all()


def test_in_sample_residuals_stored_when_fit_forecaster():
    '''
    Test that values of in_sample_residuals are stored after fitting.
    '''
    forecaster = ForecasterAutoregCustom(
                        regressor      = LinearRegression(),
                        fun_predictors = create_predictors,
                        window_size    = 5
                 )
    forecaster.fit(y=pd.Series(np.arange(7)))
    # A linear trend is fit exactly by LinearRegression, so residuals are 0.
    expected = np.array([0, 0])
    results = forecaster.in_sample_residuals
    assert results.values == approx(expected)


test_fit_last_window_stored()
test_in_sample_residuals_stored_when_fit_forecaster()

from pytest import approx
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor


def create_predictors(y):
    '''
    Create first 5 lags of a time series.
    '''
    lags = y[-1:-6:-1]
    return lags


def test_output_get_coef_when_regressor_is_LinearRegression():
    '''
    Test output of get_coef when regressor is LinearRegression with lags=3
    and it is trained with y=pd.Series(np.arange(5)).
''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(7))) expected = pd.DataFrame({ 'feature': ['custom_predictor_0', 'custom_predictor_1', 'custom_predictor_2', 'custom_predictor_3', 'custom_predictor_4'], 'coef': np.array([0.2, 0.2, 0.2, 0.2, 0.2]) }) results = forecaster.get_coef() assert (results['feature'] == expected['feature']).all() assert results['coef'].values == approx(expected['coef'].values) def test_get_coef_when_regressor_is_RandomForest(): ''' Test output of get_coef when regressor is RandomForestRegressor with lags=3 and it is trained with y=pd.Series(np.arange(5)). ''' forecaster = ForecasterAutoregCustom( regressor = RandomForestRegressor(n_estimators=1, max_depth=2), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(6))) expected = None results = forecaster.get_coef() assert results is expected test_output_get_coef_when_regressor_is_LinearRegression() test_get_coef_when_regressor_is_RandomForest() from pytest import approx import numpy as np import pandas as pd from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom from sklearn.ensemble import RandomForestRegressor def create_predictors(y): ''' Create first 5 lags of a time series. 
''' lags = y[-1:-6:-1] return lags def test_output_get_feature_importance_when_regressor_is_RandomForest(): ''' ''' forecaster = ForecasterAutoregCustom( regressor = RandomForestRegressor(n_estimators=1, max_depth=2, random_state=123), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(10))) expected = np.array([0.82142857, 0., 0.17857143, 0., 0.]) expected = pd.DataFrame({ 'feature': ['custom_predictor_0', 'custom_predictor_1', 'custom_predictor_2', 'custom_predictor_3', 'custom_predictor_4'], 'importance': np.array([0.82142857, 0., 0.17857143, 0., 0.]) }) results = forecaster.get_feature_importance() assert (results['feature'] == expected['feature']).all() assert results['importance'].values == approx(expected['importance'].values) def test_output_get_feature_importance_when_regressor_is_linear_model(): ''' ''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(6))) expected = None results = forecaster.get_feature_importance() assert results is expected test_output_get_feature_importance_when_regressor_is_RandomForest() test_output_get_feature_importance_when_regressor_is_linear_model() import pytest import pandas as pd from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom from sklearn.linear_model import LinearRegression def create_predictors(y): ''' Create first 5 lags of a time series. 
''' lags = y[-1:-6:-1] return lags def test_init_exception_when_window_size_argument_is_string(): with pytest.raises(Exception): forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = '5' ) def test_init_exception_when_fun_predictors_argument_is_string(): with pytest.raises(Exception): forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = 'create_predictors', window_size = 5 ) test_init_exception_when_window_size_argument_is_string() test_init_exception_when_fun_predictors_argument_is_string() import numpy as np import pandas as pd from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom from sklearn.linear_model import LinearRegression def create_predictors(y): ''' Create first 5 lags of a time series. ''' lags = y[-1:-6:-1] return lags def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_True(): ''' Test output when regressor is LinearRegression and one step ahead is predicted using in sample residuals. ''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(10))) forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10) expected = pd.DataFrame( np.array([[10., 20., 20.]]), columns = ['pred', 'lower_bound', 'upper_bound'], index = pd.RangeIndex(start=10, stop=11, step=1) ) results = forecaster.predict_interval(steps=1, in_sample_residuals=True, n_boot=2) pd.testing.assert_frame_equal(results, expected) def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_True(): ''' Test output when regressor is LinearRegression and two step ahead is predicted using in sample residuals. 
''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(10))) forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10) expected = pd.DataFrame( np.array([[10., 20., 20.], [11., 23., 23.]]), columns = ['pred', 'lower_bound', 'upper_bound'], index = pd.RangeIndex(start=10, stop=12, step=1) ) results = forecaster.predict_interval(steps=2, in_sample_residuals=True, n_boot=2) pd.testing.assert_frame_equal(results, expected) def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_False(): ''' Test output when regressor is LinearRegression and one step ahead is predicted using out sample residuals. ''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(10))) forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10) expected = pd.DataFrame( np.array([[10., 20., 20.]]), columns = ['pred', 'lower_bound', 'upper_bound'], index = pd.RangeIndex(start=10, stop=11, step=1) ) results = forecaster.predict_interval(steps=1, in_sample_residuals=False, n_boot=2) pd.testing.assert_frame_equal(results, expected) def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_False(): ''' Test output when regressor is LinearRegression and two step ahead is predicted using out sample residuals. 
''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(10))) forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10) expected = pd.DataFrame( np.array([[10., 20., 20.], [11., 23., 23.]]), columns = ['pred', 'lower_bound', 'upper_bound'], index = pd.RangeIndex(start=10, stop=12, step=1) ) results = forecaster.predict_interval(steps=2, in_sample_residuals=False) pd.testing.assert_frame_equal(results, expected) test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_True() test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_True() test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_False() test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_False() from pytest import approx import numpy as np import pandas as pd from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom from sklearn.linear_model import LinearRegression def create_predictors(y): ''' Create first 5 lags of a time series. ''' lags = y[-1:-6:-1] return lags def test_predict_output_when_regressor_is_LinearRegression(): ''' Test predict output when using LinearRegression as regressor. 
''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(50))) results = forecaster.predict(steps=5) expected = pd.Series( data = np.array([50., 51., 52., 53., 54.]), index = pd.RangeIndex(start=50, stop=55, step=1), name = 'pred' ) pd.testing.assert_series_equal(results, expected) test_predict_output_when_regressor_is_LinearRegression() # Unit test _recursive_predict # ============================================================================== from pytest import approx import numpy as np import pandas as pd from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom from sklearn.linear_model import LinearRegression def create_predictors(y): ''' Create first 5 lags of a time series. ''' lags = y[-1:-6:-1] return lags def test_recursive_predict_output_when_regressor_is_LinearRegression(): ''' Test _recursive_predict output when using LinearRegression as regressor. ''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.fit(y=pd.Series(np.arange(50))) predictions = forecaster._recursive_predict( steps = 5, last_window = forecaster.last_window.values, exog = None ) expected = np.array([50., 51., 52., 53., 54.]) assert (predictions == approx(expected)) test_recursive_predict_output_when_regressor_is_LinearRegression() import pytest import numpy as np import pandas as pd from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom from sklearn.linear_model import LinearRegression def create_predictors(y): ''' Create first 5 lags of a time series. ''' lags = y[-1:-6:-1] return lags def test_set_out_sample_residuals_exception_when_residuals_is_not_array(): ''' Test exception is raised when residuals argument is not numpy array. 
''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) with pytest.raises(Exception): forecaster.set_out_sample_residuals(residuals=[1, 2, 3]) def test_set_out_sample_residuals_when_residuals_length_is_less_than_1000_and_no_append(): ''' Test residuals stored when its length is less than 1000 and append is False. ''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.set_out_sample_residuals(residuals=np.arange(10), append=False) expected = np.arange(10) results = forecaster.out_sample_residuals assert (results == expected).all() def test_set_out_sample_residuals_when_residuals_length_is_less_than_1000_and_append(): ''' Test residuals stored when its length is less than 1000 and append is True. ''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.set_out_sample_residuals(residuals=np.arange(10), append=True) forecaster.set_out_sample_residuals(residuals=np.arange(10), append=True) expected = np.hstack([np.arange(10), np.arange(10)]) results = forecaster.out_sample_residuals assert (results == expected).all() def test_set_out_sample_residuals_when_residuals_length_is_greater_than_1000(): ''' Test residuals stored when its length is greater than 1000. 
''' forecaster = ForecasterAutoregCustom( regressor = LinearRegression(), fun_predictors = create_predictors, window_size = 5 ) forecaster.set_out_sample_residuals(residuals=np.arange(2000)) assert len(forecaster.out_sample_residuals) == 1000 test_set_out_sample_residuals_exception_when_residuals_is_not_array() test_set_out_sample_residuals_when_residuals_length_is_less_than_1000_and_no_append() test_set_out_sample_residuals_when_residuals_length_is_less_than_1000_and_append() test_set_out_sample_residuals_when_residuals_length_is_greater_than_1000() import pandas as pd from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom from sklearn.linear_model import LinearRegression def create_predictors(y): ''' Create first 5 lags of a time series. ''' lags = y[-1:-6:-1] return lags def test_set_paramns(): forecaster = ForecasterAutoregCustom( regressor = LinearRegression(fit_intercept=True), fun_predictors = create_predictors, window_size = 5 ) new_params = {'fit_intercept': False} forecaster.set_params(**new_params) expected = {'copy_X': True, 'fit_intercept': False, 'n_jobs': None, 'normalize': 'deprecated', 'positive': False } results = forecaster.regressor.get_params() assert results == expected test_set_paramns() ```
github_jupyter
## Innlesing og behandling av flere parquet-datasett i samme prosessesteg med Pyspark I denne noten viser vi hvordan man kan lese inn og behandle et ukjent antall inndatasett ved å bruke python collection datatypes (list, dictionary og tuple) og python for loops for å operasjonalisere steg i en tenkt klargjørings- eller analyseprosess. #### Innhenter verktøy fra bibliotek Import-stegene henter inn bibliotek med kode og funksjoner utviklet eksternt. ``` from datetime import datetime from pyspark.sql import SparkSession from pyspark.sql.types import * from pyspark.sql import SQLContext import pyspark.sql.functions as F ``` #### Kjører metoden read.path for å få oversikt over hvilke parquet-datasett som er tilrettelagt i tilknytning til veilederens lagringsområde i sky. Oversikten blir lest inn i egen dataframe - df_datasets. Aktuelt lagringsområde blir lagt inn som parameter (string-objekt som vi definerer selv) INNDATA_PATH. ``` INNDATA_PATH = '/felles/veiledning/pyspark/eksempler/*' df_datasets = spark.read.path(INNDATA_PATH) ``` #### df_datasets skrives ut i output-vindu. ``` df_datasets.show(100, False) ``` #### Finner antall tegn i lengste path Utleder lengden (antall tegn) for lengste path med aggregate function. "Collecter" lengden (antall tegn) på lengste path inn i listeobjektet liste_max_lengste_path før det deretter blir lagt inn i integer-objektet max_lengde_path. max_lengde_path brukes deretter som parameter i paragraf som avleder variabelen filename (blir dataframe-navn i notebook). Formålet med paragrafen er å unngå hardkoding i utledningen av variabelen filename. 
``` df = df_datasets.withColumn('lengde_path', F.length(df_datasets.path)) liste_max_lengste_path = df.agg({"lengde_path": "max"}).collect() max_lengde_path = liste_max_lengste_path[0][0] ``` #### Avleder variabelen filename fra variabelen path ``` df_datasets = df_datasets.withColumn('filename',F.substring(df_datasets.path, len(INNDATA_PATH), (max_lengde_path + 1 - len(INNDATA_PATH)))) df_datasets.show(100, False) ``` #### Selekterer ut rader bestående av de filnavn som vi ønsker å trekke ut for videre bearbeiding. Viser hvordan man kan bruke liste (Python list) for å gjøre en slik seleksjon. ``` filenames = ['areal', 'bnp', 'innbyggerantall/2020'] df_datasets = df_datasets.filter(df_datasets.filename.isin(filenames)) df_datasets.show(100, False) ``` #### Oppretter dictionaryet dict_df og leser så de tre parquet-datasettene i output-vinduet over inn som dataframes i dictionaryet. Filnavnene blir keys i dictionaryet. Avslutningsvis i paragrafen skrives key-navnene ut i output. ``` dict_df = {} for row in df_datasets.rdd.collect(): dict_df[row.filename] = spark.read.path(row.path) for key in dict_df: print(key) ``` ### Skriver ut innhold i dataframene i dictionaryet #### Areal ``` dict_df['areal'].show() ``` #### BNP ``` dict_df['bnp'].show() ``` #### Innbyggerantall 2020 ``` dict_df['innbyggerantall/2020'].show() ``` ### Fjerner duplikater og kolonner vi ikke ønsker å ha med videre Vi ønsker å sette sammen informasjon fra datasettene i dictionaryet til et nytt datasett. Dette vil vi gjøre ved å koble de sammen via Landkode-variabelen. Vi fjerner eventuelle duplikater på Landkode og dropper variable som vi ikke trenger/ønsker å ha med på det nye datasettet i innledende steg under. 
Skriver ut duplikatene i output før vi deretter fjerner dem fra dictionaryet Areal ``` dict_df['areal'].exceptAll(dict_df['areal'].dropDuplicates(['Land'])).show() dict_df['areal'] = dict_df['areal'].dropDuplicates(['Landkode']).drop('kilde') ``` BNP (Legger inn variable som skal droppes fra dataframe i python tuple) ``` dict_df['bnp'].exceptAll(dict_df['bnp'].dropDuplicates(['Land'])).show() liste_drop_kolonner = ('Land', 'Kilde', 'År') dict_df['bnp'] = dict_df['bnp'].drop_duplicates(['Landkode']).drop(*liste_drop_kolonner) ``` Innbyggerantall (Legger inn variable som skal droppes fra dataframe i python liste) ``` dict_df['innbyggerantall/2020'].exceptAll(dict_df['innbyggerantall/2020'].dropDuplicates(['Land'])).show() liste_drop_kolonner = ['Land', 'Kilde', 'År'] dict_df['innbyggerantall/2020'] = dict_df['innbyggerantall/2020'].drop_duplicates(['Landkode']).drop(*liste_drop_kolonner) ``` ### Kobler sammen datasettene og etablerer en dataframe vi skal skrive til dapla for senere å utføre en statistisk analyse på. Kobler først areal og bnp og oppretter dataframen df_areal_bnp. Vi legger ikke denne dataframen inn i dictionaryet. Det hadde imidlertid enkelt latt seg gjøre ved f.eks. å bytte ut df_areal_bnp med dict_df['areal_bnp']. ``` df_areal_bnp = dict_df['areal'].join(dict_df['bnp'], 'Landkode', how='inner') df_areal_bnp.show() ``` Kobler deretter df_areal_bnp med innbyggerantall/2020-dataframen (i dictionaryet). Dermed får vi produsert den dataframen som vi ønsker å utføre en nærmere statistisk analyse på. ``` df_sammensatt = df_areal_bnp.join(dict_df['innbyggerantall/2020'], 'Landkode', how='inner') ``` Skriver ut innhold i df_sammensatt i output ``` df_sammensatt.show() ``` ### Lagrer dataframe som parquet-datasett på Dapla Skriver pyspark dataframe til GCS bucket i parquet-format. Skriver til hjemmekatalog under /user. 
```
# Persist the joined dataframe as a parquet dataset in the shared bucket.
# NOTE(review): 'valuation' and 'state' look like Dapla/GCS-specific write
# metadata options — confirm their semantics against the platform docs.
df_sammensatt.write\
    .option("valuation", "INTERNAL")\
    .option("state", "INPUT")\
    .path('/felles/veiledning/datasett/df_sammensatt_pyspark')

# Re-read the dataset listing to verify the write landed as expected.
ds = spark.read.path('/felles/veiledning/datasett/*')
ds.show(10, False)
```
github_jupyter
# 2D Isostatic gravity inversion - Inverse Problem Este [IPython Notebook](http://ipython.org/videos.html#the-ipython-notebook) utiliza a biblioteca de código aberto [Fatiando a Terra](http://fatiando.org/) ``` %matplotlib inline import numpy as np from scipy.misc import derivative import scipy as spy from scipy import interpolate import matplotlib #matplotlib.use('TkAgg', force=True) import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator import math import cPickle as pickle import datetime import string as st from scipy.misc import imread from __future__ import division from fatiando import gravmag, mesher, utils, gridder from fatiando.mesher import Prism, Polygon from fatiando.gravmag import prism from fatiando.utils import ang2vec, si2nt, contaminate from fatiando.gridder import regular from fatiando.vis import mpl from numpy.testing import assert_almost_equal from numpy.testing import assert_array_almost_equal from pytest import raises plt.rc('font', size=16) import functions as fc ``` ## Observation coordinates. 
``` # Model`s limits ymin = 0.0 ymax = 250000.0 zmin = -1000.0 zmax = 35000.0 xmin = -100000.0 xmax = 100000.0 area = [ymin, ymax, zmax, zmin] ny = 150 # number of observation datas and number of prisms along the profile # coordinates defining the horizontal boundaries of the # adjacent columns along the profile y = np.linspace(ymin, ymax, ny) # coordinates of the center of the columns forming the # interpretation model n = ny - 1 dy = (ymax - ymin)/n ycmin = ymin + 0.5*dy ycmax = ymax - 0.5*dy yc = np.reshape(np.linspace(ycmin, ycmax, n),(n,1)) x = np.zeros_like(yc) z = np.zeros_like(yc)-150.0 ## Edge extension (observation coordinates) sigma = 2.0 edge = sigma*dy*n ``` ## Model parameters ``` # Model densities # Indices and polygons relationship: # cc = continental crust layer # oc = ocean crust layer # w = water layer # s = sediment layer # m = mantle layer dw = np.array([1030.0]) ds = np.array([2600.0]) dcc = np.array([2790.0]) doc = np.array([2880.0]) dm = np.array([3250.0]) #dc = dcc # coordinate defining the horizontal boundaries of the continent-ocean boundary COT = 170000.0 # list defining crust density variance dc = np.zeros_like(yc) aux = yc <= COT for i in range(len(yc[aux])): dc[i] = dcc for i in range(len(yc[aux]),n): dc[i] = doc # defining sediments layers density matrix ds = np.reshape(np.repeat(ds,n),(n,1)) # S0 => isostatic compensation surface (Airy's model) S0 = np.array([40000.0]) #original ``` ## Synthetic data ``` gsyn = np.reshape(np.loadtxt('../data/F-model-rifted-margin-synthetic-gravity-data.txt'),(n,1)) ``` ## Water bottom ``` tw = np.reshape(np.loadtxt('../data/F-model-rifted-margin-bathymetry.txt'),(n,1)) ``` ## True surfaces ``` true_basement = np.reshape(np.loadtxt('../data/F-model-rifted-margin-true-basement-surface.txt'),(n,1)) true_moho = np.reshape(np.loadtxt('../data/F-model-rifted-margin-true-moho-surface.txt'),(n,1)) # True reference moho surface (SR = S0+dS0) true_S0 = np.array([40000.0]) true_dS0 = np.array([6000.0]) 
#original # True layer sediments thickness true_ts = true_basement - tw # True layer anti-root thickness true_tm = S0 - true_moho # true parameters vector ptrue = np.vstack((true_ts, true_tm, true_dS0)) ``` ## Initial guess surfaces ``` # initial guess basement surface ini_basement = np.reshape(np.loadtxt('../data/F-model-rifted-margin-initial-basement-surface.txt'),(n,1)) # initial guess moho surface ini_moho = np.reshape(np.loadtxt('../data/F-model-rifted-margin-initial-moho-surface.txt'),(n,1)) # initial guess reference moho surface (SR = S0+dS0) ini_dS0 = np.array([500.0]) ini_RM = S0 + ini_dS0 # initial guess layer sediment thickness ini_ts = ini_basement - tw # initial guess anti-root layer thickness ini_tm = S0 - ini_moho # initial guess parameters vector p0 = np.vstack((ini_ts, ini_tm, ini_dS0)) ``` ## Known depths ``` # Known values: basement and moho surfaces base_known = np.loadtxt('../data/F-model-rifted-margin-basement-known-depths-1pt.txt', ndmin=2) moho_known = np.loadtxt('../data/F-model-rifted-margin-moho-known-depths.txt', ndmin=2) (rs,index_rs) = fc.base_known_function(dy,tw,yc,base_known) (rm,index_rm) = fc.moho_known_function(dy,yc,S0,moho_known) index_base = index_rs index_moho = index_rm - n assert_almost_equal(base_known[:,0], yc[index_base][:,0], decimal=6) assert_almost_equal(moho_known[:,0], yc[index_moho][:,0], decimal=6) assert_almost_equal(true_ts[index_base][:,0], rs[:,0], decimal=6) assert_almost_equal((true_tm[index_moho][:,0]), rm[:,0], decimal=6) ``` ## Initial guess data ``` g0 = np.reshape(np.loadtxt('../data/F-model-rifted-margin-initial-guess-gravity-data.txt'),(n,1)) ``` ### parameters vector box limits ``` # true thickness vector limits print 'ts =>', np.min(ptrue[0:n]),'-', np.max(ptrue[0:n]) print 'tm =>', np.min(ptrue[n:n+n]),'-', np.max(ptrue[n:n+n]) print 'dS0 =>', ptrue[n+n] # initial guess thickness vector limits print 'ts =>', np.min(p0[0:n]),'-', np.max(p0[0:n]) print 'tm =>', np.min(p0[n:n+n]),'-', 
np.max(p0[n:n+n]) print 'dS0 =>', p0[n+n] # defining parameters values limits pjmin = np.zeros((len(ptrue),1)) pjmax = np.zeros((len(ptrue),1)) pjmin[0:n] = 0.0 pjmax[0:n] = 20000. pjmin[n:n+n] = 0.0 pjmax[n:n+n] = 17000. pjmin[n+n] = 0.0 pjmax[n+n] = 12000. ``` ### Inversion code ``` #Parametros internos para implementacao da funcao (convergencia, numero de iteracoes, etc.) beta = 10**(-3) itmax = 50 itmax_marq = 10 lamb = 1. mi = 10**(-3) dmi = 10. dp1 = 1. dp2 = 1. #inicializacao de variaveis ymin = area[0] ymax = area[1] x = np.zeros_like(yc) z = np.zeros_like(yc)-150.0 n = len(yc) # numero de dados observados m = 2*n+1 # numero de parametros a inverter # calculo da contribuicao dos prismas que formam a camada de agua. prism_w = fc.prism_w_function(xmax,xmin,dy,edge,dw,dcc,tw,yc) gzw = prism.gz(np.reshape(x,(n,)),np.reshape(yc,(n,)),np.reshape(z,(n,)),prism_w) # matrizes I = np.identity(m) W0 = np.identity(n-1) R = fc.R_matrix_function(n) Sa = fc.Sa_matrix_function(n) Sb = fc.Sb_matrix_function(n) C = fc.C_matrix_function(ds,dm,dc) D = fc.D_matrix_function(dw,dc) A = fc.A_matrix_function(n,rs,index_rs) B = fc.B_matrix_function(n,rm,index_rm) G0 = fc.G_matrix_function(xmax,xmin,dy,edge,dp1,dp2,S0,dw,ds,dm,dcc,dc,tw,p0,yc) # Hessianas Hess_phi = (2/n)*G0.T.dot(G0) Hess_psi0 = 2*C.T.dot(R.T.dot(W0.T.dot(W0.dot(R.dot(C))))) Hess_psi1 = 2*Sa.T.dot(Sa) Hess_psi2 = 2*Sb.T.dot(Sb) Hess_psi3 = 2*A.T.dot(A) Hess_psi4 = 2*B.T.dot(B) # Normalizacao dos vinculos diag_phi = np.diag(Hess_phi) diag_psi0 = np.diag(Hess_psi0) diag_psi1 = np.diag(Hess_psi1) diag_psi2 = np.diag(Hess_psi2) diag_psi3 = np.diag(Hess_psi3) diag_psi4 = np.diag(Hess_psi4) f_phi = np.median(diag_phi) f_psi0 = np.median(diag_psi0) #f_psi1 = np.median(diag_psi1) #f_psi2 = np.median(diag_psi2) #f_psi3 = np.median(diag_psi3) #f_psi4 = np.median(diag_psi4) f_psi1 = 4. f_psi2 = 4. f_psi3 = 2. f_psi4 = 2. 
print f_phi, f_psi0, f_psi1, f_psi2, f_psi3, f_psi4 # coeficientes dos vinculos alpha0 = (f_phi/f_psi0)*10**(2) # vinculo isostatico alpha1 = (f_phi/f_psi1)*10**(1) # vinculo suavidade embasamento alpha2 = (f_phi/f_psi2)*10**(2) # vinculo suavidade Moho alpha3 = (f_phi/f_psi3)*10**(1) # vinculo de igualdade espessura sedimento alpha4 = (f_phi/f_psi4)*10**(2) # vinculo de igualdade espessura (S0 - tm) print alpha0, alpha1, alpha2, alpha3, alpha4 p1 = p0.copy() g1 = g0.copy() gama1 = fc.gama_function(alpha0,alpha1,alpha2,alpha3,alpha4,lamb,S0,tw,gsyn,g1,p1,rs,rm,W0,R,C,D,Sa,Sb,A,B) gama_list = [gama1] k0=0 k1=0 #implementacao da funcao for it in range (itmax): p1_hat = - np.log((pjmax - p1)/(p1-pjmin)) G1 = fc.G_matrix_function(xmax,xmin,dy,edge,dp1,dp2,S0,dw,ds,dm,dcc,dc,tw,p1,yc) grad_phi = (-2/n)*G1.T.dot(gsyn - g1) Hess_phi = (2/n)*G1.T.dot(G1) grad_psi0 = fc.grad_psi_iso_function(S0,tw,p1,W0,R,C,D) grad_psi1 = fc.grad_psi_tk1_function(p1,Sa) grad_psi2 = fc.grad_psi_tk1_function(p1,Sb) grad_psi3 = fc.grad_psi_eq_function(p1,rs,A) grad_psi4 = fc.grad_psi_eq_function(p1,rm,B) grad_gama = grad_phi + lamb*(alpha0*grad_psi0+alpha1*grad_psi1+alpha2*grad_psi2+alpha3*grad_psi3+alpha4*grad_psi4) Hess_gama = Hess_phi+lamb*(alpha0*Hess_psi0+alpha1*Hess_psi1+alpha2*Hess_psi2+alpha3*Hess_psi3+alpha4*Hess_psi4) T = fc.T_matrix_function(pjmin, pjmax, p1) for it_marq in range(itmax_marq): deltap = np.linalg.solve((Hess_gama.dot(T) + mi*I), -grad_gama) p2_hat = p1_hat + deltap p2 = pjmin + ((pjmax - pjmin)/(1 + np.exp(-p2_hat))) #Calculo do vetor de dados preditos e da funcao phi prism_s = fc.prism_s_function(xmax,xmin,dy,edge,ds,dcc,tw,p2,yc) prism_c = fc.prism_c_function(xmax,xmin,dy,edge,S0,dcc,dc,tw,p2,yc) prism_m = fc.prism_m_function(xmax,xmin,dy,edge,S0,dcc,dm,p2,yc) g2 = np.reshape(fc.g_function(np.reshape(x,(n,)),np.reshape(yc,(n,)),np.reshape(z,(n,)),gzw,prism_s,prism_c,prism_m),(n,1)) gama2 = 
fc.gama_function(alpha0,alpha1,alpha2,alpha3,alpha4,lamb,S0,tw,gsyn,g2,p2,rs,rm,W0,R,C,D,Sa,Sb,A,B) #Verificando se a funcao phi esta diminuindo dgama = gama2 - gama1 if dgama > 0.: mi *= dmi print 'k0=',k0 k0 += 1 else: mi /= dmi break #Testando convergencia da funcao phi if (dgama < 0.) & (abs(gama1 - gama2) < beta): #if fc.convergence_function(gama1, gama2, beta): print 'convergence achieved' break #Atualizando variaveis else: print 'k1=',k1 k1 += 1 #gama1 = gama2.copy() print gama1 gama_list.append(gama1) thicknesses = tw + p2[0:n] + p2[n:n+n] print 'thicknesses=', np.max(thicknesses) p = p1.copy() g = g1.copy() p1 = p2.copy() g1 = g2.copy() gama1 = gama2.copy() assert np.alltrue(thicknesses <= S0), 'sum of the thicknesses shall be less than or equal to isostatic compensation surface' p = p2.copy() g = g2.copy() gama_list.append(gama2) it = [i for i in range(len(gama_list))] #plt.figure(figsize=(8,8)) ax = plt.figure(figsize=(8,8)).gca() ax.xaxis.set_major_locator(MaxNLocator(integer=True)) plt.plot(gama_list,'ko') plt.yscale('log') plt.xlabel('$k$', fontsize=18) plt.ylabel('$\Gamma(\mathbf{p})$', fontsize=18) plt.grid() #plt.xlim(-1,50) #plt.xlim(-1, len(gama_list)+5) plt.ylim(np.min(gama_list)-3*np.min(gama_list),np.max(gama_list)+3*np.min(gama_list)) #mpl.savefig('../manuscript/figures/F-model-rifted-margin-gama-list-alphas_X_1_2_1_2.png', dpi='figure', bbox_inches='tight') plt.show() ``` ## Lithostatic Stress ``` sgm_true = 9.81*(10**(-6))*(dw*tw + ds*true_ts + dc*(S0-tw-true_ts-true_tm)+dm*true_tm) sgm = 9.81*(10**(-6))*(dw*tw + ds*p[0:n] + dc*(S0-tw-p[0:n]-p[n:n+n])+dm*p[n:n+n]) ``` ## Inversion model plot ``` # Inverrsion results RM = S0 + p[n+n] basement = tw + p[0:n] moho = S0 - p[n:n+n] print ptrue[n+n], p[n+n] polygons_water = [] for (yi, twi) in zip(yc, tw): y1 = yi - 0.5*dy y2 = yi + 0.5*dy polygons_water.append(Polygon(np.array([[y1, y2, y2, y1], [0.0, 0.0, twi, twi]]).T, props={'density': dw - dcc})) polygons_sediments = [] for (yi, twi, si, dsi) 
in zip(yc, np.reshape(tw,(n,)), np.reshape(basement,(n,)), ds): y1 = yi - 0.5*dy y2 = yi + 0.5*dy polygons_sediments.append(Polygon(np.array([[y1, y2, y2, y1], [twi, twi, si, si]]).T, props={'density': ds - dcc})) polygons_crust = [] for (yi, si, Si, dci) in zip(yc, np.reshape(basement,(n,)), np.reshape(moho,(n,)), dc): y1 = yi - 0.5*dy y2 = yi + 0.5*dy polygons_crust.append(Polygon(np.array([[y1, y2, y2, y1], [si, si, Si, Si]]).T, props={'density': dci - dcc})) polygons_mantle = [] for (yi, Si) in zip(yc, np.reshape(moho,(n,))): y1 = yi - 0.5*dy y2 = yi + 0.5*dy polygons_mantle.append(Polygon(np.array([[y1, y2, y2, y1], [Si, Si, S0+p[n+n], S0+p[n+n]]]).T, props={'density': dm - dcc})) %matplotlib inline plt.close('all') fig = plt.figure(figsize=(12,16)) import matplotlib.gridspec as gridspec heights = [8, 8, 8, 1] gs = gridspec.GridSpec(4, 1, height_ratios=heights) ax1 = plt.subplot(gs[0]) ax2 = plt.subplot(gs[1]) ax3 = plt.subplot(gs[2]) ax4 = plt.subplot(gs[3]) ax1.axhline(y=0.0, xmin=ymin, xmax=ymax, color='k', linestyle='--', linewidth=1) ax1.plot(0.001*yc, gsyn, 'or', mfc='none', markersize=8, label='simulated data') ax1.plot(0.001*yc, g0, '-b', linewidth=2, label='initial guess data') ax1.plot(0.001*yc, g, '-g', linewidth=2, label='predicted data') ax1.set_xlim(0.001*ymin, 0.001*ymax) ax1.set_ylabel('gravity disturbance (mGal)', fontsize=16) ax1.set_xticklabels(['%g'% (l) for l in ax1.get_xticks()], fontsize=14) ax1.set_yticklabels(['%g'% (l) for l in ax1.get_yticks()], fontsize=14) ax1.legend(loc='best', fontsize=14, facecolor='silver') ax2.plot(0.001*yc, sgm_true, 'or', mfc='none', markersize=8, label='simulated lithostatic stress') ax2.plot(0.001*yc, sgm, '-g', linewidth=2, label='predicted lithostatic stress') ax2.set_xlim(0.001*ymin, 0.001*ymax) ax2.set_ylim(1040,1130) ax2.set_ylabel('lithostatic stress (MPa)', fontsize=16) ax2.set_xticklabels(['%g'% (l) for l in ax2.get_xticks()], fontsize=14) ax2.set_yticklabels(['%g'% (l) for l in ax2.get_yticks()], 
fontsize=14) ax2.legend(loc='best', fontsize=14, facecolor='silver') ax3.axhline(y=0.0, xmin=ymin, xmax=ymax, color='k', linestyle='-', linewidth=1) aux = yc <= COT for (pwi) in (polygons_water): tmpx = [x for x in pwi.x] tmpx.append(pwi.x[0]) tmpy = [y for y in pwi.y] tmpy.append(pwi.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='lightskyblue') for (psi) in (polygons_sediments): tmpx = [x for x in psi.x] tmpx.append(psi.x[0]) tmpy = [y for y in psi.y] tmpy.append(psi.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='tan') for (pci) in (polygons_crust[:len(yc[aux])]): tmpx = [x for x in pci.x] tmpx.append(pci.x[0]) tmpy = [y for y in pci.y] tmpy.append(pci.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='orange') for (pcoi) in (polygons_crust[len(yc[aux]):n]): tmpx = [x for x in pcoi.x] tmpx.append(pcoi.x[0]) tmpy = [y for y in pcoi.y] tmpy.append(pcoi.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='olive') for (pmi) in (polygons_mantle): tmpx = [x for x in pmi.x] tmpx.append(pmi.x[0]) tmpy = [y for y in pmi.y] tmpy.append(pmi.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='pink') ax3.plot(yc, tw, '-k', linewidth=3) ax3.plot(yc, true_basement, '-k', linewidth=3, label='true surfaces') ax3.plot(yc, true_moho, '-k', linewidth=3) ax3.plot(yc, ini_basement, '-.b', linewidth=3, label='initial guess surfaces') ax3.plot(yc, ini_moho, '-.b', linewidth=3) ax3.plot(yc, basement, '--w', linewidth=3, label='estimated surfaces') ax3.plot(yc, moho, '--w', linewidth=3) ax3.axhline(y=true_S0+true_dS0, xmin=ymin, xmax=ymax, color='k', linestyle='-', linewidth=3) ax3.axhline(y=S0+ini_dS0, xmin=ymin, xmax=ymax, color='b', linestyle='-.', linewidth=3) ax3.axhline(y=S0+p[n+n], xmin=ymin, xmax=ymax, color='w', linestyle='--', linewidth=3) ax3.plot(base_known[:,0], base_known[:,1], 'v', color = 'yellow', markersize=15, label='known depths (basement)') 
ax3.plot(moho_known[:,0], moho_known[:,1], 'D', color = 'lime', markersize=15, label='known depths (moho)') #ax3.set_ylim((S0+p[n+n]), zmin) ax3.set_ylim((48000.0), zmin) ax3.set_xlim(ymin, ymax) ax3.set_xlabel('y (km)', fontsize=16) ax3.set_ylabel('z (km)', fontsize=16) ax3.set_xticklabels(['%g'% (0.001*l) for l in ax3.get_xticks()], fontsize=14) ax3.set_yticklabels(['%g'% (0.001*l) for l in ax3.get_yticks()], fontsize=14) ax3.legend(loc='lower right', fontsize=14, facecolor='silver') X, Y = fig.get_dpi()*fig.get_size_inches() plt.title('Density contrast (kg/m$^{3}$)', fontsize=17) #plt.title('Density (kg/m$^{3}$)', fontsize=17) ax4.axis('off') layers_list1 = ['water', 'sediment', 'continental', 'oceanic', 'mantle'] layers_list2 = ['', '', 'crust', 'crust', ''] colors_list = ['lightskyblue', 'tan', 'orange', 'olive', 'pink'] density_list = ['-1760', '-190', '0', '90', '460'] #original #density_list = ['1030', '2600', '2790', '2880', '3250'] ncols = len(colors_list) nrows = 1 h = Y / nrows w = X / (ncols + 1) i=ncols-1 for color, density, layers1, layers2 in zip(colors_list, density_list, layers_list1, layers_list2): col = i // nrows row = i % nrows x = X - (col*w) - w yi_line = Y yf_line = Y - Y*0.15 yi_text1 = Y - Y*0.2 yi_text2 = Y - Y*0.28 yi_text3 = Y - Y*0.08 i-=1 poly = Polygon(np.array([[x, x+w*0.75, x+w*0.75, x], [yi_line, yi_line, yf_line, yf_line]]).T) tmpx = [x for x in poly.x] tmpx.append(poly.x[0]) tmpy = [y for y in poly.y] tmpy.append(poly.y[0]) ax4.plot(tmpx, tmpy, linestyle='-', color='k', linewidth=1) ax4.fill(tmpx, tmpy, color=color) ax4.text(x+w*0.375, yi_text1, layers1, fontsize=(w*0.14), horizontalalignment='center', verticalalignment='top') ax4.text(x+w*0.375, yi_text2, layers2, fontsize=(w*0.14), horizontalalignment='center', verticalalignment='top') ax4.text(x+w*0.375, yi_text3, density, color = 'k', fontsize=(w*0.14), horizontalalignment='center', verticalalignment='center') plt.tight_layout() 
#mpl.savefig('../manuscript/figures/F-model-rifted-margin-grafics-estimated-model-alphas_X_1_2_1_2.png', dpi='figure', bbox_inches='tight') plt.show() ```
github_jupyter
``` # MODIFY! # use Robust! model_name = 'poi-baseline-wo' import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df = pd.read_csv('./data/d-wo-ns.csv') # df.columns # df.head() df.shape # df.info() X = df.drop('throughput',axis=1) X.shape y = df['throughput'] y.shape # Split the data from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) # Scale the data from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) # Instantiate model from sklearn.linear_model import PoissonRegressor model = PoissonRegressor() model.fit(X_train, y_train) y_pred = model.predict(X_test) from sklearn.metrics import mean_absolute_error, mean_squared_error mae = mean_absolute_error(y_test,y_pred) mse = mean_squared_error(y_test,y_pred) rmse = np.sqrt(mse) model mae mse rmse err_df = pd.DataFrame(data=[mae, mse, rmse],index=['MAE','MSE','RMSE'],columns=[f'{model_name}']) err_df err_df.to_csv(f'./baseline-err/{model_name}.csv') y_pred_df = pd.DataFrame(y_pred, columns=['throughput_pred']) y_pred_df.describe().drop('count') # no negative predictions sns.histplot(y_pred,bins=40,kde=True) # the distribution better reflects the true distribution of the data set fig,ax = plt.subplots() sns.kdeplot(y_test, ax=ax, shade=True, label='Observations') sns.kdeplot(y_pred, ax=ax,shade=True,label='Predictions') ax.legend(loc='best') fig,ax = plt.subplots() sns.ecdfplot(y_test, ax=ax, label='Observations') sns.ecdfplot(y_pred, ax=ax,label='Predictions') plt.axvline(x=y.mean(),color='grey',linestyle='--') ax.legend(loc='best') # y.describe().drop(['count']) res = y_test - y_pred res.describe().drop('count') sns.histplot(data=res, kde=True,bins=40) ax = sns.scatterplot(x=y_test, y=res) ax.set(ylabel='Residuals', xlabel='Test Label') plt.axhline(y=0,color='red',linestyle='--') # 
there should be no clear pattern / curve in the plot # we see a positive correlation between Test Label and Residuals -> later models should avoid this pattern import scipy as sp fig, ax = plt.subplots() sp.stats.probplot(res,plot=ax); # Scale the data from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X) X_poisson = scaler.transform(X) poisson_model = PoissonRegressor() poisson_model.fit(X_poisson,y) model.coef_ # coef_df = pd.DataFrame(data=model.coef_,index=X.columns,columns=['beta']) # coef_df poisson_model.coef_ # coef_df = pd.DataFrame(data=poisson_model.coef_,index=X.columns,columns=['beta']) # coef_df delta_df = pd.DataFrame(data=poisson_model.coef_ - model.coef_,index=X.columns,columns=['delta_beta']) delta_df pd.set_option('display.max_columns',None) delta_df.sort_values('delta_beta').sort_values('delta_beta').transpose() sns.histplot(data=delta_df,bins=40,kde=True) y_pred_poisson = poisson_model.predict(X_poisson) sns.histplot(y_pred_poisson,bins=40,kde=True) fig,ax = plt.subplots() sns.kdeplot(y, ax=ax, shade=True, label='Observations') sns.kdeplot(y_pred_poisson, ax=ax,shade=True,label='Predictions') ax.legend(loc='best') fig,ax = plt.subplots() sns.ecdfplot(y, ax=ax, label='Observations') sns.ecdfplot(y_pred_poisson, ax=ax,label='Predictions') plt.axvline(x=y.mean(),color='grey',linestyle='--') ax.legend(loc='best') mae = mean_absolute_error(y,y_pred_poisson) mse = mean_squared_error(y,y_pred_poisson) rmse = np.sqrt(mse) err_df = pd.DataFrame(data=[mae, mse, rmse],index=['MAE','MSE','RMSE'],columns=['PR']) err_df # err_df.to_csv('./model-err/poi-model-err.csv') from joblib import dump, load dump(poisson_model, f'./baseline-models/{model_name}.joblib') ``` DONE!
github_jupyter
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $ $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $ $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $ 
$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $ $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $ <font style="font-size:28px;" align="left"><b>Vectors: Dot (Scalar) Product</b></font> <br> _prepared by Abuzer Yakaryilmaz_ <br><br> <i>Dot product</i> is a specific way of defining multiplication between two vectors with the same size. It is also called <i>scalar product</i>, because the result is a <i>scalar value</i>, e.g., a real number. Consider the following two vectors: $$ u = \myrvector{-3 \\ -2 \\ 0 \\ -1 \\ 4} \mbox{ and } v = \myrvector{-1\\ -1 \\2 \\ -3 \\ 5}. $$ The dot product of $ u $ and $ v $, denoted by $ \dot{u}{v}$, can be defined algorithmically. <u>Pairwise multiplication</u>: the values in the same positions are multiplied with each other. <u>Summation of all pairwise multiplications</u>: Then we sum all the results obtained from the pairwise multiplications. We write its Python code below. ``` # let's define both vectors u = [-3,-2,0,-1,4] v = [-1,-1,2,-3,5] uv = 0; # summation is initially zero for i in range(len(u)): # iteratively access every pair with the same indices print("pairwise multiplication of the entries with index",i,"is",u[i]*v[i]) uv = uv + u[i]*v[i] # i-th entries are multiplied and then added to summation print() # print an empty line print("The dot product of",u,'and',v,'is',uv) ``` The pairwise multiplications of entries are <ul> <li> $ (-3)\cdot(-1) = 3 $, </li> <li> $ (-2)\cdot(-1) = 2 $, </li> <li> $ 0\cdot 2 = 0 $, </li> <li> $ (-1)\cdot(-3) = 3 $, and, </li> <li> $ 4 \cdot 5 = 20 $. </li> </ul> Thus the summation of all pairwise multiplications of entries is $ 3+2+0+3+20 = 28 $. <b>Remark that the dimensions of the given vectors must be the same. Otherwise, the dot product is not defined.</b> <h3> Task 1 </h3> Find the dot product of the following vectors in Python: $$ v = \myrvector{-3 \\ 4 \\ -5 \\ 6} ~~~~\mbox{and}~~~~ u = \myrvector{4 \\ 3 \\ 6 \\ 5}. $$ Your outcome should be $0$. 
``` # # your solution is here # ``` <a href="Math24_Dot_Product_Solutions.ipynb#task1">click for our solution</a> <h3> Task 2 </h3> Let $ u = \myrvector{ -3 \\ -4 } $ be a 2 dimensional vector. Find $ \dot{u}{u} $ in Python. ``` # # your solution is here # ``` <a href="Math24_Dot_Product_Solutions.ipynb#task2">click for our solution</a> <h3> Notes:</h3> As may be observed from Task 2, the <b>length</b> of a vector can be calculated by using its <b>dot product</b> with itself. $$ \norm{u} = \sqrt{\dot{u}{u}}. $$ $ \dot{u}{u} $ is $25$, and so $ \norm{u} = \sqrt{25} = 5 $. $ \dot{u}{u} $ automatically accumulates the contribution of each entry to the length. <h3> Orthogonal (perpendicular) vectors </h3> For simplicity, we consider 2-dimensional vectors. The following two vectors are perpendicular (orthogonal) to each other. The angle between them is $ 90 $ degrees. ``` %run math.py dot_product("example1") # let's find the dot product of v and u v = [-4,0] u = [0,-5] result = 0; for i in range(2): result = result + v[i]*u[i] print("the dot product of u and v is",result) ``` Now, let's check the dot product of the following two vectors: ``` %run math.py dot_product("example2") # we can use the same code v = [-4,3] u = [-3,-4] result = 0; for i in range(2): result = result + v[i]*u[i] print("the dot product of u and v is",result) ``` The dot product of new $ u $ and $ v $ is also $0$. This is not surprising, because the vectors $u$ and $v$ (in both cases) are orthogonal to each other. <h3>Fact:</h3> <ul> <li>The dot product of two orthogonal (perpendicular) vectors is zero.</li> <li>If the dot product of two vectors is zero, then they are orthogonal to each other.</li> </ul> <i> This fact is important, because, as we will see later, orthogonal vectors (states) can be distinguished perfectly. </i> <h3> Task 3 </h3> Verify that (i) $ u $ is orthogonal to $ -v $, (ii) $ -u $ is orthogonal to $ v $, and (iii) $ -u $ is orthogonal to $ -v $. 
``` %run math.py dot_product("example3") # you may consider to write a function in Python for dot product # # your solution is here # ``` <a href="Math24_Dot_Product_Solutions.ipynb#task3">click for our solution</a> <h3> Task 4 </h3> Find the dot product of $ v $ and $ u $ in Python. $$ v = \myrvector{-1 \\ 2 \\ -3 \\ 4} ~~~~\mbox{and}~~~~ u = \myrvector{-2 \\ -1 \\ 5 \\ 2}. $$ Find the dot product of $ -2v $ and $ 3u $ in Python. Compare both results. ``` # # your solution is here # ``` <a href="Math24_Dot_Product_Solutions.ipynb#task4">click for our solution</a>
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas as pd # figures inline in notebook %matplotlib inline corrmat = np.array([[0.3381,0.3187,0.3046,0.3045,0.3039,0.3011,0.3044,0.3034,0.3128,0.3269], [0.3187,0.3358,0.3098,0.3061,0.3040,0.3030,0.3026,0.3064,0.3123,0.3248], [0.3046,0.3098,0.3529,0.3220,0.3314,0.3200,0.3203,0.3215,0.3297,0.3376], [0.3045,0.3061,0.3220,0.3617,0.3401,0.3330,0.3382,0.3367,0.3400,0.3501], [0.3039,0.3040,0.3314,0.3401,0.3761,0.3543,0.3486,0.3522,0.3555,0.3628], [0.3011,0.3030,0.3200,0.3330,0.3543,0.3793,0.3600,0.3643,0.3684,0.3712], [0.3044,0.3026,0.3203,0.3382,0.3486,0.3600,0.3819,0.3740,0.3755,0.3793], [0.3034,0.3064,0.3215,0.3367,0.3522,0.3643,0.3740,0.3909,0.3808,0.3868], [0.3128,0.3123,0.3297,0.3400,0.3555,0.3684,0.3755,0.3808,0.3948,0.3919], [0.3269,0.3248,0.3376,0.3501,0.3628,0.3712,0.3793,0.3868,0.3919,0.4213]]) print(corrmat.min()) index = [1000,900,800,700,600,500,400,300,200,100] df = pd.DataFrame(corrmat, columns=["1000","900","800","700","600","500","400","300","200","100"], index=index) with sns.plotting_context("notebook", font_scale=1.2): plt.figure(figsize=(12,10)) sns.heatmap(df, vmax=corrmat.max(), vmin=corrmat.min(), square=False, annot=True, fmt=".4f", cmap="Blues").xaxis.tick_top() import pickle history = pickle.load( open( "/Users/pablo/Downloads/trainHistoryDict_segnet_0-2-6", "rb" ) ) # 640 seconds/epoch print history plt.plot(history['loss']) plt.plot(history['val_loss']) plt.title('Segnet network training') plt.ylabel('mean absolute error') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper right') plt.show() import pickle history = pickle.load( open( "/Users/pablo/Downloads/trainHistoryDict_unet1_0-2-6", "rb" ) ) # 170 seconds/epoch print history plt.plot(history['loss']) plt.plot(history['val_loss']) plt.title('U-net network training') plt.ylabel('mean absolute error') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper right') plt.show() 
import pickle history = pickle.load( open( "/Users/pablo/Downloads/trainHistoryDict_vgg16_0-2-6", "rb" ) ) # 340 seconds/epoch print history plt.plot(history['loss']) plt.plot(history['val_loss']) plt.title('VGG16 network training') plt.ylabel('mean absolute error') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper right') plt.show() ```
github_jupyter
``` import pandas as pd from matplotlib import pyplot as plt from functools import partial % matplotlib inline # Given these coordenates x_points = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] y_points = [1, 2, 3, 1, 4, 5, 4, 6, 7, 10, 15] # Let's plot to display how they are located plt.plot(x_points, y_points, 'bo') # remeber the straight line equation? We will use it to find `y` and plot a straight line. def straight_line_equation(m, x, b): '''Calculate Y based on straight line equation. :m: slope or gradient (how steep the line is) :x: how far along :b: the Y intercept (where the line crosses the Y axis) :returns: calculated y ''' return m * x + b # now define a function to plot a straight line def plot_straight_line(data_points, m, b): '''Use matplotlib to plot a straight line. :data_points: x points used to a the straight line :m: how far along :b: the Y intercept (where the line crosses the Y axis) :returns: None(plot a graph) ''' x_values = range(int(min(data_points)) - 1, int(max(data_points) +2)) y_values = [straight_line_equation(x, m, b) for x in x_values] plt.plot(x_values, y_values, 'r') # To test our function, let's plot a initial straight line plot_straight_line(x_points, m=0, b=0) plt.plot(x_points, y_points, 'bo') def Σ(lower_bound, upper_bound, function): '''Summation is a math operator to easily represent a great sum of terms, even infinity. It's represented with the greek letter sigma. Sum terms from lower_bound until upper_bound, applying some function on each term. >>> Σ(1,5,lambda x:x) # 1 + 2 + 3 + 4 + 5 = 15 15 ''' return sum(function(index) for index in range(lower_bound, upper_bound + 1)) LEARN = .001 # mean standard error def derived_at_point_A(index, x_points, y_points, m, b): 'Derived from the equation of the plane at point A.' return straight_line_equation(x_points[index],m,b) - y_points[index] def derived_at_point_B(index, x_points, y_points, m, b): 'Derived from the equation of the plane at point B.' 
return (straight_line_equation(x_points[index],m,b) - y_points[index]) * x_points[index] def separate_points(x_points, y_points, *, intermediate_lines=False): '''Divide some points into 2 classes. Obs: 80 is an arbitrary point, because we need logistic regression to obtain that perfect number. ''' m = 0 b = 0 for i in range(80): mean1 = Σ(1, len(x_points) -1, partial(derived_at_point_A, x_points=x_points, y_points=y_points, m=m, b=b)) / len(x_points) mean2 = Σ(1, len(x_points) -1, partial(derived_at_point_B, x_points=x_points, y_points=y_points, m=m, b=b)) / len(x_points) m -= mean2 * LEARN b -= mean1 * LEARN if intermediate_lines: plot_straight_line(x_points, m, b) plot_straight_line(x_points, m, b) plt.plot(x_points, y_points, 'bo') separate_points(x_points, y_points, intermediate_lines=True) separate_points(x_points, y_points) # homework df = pd.read_csv('tabela.csv') x_points, y_points = df['faturamento'], df['tempo'] separate_points(x_points, y_points) ```
github_jupyter
# Distilling knowledge in Transformer models and test prediction for GLUE tasks, using *torchdistill*

## 1. Make sure you have access to GPU/TPU

Google Colab: Runtime -> Change runtime type -> Hardware accelerator: "GPU" or "TPU"

```
!nvidia-smi
```

## 2. Clone torchdistill repository to use its example code and configuration files

```
!git clone https://github.com/yoshitomo-matsubara/torchdistill
```

## 3. Install dependencies and *torchdistill*

```
!pip install -r torchdistill/examples/hf_transformers/requirements.txt
!pip install torchdistill
```

## (Optional) Configure Accelerate for 2x-speedup training by mixed-precision

If you are **NOT** using Google Colab Pro, it will exceed 12 hours (the maximum session lifetime for free Google Colab users) to fine-tune a base-sized model for the following 9 different tasks with a Tesla K80. By using mixed-precision training, you can complete all the 9 fine-tuning jobs.
[This table](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification#mixed-precision-training) gives you a good idea about how long it will take to fine-tune a BERT-Base on a Titan RTX with/without mixed-precision.

```
!accelerate config
```

## 4. Distill knowledge in Transformer models for GLUE tasks

The following examples demonstrate how to distill knowledge in fine-tuned BERT-Large (uncased) to pretrained BERT-Base (uncased) on each of the datasets in GLUE.

**Note**: Test splits for GLUE tasks in the `datasets` package are not labeled, and you use only training and validation splits in this example, following [Hugging Face's example](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification).
### 4.1 CoLA task ``` !accelerate launch torchdistill/examples/hf_transformers/text_classification.py \ --config torchdistill/configs/sample/glue/cola/kd/bert_base_uncased_from_bert_large_uncased.yaml \ --task cola \ --log log/glue/cola/kd/bert_base_uncased_from_bert_large_uncased.txt \ --private_output leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/ ``` ### 4.2 SST-2 task ``` !accelerate launch torchdistill/examples/hf_transformers/text_classification.py \ --config torchdistill/configs/sample/glue/sst2/kd/bert_base_uncased_from_bert_large_uncased.yaml \ --task sst2 \ --log log/glue/sst2/kd/bert_base_uncased_from_bert_large_uncased.txt \ --private_output leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/ ``` ### 4.3 MRPC task ``` !accelerate launch torchdistill/examples/hf_transformers/text_classification.py \ --config torchdistill/configs/sample/glue/mrpc/kd/bert_base_uncased_from_bert_large_uncased.yaml \ --task mrpc \ --log log/glue/mrpc/kd/bert_base_uncased_from_bert_large_uncased.txt \ --private_output leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/ ``` ### 4.4 STS-B task ``` !accelerate launch torchdistill/examples/hf_transformers/text_classification.py \ --config torchdistill/configs/sample/glue/stsb/kd/bert_base_uncased_from_bert_large_uncased.yaml \ --task stsb \ --log log/glue/stsb/kd/bert_base_uncased_from_bert_large_uncased.txt \ --private_output leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/ ``` ### 4.5 QQP task ``` !accelerate launch torchdistill/examples/hf_transformers/text_classification.py \ --config torchdistill/configs/sample/glue/qqp/kd/bert_base_uncased_from_bert_large_uncased.yaml \ --task qqp \ --log log/glue/qqp/kd/bert_base_uncased_from_bert_large_uncased.txt \ --private_output leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/ ``` ### 4.6 MNLI task ``` !accelerate launch torchdistill/examples/hf_transformers/text_classification.py \ --config 
torchdistill/configs/sample/glue/mnli/kd/bert_base_uncased_from_bert_large_uncased.yaml \ --task mnli \ --log log/glue/mnli/kd/bert_base_uncased_from_bert_large_uncased.txt \ --private_output leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/ ``` ### 4.7 QNLI task ``` !accelerate launch torchdistill/examples/hf_transformers/text_classification.py \ --config torchdistill/configs/sample/glue/qnli/kd/bert_base_uncased_from_bert_large_uncased.yaml \ --task qnli \ --log log/glue/qnli/kd/bert_base_uncased_from_bert_large_uncased.txt \ --private_output leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/ ``` ### 4.8 RTE task ``` !accelerate launch torchdistill/examples/hf_transformers/text_classification.py \ --config torchdistill/configs/sample/glue/rte/kd/bert_base_uncased_from_bert_large_uncased.yaml \ --task rte \ --log log/glue/rte/kd/bert_base_uncased_from_bert_large_uncased.txt \ --private_output leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/ ``` ### 4.9 WNLI task ``` !accelerate launch torchdistill/examples/hf_transformers/text_classification.py \ --config torchdistill/configs/sample/glue/wnli/kd/bert_base_uncased_from_bert_large_uncased.yaml \ --task wnli \ --log log/glue/wnli/kd/bert_base_uncased_from_bert_large_uncased.txt \ --private_output leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/ ``` # 5. Validate your prediction files for GLUE leaderboard To make sure your prediction files contain the right numbers of samples (lines), you should see the following output by `wc -l <your prediction dir path>`. ``` 1105 AX.tsv 1064 CoLA.tsv 9848 MNLI-mm.tsv 9797 MNLI-m.tsv 1726 MRPC.tsv 5464 QNLI.tsv 390966 QQP.tsv 3001 RTE.tsv 1822 SST-2.tsv 1380 STS-B.tsv 147 WNLI.tsv 426320 total ``` ``` !wc -l leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/* ``` ## 6. 
Zip the submission files and download to make a submission ``` !zip bert_base_uncased_from_bert_large_uncased-submission.zip leaderboard/glue/kd/bert_base_uncased_from_bert_large_uncased/* ``` Download the zip file from "Files" menu. To submit the file to the GLUE system, refer to their webpage. https://gluebenchmark.com/ ## 7. More sample configurations, models, datasets... You can find more [sample configurations](https://github.com/yoshitomo-matsubara/torchdistill/tree/master/configs/sample/) in the [***torchdistill***](https://github.com/yoshitomo-matsubara/torchdistill) repository. If you would like to use larger datasets e.g., **ImageNet** and **COCO** datasets and models in `torchvision` (or your own modules), refer to the [official configurations](https://github.com/yoshitomo-matsubara/torchdistill/tree/master/configs/official) used in some published papers. Experiments with such large datasets and models will require you to use your own machine due to limited disk space and session time (12 hours for free version and 24 hours for Colab Pro) on Google Colab. # Colab examples for training student models without teacher models You can find Colab examples for training student models without teacher models in the [***torchdistill***](https://github.com/yoshitomo-matsubara/torchdistill) repository.
github_jupyter
##### Copyright 2018 The TensorFlow Authors.

```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```

# 不规则张量

<table class="tfo-notebook-buttons" align="left">
<td> <a target="_blank" href="https://tensorflow.google.cn/guide/ragged_tensor"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">在 TensorFlow.org 上查看</a> </td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/ragged_tensor.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 中运行 </a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/ragged_tensor.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 Github 上查看源代码</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/ragged_tensor.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a></td>
</table>

**API 文档:** [`tf.RaggedTensor`](https://tensorflow.google.cn/api_docs/python/tf/RaggedTensor) [`tf.ragged`](https://tensorflow.google.cn/api_docs/python/tf/ragged)

## 设置

```
!pip install -q tf_nightly
import math
import tensorflow as tf
```

## 概述

数据有多种形状;张量也应当有多种形状。*不规则张量*是嵌套的可变长度列表的 TensorFlow 等效项。它们使存储和处理包含非均匀形状的数据变得容易,包括:

- 可变长度特征,例如电影的演员名单。
- 成批的可变长度顺序输入,例如句子或视频剪辑。
- 分层输入,例如细分为节、段落、句子和单词的文本文档。
- 结构化输入中的各个字段,例如协议缓冲区。

### 不规则张量的功能

有一百多种 TensorFlow 运算支持不规则张量,包括数学运算(如 `tf.add` 和 `tf.reduce_mean`)、数组运算(如 `tf.concat` 和 `tf.tile`)、字符串操作运算(如 `tf.substr`)、控制流运算(如 `tf.while_loop` 和
`tf.map_fn`)等: ``` digits = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) words = tf.ragged.constant([["So", "long"], ["thanks", "for", "all", "the", "fish"]]) print(tf.add(digits, 3)) print(tf.reduce_mean(digits, axis=1)) print(tf.concat([digits, [[5, 3]]], axis=0)) print(tf.tile(digits, [1, 2])) print(tf.strings.substr(words, 0, 2)) print(tf.map_fn(tf.math.square, digits)) ``` 还有专门针对不规则张量的方法和运算,包括工厂方法、转换方法和值映射运算。有关支持的运算列表,请参阅 **`tf.ragged` 包文档**。 许多 TensorFlow API 都支持不规则张量,包括 [Keras](https://tensorflow.google.cn/guide/keras)、[Dataset](https://tensorflow.google.cn/guide/data)、[tf.function](https://tensorflow.google.cn/guide/function)、[SavedModel](https://tensorflow.google.cn/guide/saved_model) 和 [tf.Example](https://tensorflow.google.cn/tutorials/load_data/tfrecord)。有关更多信息,请参阅下面的 **TensorFlow API** 一节。 与普通张量一样,您可以使用 Python 风格的索引来访问不规则张量的特定切片。有关更多信息,请参阅下面的**索引**一节。 ``` print(digits[0]) # First row print(digits[:, :2]) # First two values in each row. print(digits[:, -2:]) # Last two values in each row. 
``` 与普通张量一样,您可以使用 Python 算术和比较运算符来执行逐元素运算。有关更多信息,请参阅下面的**重载运算符**一节。 ``` print(digits + 3) print(digits + tf.ragged.constant([[1, 2, 3, 4], [], [5, 6, 7], [8], []])) ``` 如果需要对 `RaggedTensor` 的值进行逐元素转换,您可以使用 `tf.ragged.map_flat_values`(它采用一个函数加上一个或多个参数的形式),并应用这个函数来转换 `RaggedTensor` 的值。 ``` times_two_plus_one = lambda x: x * 2 + 1 print(tf.ragged.map_flat_values(times_two_plus_one, digits)) ``` 不规则张量可以转换为嵌套的 Python `list` 和 numpy `array`: ``` digits.to_list() digits.numpy() ``` ### 构造不规则张量 构造不规则张量的最简单方法是使用 `tf.ragged.constant`,它会构建与给定的嵌套 Python `list` 或 numpy `array` 相对应的 `RaggedTensor`: ``` sentences = tf.ragged.constant([ ["Let's", "build", "some", "ragged", "tensors", "!"], ["We", "can", "use", "tf.ragged.constant", "."]]) print(sentences) paragraphs = tf.ragged.constant([ [['I', 'have', 'a', 'cat'], ['His', 'name', 'is', 'Mat']], [['Do', 'you', 'want', 'to', 'come', 'visit'], ["I'm", 'free', 'tomorrow']], ]) print(paragraphs) ``` 还可以通过将扁平的*值*张量与*行分区*张量进行配对来构造不规则张量,行分区张量使用 `tf.RaggedTensor.from_value_rowids`、`tf.RaggedTensor.from_row_lengths` 和 `tf.RaggedTensor.from_row_splits` 等工厂类方法指示如何将值分成各行。 #### `tf.RaggedTensor.from_value_rowids` 如果知道每个值属于哪一行,可以使用 `value_rowids` 行分区张量构建 `RaggedTensor`: ![value_rowids](https://tensorflow.google.cn/images/ragged_tensors/value_rowids.png) ``` print(tf.RaggedTensor.from_value_rowids( values=[3, 1, 4, 1, 5, 9, 2], value_rowids=[0, 0, 0, 0, 2, 2, 3])) ``` #### `tf.RaggedTensor.from_row_lengths` 如果知道每行的长度,可以使用 `row_lengths` 行分区张量: ![row_lengths](https://tensorflow.google.cn/images/ragged_tensors/row_lengths.png) ``` print(tf.RaggedTensor.from_row_lengths( values=[3, 1, 4, 1, 5, 9, 2], row_lengths=[4, 0, 2, 1])) ``` #### `tf.RaggedTensor.from_row_splits` 如果知道指示每行开始和结束的索引,可以使用 `row_splits` 行分区张量: ![row_splits](https://tensorflow.google.cn/images/ragged_tensors/row_splits.png) ``` print(tf.RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2], row_splits=[0, 4, 4, 6, 7])) ``` 有关完整的工厂方法列表,请参阅 `tf.RaggedTensor` 类文档。 
注:默认情况下,这些工厂方法会添加断言,说明行分区张量结构良好且与值数量保持一致。如果您能够保证输入的结构良好且一致,可以使用 `validate=False` 参数跳过此类检查。 ### 可以在不规则张量中存储什么 与普通 `Tensor` 一样,`RaggedTensor` 中的所有值必须具有相同的类型;所有值必须处于相同的嵌套深度(张量的*秩*): ``` print(tf.ragged.constant([["Hi"], ["How", "are", "you"]])) # ok: type=string, rank=2 print(tf.ragged.constant([[[1, 2], [3]], [[4, 5]]])) # ok: type=int32, rank=3 try: tf.ragged.constant([["one", "two"], [3, 4]]) # bad: multiple types except ValueError as exception: print(exception) try: tf.ragged.constant(["A", ["B", "C"]]) # bad: multiple nesting depths except ValueError as exception: print(exception) ``` ## 示例用例 以下示例演示了如何使用 `RaggedTensor`,通过为每个句子的开头和结尾使用特殊标记,为一批可变长度查询构造和组合一元元组与二元元组嵌入。有关本例中使用的运算的更多详细信息,请参阅 `tf.ragged` 包文档。 ``` queries = tf.ragged.constant([['Who', 'is', 'Dan', 'Smith'], ['Pause'], ['Will', 'it', 'rain', 'later', 'today']]) # Create an embedding table. num_buckets = 1024 embedding_size = 4 embedding_table = tf.Variable( tf.random.truncated_normal([num_buckets, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) # Look up the embedding for each word. word_buckets = tf.strings.to_hash_bucket_fast(queries, num_buckets) word_embeddings = tf.nn.embedding_lookup(embedding_table, word_buckets) # ① # Add markers to the beginning and end of each sentence. marker = tf.fill([queries.nrows(), 1], '#') padded = tf.concat([marker, queries, marker], axis=1) # ② # Build word bigrams & look up embeddings. 
bigrams = tf.strings.join([padded[:, :-1], padded[:, 1:]], separator='+') # ③ bigram_buckets = tf.strings.to_hash_bucket_fast(bigrams, num_buckets) bigram_embeddings = tf.nn.embedding_lookup(embedding_table, bigram_buckets) # ④ # Find the average embedding for each sentence all_embeddings = tf.concat([word_embeddings, bigram_embeddings], axis=1) # ⑤ avg_embedding = tf.reduce_mean(all_embeddings, axis=1) # ⑥ print(avg_embedding) ``` ![ragged_example](https://tensorflow.google.cn/images/ragged_tensors/ragged_example.png) ## 不规则维度和均匀维度 ***不规则维度***是切片可能具有不同长度的维度。例如,`rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` 的内部(列)维度是不规则的,因为列切片 (`rt[0, :]`, ..., `rt[4, :]`) 具有不同的长度。切片全都具有相同长度的维度称为*均匀维度*。 不规则张量的最外层维始终是均匀维度,因为它只包含一个切片(因此不可能有不同的切片长度)。其余维度可能是不规则维度也可能是均匀维度。例如,我们可以使用形状为 `[num_sentences, (num_words), embedding_size]` 的不规则张量为一批句子中的每个单词存储单词嵌入,其中 `(num_words)` 周围的括号表示维度是不规则维度。 ![sent_word_embed](https://tensorflow.google.cn/images/ragged_tensors/sent_word_embed.png) 不规则张量可以有多个不规则维度。例如,我们可以使用形状为 `[num_documents, (num_paragraphs), (num_sentences), (num_words)]` 的张量存储一批结构化文本文档(其中,括号同样用于表示不规则维度)。 与 `tf.Tensor` 一样,不规则张量的***秩***是其总维数(包括不规则维度和均匀维度)。***潜在的不规则张量***是一个值,这个值可能是 `tf.Tensor` 或 `tf.RaggedTensor`。 描述 RaggedTensor 的形状时,按照惯例,不规则维度会通过括号进行指示。例如,如上面所见,存储一批句子中每个单词的单词嵌入的三维 RaggedTensor 的形状可以写为 `[num_sentences, (num_words), embedding_size]`。 `RaggedTensor.shape` 特性返回不规则张量的 `tf.TensorShape`,其中不规则维度的大小为 `None`: ``` tf.ragged.constant([["Hi"], ["How", "are", "you"]]).shape ``` 可以使用方法 `tf.RaggedTensor.bounding_shape` 查找给定 `RaggedTensor` 的紧密边界形状: ``` print(tf.ragged.constant([["Hi"], ["How", "are", "you"]]).bounding_shape()) ``` ## 不规则张量和稀疏张量对比 不规则张量*不*应该被认为是一种稀疏张量。尤其是,稀疏张量是以紧凑的格式对相同数据建模的 *tf.Tensor 的高效编码*;而不规则张量是对扩展的数据类建模的 *tf.Tensor 的延伸*。这种区别在定义运算时至关重要: - 对稀疏张量或密集张量应用某一运算应当始终获得相同结果。 - 对不规则张量或稀疏张量应用某一运算可能获得不同结果。 一个说明性的示例是,考虑如何为不规则张量和稀疏张量定义 `concat`、`stack` 和 `tile` 之类的数组运算。连接不规则张量时,会将每一行连在一起,形成一个具有组合长度的行: 
![ragged_concat](https://tensorflow.google.cn/images/ragged_tensors/ragged_concat.png) ``` ragged_x = tf.ragged.constant([["John"], ["a", "big", "dog"], ["my", "cat"]]) ragged_y = tf.ragged.constant([["fell", "asleep"], ["barked"], ["is", "fuzzy"]]) print(tf.concat([ragged_x, ragged_y], axis=1)) ``` 但连接稀疏张量时,相当于连接相应的密集张量,如以下示例所示(其中 Ø 表示缺失的值): ![sparse_concat](https://tensorflow.google.cn/images/ragged_tensors/sparse_concat.png) ``` sparse_x = ragged_x.to_sparse() sparse_y = ragged_y.to_sparse() sparse_result = tf.sparse.concat(sp_inputs=[sparse_x, sparse_y], axis=1) print(tf.sparse.to_dense(sparse_result, '')) ``` 另一个说明为什么这种区别非常重要的示例是,考虑一个运算(如 `tf.reduce_mean`)的“每行平均值”的定义。对于不规则张量,一行的平均值是该行的值总和除以该行的宽度。但对于稀疏张量来说,一行的平均值是该行的值总和除以稀疏张量的总宽度(大于等于最长行的宽度)。 ## TensorFlow API ### Keras [tf.keras](https://tensorflow.google.cn/guide/keras) 是 TensorFlow 的高级 API,用于构建和训练深度学习模型。通过在 `tf.keras.Input` 或 `tf.keras.layers.InputLayer` 上设置 `ragged=True`,不规则张量可以作为输入传送到 Keras 模型。不规则张量还可以在 Keras 层之间传递,并由 Keras 模型返回。以下示例显示了一个使用不规则张量训练的小 LSTM 模型。 ``` # Task: predict whether each sentence is a question or not. sentences = tf.constant( ['What makes you think she is a witch?', 'She turned me into a newt.', 'A newt?', 'Well, I got better.']) is_question = tf.constant([True, False, True, False]) # Preprocess the input strings. hash_buckets = 1000 words = tf.strings.split(sentences, ' ') hashed_words = tf.strings.to_hash_bucket_fast(words, hash_buckets) # Build the Keras model. 
keras_model = tf.keras.Sequential([ tf.keras.layers.Input(shape=[None], dtype=tf.int64, ragged=True), tf.keras.layers.Embedding(hash_buckets, 16), tf.keras.layers.LSTM(32, use_bias=False), tf.keras.layers.Dense(32), tf.keras.layers.Activation(tf.nn.relu), tf.keras.layers.Dense(1) ]) keras_model.compile(loss='binary_crossentropy', optimizer='rmsprop') keras_model.fit(hashed_words, is_question, epochs=5) print(keras_model.predict(hashed_words)) ``` ### tf.Example [tf.Example](https://tensorflow.google.cn/tutorials/load_data/tfrecord) 是 TensorFlow 数据的标准 [protobuf](https://developers.google.com/protocol-buffers/) 编码。使用 `tf.Example` 编码的数据往往包括可变长度特征。例如,以下代码定义了一批具有不同特征长度的四条 `tf.Example` 消息: ``` import google.protobuf.text_format as pbtext def build_tf_example(s): return pbtext.Merge(s, tf.train.Example()).SerializeToString() example_batch = [ build_tf_example(r''' features { feature {key: "colors" value {bytes_list {value: ["red", "blue"]} } } feature {key: "lengths" value {int64_list {value: [7]} } } }'''), build_tf_example(r''' features { feature {key: "colors" value {bytes_list {value: ["orange"]} } } feature {key: "lengths" value {int64_list {value: []} } } }'''), build_tf_example(r''' features { feature {key: "colors" value {bytes_list {value: ["black", "yellow"]} } } feature {key: "lengths" value {int64_list {value: [1, 3]} } } }'''), build_tf_example(r''' features { feature {key: "colors" value {bytes_list {value: ["green"]} } } feature {key: "lengths" value {int64_list {value: [3, 5, 2]} } } }''')] ``` 我们可以使用 `tf.io.parse_example` 解析这个编码数据,它采用序列化字符串的张量和特征规范字典,并将字典映射特征名称返回给张量。要将长度可变特征读入不规则张量,我们只需在特征规范字典中使用 `tf.io.RaggedFeature` 即可: ``` feature_specification = { 'colors': tf.io.RaggedFeature(tf.string), 'lengths': tf.io.RaggedFeature(tf.int64), } feature_tensors = tf.io.parse_example(example_batch, feature_specification) for name, value in feature_tensors.items(): print("{}={}".format(name, value)) ``` `tf.io.RaggedFeature` 还可用于读取具有多个不规则维度的特征。有关详细信息,请参阅 [API 
文档](https://tensorflow.google.cn/api_docs/python/tf/io/RaggedFeature)。 ### 数据集 [tf.data](https://tensorflow.google.cn/guide/data) 是一个 API,可用于通过简单的可重用代码块构建复杂的输入流水线。它的核心数据结构是 `tf.data.Dataset`,表示一系列元素,每个元素包含一个或多个分量。 ``` # Helper function used to print datasets in the examples below. def print_dictionary_dataset(dataset): for i, element in enumerate(dataset): print("Element {}:".format(i)) for (feature_name, feature_value) in element.items(): print('{:>14} = {}'.format(feature_name, feature_value)) ``` #### 使用不规则张量构建数据集 可以采用通过 `tf.Tensor` 或 numpy `array` 构建数据集时使用的方法,如 `Dataset.from_tensor_slices`,通过不规则张量构建数据集: ``` dataset = tf.data.Dataset.from_tensor_slices(feature_tensors) print_dictionary_dataset(dataset) ``` 注:`Dataset.from_generator` 目前还不支持不规则张量,但不久后将会支持这种张量。 #### 批处理和取消批处理具有不规则张量的数据集 可以使用 `Dataset.batch` 方法对具有不规则张量的数据集进行批处理(将 *n* 个连续元素组合成单个元素)。 ``` batched_dataset = dataset.batch(2) print_dictionary_dataset(batched_dataset) ``` 相反,可以使用 `Dataset.unbatch` 将批处理后的数据集转换为扁平数据集。 ``` unbatched_dataset = batched_dataset.unbatch() print_dictionary_dataset(unbatched_dataset) ``` #### 对具有可变长度非不规则张量的数据集进行批处理 如果您有一个包含非不规则张量的数据集,而且各个元素的张量长度不同,则可以应用 `dense_to_ragged_batch` 转换,将这些非不规则张量批处理成不规则张量: ``` non_ragged_dataset = tf.data.Dataset.from_tensor_slices([1, 5, 3, 2, 8]) non_ragged_dataset = non_ragged_dataset.map(tf.range) batched_non_ragged_dataset = non_ragged_dataset.apply( tf.data.experimental.dense_to_ragged_batch(2)) for element in batched_non_ragged_dataset: print(element) ``` #### 转换具有不规则张量的数据集 还可以使用 `Dataset.map` 创建或转换数据集中的不规则张量。 ``` def transform_lengths(features): return { 'mean_length': tf.math.reduce_mean(features['lengths']), 'length_ranges': tf.ragged.range(features['lengths'])} transformed_dataset = dataset.map(transform_lengths) print_dictionary_dataset(transformed_dataset) ``` ### tf.function [tf.function](https://tensorflow.google.cn/guide/function) 是预计算 Python 函数的 TensorFlow 计算图的装饰器,它可以大幅改善 TensorFlow 代码的性能。不规则张量能够透明地与 `@tf.function` 
装饰的函数一起使用。例如,以下函数对不规则张量和非不规则张量均有效: ``` @tf.function def make_palindrome(x, axis): return tf.concat([x, tf.reverse(x, [axis])], axis) make_palindrome(tf.constant([[1, 2], [3, 4], [5, 6]]), axis=1) make_palindrome(tf.ragged.constant([[1, 2], [3], [4, 5, 6]]), axis=1) ``` 如果您希望为 `tf.function` 明确指定 `input_signature`,可以使用 `tf.RaggedTensorSpec` 执行此操作。 ``` @tf.function( input_signature=[tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int32)]) def max_and_min(rt): return (tf.math.reduce_max(rt, axis=-1), tf.math.reduce_min(rt, axis=-1)) max_and_min(tf.ragged.constant([[1, 2], [3], [4, 5, 6]])) ``` #### 具体函数 [具体函数](https://tensorflow.google.cn/guide/function#obtaining_concrete_functions)封装通过 `tf.function` 构建的各个跟踪图。不规则张量可以透明地与具体函数一起使用。 ``` # Preferred way to use ragged tensors with concrete functions (TF 2.3+): try: @tf.function def increment(x): return x + 1 rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) cf = increment.get_concrete_function(rt) print(cf(rt)) except Exception as e: print(f"Not supported before TF 2.3: {type(e)}: {e}") ``` ### SavedModel [SavedModel](https://tensorflow.google.cn/guide/saved_model) 是序列化 TensorFlow 程序,包括权重和计算。它可以通过 Keras 模型或自定义模型构建。在任何一种情况下,不规则张量都可以透明地与 SavedModel 定义的函数和方法一起使用。 #### 示例:保存 Keras 模型 ``` import tempfile keras_module_path = tempfile.mkdtemp() tf.saved_model.save(keras_model, keras_module_path) imported_model = tf.saved_model.load(keras_module_path) imported_model(hashed_words) ``` #### 示例:保存自定义模型 ``` class CustomModule(tf.Module): def __init__(self, variable_value): super(CustomModule, self).__init__() self.v = tf.Variable(variable_value) @tf.function def grow(self, x): return x * self.v module = CustomModule(100.0) # Before saving a custom model, we must ensure that concrete functions are # built for each input signature that we will need. 
module.grow.get_concrete_function(tf.RaggedTensorSpec(shape=[None, None], dtype=tf.float32)) custom_module_path = tempfile.mkdtemp() tf.saved_model.save(module, custom_module_path) imported_model = tf.saved_model.load(custom_module_path) imported_model.grow(tf.ragged.constant([[1.0, 4.0, 3.0], [2.0]])) ``` 注:SavedModel [签名](https://tensorflow.google.cn/guide/saved_model#specifying_signatures_during_export)是具体函数。如上文的“具体函数”部分所述,从 TensorFlow 2.3 开始,只有具体函数才能正确处理不规则张量。如果您需要在先前版本的 TensorFlow 中使用 SavedModel 签名,建议您将不规则张量分解成其张量分量。 ## 重载运算符 `RaggedTensor` 类会重载标准 Python 算术和比较运算符,使其易于执行基本的逐元素数学: ``` x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) y = tf.ragged.constant([[1, 1], [2], [3, 3, 3]]) print(x + y) ``` 由于重载运算符执行逐元素计算,因此所有二进制运算的输入必须具有相同的形状,或者可以广播至相同的形状。在最简单的广播情况下,单个标量与不规则张量中的每个值逐元素组合: ``` x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) print(x + 3) ``` 有关更高级的用例,请参阅**广播**一节。 不规则张量重载与正常 `Tensor` 相同的一组运算符:一元运算符 `-`、`~` 和 `abs()`;二元运算符 `+`、`-`、`*`、`/`、`//`、`%`、`**`、`&`、`|`、`^`、`==`、`<`、`<=`、`>` 和 `>=`。 ## 索引 不规则张量支持 Python 风格的索引,包括多维索引和切片。以下示例使用二维和三维不规则张量演示了不规则张量索引。 ### 索引示例:二维不规则张量 ``` queries = tf.ragged.constant( [['Who', 'is', 'George', 'Washington'], ['What', 'is', 'the', 'weather', 'tomorrow'], ['Goodnight']]) print(queries[1]) # A single query print(queries[1, 2]) # A single word print(queries[1:]) # Everything but the first row print(queries[:, :3]) # The first 3 words of each query print(queries[:, -2:]) # The last 2 words of each query ``` ### 索引示例:三维不规则张量 ``` rt = tf.ragged.constant([[[1, 2, 3], [4]], [[5], [], [6]], [[7]], [[8, 9], [10]]]) print(rt[1]) # Second row (2-D RaggedTensor) print(rt[3, 0]) # First element of fourth row (1-D Tensor) print(rt[:, 1:3]) # Items 1-3 of each row (3-D RaggedTensor) print(rt[:, -1:]) # Last item of each row (3-D RaggedTensor) ``` `RaggedTensor` 支持多维索引和切片,但有一个限制:不允许索引一个不规则维度。这种情况是有问题的,因为指示的值可能在某些行中存在,而在其他行中不存在。这种情况下,我们不知道是应该 (1) 引发 `IndexError`;(2) 使用默认值;还是 (3) 跳过该值并返回一个行数比开始时少的张量。根据 [Python 
的指导原则](https://www.python.org/dev/peps/pep-0020/)(“当面对不明确的情况时,不要尝试去猜测”),我们目前不允许此运算。 ## 张量类型转换 `RaggedTensor` 类定义了可用于在 `RaggedTensor` 与 `tf.Tensor` 或 `tf.SparseTensors` 之间转换的方法: ``` ragged_sentences = tf.ragged.constant([ ['Hi'], ['Welcome', 'to', 'the', 'fair'], ['Have', 'fun']]) # RaggedTensor -> Tensor print(ragged_sentences.to_tensor(default_value='', shape=[None, 10])) # Tensor -> RaggedTensor x = [[1, 3, -1, -1], [2, -1, -1, -1], [4, 5, 8, 9]] print(tf.RaggedTensor.from_tensor(x, padding=-1)) #RaggedTensor -> SparseTensor print(ragged_sentences.to_sparse()) # SparseTensor -> RaggedTensor st = tf.SparseTensor(indices=[[0, 0], [2, 0], [2, 1]], values=['a', 'b', 'c'], dense_shape=[3, 3]) print(tf.RaggedTensor.from_sparse(st)) ``` ## 评估不规则张量 要访问不规则张量中的值,您可以: 1. 使用 `tf.RaggedTensor.to_list()` 将不规则张量转换为嵌套 Python 列表。 2. 使用 `tf.RaggedTensor.numpy()` 将不规则张量转换为 numpy 数组,数组的值是嵌套的 numpy 数组。 3. 使用 `tf.RaggedTensor.values` 和 `tf.RaggedTensor.row_splits` 属性,或 `tf.RaggedTensor.row_lengths()` 和 `tf.RaggedTensor.value_rowids()` 之类的行分区方法,将不规则张量分解成其分量。 4. 使用 Python 索引从不规则张量中选择值。 ``` rt = tf.ragged.constant([[1, 2], [3, 4, 5], [6], [], [7]]) print("python list:", rt.to_list()) print("numpy array:", rt.numpy()) print("values:", rt.values.numpy()) print("splits:", rt.row_splits.numpy()) print("indexed value:", rt[1].numpy()) ``` ## 广播 广播是使具有不同形状的张量在进行逐元素运算时具有兼容形状的过程。有关广播的更多背景,请参阅: - [Numpy:广播](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - `tf.broadcast_dynamic_shape` - `tf.broadcast_to` 广播两个输入 `x` 和 `y`,使其具有兼容形状的基本步骤是: 1. 如果 `x` 和 `y` 没有相同的维数,则增加外层维度(使用大小 1),直至它们具有相同的维数。 2. 
对于 `x` 和 `y` 的大小不同的每一个维度: - 如果 `x` 或 `y` 在 `d` 维中的大小为 `1`,则跨 `d` 维重复其值以匹配其他输入的大小。 - 否则,引发异常(`x` 和 `y` 非广播兼容)。 其中,均匀维度中一个张量的大小是一个数字(跨该维的切片大小);不规则维度中一个张量的大小是切片长度列表(跨该维的所有切片)。 ### 广播示例 ``` # x (2D ragged): 2 x (num_rows) # y (scalar) # result (2D ragged): 2 x (num_rows) x = tf.ragged.constant([[1, 2], [3]]) y = 3 print(x + y) # x (2d ragged): 3 x (num_rows) # y (2d tensor): 3 x 1 # Result (2d ragged): 3 x (num_rows) x = tf.ragged.constant( [[10, 87, 12], [19, 53], [12, 32]]) y = [[1000], [2000], [3000]] print(x + y) # x (3d ragged): 2 x (r1) x 2 # y (2d ragged): 1 x 1 # Result (3d ragged): 2 x (r1) x 2 x = tf.ragged.constant( [[[1, 2], [3, 4], [5, 6]], [[7, 8]]], ragged_rank=1) y = tf.constant([[10]]) print(x + y) # x (3d ragged): 2 x (r1) x (r2) x 1 # y (1d tensor): 3 # Result (3d ragged): 2 x (r1) x (r2) x 3 x = tf.ragged.constant( [ [ [[1], [2]], [], [[3]], [[4]], ], [ [[5], [6]], [[7]] ] ], ragged_rank=2) y = tf.constant([10, 20, 30]) print(x + y) ``` 下面是一些不广播的形状示例: ``` # x (2d ragged): 3 x (r1) # y (2d tensor): 3 x 4 # trailing dimensions do not match x = tf.ragged.constant([[1, 2], [3, 4, 5, 6], [7]]) y = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) try: x + y except tf.errors.InvalidArgumentError as exception: print(exception) # x (2d ragged): 3 x (r1) # y (2d ragged): 3 x (r2) # ragged dimensions do not match. 
x = tf.ragged.constant([[1, 2, 3], [4], [5, 6]]) y = tf.ragged.constant([[10, 20], [30, 40], [50]]) try: x + y except tf.errors.InvalidArgumentError as exception: print(exception) # x (3d ragged): 3 x (r1) x 2 # y (3d ragged): 3 x (r1) x 3 # trailing dimensions do not match x = tf.ragged.constant([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]]) y = tf.ragged.constant([[[1, 2, 0], [3, 4, 0], [5, 6, 0]], [[7, 8, 0], [9, 10, 0]]]) try: x + y except tf.errors.InvalidArgumentError as exception: print(exception) ``` ## RaggedTensor 编码 不规则张量使用 `RaggedTensor` 类进行编码。在内部,每个 `RaggedTensor` 包含: - 一个 `values` 张量,它将可变长度行连接成扁平列表。 - 一个 `row_partition`,它指示如何将这些扁平值分成各行。 ![ragged_encoding_2](https://tensorflow.google.cn/images/ragged_tensors/ragged_encoding_2.png) 可以使用四种不同的编码存储 `row_partition`: - `row_splits` 是一个整型向量,用于指定行之间的拆分点。 - `value_rowids` 是一个整型向量,用于指定每个值的行索引。 - `row_lengths` 是一个整型向量,用于指定每一行的长度。 - `uniform_row_length` 是一个整型标量,用于指定所有行的单个长度。 ![partition_encodings](https://tensorflow.google.cn/images/ragged_tensors/partition_encodings.png) 整型标量 `nrows` 还可以包含在 `row_partition` 编码中,以考虑具有 `value_rowids` 的空尾随行或具有 `uniform_row_length` 的空行。 ``` rt = tf.RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2], row_splits=[0, 4, 4, 6, 7]) print(rt) ``` 选择为行分区使用哪种编码由不规则张量在内部进行管理,以提高某些环境下的效率。尤其是,不同行分区方案的某些优点和缺点是: - **高效索引**:`row_splits` 编码可以实现不规则张量的恒定时间索引和切片。 - **高效连接**:`row_lengths` 编码在连接不规则张量时更有效,因为当两个张量连接在一起时,行长度不会改变。 - **较小的编码大小**:`value_rowids` 编码在存储有大量空行的不规则张量时更有效,因为张量的大小只取决于值的总数。另一方面,`row_splits` 和 `row_lengths` 编码在存储具有较长行的不规则张量时更有效,因为它们每行只需要一个标量值。 - **兼容性**:`value_rowids` 方案与 `tf.segment_sum` 等运算使用的[分段](https://tensorflow.google.cn/api_docs/python/tf/math#about_segmentation)格式相匹配。`row_limits` 方案与 `tf.sequence_mask` 等运算使用的格式相匹配。 - **均匀维度**:如下文所述,`uniform_row_length` 编码用于对具有均匀维度的不规则张量进行编码。 ### 多个不规则维度 具有多个不规则维度的不规则张量通过为 `values` 张量使用嵌套 `RaggedTensor` 进行编码。每个嵌套 `RaggedTensor` 都会增加一个不规则维度。 ![ragged_rank_2](https://tensorflow.google.cn/images/ragged_tensors/ragged_rank_2.png) ``` rt = 
tf.RaggedTensor.from_row_splits( values=tf.RaggedTensor.from_row_splits( values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], row_splits=[0, 3, 3, 5, 9, 10]), row_splits=[0, 1, 1, 5]) print(rt) print("Shape: {}".format(rt.shape)) print("Number of partitioned dimensions: {}".format(rt.ragged_rank)) ``` 工厂函数 `tf.RaggedTensor.from_nested_row_splits` 可用于通过提供一个 `row_splits` 张量列表,直接构造具有多个不规则维度的 RaggedTensor: ``` rt = tf.RaggedTensor.from_nested_row_splits( flat_values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], nested_row_splits=([0, 1, 1, 5], [0, 3, 3, 5, 9, 10])) print(rt) ``` ### 不规则秩和扁平值 不规则张量的***不规则秩***是底层 `values` 张量的分区次数(即 `RaggedTensor` 对象的嵌套深度)。最内层的 `values` 张量称为其 ***flat_values***。在以下示例中,`conversations` 具有 ragged_rank=3,其 `flat_values` 为具有 24 个字符串的一维 `Tensor`: ``` # shape = [batch, (paragraph), (sentence), (word)] conversations = tf.ragged.constant( [[[["I", "like", "ragged", "tensors."]], [["Oh", "yeah?"], ["What", "can", "you", "use", "them", "for?"]], [["Processing", "variable", "length", "data!"]]], [[["I", "like", "cheese."], ["Do", "you?"]], [["Yes."], ["I", "do."]]]]) conversations.shape assert conversations.ragged_rank == len(conversations.nested_row_splits) conversations.ragged_rank # Number of partitioned dimensions. 
conversations.flat_values.numpy() ``` ### 均匀内层维度 具有均匀内层维度的不规则张量通过为 flat_values(即最内层 `values`)使用多维 `tf.Tensor` 进行编码。 ![uniform_inner](https://tensorflow.google.cn/images/ragged_tensors/uniform_inner.png) ``` rt = tf.RaggedTensor.from_row_splits( values=[[1, 3], [0, 0], [1, 3], [5, 3], [3, 3], [1, 2]], row_splits=[0, 3, 4, 6]) print(rt) print("Shape: {}".format(rt.shape)) print("Number of partitioned dimensions: {}".format(rt.ragged_rank)) print("Flat values shape: {}".format(rt.flat_values.shape)) print("Flat values:\n{}".format(rt.flat_values)) ``` ### 均匀非内层维度 具有均匀非内层维度的不规则张量通过使用 `uniform_row_length` 对行分区进行编码。 ![uniform_outer](https://tensorflow.google.cn/images/ragged_tensors/uniform_outer.png) ``` rt = tf.RaggedTensor.from_uniform_row_length( values=tf.RaggedTensor.from_row_splits( values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], row_splits=[0, 3, 5, 9, 10]), uniform_row_length=2) print(rt) print("Shape: {}".format(rt.shape)) print("Number of partitioned dimensions: {}".format(rt.ragged_rank)) ```
github_jupyter
# NYC Taxi Fare Prediction Predict taxi fares using the [New York City Taxi and Limousine Commission (TLC) Trip Record Data](https://registry.opendata.aws/nyc-tlc-trip-records-pds/) public dataset. ``` %%capture !pip install -U pandas geopandas seaborn ``` ## Data Prep In this section of the notebook, you will download the publicly available New York Taxi dataset in preparation for uploading it to S3. ### Download Dataset First, download a sample of the New York City Taxi [dataset](https://registry.opendata.aws/nyc-tlc-trip-records-pds/)⇗ to this notebook instance. This dataset contains information on trips taken by taxis and for-hire vehicles in New York City, including pick-up and drop-off times and locations, fares, distance traveled, and more. ``` !aws s3 cp 's3://nyc-tlc/trip data/green_tripdata_2018-02.csv' 'nyc-tlc.csv' !aws s3 cp 's3://nyc-tlc/misc/taxi_zones.zip' 'taxi_zones.zip' !unzip taxi_zones.zip -d shapes ``` ### Load Datasets Load the trip dataset ``` import pandas as pd trip_df = pd.read_csv( "nyc-tlc.csv", parse_dates=["lpep_pickup_datetime", "lpep_dropoff_datetime"] ) trip_df.head() ``` Load the taxi zone shape data to get the gemotry and calculate a centroid and lat/long each location ``` import geopandas as gpd # Load the shape file and get the geometry and lat/lon zones = gpd.read_file("shapes/taxi_zones.shp") # Return Centroid as CRS code of 3310 for calcuating distance in meters. zones["centroid"] = zones.geometry.centroid.to_crs(epsg=3310) # Convert cordinates to the WSG84 lat/long CRS has a EPSG code of 4326. 
zones["latitude"] = zones.centroid.to_crs(epsg=4326).x zones["longitude"] = zones.centroid.to_crs(epsg=4326).y # Drop duplicate by location ID keeping the first zones = zones.drop_duplicates(subset="LocationID", keep="first") # Drop cols we don't need and inspect results zones = zones.set_index("LocationID").drop( ["OBJECTID", "Shape_Leng", "Shape_Area"], axis=1 ) zones.head() ``` Join the trip data to the zone and calculate the distance between centroids (should take < 20 seconds) ``` %%time trip_df = gpd.GeoDataFrame( trip_df.join(zones, on="PULocationID").join( zones, on="DOLocationID", rsuffix="_DO", lsuffix="_PU" ) ) trip_df["geo_distance"] = trip_df["centroid_PU"].distance(trip_df["centroid_DO"]) / 1000 trip_df[["PULocationID", "DOLocationID", "trip_distance", "geo_distance"]].head() ``` Add datetime parts based on pickup time and duration to validate results ``` trip_df["hour"] = trip_df.lpep_pickup_datetime.dt.hour trip_df["weekday"] = trip_df.lpep_pickup_datetime.dt.weekday trip_df["month"] = trip_df.lpep_pickup_datetime.dt.month trip_df["duration_minutes"] = ( trip_df["lpep_dropoff_datetime"] - trip_df["lpep_pickup_datetime"] ).dt.seconds / 60 ``` ### Data visualization Let's check that we have a good spread of travel across each day of the week and hour of the day ``` import seaborn as sns sns.histplot(trip_df, x="hour") ``` And plot that we have a distribution across week days ``` sns.histplot(trip_df, x="weekday") ``` Let's validate that the geo distance correlations generally with the fare amount ``` sample_df = trip_df[trip_df["geo_distance"] > 0].sample(1000) sns.jointplot(data=sample_df, x="geo_distance", y="fare_amount") ``` Plot the geometry of the map along with centroids for each location ``` import matplotlib.pyplot as plt from shapely.geometry import LineString def annotate(ax, z): txt = f"{z.name}: {z.zone} ({-z.latitude:.2f}°N, {z.longitude:.2f}°W)" ax.annotate(txt, (z.latitude, z.longitude)) def arrow(ax, ll): ld = ll.iloc[1] - ll.iloc[0] 
ax.arrow( ll.iloc[0].latitude, ll.iloc[0].longitude, ld.latitude, ld.longitude, length_includes_head=True, edgecolor="lightgrey", ) def plot_map(zones, zids): # Render the geometry in Lat/Lon space ax = zones.geometry.to_crs(epsg=4326).plot( figsize=(15, 15), color="whitesmoke", edgecolor="lightgrey", linewidth=0.5 ) # Draw arrow arrow(ax, zones.loc[zids][["latitude", "longitude"]]) # Plot centroid centroids = zones.loc[zids].geometry.centroid.to_crs( epsg=3310 ) # Require this format for calculating distance centroids.to_crs(epsg=4326).plot(ax=ax, color="red", marker="+") # Annotate points for i, row in zones.loc[zids].iterrows(): annotate(ax, row) # Output the distance traveled dist = centroids.iloc[0].distance(centroids.iloc[1]) / 1000 plt.title(f"From zone {zids[0]} to {zids[1]} distance: {dist:.2f}km") return dist ``` Select a trip to inspect the zones it travels from and to and the duration and cost ``` trip_idx = 5 # Get the trip and plot on map t = trip_df.iloc[trip_idx] dist = plot_map(zones, [t.PULocationID, t.DOLocationID]) print( f"Took {t.duration_minutes:.2f} minutes on {t.weekday} at {t.hour} hour to travel {dist:.2f}km for the cost of ${t.fare_amount:.2f}" ) ``` ### Feature selection Rename and select columns that we want build model on ``` # Rename cols trip_df = trip_df.rename( columns={ "latitude_PU": "pickup_latitude", "longitude_PU": "pickup_longitude", "latitude_DO": "dropoff_latitude", "longitude_DO": "dropoff_longitude", } ) # Select cols cols = [ "fare_amount", "pickup_latitude", "pickup_longitude", "dropoff_latitude", "dropoff_longitude", "geo_distance", "hour", "weekday", "month", ] data_df = trip_df[cols] data_df.sample(5) ``` Clean up to remove some outliers ``` data_df = data_df[ (data_df.fare_amount > 0) & (data_df.fare_amount < 200) & (data_df.geo_distance >= 0) & (data_df.geo_distance < 121) ].dropna() print(data_df.shape) ``` ### Data splitting and saving We are now ready to split the dataset into train, validation, and test sets. 
``` from sklearn.model_selection import train_test_split train_df, val_df = train_test_split(data_df, test_size=0.20, random_state=42) val_df, test_df = train_test_split(val_df, test_size=0.05, random_state=42) # Reset the index for our test dataframe test_df.reset_index(inplace=True, drop=True) print( "Size of\n train: {},\n val: {},\n test: {} ".format( train_df.shape[0], val_df.shape[0], test_df.shape[0] ) ) ``` Save the train, validation, and test files as CSV locally on this notebook instance. Notice that you save the train file twice - once as the training data file and once as the baseline data file. The baseline data file will be used by [SageMaker Model Monitor](https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor.html)⇗ to detect data drift. Data drift occurs when the statistical nature of the data that your model receives while in production drifts away from the nature of the baseline data it was trained on, which means the model begins to lose accuracy in its predictions. ``` train_df.to_csv("train.csv", index=False, header=False) val_df.to_csv("validation.csv", index=False, header=False) test_df.to_csv("test.csv", index=False, header=False) # Save test and baseline with headers train_df.to_csv("baseline.csv", index=False, header=True) ``` Now upload these CSV files to your default SageMaker S3 bucket. ``` import sagemaker # Get the session and default bucket session = sagemaker.session.Session() bucket = session.default_bucket() # Specify data prefix and version prefix = "nyc-tlc/v2" s3_train_uri = session.upload_data("train.csv", bucket, prefix + "/data/training") s3_val_uri = session.upload_data("validation.csv", bucket, prefix + "/data/validation") s3_test_uri = session.upload_data("test.csv", bucket, prefix + "/data/test") s3_baseline_uri = session.upload_data("baseline.csv", bucket, prefix + "/data/baseline") ``` ### Training Job Build an estimator to train on this, see if using geo_distance its okay predictor. 
``` # TODO: Can XGBoost report use a version which accepts the header for feature importance? from sagemaker.estimator import Estimator from sagemaker.debugger import Rule, rule_configs # Get role and region role = sagemaker.get_execution_role() region = sagemaker.session.Session().boto_session.region_name # Define the XGBoost training report rules # see: https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-training-xgboost-report.html rules = [Rule.sagemaker(rule_configs.create_xgboost_report())] # Get the training instance type training_instance_type = "ml.m4.xlarge" # training step for generating model artifacts image_uri = sagemaker.image_uris.retrieve( framework="xgboost", region=region, version="1.2-2", py_version="py3", instance_type=training_instance_type, ) output_path = "s3://{}/{}/output".format(bucket, prefix) estimator = Estimator( image_uri=image_uri, instance_type=training_instance_type, instance_count=1, output_path=output_path, role=role, disable_profiler=True, # Profile processing job rules=rules, # Report processing job ) hp = { "max_depth": "9", "eta": "0.2", "gamma": "4", "min_child_weight": "300", "subsample": "0.8", "objective": "reg:squarederror", # reg:linear not supported "early_stopping_rounds": "10", "num_round": "100", } estimator.set_hyperparameters(**hp) # Set the data s3_input_train = sagemaker.inputs.TrainingInput( s3_data=s3_train_uri, content_type="text/csv" ) s3_input_val = sagemaker.inputs.TrainingInput( s3_data=s3_val_uri, content_type="text/csv" ) data = {"train": s3_input_train, "validation": s3_input_val} estimator.fit(data) ``` ### Evaluate Wait for the XGBoost report to be ready ``` sm_client = sagemaker.session.Session().sagemaker_client # Attach the job and get report xgb_report_job_name = [ rule["RuleEvaluationJobArn"].split("/")[-1] for rule in estimator.latest_training_job.rule_job_summary() if "CreateXgboostReport" in rule["RuleConfigurationName"] ][0] print(f"Waiting for XGBoost training report 
{xgb_report_job_name} to complete...") sm_client.get_waiter("processing_job_completed_or_stopped").wait( ProcessingJobName=xgb_report_job_name ) print("Done") ``` Inspects the results of the report ``` from IPython.display import FileLink from sagemaker.s3 import S3Downloader, S3Uploader # Get the s3 output report_uri = sm_client.describe_processing_job(ProcessingJobName=xgb_report_job_name)[ "ProcessingOutputConfig" ]["Outputs"][0]["S3Output"]["S3Uri"] # Download the notebook from the report S3Downloader().download(f"{report_uri}/xgboost_report.html", "report") FileLink("report/xgboost_report.html", result_html_prefix="Open Report: ") ``` ### Deploy Deploy an endpoint for the predictor ``` from sagemaker.serializers import CSVSerializer from sagemaker.deserializers import CSVDeserializer predictor = estimator.deploy( initial_instance_count=1, instance_type="ml.m4.xlarge", serializer=CSVSerializer(), deserializer=CSVDeserializer(), ) ``` Get predictions for the held out test dataset ``` def chunker(seq, batch_size): return (seq[pos : pos + batch_size] for pos in range(0, len(seq), batch_size)) # Make predictions without the first colunns results = [] for df in chunker(test_df[test_df.columns[1:]], 20): results += predictor.predict(data=df.to_csv(index=False, header=False))[0] # Get the fare amoiunt pred back in the dataframe\ predictions = pd.Series(results).astype(float) ``` Join the predictions back to the test dataset ``` pred_df = pd.DataFrame({"fare_amount_prediction": predictions}) pred_df = test_df.join(pred_df) # Get abs error pred_df["error"] = abs(pred_df["fare_amount"] - pred_df["fare_amount_prediction"]) pred_df.sort_values("error", ascending=False).head() ``` Calculate the root mean squre error (RMSE) to evaluate the performance of this model. 
``` from math import sqrt from sklearn.metrics import mean_squared_error def rmse(pred_df): return sqrt( mean_squared_error(pred_df["fare_amount"], pred_df["fare_amount_prediction"]) ) print("RMSE: {}".format(rmse(pred_df))) ``` Plot the residules to see where the errors are relative to the fare amount. ``` sns.residplot( x=pred_df["fare_amount"], y=pred_df["fare_amount_prediction"], lowess=True ) ```
github_jupyter
``` from matplotlib import pyplot as plt import pandas as pd from sklearn.decomposition import PCA from sklearn.datasets import load_breast_cancer from sklearn.preprocessing import scale from psynlig import ( pca_explained_variance, pca_explained_variance_bar, pca_2d_scores, pca_2d_loadings, ) plt.style.use('seaborn-notebook') %matplotlib notebook data_set = load_breast_cancer() data = pd.DataFrame(data_set['data'], columns=data_set['feature_names']) data['target'] = data_set['target'] data class_names = {0: 'Malignant', 1: 'Benign'} # original data set contains many variables, for this example we select just 10 of these: variables = [ 'mean radius', 'mean texture', 'mean perimeter', 'mean area', 'mean smoothness', 'mean compactness', 'mean concavity', 'mean concave points', 'mean symmetry', 'mean fractal dimension', ] # to use all variables, uncomment the next line: #variables = [i for i in data.columns if i!= 'target'] print(variables) X = scale(data[variables].values) pca = PCA(n_components=4) # Do PCA, but only ask for 4 principal components scores = pca.fit_transform(X) # Plot the explained variance: pca_explained_variance(pca, marker='o', markersize=12, alpha=0.8); # Bar plot of explained variance: pca_explained_variance_bar(pca); # Plot scores: pca_2d_scores( pca, scores, class_data=data['target'], class_names=class_names, select_components={(1, 2), (1, 3)}, # Plot PC1 vs PC2 and PC1 vs PC3 s=150, alpha=.8 ); # Plot loadings for PC1 and PC2: text_settings = { 'fontsize': 'small', 'outline': {'foreground': '0.5'}, 'show': False, } _, axes = pca_2d_loadings( pca, variables, select_components={(1, 2),}, text_settings=text_settings, cmap='Spectral', ) for axi in axes: leg = axi.legend(fontsize='small', ncol=2, loc='lower left') for legi in leg.legendHandles: legi.set_sizes([75.0]) # Plot 2D scores and loadings together: loading_settings = { 'add_text': False, 'add_legend': True, 'biplot': True, } pca_2d_scores( pca, scores, xvars=variables, 
class_data=data['target'], class_names=class_names, select_components={(1, 2)}, loading_settings=loading_settings, s=100, alpha=.8, ); # From the previous plot, it looks like we can separate (to some degree) by using just the mean # area and the mean smoothness. Let us try this: fig1, ax1 = plt.subplots(constrained_layout=True) x = 'mean area' y = 'mean smoothness' class0 = data.loc[data['target'] == 0] class1 = data.loc[data['target'] == 1] for i, klass in enumerate((class0, class1)): ax1.scatter( klass[x].values, klass[y].values, s=100, label=class_names[i], ) ax1.legend() ax1.set_xlabel(x); ax1.set_ylabel(y); ```
github_jupyter
# Pré processamento da coleção de dados Aplicação de técnicas de pré processamento de dados para ser possível uma pré-análise dos dados enquanto ocorre a transformação para dados padronizados e normalizados. # Parte 3 - Describe e Merge * foi utilizado a função describe() para ter a estatística dos dados de cada tabela tanto para os dados categóricos quanto para os dados discretos * também unimos as tabelas de Interações (Detail_Interaction) e Incidentes (Detail_Incident) ``` from fun_dependencies import * #load data df_interacao = load_data('cleaning_dataset/pt2/Detail_Interaction_pt2.csv', sep=';') df_incidente = load_data('cleaning_dataset/pt2/Detail_Incident_pt2.csv', sep=';') df_atividades_incidente = load_data('cleaning_dataset/pt2/Detail_Incident_Activity_pt2.csv', sep=';') df_mudancas = load_data('cleaning_dataset/pt2/Detail_Change_pt2.csv', sep=';') #load raw data df_raw_interacao = load_data('dataset/Detail_Interaction.csv', sep=';') df_raw_incidente = load_data('dataset/Detail_Incident.csv', sep=';') df_raw_atividade_incidente = load_data('dataset/Detail_Incident_Activity.csv', sep=';') df_raw_mudancas = load_data('dataset/Detail_Change.csv', sep=';') ``` ### Tabela Interacao ``` %%capture cap --no-stderr print("---------------------------------------Table: Interacao---------------------------------------\n") print(f"Statistics Numeric Columns: \n{df_interacao.describe(include=np.number)}\n") print(f"Statistics Categorial Columns: \n{df_interacao.describe(exclude=np.number)}\n") with open('output/output_pt3.txt', 'w') as f: f.write(cap.stdout) ``` ### Tabela Incidente ``` %%capture cap --no-stderr print("---------------------------------------Table: Incidente---------------------------------------\n") print(f"Statistics Numeric Columns: \n{df_incidente.describe(include=np.number)}\n") print(f"Statistics Categorial Columns: \n{df_incidente.describe(exclude=np.number)}\n") with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) ``` ### Tabela 
Atividades Incidente ``` %%capture cap --no-stderr print("---------------------------------------Table: Atividades Interacao---------------------------------------\n") #print(f"Statistics Numeric Columns: \n{df_atividades_incidente.describe(include=np.number)}\n") # no numeric columns print(f"Statistics Categorial Columns: \n{df_atividades_incidente.describe(exclude=np.number)}\n") with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) ``` ### Tabela Mudanças ``` %%capture cap --no-stderr print("---------------------------------------Table: Mudancas---------------------------------------\n") #print(f"Statistics Numeric Columns: \n{df_mudancas.describe(include=np.number)}\n") # no numeric columns print(f"Statistics Categorial Columns: \n{df_mudancas.describe(exclude=np.number)}\n") with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) ``` # Merged Data * Raw: Apply preprocessing after merge Detail Interaction + Detail Incident * Processed: Apply preprocessing before merge Detail Interaction + Detail Incident ## Interaction + Incident 1. Merge 2. Missing 3. Formating 4. 
Describe ### Raw ``` df_raw_interacao_incidente_atividade = pd.concat([df_raw_incidente.set_index('incident_id'),df_raw_interacao.set_index('related_incident'),df_raw_atividade_incidente.set_index('incident_id')], join='outer') df_raw_interacao_incidente_atividade = df_raw_interacao_incidente_atividade[df_raw_interacao_incidente_atividade.index.notnull()] #df_raw_interacao_incidente.info() %%capture cap --no-stderr df_raw_interacao_incidente_atividade = df_raw_interacao_incidente_atividade.reset_index(col_level=0).rename(columns={'index': 'incident_id'}) df_raw_merge = pd.concat([df_raw_interacao_incidente_atividade.set_index('related_change'),df_raw_mudancas.set_index('change_id')], join='outer') df_raw_merge = df_raw_merge.reset_index(col_level=0).rename(columns={'index': 'change_id'}) print('\nInfo after RAW merge:\n') df_raw_merge.info() print('\n') with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) %%capture cap --no-stderr ## customize missing treatment print("---------------------------------------RAW Table: Merged Interacao + Incidente + Atividade Incidente + Mudancas ---------------------------------------\n") rows_before = df_raw_merge.shape[0] before = pd.concat([df_raw_merge.isna().sum(), df_raw_merge.isna().sum()/len(df_raw_merge)*100], axis=1) print(f'Before Missing Values\n{before}') df_raw_merge.dropna(thresh=0.2*len(df_raw_merge), axis=1, inplace=True) df_raw_merge.dropna(subset=['datestamp'], inplace=True) df_raw_merge.dropna(thresh=1*len(df_raw_merge), axis=1, inplace=True) rows_after = df_raw_merge.shape[0] after = pd.concat([df_raw_merge.isna().sum(), df_raw_merge.isna().sum()/len(df_raw_merge)*100], axis=1) print(f'\nAfter Missing Values\n{after}') print('\nPercent missing value removed: {:.2%}\n'.format((rows_before-rows_after)/rows_before)) with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) %%capture cap --no-stderr print(f'Attributes type\n{df_raw_merge.dtypes}\n') df_raw_merge = data_formatting(df_raw_merge) 
print(f'Attributes type\n{df_raw_merge.dtypes}\n') with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) %%capture cap --no-stderr #print(f"Statistics Numeric Columns: \n{df_raw_merge.describe(include=np.number)}\n") # no numeric columns print(f"Statistics Categorial Columns: \n{df_raw_merge.describe(exclude=np.number)}\n") with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) # Sort by ascending incident_id,datestamp (case_id,timestamp), reset the indices df_raw_merge = df_raw_merge.sort_values(by=['incident_id','datestamp','incidentactivity_number']) ``` ### Processed ``` df_interacao_incidente_atividade = pd.concat([df_incidente.set_index('incident_id'),df_interacao.set_index('related_incident'),df_atividades_incidente.set_index('incident_id')], join='outer') df_interacao_incidente_atividade = df_interacao_incidente_atividade[df_interacao_incidente_atividade.index.notnull()] %%capture cap --no-stderr df_merge = df_interacao_incidente_atividade.reset_index(col_level=0).rename(columns={'index': 'incident_id'}) #df_merge = pd.concat([df_interacao_incidente_atividade.set_index('related_change'),df_mudancas.set_index('change_id')], join='outer') #df_merge = df_merge.reset_index(col_level=0).rename(columns={'index': 'change_id'}) print('\nInfo after PROCESSED merge:\n') df_merge.info() print('\n') with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) %%capture cap --no-stderr print("---------------------------------------PROCESSED Table: Merged Interacao + Incidente + Atividade Incidente + Mudancas ---------------------------------------\n") df_merge = missing_treatment(df_merge, drop=False, fill=False, threshold=0.2) with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) %%capture cap --no-stderr print(f'Attributes type\n{df_merge.dtypes}\n') df_merge = data_formatting(df_merge) print(f'Attributes type\n{df_merge.dtypes}\n') with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) %%capture cap --no-stderr 
#print(f"Statistics Numeric Columns: \n{df_merge.describe(include=np.number)}\n") # no numeric columns print(f"Statistics Categorial Columns: \n{df_merge.describe(exclude=np.number)}\n") with open('output/output_pt3.txt', 'a') as f: f.write(cap.stdout) df_merge = df_merge.sort_values(by=['incident_id','datestamp','incidentactivity_number']) df_raw_merge.to_csv('cleaning_dataset/pt3/RAW_MERGED_Detail_pt3.csv', sep=';', encoding='utf-8', index=False) df_merge.to_csv('cleaning_dataset/pt3/PROCESSED_MERGED_Detail_pt3.csv', sep=';', encoding='utf-8', index=False) ```
github_jupyter
``` # 导入库 import pandas as pd import numpy as np from sklearn.feature_extraction import DictVectorizer # 字符串分类转整数分类库 from sklearn.preprocessing import MinMaxScaler # MinMaxScaler库 from sklearn.cluster import KMeans # KMeans模块 from sklearn import metrics # 导入sklearn效果评估模块 import matplotlib.pyplot as plt # 图形库 # 读取数据 raw_data = pd.read_table('ad_performance.txt', delimiter='\t') # 数据审查和校验 print ('{:*^60}'.format('Data overview:')) print (raw_data.head(2)) # 打印输出前2条数据 print ('{:*^60}'.format('Data dtypes:')) print (pd.DataFrame(raw_data.dtypes).T) # 打印数据类型分布 print ('{:*^60}'.format(' NA counts:')) print (pd.DataFrame(raw_data.isnull().sum()).T) # 查看缺失值情况 print ('{:*^60}'.format('Data DESC:')) print (raw_data.describe().round(2).T) # 打印原始数据基本描述性信息 print ('{:*^60}'.format('Correlation analysis:')) print (raw_data.corr().round(2).T) # 打印原始数据相关性信息 # 数据预处理 # 缺失值替换为均值 data_fillna = raw_data.fillna(raw_data['平均停留时间'].mean()) # 用均值替换缺失值 # 字符串分类转整数分类 # part1 conver_cols = ['素材类型', '广告类型', '合作方式', '广告尺寸', '广告卖点'] convert_matrix = data_fillna[conver_cols] # 获得要转换的数组 lines = data_fillna.shape[0] # 获得总记录数 dict_list = [] # 总空列表,用于存放字符串与对应索引组成的字典 unique_list = [] # 总唯一值列表,用于存储每个列的唯一值列表 # part2 for col_name in conver_cols: # 循环读取每个列名 cols_unqiue_value = data_fillna[col_name].unique().tolist() # 获取列的唯一值列表 unique_list.append(cols_unqiue_value) # 将唯一值列表追加到总列表 # part3 for line_index in range(lines): # 读取每行索引 each_record = convert_matrix.iloc[line_index] # 获得每行数据,是一个Series for each_index, each_data in enumerate(each_record): # 读取Series每行对应的索引值 list_value = unique_list[each_index] # 读取该行索引对应到总唯一值列表列索引下的数据(其实是相当于原来的列做了转置成了行,目的是查找唯一值在列表中的位置) each_record[each_index] = list_value.index(each_data) # 获得每个值对应到总唯一值列表中的索引 each_dict = dict(zip(conver_cols, each_record)) # 将每个值和对应的索引组合字典 dict_list.append(each_dict) # 将字典追加到总列表 # part4 model_dvtransform = DictVectorizer(sparse=False, dtype=np.int64) # 建立转换模型对象 data_dictvec = model_dvtransform.fit_transform(dict_list) # 应用分类转换训练 # 数据标准化 sacle_matrix = 
data_fillna.ix[:, 1:8] # 获得要转换的矩阵 minmax_scaler = MinMaxScaler() # 建立MinMaxScaler模型对象 data_scaled = minmax_scaler.fit_transform(sacle_matrix) # MinMaxScaler标准化处理 # 合并所有输入维度 X = np.hstack((data_scaled, data_dictvec)) # 通过平均轮廓系数检验得到最佳KMeans聚类模型 score_list = list() # 用来存储每个K下模型的平局轮廓系数 silhouette_int = -1 # 初始化的平均轮廓系数阀值 for n_clusters in range(2, 10): # 遍历从2到10几个有限组 model_kmeans = KMeans(n_clusters=n_clusters, random_state=0) # 建立聚类模型对象 cluster_labels_tmp = model_kmeans.fit_predict(X) # 训练聚类模型 silhouette_tmp = metrics.silhouette_score(X, cluster_labels_tmp) # 得到每个K下的平均轮廓系数 if silhouette_tmp > silhouette_int: # 如果平均轮廓系数更高 best_k = n_clusters # 将最好的K存储下来 silhouette_int = silhouette_tmp # 将最好的平均轮廓得分存储下来 best_kmeans = model_kmeans # 将最好的模型存储下来 cluster_labels_k = cluster_labels_tmp # 将最好的聚类标签存储下来 score_list.append([n_clusters, silhouette_tmp]) # 将每次K及其得分追加到列表 print ('{:*^60}'.format('K value and silhouette summary:')) print (np.array(score_list)) # 打印输出所有K下的详细得分 print ('Best K is:{0} with average silhouette of {1}'.format(best_k, silhouette_int.round(4))) # 针对聚类结果的特征分析 # part1 cluster_labels = pd.DataFrame(cluster_labels_k, columns=['clusters']) # 获得训练集下的标签信息 merge_data = pd.concat((data_fillna, cluster_labels), axis=1) # 将原始处理过的数据跟聚类标签整合 # part2 clustering_count = pd.DataFrame(merge_data['渠道代号'].groupby(merge_data['clusters']).count()).T.rename( {'渠道代号': 'counts'}) # 计算每个聚类类别的样本量 clustering_ratio = (clustering_count / len(merge_data)).round(2).rename({'counts': 'percentage'}) # 计算每个聚类类别的样本量占比 # part3 cluster_features = [] # 空列表,用于存储最终合并后的所有特征信息 for line in range(best_k): # 读取每个类索引 label_data = merge_data[merge_data['clusters'] == line] # 获得特定类的数据 part1_data = label_data.ix[:, 1:8] # 获得数值型数据特征 part1_desc = part1_data.describe().round(3) # 得到数值型特征的描述性统计信息 merge_data1 = part1_desc.ix[2, :] # 得到数值型特征的均值 part2_data = label_data.ix[:, 8:-1] # 获得字符串型数据特征 part2_desc = part2_data.describe(include='all') # 获得字符串型数据特征的描述性统计信息 merge_data2 = part2_desc.ix[2, :] # 获得字符串型数据特征的最频繁值 
merge_line = pd.concat((merge_data1, merge_data2), axis=0) # 将数值型和字符串型典型特征沿行合并 cluster_features.append(merge_line) # 将每个类别下的数据特征追加到列表 # part4 cluster_pd = pd.DataFrame(cluster_features).T # 将列表转化为矩阵 print ('{:*^60}'.format('Detailed features for all clusters:')) all_cluster_set = pd.concat((clustering_count, clustering_ratio, cluster_pd), axis=0) # 将每个聚类类别的所有信息合并 print (all_cluster_set) # 各类别显著数值特征对比 # part1 num_sets = cluster_pd.ix[:6, :].T.astype(np.float64) # 获取要展示的数据 num_sets_max_min = minmax_scaler.fit_transform(num_sets) # 获得标准化后的数据 # part2 fig = plt.figure() # 建立画布 ax = fig.add_subplot(111, polar=True) # 增加子网格,注意polar参数 labels = np.array(merge_data1.index[:-1]) # 设置要展示的数据标签 cor_list = ['r', 'g', 'b', 'y'] # 定义不同类别的颜色 angles = np.linspace(0, 2 * np.pi, len(labels), endpoint=False) # 计算各个区间的角度 angles = np.concatenate((angles, [angles[0]])) # 建立相同首尾字段以便于闭合 # part3 for i in range(len(num_sets)): # 循环每个类别 data_tmp = num_sets_max_min[i, :] # 获得对应类数据 data = np.concatenate((data_tmp, [data_tmp[0]])) # 建立相同首尾字段以便于闭合 ax.plot(angles, data, 'o-', c=cor_list[i], label=i) # 画线 # part4 ax.set_thetagrids(angles * 180 / np.pi, labels, fontproperties="SimHei") # 设置极坐标轴 ax.set_title("各聚类类别显著特征对比", fontproperties="SimHei") # 设置标题放置 ax.set_rlim(-0.2, 1.2) # 设置坐标轴尺度范围 plt.legend(loc=0) # 设置图例位置 plt.show() # 展示图像 ```
github_jupyter
# Data Preparation ## Import Libraries ``` import numpy as np import pandas as pd ``` ## Import Data The dataset contains all available data for more than 800,000 consumer loans issued from 2007 to 2015 by Lending Club: a large US peer-to-peer lending company. There are several different versions of this dataset. We have used a version available on kaggle.com. You can find it here: https://www.kaggle.com/wendykan/lending-club-loan-data/version/1 We divided the data into two periods because we assume that some data are available at the moment when we need to build Expected Loss models, and some data comes from applications after. Later, we investigate whether the applications we have after we built the Probability of Default (PD) model have similar characteristics with the applications we used to build the PD model. ``` loan_data_backup = pd.read_csv('../Dataset/loan_data_2007_2014.csv') loan_data = loan_data_backup.copy() ``` ## Explore Data ``` loan_data pd.options.display.max_columns = None #pd.options.display.max_rows = None # Sets the pandas dataframe options to display all columns/ rows. loan_data loan_data.head() loan_data.tail() loan_data.columns.values # Displays all column names. loan_data.info() # Displays column names, complete (non-missing) cases per column, and datatype per column. 
``` # General Preprocessing ## Preprocessing few continuous variables ``` loan_data['emp_length'].unique() loan_data['emp_length_int'] = loan_data['emp_length'].str.replace('\+ years','') loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace('< 1 year',str(0)) loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace('n/a',str(0)) loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace(' years','') loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace(' year','') loan_data['emp_length_int'].describe() loan_data.emp_length_int = pd.to_numeric(loan_data.emp_length_int) loan_data.emp_length_int.describe() loan_data['term_int'] = loan_data['term'].str.replace(' months','') loan_data.term_int.value_counts() loan_data.term_int = pd.to_numeric(loan_data.term_int) loan_data.term_int[0] loan_data.earliest_cr_line loan_data['earliest_cr_line_date'] = pd.to_datetime(loan_data.earliest_cr_line,format='%b-%y') loan_data.earliest_cr_line_date pd.to_datetime('2017-12-01') - loan_data.earliest_cr_line_date loan_data['mths_since_earliest_cr_line'] = round(pd.to_numeric((pd.to_datetime('2017-12-01') - loan_data.earliest_cr_line_date)/np.timedelta64(1,'M'))) loan_data['mths_since_earliest_cr_line'].describe() # observe the minimum value loan_data.mths_since_earliest_cr_line.max() loan_data.mths_since_earliest_cr_line[loan_data.mths_since_earliest_cr_line < 0] = loan_data.mths_since_earliest_cr_line.max() min(loan_data['mths_since_earliest_cr_line']) loan_data['mths_since_issue_d']=round((pd.to_datetime('2017-12-01') - pd.to_datetime(loan_data['issue_d'],format = '%b-%y'))/np.timedelta64(1,'M')) ```
github_jupyter
This notebook is part of the `clifford` documentation: https://clifford.readthedocs.io/. # Object Oriented CGA This is a shelled out demo for a object-oriented approach to CGA with `clifford`. The `CGA` object holds the original layout for an arbitrary geometric algebra , and the conformalized version. It provides up/down projections, as well as easy ways to generate objects and operators. ## Quick Use Demo ``` from clifford.cga import CGA, Round, Translation from clifford import Cl g3,blades = Cl(3) cga = CGA(g3) # make cga from existing ga # or cga = CGA(3) # generate cga from dimension of 'base space' locals().update(cga.blades) # put ga's blades in local namespace C = cga.round(e1,e2,e3,-e2) # generate unit sphere from points C ## Objects cga.round() # from None cga.round(3) # from dim of space cga.round(e1,e2,e3,-e2) # from points cga.round(e1,e2,e3) # from points cga.round(e1,e2) # from points cga.round((e1,3)) # from center, radius cga.round(cga.round().mv)# from existing multivector cga.flat() # from None cga.flat(2) # from dim of space cga.flat(e1,e2) # from points cga.flat(cga.flat().mv) # from existing multivector ## Operations cga.dilation() # from from None cga.dilation(.4) # from int cga.translation() # from None cga.translation(e1+e2) # from vector cga.translation(cga.down(cga.null_vector())) cga.rotation() # from None cga.rotation(e12+e23) # from bivector cga.transversion(e1+e2).mv cga.round().inverted() D = cga.dilation(5) cga.down(D(e1)) C.mv # any CGA object/operator has a multivector C.center_down,C.radius # some properties of spheres T = cga.translation(e1+e2) # make a translation C_ = T(C) # translate the sphere cga.down(C_.center) # compute center again cga.round() # no args == random sphere cga.translation() # random translation if 1 in map(int, [1,2]): print(3) ``` ## Objects ### Vectors ``` a = cga.base_vector() # random vector with components in base space only a cga.up(a) cga.null_vector() # create null vector directly ``` ### Sphere 
(point pair, circles) ``` C = cga.round(e1, e2, -e1, e3) # generates sphere from points C = cga.round(e1, e2, -e1) # generates circle from points C = cga.round(e1, e2) # generates point-pair from points #or C2 = cga.round(2) # random 2-sphere (sphere) C1 = cga.round(1) # random 1-sphere, (circle) C0 = cga.round(0) # random 0-sphere, (point pair) C1.mv # access the multivector C = cga.round(e1, e2, -e1, e3) C.center,C.radius # spheres have properties cga.down(C.center) == C.center_down C_ = cga.round().from_center_radius(C.center,C.radius) C_.center,C_.radius ``` ### Operators ``` T = cga.translation(e1) # generate translation T.mv C = cga.round(e1, e2, -e1) T.mv*C.mv*~T.mv # translate a sphere T(C) # shorthand call, same as above. returns type of arg T(C).center ```
github_jupyter
``` ######## snakemake preamble start (automatically inserted, do not edit) ######## import sys; sys.path.extend(['/Users/johannes/scms/snakemake', '/Users/johannes/scms/snakemake/tests/test_jupyter_notebook_draft']); import pickle; snakemake = pickle.loads(b'\x80\x03csnakemake.script\nSnakemake\nq\x00)\x81q\x01}q\x02(X\x05\x00\x00\x00inputq\x03csnakemake.io\nInputFiles\nq\x04)\x81q\x05X\x08\x00\x00\x00data.txtq\x06a}q\x07(X\x06\x00\x00\x00_namesq\x08}q\tX\x06\x00\x00\x00infileq\nK\x00N\x86q\x0bsX\x12\x00\x00\x00_allowed_overridesq\x0c]q\r(X\x05\x00\x00\x00indexq\x0eX\x04\x00\x00\x00sortq\x0feh\x0ecfunctools\npartial\nq\x10cbuiltins\ngetattr\nq\x11csnakemake.io\nNamedlist\nq\x12X\x0f\x00\x00\x00_used_attributeq\x13\x86q\x14Rq\x15\x85q\x16Rq\x17(h\x15)}q\x18X\x05\x00\x00\x00_nameq\x19h\x0esNtq\x1abh\x0fh\x10h\x15\x85q\x1bRq\x1c(h\x15)}q\x1dh\x19h\x0fsNtq\x1ebh\nh\x06ubX\x06\x00\x00\x00outputq\x1fcsnakemake.io\nOutputFiles\nq )\x81q!X\x17\x00\x00\x00result_intermediate.txtq"a}q#(h\x08}q$X\x07\x00\x00\x00outfileq%K\x00N\x86q&sh\x0c]q\'(h\x0eh\x0feh\x0eh\x10h\x15\x85q(Rq)(h\x15)}q*h\x19h\x0esNtq+bh\x0fh\x10h\x15\x85q,Rq-(h\x15)}q.h\x19h\x0fsNtq/bh%h"ubX\x06\x00\x00\x00paramsq0csnakemake.io\nParams\nq1)\x81q2}q3(h\x08}q4h\x0c]q5(h\x0eh\x0feh\x0eh\x10h\x15\x85q6Rq7(h\x15)}q8h\x19h\x0esNtq9bh\x0fh\x10h\x15\x85q:Rq;(h\x15)}q<h\x19h\x0fsNtq=bubX\t\x00\x00\x00wildcardsq>csnakemake.io\nWildcards\nq?)\x81q@}qA(h\x08}qBh\x0c]qC(h\x0eh\x0feh\x0eh\x10h\x15\x85qDRqE(h\x15)}qFh\x19h\x0esNtqGbh\x0fh\x10h\x15\x85qHRqI(h\x15)}qJh\x19h\x0fsNtqKbubX\x07\x00\x00\x00threadsqLK\x01X\t\x00\x00\x00resourcesqMcsnakemake.io\nResources\nqN)\x81qO(K\x01K\x01X0\x00\x00\x00/var/folders/l0/9bhq7fc12lgfknlx5gyxckv00000gp/TqPe}qQ(h\x08}qR(X\x06\x00\x00\x00_coresqSK\x00N\x86qTX\x06\x00\x00\x00_nodesqUK\x01N\x86qVX\x06\x00\x00\x00tmpdirqWK\x02N\x86qXuh\x0c]qY(h\x0eh\x0feh\x0eh\x10h\x15\x85qZRq[(h\x15)}q\\h\x19h\x0esNtq]bh\x0fh\x10h\x15\x85q^Rq_(h\x15)}q`h\x19h\x0fsNtqabhSK\x01hUK\x01hWhPubX\x03\x00\x00\
x00logqbcsnakemake.io\nLog\nqc)\x81qd}qe(h\x08}qfh\x0c]qg(h\x0eh\x0feh\x0eh\x10h\x15\x85qhRqi(h\x15)}qjh\x19h\x0esNtqkbh\x0fh\x10h\x15\x85qlRqm(h\x15)}qnh\x19h\x0fsNtqobubX\x06\x00\x00\x00configqp}qqX\x04\x00\x00\x00ruleqrX\x03\x00\x00\x00barqsX\x0f\x00\x00\x00bench_iterationqtNX\t\x00\x00\x00scriptdirquX@\x00\x00\x00/Users/johannes/scms/snakemake/tests/test_jupyter_notebook_draftqvub.'); from snakemake.logging import logger; logger.printshellcmds = False; import os; os.chdir(r'/Users/johannes/scms/snakemake/tests/test_jupyter_notebook_draft'); ######## snakemake preamble end ######### # start coding here ```
github_jupyter
``` import numpy as np np.random.seed(42) import tensorflow as tf from tensorflow.keras.layers.experimental import preprocessing import os import time import sys # In case your sys.path does not contain the base repo, go there. print(sys.path) %cd '~/ml-solr-course' path = "dataset/train_corpus_descriptions_airbnb.csv" # Read, then decode for py2 compat. text = open(path, 'rb').read().decode(encoding='utf-8')[:1000000] # length of text is the number of characters in it print(f'Length of text: {len(text)} characters') # Take a look at the first 250 characters in text print(text[:250]) vocab = sorted(set(text)) print(f'{len(vocab)} unique characters') ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab), mask_token=None) chars_from_ids = preprocessing.StringLookup( vocabulary=ids_from_chars.get_vocabulary(), invert=True, mask_token=None) def text_from_ids(ids): return tf.strings.reduce_join(chars_from_ids(ids), axis=-1) ids = ids_from_chars(tf.strings.unicode_split('Only you can prevent forest fires', input_encoding='UTF-8')) ids text_from_ids(ids) #Prepare the dataset all_ids = ids_from_chars(tf.strings.unicode_split(text, 'UTF-8')) ids_dataset = tf.data.Dataset.from_tensor_slices(all_ids) seq_length = 75 # Maximum alternate query size examples_per_epoch = len(text)//(seq_length+1) sequences = ids_dataset.batch(seq_length+1, drop_remainder=True) def split_input_target(sequence): input_text = sequence[:-1] target_text = sequence[1:] return input_text, target_text split_input_target(list("Apache Solr")) dataset = sequences.map(split_input_target) for input_example, target_example in dataset.take(1): print("Input :", text_from_ids(input_example).numpy()) print("Target:", text_from_ids(target_example).numpy()) # Batch size BATCH_SIZE = 64 EPOCHS = 10 BUFFER_SIZE = 2000 vocab_size = len(vocab) embedding_dim = 100 rnn_units = 128 dataset = ( dataset .shuffle(BUFFER_SIZE) .batch(BATCH_SIZE, drop_remainder=True) ) class QueryGenerator(tf.keras.Model): def 
__init__(self, vocab_size, embedding_dim, rnn_units): super().__init__(self) self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.rnn = tf.keras.layers.GRU(rnn_units, activation='relu', return_sequences=True, return_state=True) self.dense = tf.keras.layers.Dense(vocab_size) def call(self, inputs, states=None, return_state=False, training=False): x = inputs x = self.embedding(x, training=training) if states is None: states = self.rnn.get_initial_state(x) x, states = self.rnn(x, initial_state=states, training=training) x = self.dense(x, training=training) if return_state: return x, states else: return x model = QueryGenerator( # Be sure the vocabulary size matches the `StringLookup` layers. vocab_size=len(ids_from_chars.get_vocabulary()), embedding_dim=embedding_dim, rnn_units=rnn_units) for input_example_batch, target_example_batch in dataset.take(1): example_batch_predictions = model(input_example_batch) print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)") model.summary() model.compile(optimizer='adam', loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True)) history = model.fit(dataset, epochs=EPOCHS) class OneStep(tf.keras.Model): def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0): super().__init__() self.temperature = temperature self.model = model self.chars_from_ids = chars_from_ids self.ids_from_chars = ids_from_chars # Create a mask to prevent "[UNK]" from being generated. skip_ids = self.ids_from_chars(['[UNK]'])[:, None] sparse_mask = tf.SparseTensor( # Put a -inf at each bad index. values=[-float('inf')]*len(skip_ids), indices=skip_ids, # Match the shape to the vocabulary dense_shape=[len(ids_from_chars.get_vocabulary())]) self.prediction_mask = tf.sparse.to_dense(sparse_mask) @tf.function def generate_one_step(self, inputs, states=None): # Convert strings to token IDs. 
input_chars = tf.strings.unicode_split(inputs, 'UTF-8') input_ids = self.ids_from_chars(input_chars).to_tensor() # Run the model. # predicted_logits.shape is [batch, char, next_char_logits] predicted_logits, states = self.model(inputs=input_ids, states=states, return_state=True) # Only use the last prediction. predicted_logits = predicted_logits[:, -1, :] predicted_logits = predicted_logits/self.temperature # Apply the prediction mask: prevent "[UNK]" from being generated. predicted_logits = predicted_logits + self.prediction_mask # Sample the output logits to generate token IDs. predicted_ids = tf.random.categorical(predicted_logits, num_samples=1) predicted_ids = tf.squeeze(predicted_ids, axis=-1) # Convert from token ids to characters predicted_chars = self.chars_from_ids(predicted_ids) # Return the characters and model state. return predicted_chars, states one_step_model = OneStep(model, chars_from_ids, ids_from_chars) start = time.time() states = None next_char = tf.constant(['Midtown Sunny 2-Bedroom']) result = [next_char] for n in range(50): next_char, states = one_step_model.generate_one_step(next_char, states=states) result.append(next_char) result = tf.strings.join(result) end = time.time() print(result, '\n\n' + '_'*80) print('\nRun time:', end - start) tf.saved_model.save(one_step_model, '3-query-generation/lab6/alternative_queries') ```
github_jupyter
# Exploring Ebay Car Sale Data ------- The aim of this project is to clean the data and analyze the included used car listings. ``` import pandas as pd import numpy as np autos = pd.read_csv('autos.csv',encoding="Latin-1") autos.info() autos.head() autos.columns autos.columns = ['date_crawled', 'name', 'seller', 'offer_type', 'price', 'abtest', 'vehicle_type', 'registration_year', 'gearbox', 'power_ps', 'model', 'odometer', 'registration_month', 'fuel_type', 'brand', 'unrepaired_damage', 'ad_created', 'num_photos', 'postal_code', 'last_seen'] autos.columns autos.head() autos.describe(include = "all") ``` * num_photos,seller,offer_type have only one value so we should drop. * power_ps need more investigation * price and odometer needs to be cleaned ``` autos = autos.drop(["num_photos","seller","offer_type"],axis = 1) autos["price"]= autos["price"].str.replace("$","").str.replace(",","").astype(int) autos["odometer"]= autos["odometer"].str.replace("km","").str.replace(",","").astype(int) autos.rename({"odometer":"odometer_km"},axis = 1, inplace = True) autos["odometer_km"].unique().shape autos["odometer_km"].describe() autos["odometer_km"].value_counts() ``` * Number of high mileage vehicles is higher than low mileage vehicles * kilometer value is rounded so it must be entered through dropdown list/pre defined value set ``` autos["price"].unique().shape autos["price"].describe() ``` * min price is $0 and max price is 100 million dollar which is too expensive ``` autos["price"].value_counts() autos["price"].value_counts().sort_index(ascending=False).head(20) autos["price"].value_counts().sort_index(ascending=True).head(20) ``` * There are 1421 cars with $0 price. * Ebay is auction site so it is possible that $1 is starting bid so we will keep this items. * We will remove the anything more than $350,000 as it seems unrealistic. 
``` autos = autos[autos["price"].between(1,350000)] autos["price"].describe() autos[["date_crawled","ad_created","last_seen"]].head(5) autos["date_crawled"].value_counts(normalize=True, dropna=False) autos["date_crawled"].sort_index() (autos["date_crawled"] .str[:10] .value_counts(normalize=True, dropna=False) .sort_values() ) ``` The site was crawled almost daily in the month of March-April 2016 with uniform distribution ``` (autos["last_seen"] .str[:10] .value_counts(normalize=True, dropna=False) .sort_index() ) ``` * There is spike in the last three last_seen values. * It is 6 to 10 times higher than other days. ``` autos["registration_year"].describe() ``` * min value is 1000 which is unreal * max value is 9999 which is unreal too ``` # Removing incorrect data #--------------------------------- autos = autos[autos["registration_year"].between(1900,2016)] autos["registration_year"].describe() autos["registration_year"].value_counts(normalize=True).head(10) #Exploring Brand coloumn autos["brand"].unique().shape[0] autos["brand"].value_counts(normalize = True) #top brands brand_count = autos["brand"].value_counts(normalize = True) common_brand = brand_count[brand_count > 0.05].index print(common_brand) # top common brands # dictionary to store brands and mean prices brand_mean_prices = {} for b in common_brand: brand_only = autos[autos["brand"]==b] mean_price = brand_only["price"].mean() brand_mean_prices[b] = int(mean_price) brand_mean_prices ``` Here, we can see that Audi, Mercedes Benz and BMW are more expensive. 
``` brand_mean_mileage={} for b in common_brand: brand_only = autos[autos["brand"]==b] mean_mileage = brand_only["odometer_km"].mean() brand_mean_mileage[b] = int(mean_mileage) brand_mean_mileage #convert dictionaries into series to combine both results mean_mileage = pd.Series(brand_mean_mileage).sort_values(ascending=False) mean_prices = pd.Series(brand_mean_prices).sort_values(ascending=False) df_mileage = pd.DataFrame(mean_mileage,columns = ['mean_mileage']) df_mileage brand_info= pd.DataFrame(mean_prices,columns=['mean_price']) brand_info brand_info['mean_mileage'] = df_mileage brand_info ``` ``` autos["gearbox"].head(5) mapping_dict = { "automatik":"Automatic", "manuell":"Manual"} autos["gearbox"]= autos["gearbox"].map(mapping_dict) autos["gearbox"].head(5) # # mapping_dict = { # "automatik":"Automatic", # "manuell":"Manual"} # autos["gearbox"]= autos["gearbox"].map(mapping_dict) autos.describe(include = "all") mapping_dict = { "no":"undamaged", "yes":"damaged"} autos["unrepaired_damage"]= autos["unrepaired_damage"].map(mapping_dict) autos["unrepaired_damage"].head(50) ```
github_jupyter
``` import pandas as pd import numpy as np import pickle import matplotlib.pyplot as plt from scipy import stats import tensorflow as tf import seaborn as sns from pylab import rcParams from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler %matplotlib inline sns.set(style='whitegrid', palette='muted', font_scale=1.5) rcParams['figure.figsize'] = 14, 8 RANDOM_SEED = 42 df=pd.read_csv('FearData.csv') # df.head(22) df.info() N_TIME_STEPS = 250 N_FEATURES = 128 #128 step = 10 # 20 segments = [] for i in range(0, len(df) - N_TIME_STEPS, step): ch = [] for j in range(0, N_FEATURES): ch.append(df.iloc[:, j].values[i: i + N_TIME_STEPS]) segments.append(ch) labels = [] for i in range(0, len(df) - N_TIME_STEPS, step): label = stats.mode(df['Label'][i: i + N_TIME_STEPS])[0][0] labels.append(label) labelsl = np.asarray(pd.get_dummies(labels), dtype = np.float32) #print(labelsl) reshaped_segments = np.asarray(segments, dtype= np.float32).reshape(-1, N_TIME_STEPS, N_FEATURES) X_train, X_test, y_train, y_test = train_test_split( reshaped_segments, labelsl, test_size=0.2, random_state=RANDOM_SEED) print(np.array(segments).shape, reshaped_segments.shape, labelsl[0], len(X_train), len(X_test)) ``` # Building the model ``` N_CLASSES = 2 N_HIDDEN_UNITS = 64 #https://medium.com/@curiousily/human-activity-recognition-using-lstms-on-android-tensorflow-for-hackers-part-vi-492da5adef64 def create_LSTM_model(inputs): W = { 'hidden': tf.Variable(tf.random_normal([N_FEATURES, N_HIDDEN_UNITS])), 'output': tf.Variable(tf.random_normal([N_HIDDEN_UNITS, N_CLASSES])) } biases = { 'hidden': tf.Variable(tf.random_normal([N_HIDDEN_UNITS], mean=1.0)), 'output': tf.Variable(tf.random_normal([N_CLASSES])) } X = tf.transpose(inputs, [1, 0, 2]) X = tf.reshape(X, [-1, N_FEATURES]) hidden = tf.nn.relu(tf.matmul(X, W['hidden']) + biases['hidden']) hidden = tf.split(hidden, N_TIME_STEPS, 0) # Stack 2 LSTM layers lstm_layers = 
[tf.contrib.rnn.BasicLSTMCell(N_HIDDEN_UNITS, forget_bias=1.0) for _ in range(2)] lstm_layers = tf.contrib.rnn.MultiRNNCell(lstm_layers) outputs, _ = tf.contrib.rnn.static_rnn(lstm_layers, hidden, dtype=tf.float32) # Get output for the last time step lstm_last_output = outputs[-1] return tf.matmul(lstm_last_output, W['output']) + biases['output'] tf.reset_default_graph() X = tf.placeholder(tf.float32, [None, N_TIME_STEPS, N_FEATURES], name="input") Y = tf.placeholder(tf.float32, [None, N_CLASSES]) pred_Y = create_LSTM_model(X) pred_softmax = tf.nn.softmax(pred_Y, name="y_") L2_LOSS = 0.0015 l2 = L2_LOSS * \ sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = pred_Y, labels = Y)) + l2 LEARNING_RATE = 0.0025 optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(loss) correct_pred = tf.equal(tf.argmax(pred_softmax, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32)) ``` # Training ``` N_EPOCHS = 50 # 50 BATCH_SIZE = 1024 # 1024 # https://medium.com/@curiousily/human-activity-recognition-using-lstms-on-android-tensorflow-for-hackers-part-vi-492da5adef64 saver = tf.train.Saver() history = dict(train_loss=[], train_acc=[], test_loss=[], test_acc=[]) sess=tf.InteractiveSession() sess.run(tf.global_variables_initializer()) train_count = len(X_train) for i in range(1, N_EPOCHS + 1): for start, end in zip(range(0, train_count, BATCH_SIZE), range(BATCH_SIZE, train_count + 1,BATCH_SIZE)): sess.run(optimizer, feed_dict={X: X_train[start:end], Y: y_train[start:end]}) _, acc_train, loss_train = sess.run([pred_softmax, accuracy, loss], feed_dict={ X: X_train, Y: y_train}) _, acc_test, loss_test = sess.run([pred_softmax, accuracy, loss], feed_dict={ X: X_test, Y: y_test}) history['train_loss'].append(loss_train) history['train_acc'].append(acc_train) history['test_loss'].append(loss_test) history['test_acc'].append(acc_test) # if i != 1 and 
i % 10 != 0: # continue print(f'epoch: {i} test accuracy: {acc_test} loss: {loss_test}') predictions, acc_final, loss_final = sess.run([pred_softmax, accuracy, loss], feed_dict={X: X_test, Y: y_test}) print() print(f'final results: accuracy: {acc_final} loss: {loss_final}') ``` # Evaluation ``` # https://medium.com/@curiousily/human-activity-recognition-using-lstms-on-android-tensorflow-for-hackers-part-vi-492da5adef64 plt.figure(figsize=(12, 8)) plt.plot(np.array(history['train_loss']), "r--", label="Train loss") plt.plot(np.array(history['train_acc']), "g--", label="Train accuracy") plt.plot(np.array(history['test_loss']), "r-", label="Test loss") plt.plot(np.array(history['test_acc']), "g-", label="Test accuracy") plt.title("Training session's progress over iterations") plt.legend(loc='upper right', shadow=True) plt.ylabel('Training Progress (Loss or Accuracy values)') plt.xlabel('Training Epoch') plt.ylim(0) plt.show() ``` # Saving Model ``` import os file_info = [N_HIDDEN_UNITS, BATCH_SIZE, N_EPOCHS] dirname = os.path.dirname("nhid-{}_bat-{}_nepoc-{}/dumps/".format(*file_info)) if not os.path.exists(dirname): os.makedirs(dirname) dirname = os.path.dirname("nhid-{}_bat-{}_nepoc-{}/logs/".format(*file_info)) if not os.path.exists(dirname): os.makedirs(dirname) pickle.dump(predictions, open("nhid-{}_bat-{}_nepoc-{}/dumps/predictions.p".format(*file_info), "wb")) pickle.dump(history, open("nhid-{}_bat-{}_nepoc-{}/dumps/history.p".format(*file_info), "wb")) tf.train.write_graph(sess.graph, "nhid-{}_bat-{}_nepoc-{}/logs".format(*file_info), 'har.pbtxt') saver.save(sess, 'nhid-{}_bat-{}_nepoc-{}/logs/har.ckpt'.format(*file_info)) writer = tf.summary.FileWriter('nhid-{}_bat-{}_nepoc-{}/logs'.format(*file_info)) writer.add_graph(sess.graph) ```
github_jupyter
# Fuzzy Water Observations from Space <img align="right" src="../../../Supplementary_data/dea_logo.jpg"> * [**Sign up to the DEA Sandbox**](https://docs.dea.ga.gov.au/setup/sandbox.html) to run this notebook interactively from a browser * **Compatibility:** Notebook currently compatible with both the `NCI` and `DEA Sandbox` environments * **Products used:** [wofs_albers](https://explorer.sandbox.dea.ga.gov.au/wofs_albers), [ga_ls8c_ard_3](https://explorer.sandbox.dea.ga.gov.au/ga_ls7e_ard_3), [DEA Waterbodies](https://www.ga.gov.au/dea/products/dea-waterbodies) ## Description This notebook demonstrates FuzzyWOfS, a reimplementation of [the WOfS classifier](https://github.com/GeoscienceAustralia/wofs/blob/master/wofs/classifier.py) over distributions. FuzzyWOfS estimates the probability that each pixel in a Landsat image is wet. We will: 1. Show how to visualise the FuzzyWOfS classifier; 2. Show how to use FuzzyWOfS to find the probability that each pixel in a Landsat image is wet; 3. Compare the FuzzyWOfS results to MNDWI and TCW, which are band indices often used to estimate wetness. *** ## Getting started Choose a waterbody in the "Analysis parameters" section and then run all cells. ### Load packages Import Python packages that are used for the analysis. ``` %matplotlib inline import sys import datacube import matplotlib.pyplot as plt import numpy as np import pandas as pd import xarray as xr import geopandas as gpd import matplotlib.colors import IPython.display import matplotlib.patches sys.path.append("../../../Scripts") from dea_plotting import rgb from dea_datahandling import mostcommon_crs from dea_waterbodies import get_waterbody from dea_bandindices import calculate_indices import fuzzy_wofs ``` ### Connect to the datacube Connect to the datacube so we can access DEA data. The `app` parameter is a unique name for the analysis which is based on the notebook file name. 
``` dc = datacube.Datacube(app="FuzzyWOfS") ``` ### Analysis parameters Specify the geohash for a waterbody: ``` geohash = "r38psere6" # Lake Cullivel ``` A product: ``` product = "ga_ls7e_ard_3" ``` A date with observations: ``` date = "2002-02-21" # Lake Cullivel ``` And a buffer radius in metres: ``` buffer = 500 ``` ### Load the waterbody polygon ``` wb = get_waterbody(geohash) wb.geometry[0] ``` ## Load the image to classify We'll load a Landsat image to apply FuzzyWOfS to. Set up the waterbody polygon so we can use it to query: ``` gpg = datacube.utils.geometry.Geometry(wb.geometry[0], crs=wb.crs) ``` Identify the correct CRS for the output: ``` best_crs = mostcommon_crs(dc, product=product, query=dict(geopolygon=gpg, time=date)) ``` Query the Landsat image: ``` bands = [ "nbart_blue", "nbart_green", "nbart_red", "nbart_nir", "nbart_swir_1", "nbart_swir_2", ] da = dc.load( product, geopolygon=datacube.utils.geometry.Geometry( wb.geometry[0].buffer(buffer), crs=wb.crs ), time=date, output_crs=best_crs, resolution=(-30, 30), resampling="cubic", measurements=bands + ["fmask"], ) ``` Then we can have a look at the image. ``` landsat = da.isel(time=0) rgb(landsat) ``` ## Visualise the FuzzyWOfS classifier The structure of FuzzyWOfS is exactly the same as WOfS. The implementation, however, is a tree, so we can perform tree operations that are hard to do with WOfS. One such operation is visualising the tree. We can construct a tree diagram of WOfS: ``` dot = fuzzy_wofs.wofs.build_graphviz() dot.render("wofs_tree", format="gif") IPython.display.Image('wofs_tree.gif') ``` Dry leaf nodes are in red, and wet leaf nodes are in blue. A pixel travels probabilistically down each branch depending on its value, and ends up in a mixture of leaf nodes. The probability of the pixel being wet is then the weighted sum of the probabilities that each leaf node is wet. 
We can even turn this tree into a single Python equation: ``` print(fuzzy_wofs.wofs.to_string()) ``` ## Running FuzzyWOfS on an image First convert your xarray from `dc.load` into a numpy array: ``` def xr_to_cube(landsat): """Convert an Landsat xarray Dataset to a DataArray for WOfS.""" return landsat[bands].to_array(dim="band") landsat_cube = np.array(xr_to_cube(landsat)) ``` We can then run `wofs.predict` on this cube to predict whether each pixel is water. This should be equivalent to WOfS. ``` hard_predictions = fuzzy_wofs.wofs.predict(landsat_cube) plt.imshow(hard_predictions, interpolation="nearest", cmap='Blues') patches = [matplotlib.patches.Patch(color=matplotlib.cm.Blues([0, 255])[v], label=['Dry', 'Wet'][v]) for v in [0, 1]] plt.legend(handles=patches) plt.axis('off'); ``` If we want probabilities, we can run `wofs.fuzzy_predict` instead. Before we can estimate probabilities, though, we need to estimate uncertainty in Landsat. For example, let's assume (fairly arbitrarily) that the noise is 11% of the median (for which there is a function to estimate the noise included in `fuzzy_wofs`). ``` landsat_noise = fuzzy_wofs.guess_noise(landsat_cube) ``` Then we can predict. ``` fuzzy_predictions = fuzzy_wofs.wofs.fuzzy_predict(landsat_cube, landsat_noise) plt.figure() plt.imshow( fuzzy_predictions, vmin=0, vmax=1, cmap="coolwarm_r", interpolation="nearest" ) cb = plt.colorbar(label="Uncalibrated probability") cb.ax.axhline(0.2, c="white") cb.ax.axhline(0.5, c="grey") cb.ax.axhline(0.8, c="black") plt.contour(fuzzy_predictions, [0.2, 0.5, 0.8], colors=["white", "grey", "black"]) ``` We now get an estimate of the probability that each pixel is wet. A probability of 0 means that WOfS is entirely sure that the pixel is not water. A probability of 1 means that WOfS is entirely sure that the pixel is water. 
The average value of the probability&mdash;and the probability in the limit of infinite noise&mdash;is the probability that any given pixel in the WOfS training set is wet, about 30%. This value contains more information than WOfS by itself. WOfS can determine only whether a pixel is wet or dry. FuzzyWOfS can indicate how close WOfS was to classifying each pixel as wet or dry. In WOfS, a pixel can only end up in a single leaf node: each decision is binary, and each pixel travels down one branch until it reaches a leaf. In FuzzyWOfS, pixels are split between multiple branches based on their probability of the (unobserved) true pixel value belonging in each branch. Each pixel can end up in multiple leaf nodes. The final result for a given pixel is the average of the marginal probabilities at each leaf, weighted by how much of that pixel ended up in each leaf. We decide how much of each pixel to send down each branch by modelling the probability distribution of the surface reflectance of each pixel, which we choose to model as a Gaussian. What if the noise was really really high? For example, what if we have no SWIR bands at all? We could imagine these SWIR bands take the value of the median of some similar, known image as an assumed expected value and have really high noise. 
``` median_swir = np.median(landsat_cube[-2:], axis=(1, 2)) median_swir landsat_noswir = landsat_cube.copy() landsat_noswir[-2:] = median_swir[:, None, None] ``` WOfS will output dry for every pixel in this no-SWIR case: ``` hard_predictions_noswir = fuzzy_wofs.wofs.predict(landsat_noswir) plt.imshow(hard_predictions_noswir, interpolation="nearest") ``` But FuzzyWOfS can account for the fact that we don't know the SWIR, and evaluate WOfS over _all_ possible SWIR values: ``` really_high_noise = landsat_noise.copy() really_high_noise[-2:] = 1000 fuzzy_predictions_noswir = fuzzy_wofs.wofs.fuzzy_predict( landsat_noswir, really_high_noise ) plt.figure() plt.imshow( fuzzy_predictions_noswir, vmin=0, vmax=1, cmap="coolwarm_r", interpolation="nearest" ) cb = plt.colorbar(label="Uncalibrated probability") cb.ax.axhline(0.2, c="white") plt.contour(fuzzy_predictions_noswir, [0.2], colors=["white"]) ``` We get a low probability prediction of water. ## Visualise how WOfS classifies an image We can also use FuzzyWOfS to investigate which leaf nodes contribute to the classification in each part of the image. Each pixel ends up in a leaf node which classifies it. Which leaf does each pixel end up in? ``` leaves = fuzzy_wofs.wofs.get_leaf(landsat_cube) plt.figure(figsize=(12, 8)) for i in range(23): plt.subplot(4, 6, i + 1) plt.imshow(leaves == i, cmap="Greys", interpolation="gaussian") plt.axis("off") plt.title(i) ``` The dry areas in this image are mostly classified by leaf 22, while the wet areas are mostly classified by leaf 0. The areas in between are classified with a few other leaves. What about the fuzzy classification? Each pixel now partly ends up in each leaf. 
We can visualise how much of each pixel ends up in each leaf: ``` fuzzy_leaves = fuzzy_wofs.wofs.get_fuzzy_leaf(landsat_cube, landsat_noise) plt.figure(figsize=(12, 8)) for i in range(23): plt.subplot(4, 6, i + 1) plt.imshow(fuzzy_leaves[i], cmap="Greys", interpolation="gaussian", vmin=0, vmax=1) plt.axis("off") plt.title(i) ``` We can see that all leaves contribute to the prediction in different amounts. This is the key difference between WOfS and FuzzyWOfS: in FuzzyWOfS, all leaves contribute to the prediction, while in WOfS, each prediction is based only on one leaf. ## Comparison to other wetness measures How does FuzzyWOfS compare to Tasseled Cap Wetness (TCW) and the Modified Normalised Difference Water Index (MNDWI), as well as the all-time summary of WOfS? Load TCW, MNDWI, and the WOfS summary: ``` tcw = calculate_indices(da, index="TCW", collection="ga_ls_3") mndwi = calculate_indices(da, index="MNDWI", collection="ga_ls_3") wofs_summary = dc.load("wofs_filtered_summary", like=da.drop("time")) ``` Then we can plot them all together. We'll outline the 5% maximum extent that DEA Waterbodies uses, and also the 50% mark for FuzzyWOfS. 
``` plt.figure(figsize=(10, 10)) plt.subplot(2, 2, 1) plt.title("FuzzyWOfS") plt.imshow(fuzzy_predictions, cmap="Blues") plt.colorbar(label='p(wet)') plt.contour(fuzzy_predictions, [0.5], colors="black", linestyles=":") plt.contour(wofs_summary.wofs_filtered_summary.isel(time=0), [0.05], colors="black") plt.subplot(2, 2, 2) plt.title("TCW") plt.imshow(tcw.isel(time=0).TCW, cmap="Blues") plt.colorbar(label='TCW') plt.contour(fuzzy_predictions, [0.5], colors="black", linestyles=":") plt.contour(wofs_summary.wofs_filtered_summary.isel(time=0), [0.05], colors="black") plt.subplot(2, 2, 3) plt.title("MNDWI") plt.imshow(mndwi.isel(time=0).MNDWI, cmap="Blues") plt.colorbar(label='MNDWI') plt.contour(fuzzy_predictions, [0.5], colors="black", linestyles=":") plt.contour(wofs_summary.wofs_filtered_summary.isel(time=0), [0.05], colors="black") plt.subplot(2, 2, 4) plt.title("WOfS all-time summary") plt.imshow(wofs_summary.wofs_filtered_summary.isel(time=0), vmin=0, cmap="Blues") plt.colorbar(label='p(wet)') plt.contour(fuzzy_predictions, [0.5], colors="black", linestyles=":") plt.contour(wofs_summary.wofs_filtered_summary.isel(time=0), [0.05], colors="black") ``` Bluer regions of the above maps indicate a higher likelihood of open water. All of the different scene-based measures show different amounts of water, though the water they show tends to be within the boundaries of the lake. TCW shows much more water, while MNDWI shows sparser water. This shows how FuzzyWOfS provides an alternative to TCW and MNDWI for continuous (i.e. non-binary) water predictions on a scene-by-scene basis in a way that is consistent with, and augments, the existing WOfS product. *** ## Additional information **License:** The code in this notebook is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0). Digital Earth Australia data is licensed under the [Creative Commons by Attribution 4.0](https://creativecommons.org/licenses/by/4.0/) license. 
**Contact:** If you need assistance, please post a question on the [Open Data Cube Slack channel](http://slack.opendatacube.org/) or on the [GIS Stack Exchange](https://gis.stackexchange.com/questions/ask?tags=open-data-cube) using the `open-data-cube` tag (you can view previously asked questions [here](https://gis.stackexchange.com/questions/tagged/open-data-cube)). If you would like to report an issue with this notebook, you can file one on [Github](https://github.com/GeoscienceAustralia/dea-notebooks). **Last modified:** November 2020 **Compatible datacube version:** ``` print(datacube.__version__) ``` ## Tags Browse all available tags on the DEA User Guide's [Tags Index](https://docs.dea.ga.gov.au/genindex.html)
github_jupyter
##### Copyright 2021 The TF-Agents Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` ### Checkpointer and PolicySaver <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/agents/tutorials/10_checkpointer_policysaver_tutorial"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/10_checkpointer_policysaver_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/10_checkpointer_policysaver_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/10_checkpointer_policysaver_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ## Introduction `tf_agents.utils.common.Checkpointer` is a utility to save/load the training state, policy state, and replay_buffer state to/from a local storage. `tf_agents.policies.policy_saver.PolicySaver` is a tool to save/load only the policy, and is lighter than `Checkpointer`. 
You can use `PolicySaver` to deploy the model as well without any knowledge of the code that created the policy. In this tutorial, we will use DQN to train a model, then use `Checkpointer` and `PolicySaver` to show how we can store and load the states and model in an interactive way. Note that we will use TF2.0's new saved_model tooling and format for `PolicySaver`. ## Setup If you haven't installed the following dependencies, run: ``` #@test {"skip": true} !sudo apt-get install -y xvfb ffmpeg !pip install 'imageio==2.4.0' !pip install 'xvfbwrapper==0.2.9' !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import io import matplotlib import matplotlib.pyplot as plt import os import shutil import tempfile import tensorflow as tf import zipfile import IPython try: from google.colab import files except ImportError: files = None from tf_agents.agents.dqn import dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import q_network from tf_agents.policies import policy_saver from tf_agents.policies import py_tf_eager_policy from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() tempdir = os.getenv("TEST_TMPDIR", tempfile.gettempdir()) #@test {"skip": true} # Set up a virtual display for rendering OpenAI gym environments. import xvfbwrapper xvfbwrapper.Xvfb(1400, 900, 24).start() ``` ## DQN agent We are going to set up DQN agent, just like in the previous colab. The details are hidden by default as they are not core part of this colab, but you can click on 'SHOW CODE' to see the details. 
### Hyperparameters ``` env_name = "CartPole-v1" collect_steps_per_iteration = 100 replay_buffer_capacity = 100000 fc_layer_params = (100,) batch_size = 64 learning_rate = 1e-3 log_interval = 5 num_eval_episodes = 10 eval_interval = 1000 ``` ### Environment ``` train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ``` ### Agent ``` #@title q_net = q_network.QNetwork( train_env.observation_spec(), train_env.action_spec(), fc_layer_params=fc_layer_params) optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) global_step = tf.compat.v1.train.get_or_create_global_step() agent = dqn_agent.DqnAgent( train_env.time_step_spec(), train_env.action_spec(), q_network=q_net, optimizer=optimizer, td_errors_loss_fn=common.element_wise_squared_loss, train_step_counter=global_step) agent.initialize() ``` ### Data Collection ``` #@title replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) collect_driver = dynamic_step_driver.DynamicStepDriver( train_env, agent.collect_policy, observers=[replay_buffer.add_batch], num_steps=collect_steps_per_iteration) # Initial data collection collect_driver.run() # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=2).prefetch(3) iterator = iter(dataset) ``` ### Train the agent ``` #@title # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) def train_one_iteration(): # Collect a few steps using collect_policy and save to the replay buffer. collect_driver.run() # Sample a batch of data from the buffer and update the agent's network. 
experience, unused_info = next(iterator) train_loss = agent.train(experience) iteration = agent.train_step_counter.numpy() print ('iteration: {0} loss: {1}'.format(iteration, train_loss.loss)) ``` ### Video Generation ``` #@title def embed_gif(gif_buffer): """Embeds a gif file in the notebook.""" tag = '<img src="data:image/gif;base64,{0}"/>'.format(base64.b64encode(gif_buffer).decode()) return IPython.display.HTML(tag) def run_episodes_and_create_video(policy, eval_tf_env, eval_py_env): num_episodes = 3 frames = [] for _ in range(num_episodes): time_step = eval_tf_env.reset() frames.append(eval_py_env.render()) while not time_step.is_last(): action_step = policy.action(time_step) time_step = eval_tf_env.step(action_step.action) frames.append(eval_py_env.render()) gif_file = io.BytesIO() imageio.mimsave(gif_file, frames, format='gif', fps=60) IPython.display.display(embed_gif(gif_file.getvalue())) ``` ### Generate a video Check the performance of the policy by generating a video. ``` print ('global_step:') print (global_step) run_episodes_and_create_video(agent.policy, eval_env, eval_py_env) ``` ## Setup Checkpointer and PolicySaver Now we are ready to use Checkpointer and PolicySaver. ### Checkpointer ``` checkpoint_dir = os.path.join(tempdir, 'checkpoint') train_checkpointer = common.Checkpointer( ckpt_dir=checkpoint_dir, max_to_keep=1, agent=agent, policy=agent.policy, replay_buffer=replay_buffer, global_step=global_step ) ``` ### Policy Saver ``` policy_dir = os.path.join(tempdir, 'policy') tf_policy_saver = policy_saver.PolicySaver(agent.policy) ``` ### Train one iteration ``` #@test {"skip": true} print('Training one iteration....') train_one_iteration() ``` ### Save to checkpoint ``` train_checkpointer.save(global_step) ``` ### Restore checkpoint For this to work, the whole set of objects should be recreated the same way as when the checkpoint was created. 
``` train_checkpointer.initialize_or_restore() global_step = tf.compat.v1.train.get_global_step() ``` Also save policy and export to a location ``` tf_policy_saver.save(policy_dir) ``` The policy can be loaded without having any knowledge of what agent or network was used to create it. This makes deployment of the policy much easier. Load the saved policy and check how it performs ``` saved_policy = tf.compat.v2.saved_model.load(policy_dir) run_episodes_and_create_video(saved_policy, eval_env, eval_py_env) ``` ## Export and import The rest of the colab will help you export / import checkpointer and policy directories such that you can continue training at a later point and deploy the model without having to train again. Now you can go back to 'Train one iteration' and train a few more times such that you can understand the difference later on. Once you start to see slightly better results, continue below. ``` #@title Create zip file and upload zip file (double-click to see the code) def create_zip_file(dirname, base_filename): return shutil.make_archive(base_filename, 'zip', dirname) def upload_and_unzip_file_to(dirname): if files is None: return uploaded = files.upload() for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn]))) shutil.rmtree(dirname) zip_files = zipfile.ZipFile(io.BytesIO(uploaded[fn]), 'r') zip_files.extractall(dirname) zip_files.close() ``` Create a zipped file from the checkpoint directory. ``` train_checkpointer.save(global_step) checkpoint_zip_filename = create_zip_file(checkpoint_dir, os.path.join(tempdir, 'exported_cp')) ``` Download the zip file. 
``` #@test {"skip": true} if files is not None: files.download(checkpoint_zip_filename) # try again if this fails: https://github.com/googlecolab/colabtools/issues/469 ``` After training for some time (10-15 times), download the checkpoint zip file, and go to "Runtime > Restart and run all" to reset the training, and come back to this cell. Now you can upload the downloaded zip file, and continue the training. ``` #@test {"skip": true} upload_and_unzip_file_to(checkpoint_dir) train_checkpointer.initialize_or_restore() global_step = tf.compat.v1.train.get_global_step() ``` Once you have uploaded checkpoint directory, go back to 'Train one iteration' to continue training or go back to 'Generate a video' to check the performance of the loaded poliicy. Alternatively, you can save the policy (model) and restore it. Unlike checkpointer, you cannot continue with the training, but you can still deploy the model. Note that the downloaded file is much smaller than that of the checkpointer. ``` tf_policy_saver.save(policy_dir) policy_zip_filename = create_zip_file(policy_dir, os.path.join(tempdir, 'exported_policy')) #@test {"skip": true} if files is not None: files.download(policy_zip_filename) # try again if this fails: https://github.com/googlecolab/colabtools/issues/469 ``` Upload the downloaded policy directory (exported_policy.zip) and check how the saved policy performs. ``` #@test {"skip": true} upload_and_unzip_file_to(policy_dir) saved_policy = tf.compat.v2.saved_model.load(policy_dir) run_episodes_and_create_video(saved_policy, eval_env, eval_py_env) ``` ## SavedModelPyTFEagerPolicy If you don't want to use TF policy, then you can also use the saved_model directly with the Python env through the use of `py_tf_eager_policy.SavedModelPyTFEagerPolicy`. Note that this only works when eager mode is enabled. 
``` eager_py_policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy( policy_dir, eval_py_env.time_step_spec(), eval_py_env.action_spec()) # Note that we're passing eval_py_env not eval_env. run_episodes_and_create_video(eager_py_policy, eval_py_env, eval_py_env) ```
github_jupyter
## Development notebook FCN model ``` from IPython.core.display import display, HTML display(HTML("<style>.container { width:95% !important; }</style>")) # %matplotlib inline %load_ext autoreload %autoreload 2 import sys, os, random, pprint sys.path.append('../') import tensorflow as tf import keras.backend as KB import numpy as np import skimage.io import mrcnn.visualize as visualize import mrcnn.utils as utils from mrcnn.datagen import data_generator, load_image_gt, data_gen_simulate from mrcnn.callbacks import get_layer_output_1,get_layer_output_2 from mrcnn.utils import mask_string, parse_image_meta, apply_box_deltas_tf from mrcnn.prep_notebook import mrcnn_coco_test, mrcnn_coco_train, fcn_coco_train, prep_coco_dataset from mrcnn.coco import CocoDataset, CocoConfig, CocoInferenceConfig, evaluate_coco, build_coco_results import mrcnn.model_fcn as fcn_modellib from mrcnn.utils import log, Paths pp = pprint.PrettyPrinter(indent=2, width=100) np.set_printoptions(linewidth=100,precision=4,threshold=1000, suppress = True) ``` ## Setup MRCNN model ``` mrcnn_model, mrcnn_config = mrcnn_coco_train(mode = 'trainfcn') dataset_val, val_generator = prep_coco_dataset(['train', 'val35k'], mrcnn_config, generator = True) dataset_train, train_generator = prep_coco_dataset(['minival'], mrcnn_config, generator = True) ``` #### Load Model Weights ``` # exclude=["mrcnn_class_logits"] # ,"mrcnn_bbox_fc"] #, "mrcnn_bbox", "mrcnn_mask"]) mrcnn_model.load_model_weights(init_with = 'last', exclude = None) # mrcnn_model.config.EPOCHS_TO_RUN = 1 mrcnn_model.config.display() mrcnn_model.layer_info() ``` ## Display Images ### Display image with Ground Truth bounding boxes and masks ``` ## 62642 (persons), 68539 (trucks) 36466 (surfers) 75040 (boat and persons) ## 36466 surfers. 
5498 basketbal players, 27711,30531 ## 5498 lots of motorcylces & persons - ## Persons: #26026, #7719, 111864, 58240, ## 89243: Person, bicylce and traiffic lights ## 35347 - laptops, keyboards and cat ## items = [59199 , 102868] ## 101623 (cake and forks), 41423 (elephant & people) from mrcnn.datagen import data_gen_simulate # train_batch_x, train_batch_y = next(train_generator) train_batch_x, train_batch_y = test_batch_x, test_batch_y = data_gen_simulate(dataset_train, mrcnn_config, [75040, 89243]) imgmeta_idx = mrcnn_model.keras_model.input_names.index('input_image_meta') img_meta = train_batch_x[imgmeta_idx] for img_idx in range(mrcnn_config.BATCH_SIZE): image_id = img_meta[img_idx,0] image = dataset_train.load_image(image_id) mask, class_ids = dataset_train.load_mask(image_id) bbox = utils.extract_bboxes(mask) print('Image meta : ', img_meta[img_idx,:10]) print('Classes : ', class_ids) print("image_id : ", image_id, ' Reference: ', dataset_train.image_reference(image_id)) print(' class_ids.shape[0]:', class_ids.shape[0], 'bbox.shape[0]:',bbox.shape[0]) class_names = [str(dataset_train.class_names[class_id]) for class_id in class_ids] print('Class Names : ', class_names) visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names) # Display image and instances visualize.display_instances_with_mask(image, bbox, mask, class_ids, dataset_train.class_names, figsize =(8,8)) ``` ### other image displays #### Display Training / Validation Training set information ``` print("Train Dataset Image Count: {}".format(len(dataset_train.image_ids))) print("Training Dataset Class Count: {}".format(dataset_train.num_classes)) for i, info in enumerate(dataset_train.class_info): print("{:3}. {:50}".format(i, info['name'])) print("Validation Dataset Image Count: {}".format(len(dataset_val.image_ids))) print("Validation Dataset Class Count: {}".format(dataset_val.num_classes)) for i, info in enumerate(dataset_val.class_info): print("{:3}. 
{:50}".format(i, info['name'])) ``` #### Display top masks for a random group of images ``` # Load and display random samples image_ids = np.random.choice(dataset_train.image_ids, 7) for image_id in image_ids: image = dataset_train.load_image(image_id) mask, class_ids = dataset_train.load_mask(image_id) visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names) ``` #### Display a random image with instances and mask ``` # Load random image and mask. # image_id = np.random.choice(dataset_train.image_ids) image = dataset_train.load_image(image_id) mask, class_ids = dataset_train.load_mask(image_id) # Compute Bounding box bbox = utils.extract_bboxes(mask) # Display image and additional stats print("image_id ", image_id, dataset_train.image_reference(image_id)) log("image", image) log("mask", mask) log("class_ids", class_ids) log("bbox", bbox) print(class_ids.shape[0], bbox.shape[0]) # Display image and instances visualize.display_instances_with_mask(image, bbox, mask, class_ids, dataset_train.class_names) ``` ## Load FCN Model ``` ##------------------------------------------------------------------------------------ ## Build configuration for FCN model ##------------------------------------------------------------------------------------ paths = Paths() fcn_config = CocoConfig() # fcn_config.IMAGE_MAX_DIM = 600 # fcn_config.IMAGE_MIN_DIM = 480 fcn_config.NAME = 'fcn' fcn_config.BATCH_SIZE = 1 # mrcnn_config.BATCH_SIZE # Batch size is 2 (# GPUs * images/GPU). 
fcn_config.IMAGES_PER_GPU = 1 # mrcnn_config.BATCH_SIZE # Must match BATCH_SIZE fcn_config.FCN_INPUT_SHAPE = fcn_config.IMAGE_SHAPE[0:2] // 4 # mrcnn_config.HEATMAP_SCALE_FACTOR fcn_config.FCN_VGG16_MODEL_PATH = paths.FCN_VGG16_MODEL_PATH fcn_config.TRAINING_PATH = paths.FCN_TRAINING_PATH fcn_config.STEPS_PER_EPOCH = 4 fcn_config.EPOCHS_TO_RUN = 2 fcn_config.LEARNING_RATE = 0.01 fcn_config.LAST_EPOCH_RAN = 0 fcn_config.WEIGHT_DECAY = 2.0e-4 fcn_config.VALIDATION_STEPS = 5 fcn_config.REDUCE_LR_FACTOR = 0.5 fcn_config.REDUCE_LR_COOLDOWN = 50 fcn_config.REDUCE_LR_PATIENCE = 33 fcn_config.EARLY_STOP_PATIENCE = 50 fcn_config.EARLY_STOP_MIN_DELTA = 1.0e-4 fcn_config.MIN_LR = 1.0e-10 fcn_config.NEW_LOG_FOLDER = True fcn_config.OPTIMIZER = 'ADAGRAD' fcn_config.SYSOUT = 'screen' paths.display() fcn_config.display() ##------------------------------------------------------------------------------------ ## Build FCN Model in Training Mode ##------------------------------------------------------------------------------------ try : del fcn_model gc.collect() except: pass fcn_model = fcn_modellib.FCN(mode="training", arch = 'FCN8', config=fcn_config) fcn_model.keras_model.summary() ##------------------------------------------------------------------------------------ ## Load FCN Model weights ##------------------------------------------------------------------------------------ fcn_model.load_model_weights(init_with = 'init') # 'fcn_config.VGG16_MODEL_PATH') fcn_model.config.display() # fcn_model.layer_info() ``` ## FCN `train_in_batches()` ``` print('--- MRCNN-------------------------------------------------------') log("Epochs to run {} ".format(mrcnn_model.config.EPOCHS_TO_RUN)) log("Steps per epochs {} ".format(mrcnn_model.config.STEPS_PER_EPOCH)) log("Batch size {} ".format(mrcnn_model.config.BATCH_SIZE)) log("Learning Rate {} ".format(mrcnn_model.config.LEARNING_RATE)) log("Momentum {} ".format(mrcnn_model.config.LEARNING_MOMENTUM)) log("Weight Decay: {} 
".format(mrcnn_model.config.WEIGHT_DECAY )) log("VALIDATION_STEPS {} ".format(mrcnn_model.config.VALIDATION_STEPS )) # log("Checkpoint Path: {} ".format(mrcnn_model.checkpoint_path)) # log("REDUCE_LR_FACTOR {} ".format(mrcnn_model.config.REDUCE_LR_FACTOR )) # log("REDUCE_LR_COOLDOWN {} ".format(mrcnn_model.config.REDUCE_LR_COOLDOWN )) # log("REDUCE_LR_PATIENCE {} ".format(mrcnn_model.config.REDUCE_LR_PATIENCE )) # log("MIN_LR {} ".format(mrcnn_model.config.MIN_LR )) # log("EARLY_STOP_PATIENCE {} ".format(mrcnn_model.config.EARLY_STOP_PATIENCE)) fcn_config.EPOCHS_TO_RUN = 4 fcn_config.LEARNING_RATE = 0.1 print('--- FCN --------------------------------------------------------') log("Epochs to run {} ".format(fcn_model.config.EPOCHS_TO_RUN)) log("Steps per epochs {} ".format(fcn_model.config.STEPS_PER_EPOCH)) log("Batch size {} ".format(fcn_model.config.BATCH_SIZE)) log("Learning Rate {} ".format(fcn_model.config.LEARNING_RATE)) log("Momentum {} ".format(fcn_model.config.LEARNING_MOMENTUM)) log("Weight Decay: {} ".format(fcn_model.config.WEIGHT_DECAY )) log("VALIDATION_STEPS {} ".format(fcn_model.config.VALIDATION_STEPS )) log("Checkpoint Path: {} ".format(fcn_model.checkpoint_path)) log("REDUCE_LR_FACTOR {} ".format(fcn_model.config.REDUCE_LR_FACTOR )) log("REDUCE_LR_COOLDOWN {} ".format(fcn_model.config.REDUCE_LR_COOLDOWN )) log("REDUCE_LR_PATIENCE {} ".format(fcn_model.config.REDUCE_LR_PATIENCE )) log("MIN_LR {} ".format(fcn_model.config.MIN_LR )) log("EARLY_STOP_PATIENCE {} ".format(fcn_model.config.EARLY_STOP_PATIENCE)) ##---------------------------------------------------------------------------------------------- ## Train the FCN only ## Passing layers="heads" freezes all layers except the head ## layers. You can also pass a regular expression to select ## which layers to train by name pattern. 
##---------------------------------------------------------------------------------------------- train_layers = ['fcn'] loss_names = ["fcn_heatmap_loss"] fcn_config.LAST_EPOCH_RAN = 0 fcn_model.epoch = fcn_config.LAST_EPOCH_RAN fcn_model.train_in_batches( mrcnn_model, dataset_train, dataset_val, layers = train_layers, losses = loss_names ) ``` ## Run data through model #### Display model input / output information ``` mrcnn_model.layer_info() print('\n FCN') fcn_model.layer_info() # model_output = get_layer_output_2(model.keras_model, train_batch_x, 1) # model_output = get_layer_output_1(mrcnn_model.keras_model, train_batch_x, [4,5,6,7,9,10,11,12,13,14], 1) # model_output = get_layer_output_1(mrcnn_model.keras_model, train_batch_x, [0,1,2,3,4,5,6,7,9,10,11], 1) model_output = get_layer_output_1(mrcnn_model.keras_model, train_batch_x, [0,1,2,3,4,5], 1) ``` ### Load input and output tensors #### Load output data ``` print(len(model_output)) # output_rois = model_output[0] # layer: 4 shape: (1, 200, 4) # target_class_ids = model_output[1] # layer: 5 shape: (1, 200) # target_bbox_deltas = model_output[2] # layer: 6 shape: (1, 200, 4) # roi_gt_boxes = model_output[3] # layer: 7 shape: (1, 200, 4) # mrcnn_class = model_output[4] # layer: 8 shape: (1, 200, 81) # mrcnn_bbox = model_output[5] # layer: 9 shape: (1, 200, 81, 4) # pred_refined_tensor = model_output[6] # layer: 16 shape: (1, 81, 25, 7) # output_rois = model_output[0] # layer: 0 shape: (2, 200, 4) # target_class_ids = model_output[1] # layer: 1 shape: (2, 200) # target_bbox_deltas = model_output[2] # layer: 2 shape: (2, 200, 4) # roi_gt_boxes = model_output[3] # layer: 3 shape: (2, 200, 4) # mrcnn_class = model_output[4] # layer: 4 shape: (2, 200, 81) # mrcnn_bbox = model_output[5] # layer: 5 shape: (2, 200, 81, 4) # model_pred_heatmap_norm = model_output[6] # layer: 6 shape: (2, 256, 256, 81) # model_pred_heatmap_scores = model_output[7] # layer: 7 shape: (2, 81, 25, 11) # model_gt_heatmap_scores = 
model_output[8] # layer: 9 shape: (2, 81, 25, 11) # model_pred_tensor = model_output[9] # layer: 10 shape: (2, 81, 25, 8) # model_gt_tensor = model_output[10] # layer: 11 shape: (2, 81, 25, 8) pred_heatmap_norm = model_output[0] # layer: 0 shape: (2, 256, 256, 81) pred_heatmap_scores = model_output[1] # layer: 1 shape: (2, 81, 200, 11) gt_heatmap_norm = model_output[2] # layer: 2 shape: (2, 256, 256, 81) gt_heatmap_scores = model_output[3] # layer: 3 shape: (2, 81, 200, 11) pred_tensor = model_output[4] # layer: 4 shape: (2, 81, 200, 8) gt_tensor = model_output[5] # layer: 5 shape: (2, 81, 200, 8) for i in model_output: print( i.shape) # fcn_input = [pred_heatmap_norm, pred_heatmap_scores, gt_heatmap_norm, gt_heatmap_scores] model_output2 = get_layer_output_1(fcn_model.keras_model, fcn_input, [0,1], 1) ``` #### Load input data ``` input_image = train_batch_x[0] input_image_meta = train_batch_x[1] # input_rpn_match = train_batch_x[2] # input_rpn_bbox = train_batch_x[3] input_gt_class_ids = train_batch_x[4] input_gt_bboxes = train_batch_x[5] print(' Input image shape is :', input_image.shape) print(' input_image_meta :', input_image_meta[0,:10]) # print(' input_rpn_match :', input_rpn_match.shape) # print(' input_rpn_bbox :', input_rpn_bbox.shape) print(' input_gt_class_ids :', input_gt_class_ids.shape) print(' input_gt_bboxes :', input_gt_bboxes.shape) # h, w = input_image.shape[1], input_image.shape[2] # tf.shape(input_image)[1], tf.shape(input_image)[2] # input_gt_bboxes_norm = tf.identity(input_gt_bboxes / [h,w,h,w]) # print(' input_gt_bboxes_norm :', input_gt_bboxes_norm.shape) ``` ## Display output from model #### `input_gt_class_ids`, `input_gt_bboxes` ``` # print(roi_gt_boxes[0,:50] * [1024,1024,1024,1024]) print(input_gt_class_ids[0]) print(input_gt_bboxes[0,:10]) # for i in range(input_gt_class_ids.shape[1]): # if input_gt_class_ids[0,i] == 1: # print(input_gt_class_ids[0,i], ' ', input_gt_bboxes[0,i]) ``` #### Display `output_rois` ``` 
np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) img = 0 print(' output_rois') print(output_rois.shape) # print(output_rois[0,:40,:]) print(output_rois [0,:40,:]* [1024, 1024, 1024, 1024]) ``` #### `max_mrcnn_class` , `argmax_mrcnn_class` ``` np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) print(' mrcnn_class', mrcnn_class.shape) # print( mrcnn_class[0,0,:]) # max_mrcnn_class = np.max(mrcnn_class, axis = (0,2)) argmax_mrcnn_class = np.argmax(mrcnn_class, axis = 2) # print() print('\n mrcnn_class Max Values : ', max_mrcnn_class.shape) print(max_mrcnn_class) # print() print(' mrcnn_class Argmax Values: ', argmax_mrcnn_class.shape) print(argmax_mrcnn_class[0]) print(' target_class_ds Values: ', target_class_ids.shape) print(target_class_ids[0]) # for i in range(100): # print('Predicted: ', argmax_mrcnn_class[0,i], ' Actual ', target_class_ids[0,i]) ``` #### Display `target_class_ids()` ``` print(' target_class_ids') print(target_class_ids.shape) print(target_class_ids[0,:70]) ``` #### apply `deltas` from predicted delta `mrcnn_bbox` to `output_rois` to obtain refined rois ``` np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) img_idx = 0 print('output_rois',output_rois.shape, 'deltas ', deltas.shape) cls = 1 for i in range(input_gt_class_ids.shape[1]): if input_gt_class_ids[0,i] == cls: print(input_gt_class_ids[0,i], ' ', input_gt_bboxes[0,i]) print() for i in range(output_rois.shape[1]): if classes[0,i] ==cls: print(' i ', i, 'class: ',classes[0,i]) # print(' orig : ', output_rois[0,i]) d1 = deltas[0,i] * mrcnn_config.BBOX_STD_DEV # print(' delta : ', deltas[0,i],' delta * std dev: ', d1) d2 = utils.apply_box_delta(output_rois[0,i],d1) # print(' refined : ', d2) # print() print(' orig : ',output_rois[0,i] * [1024,1024,1024,1024]) print(' refined : ', d2 * [1024,1024,1024,1024]) print(' roi_gt_bboxes : ', roi_gt_boxes[0,i]* [1024,1024,1024,1024]) print() print(' pred delta : ', 
deltas[0,i] ) print(' tgt delta : ', target_bbox_deltas[0,i] ) print() ``` #### Display roi_gt_boxes , and class_ids vs. output_bbox and prediceted class_ids ``` print(ref_out_roi) ref_out_roi1 = ref_out_roi * [1024,1024,1024,1024] print(ref_out_roi1) window = np.array([0,0,1024,1024], dtype =float) print(window.shape) ref_out_roi2 = utils.clip_to_window_np( window, ref_out_roi1) print(ref_out_roi2.shape) for i in range(200): print(ref_out_roi1[i],' --- ', ref_out_roi2[i]) ``` #### Display pred_refined_tensor and gt_tensor ``` for cls in [1]: for box in range(20): print(pred_tensor[0,cls,box]) print(gt_tensor[0,cls,box]) print() ``` #### Display roi_gt_boxes along with corresponding refined/clipped output_rois ``` img_id = 0 print(roi_gt_boxes[0].shape, target_class_ids[0].shape , np.expand_dims(target_class_ids[0],axis=-1).shape) classes, deltas = utils.get_predicted_mrcnn_deltas(mrcnn_class, mrcnn_bbox, verbose=True) deltas *= mrcnn_config.BBOX_STD_DEV print('classes.shape: ',classes.shape, ' deltas.shape: ',deltas.shape) ref_out_roi = utils.apply_box_deltas_np(output_rois[img_id],deltas[img_id]) # ## Clip boxes to image window # print(ref_out_roi.shape) window = np.array([0,0,1024,1024], dtype =float) clipped_out_roi = utils.clip_to_window_np( window, ref_out_roi*[1024,1024,1024,1024]) for i in range(200): # ref_out_roi = utils.apply_box_delta_np(output_rois[0],d1[0]) # if classes[img_id,i] == 1 or target_class_ids[img_id,i] == 1 : print('idx: ',200-i,' GT Cls: ', target_class_ids[img_id,i] , ' -', roi_gt_boxes[img_id,i]*[1024,1024,1024,1024], ' PR Cls: ', classes[img_id,i],' - ', ref_out_roi[i]*[1024.0,1024.0,1024.0,1024.0] , 'ClpdCls: ', clipped_out_roi[i] ) #) *[1024,1024,1024,1024] ``` #### display gt_heatmap_scores and pred_heatmap_scores outputs ``` np.set_printoptions(precision=4, threshold=None, linewidth=200, suppress=True) # print(' gt_tensor') # print(gt_tensor.shape) # print(gt_tensor[img,:,:10]) img_id = 1 print(' GT Heatmap Scores') 
print('gt_heatmap_scores: ', gt_heatmap_scores.dtype, gt_heatmap_scores.shape) print('pred_heatmap_scores: ', pred_heatmap_scores.dtype, pred_heatmap_scores.shape) # print(gt_heatmap_scores[img,1]) # for img_id in range(mrcnn_config.BATCH_SIZE): for img_id in [0]: # print(pred_refined_heatmap_scores[img_id,:4]) pr_class_ids = np.unique(pred_heatmap_scores[img_id,:,:,4]).astype(int).tolist() gt_class_ids = np.unique(gt_heatmap_scores[img_id,:,:,4]).astype(int).tolist() union_class_ids = np.union1d(pr_class_ids, gt_class_ids) print('-'*56) print('Image : {} GT ClassIds: {} PR ClassIds: {} '.format(img_id, gt_class_ids, pr_class_ids)) print('Image : {} Union ClassIds: {}'.format(img_id, union_class_ids)) print('-'*56) for cls in union_class_ids: print() for i in range(25): # print(' GT: img_id:',img_id, ' cls: ',cls, ' -',gt_tensor[img_id, cls,i]) #, gt_heatmap_scores[img_id, cls,i,7] ) # print(' PR: img_id:',img_id, ' cls: ',cls, ' -',pred_tensor[img_id,cls,i]) #,pred_refined_heatmap_scores[img_id,cls,i,7]) print(' GT: img/cls:',img_id, '/',cls, ' -',gt_heatmap_scores[img_id, cls,i]) #, gt_heatmap_scores[img_id, cls,i,7] ) print(' PR: img/cls:',img_id, '/',cls, ' -',pred_heatmap_scores[img_id,cls,i]) #,pred_refined_heatmap_scores[img_id,cls,i,7]) print() ``` #### Display `Pred_Tensor`, `Pred_heatmap`, `mrcnn_class` ``` # np.set_printoptions(precision=4, threshold=None, linewidth=150, suppress=True) np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) img = 0 # max_score = np.max(mrcnn_class, axis = -1) # max_class = np.argmax(mrcnn_class, axis = -1) # # print(' output_rois[',img,'] \n', output_rois[1]*[128,128,128,128]) # print('max class shape:',max_class.shape, 'max score shape: ',max_score.shape) # print('max class[',img,']\n',max_class[img]) # print('max score[',img,']\n',max_score[img]) # print('mrcnn class.shape ',mrcnn_class.shape) # print('mrcnn_class[',img,',:]\n',mrcnn_class[img,:]) # print(output_rois[1]) 
print('input_gt_class_ids') print(input_gt_class_ids[0]) # print(' rpn_bbox') # print(rpn_bbox.shape) # print(rpn_bbox[0,:100,:]) # print(' rpn_roi_proposals') # print(rpn_roi_proposals.shape) # print(rpn_roi_proposals[0,:100,:]) print(' output_rois') print(output_rois.shape) # print(output_rois[0,:40,:]) print(output_rois [0,:40,:]* [1024, 1024, 1024, 1024]) print(' target_class_ids') print(target_class_ids.shape) print(target_class_ids[0,:40]) # print(output_rois [0,:40,:]* [1024, 1024, 1024, 1024]) # print(' Pred_tensor') # print(pred_tensor.shape) # print(pred_tensor[img,:,:10]) # print(' gt_tensor') # print(gt_tensor.shape) # print(gt_tensor[img,:,:10]) # print(' mrcnn_class') # print( mrcnn_class.shape) # print( mrcnn_class[0,:,:]) # print(' mrcnn_bbox') # print( mrcnn_bbox.shape) # print( mrcnn_bbox) # print(' roi_gt_boxes') # print(roi_gt_boxes.shape) # print(roi_gt_boxes[img,:,:]) # print(' Pred Heatmap Scores') # print(pred_heatmap_scores.dtype, pred_heatmap_scores.shape) # print(pred_heatmap_scores[img,1]) # print(' FCN Scores') # print(fcn_scores.dtype) # for cls in range(4): # print(pred_heatmap_scores[img,cls,:10]) # print(fcn_scores[img,cls,:10,2:]) ``` #### Display `output_rois` for visual check - passed on to `build_pred_tensor()` ``` np.set_printoptions(linewidth=150, precision=6) # print('scatter shape is ', pred_scatt.get_shape()) print('output_rois shape is ', output_rois.shape) img = 0 for img in [0]: print('Image ', img , ' ------------') print(output_rois[img]) ``` #### Display - `pred_refined_tensor` which is passed on to `build_heatmap()` ``` np.set_printoptions(linewidth=150, precision=6) img_id = 0 # print('scatter shape is ', pred_scatt.get_shape()) print('model_pred_tensor shape is ', model_pred_tensor.shape) print(input_image_meta[0,:10]) pr_class_ids = np.unique(model_pred_heatmap_scores[img_id,:,:,4]).astype(int).tolist() print('Image : {} PR ClassIds: {} '.format(img_id, pr_class_ids)) for k in pr_class_ids: print('Image ', img , 
'/ Class ',k,' ------------') print(model_pred_tensor[img,k,:30]) ``` #### Compare `pred_heatmap_scores` vs. `pred_refined_heatmap_scores` ``` np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) print('pred_refined_heatmap_scores',pred_refined_heatmap_scores.shape) cls = 1 for i in range(input_gt_class_ids.shape[1]): if input_gt_class_ids[0,i] == cls: print(input_gt_class_ids[0,i], ' ', input_gt_bboxes[0,i]) print() for i in range(pred_heatmap_scores.shape[2]): # print(' ref_ten : ', pred_refined_tensor[0,1,i]) print(' hm_scr : ', pred_heatmap_scores[0,1,i]) print(' ref_hm_scr: ', pred_refined_heatmap_scores[0,1,i]) print() ``` ### Setup tensors to be passed to `build_predictions ()` ``` mrcnn_bbox = tf.identity(mrcnn_bbox) mrcnn_class = tf.identity(mrcnn_class) norm_input_rois = tf.identity(output_rois) config = mrcnn_config sess = KB.get_session() print(' Keras session :', sess) import mrcnn.utils as utils ``` ### `build_predictions()` `pred_tensor[:,:,:,1:7]` == `[116.9736 21.8213 36.2715 45.6026 0. 
0.9139 ]` ``` with sess.as_default(): # def build_refined_predictions(norm_input_rois, mrcnn_class, mrcnn_bbox, config): ''' Split output_rois by class id, and add class_id and class_score output: ------- pred_tensor: [ Batchsz, Num_Classes, Num_Rois, 7: (y1, x1, y2, x2, class_id, class_score, normalized class score)] y1,x1, y2,x2 are in image dimension format ''' batch_size = config.BATCH_SIZE num_classes = config.NUM_CLASSES h, w = config.IMAGE_SHAPE[:2] # num_rois = config.TRAIN_ROIS_PER_IMAGE num_cols = 6 num_rois = KB.int_shape(norm_input_rois)[1] scale = tf.constant([h,w,h,w], dtype = tf.float32) # dup_scale = tf.reshape(tf.tile(scale, [num_rois]),[num_rois,-1]) dup_scale = scale * tf.ones([batch_size, num_rois, 1], dtype = 'float32') det_per_class = config.TRAIN_ROIS_PER_IMAGE ## config.DETECTION_PER_CLASS print() print(' > build_predictions()') print(' num_rois : ', num_rois ) print(' norm_input_rois.shape : ', type(norm_input_rois), KB.int_shape(norm_input_rois)) print(' scale.shape : ', type(scale), KB.int_shape(scale), scale.get_shape()) print(' dup_scale.shape : ', type(dup_scale), KB.int_shape(dup_scale), dup_scale.get_shape()) print() print(' mrcnn_class shape : ', KB.int_shape(mrcnn_class)) print(' mrcnn_bbox.shape : ', KB.int_shape(mrcnn_bbox), mrcnn_bbox.shape ) print(' config image shape : ', config.IMAGE_SHAPE, 'h:',h,'w:',w) #--------------------------------------------------------------------------- # Build a meshgrid for image id and bbox to use in gathering of bbox delta information #--------------------------------------------------------------------------- batch_grid, bbox_grid = tf.meshgrid( tf.range(batch_size, dtype=tf.int32), tf.range(num_rois, dtype=tf.int32), indexing = 'ij' ) #------------------------------------------------------------------------------------ # use the argmaxof each row to determine the dominating (predicted) class #------------------------------------------------------------------------------------ pred_classes = 
tf.argmax( mrcnn_class,axis=-1,output_type = tf.int32) pred_classes_exp = tf.to_float(tf.expand_dims(pred_classes ,axis=-1)) # print(' pred_classes : ', pred_classes.shape) # print(pred_classes.eval()) # print(' pred_scores : ', pred_scores.shape ,'\n', pred_scores.eval()) # print(' pred_classes_exp : ', pred_classes_exp.shape) gather_ind = tf.stack([batch_grid , bbox_grid, pred_classes],axis = -1) pred_scores = tf.gather_nd(mrcnn_class, gather_ind) pred_deltas = tf.gather_nd(mrcnn_bbox , gather_ind) #------------------------------------------------------------------------------------ # 22-05-2018 - stopped using the following code as it was clipping too many bouding # boxes to 0 or 128 causing zero area generation ##------------------------------------------------------------------------------------ ## apply delta refinements to the rois, based on deltas provided by the mrcnn head ##------------------------------------------------------------------------------------ pred_deltas = tf.multiply(pred_deltas, config.BBOX_STD_DEV, name = 'pred_deltas') input_rois = tf.multiply(norm_input_rois , dup_scale ) ## compute "refined rois" utils.apply_box_deltas_tf(input_rois, pred_deltas) refined_rois = utils.apply_box_deltas_tf(input_rois, pred_deltas) ## Clip boxes to image window window = tf.constant([[0,0,h,w]], dtype = tf.float32) clipped_rois = utils.clip_to_window_tf( window, refined_rois) print(' input_rois.shape : ', type(input_rois), KB.int_shape(input_rois), input_rois.get_shape()) print(' refined_rois.shape : ', type(refined_rois), KB.int_shape(refined_rois), refined_rois.get_shape()) print(' refined rois clipped : ', clipped_rois.shape) # print(' mrcnn_class : ', mrcnn_class.shape, mrcnn_class) # print(' gather_ind : ', gather_ind.shape, gather_ind) # print(' pred_scores : ', pred_scores.shape ) # print(' pred_deltas : ', pred_deltas.shape ) # print(' input_rois : ', input_rois.shape, input_rois) # print(' refined rois: ', refined_rois.shape, refined_rois) 
##------------------------------------------------------------------------------------ ## Build Pred_Scatter: tensor of bounding boxes by Image / Class ##------------------------------------------------------------------------------------ ## sequence id is used to preserve the order of rois as passed to this routine ## This may be important in the post matching process but for now it's not being used. ## 22-09-18 : We need to use this sequence as the sort process based on score will cause ## mismatch between the bboxes from output_rois and roi_gt_bboxes ##------------------------------------------------------------------------------------ sequence = tf.ones_like(pred_classes, dtype = tf.int32) * (bbox_grid[...,::-1] + 1) sequence = tf.to_float(tf.expand_dims(sequence, axis = -1)) print(' shape of sequence : ', sequence.shape) pred_array = tf.concat([ clipped_rois, pred_classes_exp , tf.expand_dims(pred_scores, axis = -1), sequence], axis=-1, name = 'pred_array') #-------------------------------------------------------------------------------------------- # pred_array = tf.concat([refined_rois, pred_classes_exp , tf.expand_dims(pred_scores, axis = -1)], axis=-1) #--------------------------------------------------------------------------------------------- scatter_ind = tf.stack([batch_grid , pred_classes, bbox_grid],axis = -1) pred_scatt = tf.scatter_nd(scatter_ind, pred_array, [batch_size, num_classes, num_rois, pred_array.shape[-1]]) print(' pred_array : ', pred_array.shape) print(' scatter_ind : ', type(scatter_ind), 'shape', scatter_ind.shape) print(' pred_scatter : ', pred_scatt.get_shape()) ##-------------------------------------------------------------------------------------------- ## Apply a per class score normalization ##-------------------------------------------------------------------------------------------- normalizer = tf.reduce_max(pred_scatt[...,5], axis = -1, keepdims=True) normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), 
normalizer) norm_score = tf.expand_dims(pred_scatt[...,5]/normalizer, axis = -1) pred_scatt = tf.concat([pred_scatt, norm_score],axis = -1) print(' - Add normalized score --\n') print(' normalizer : ', normalizer.shape) print(' norm_score : ', norm_score.shape) print(' pred_scatter : ', pred_scatt.get_shape()) ##------------------------------------------------------------------------------------ ## sort pred_scatter in each class dimension based on bbox scores (last column) ##------------------------------------------------------------------------------------ _, sort_inds = tf.nn.top_k(pred_scatt[...,6], k=pred_scatt.shape[2]) # build indexes to gather rows from pred_scatter based on sort order class_grid, batch_grid, roi_grid = tf.meshgrid(tf.range(num_classes),tf.range(batch_size), tf.range(num_rois)) roi_grid_exp = tf.to_float(tf.expand_dims(roi_grid, axis = -1)) gather_inds = tf.stack([batch_grid , class_grid, sort_inds],axis = -1) pred_array = tf.gather_nd(pred_scatt, scatter_ind ) pred_tensor = tf.gather_nd(pred_scatt, gather_inds[...,:det_per_class,:], name = 'pred_tensor') # append an index to the end of each row --- commented out 30-04-2018 # pred_tensor = tf.concat([pred_tensor, roi_grid_exp], axis = -1) print(' sort_inds : ', type(sort_inds) , ' shape ', sort_inds.shape) print(' class_grid : ', type(class_grid) , ' shape ', class_grid.get_shape()) print(' batch_grid : ', type(batch_grid) , ' shape ', batch_grid.get_shape()) print(' roi_grid shape : ', type(roi_grid) , ' shape ', roi_grid.get_shape()) print(' roi_grid_exp : ', type(roi_grid_exp), ' shape ', roi_grid_exp.get_shape()) print(' gather_inds : ', type(gather_inds) , ' shape ', gather_inds.get_shape()) print(' pred_array : ', pred_array.shape, pred_array.get_shape()) print(' pred_tensor : ', pred_tensor.get_shape()) # return pred_tensor ### Reshape pred_scatt?? 
No, doesn't work well as the reshape will convert into [batch_sz, #classes x # bboxes, 8] # btch_sz, cls_sz, bbox_sz, col_sz = pred_scatt.shape # print(btch_sz, cls_sz, bbox_sz, col_sz ) # reshape = tf.reshape(pred_scatt, [btch_sz, -1, col_sz]) # print(reshape.shape) ### This works well, converts pred_scatter back to pred_array (with added normzalized score column) # # reshape = tf.gather_nd(pred_scatt, scatter_ind ) ``` ### Display `pred_tensor` from model code and code above, `pred_heatmap_scores` ``` np.set_printoptions(linewidth=150, precision=6) # print('scatter shape is ', pred_scatt.get_shape()) print('pred_tensor : ', pred_tensor.get_shape() ) print('pred tensor from model : ', model_pred_tensor.shape) with sess.as_default(): r_pred_tensor = pred_tensor.eval() for img in range(2): class_ids = np.unique(r_pred_tensor[img,:,:,4]).astype(int).tolist() print('Classids: ', class_ids) for i in class_ids: print('Image ', img , '/ Class ',i,' ------------') for j in range(25): print(r_pred_tensor[img,i,j]) print(model_pred_tensor[img,i,j]) print(model_pred_heatmap_scores[img,i,j]) # print(pred_heatmap_scores[img,i,j]) print() np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) with sess.as_default(): print(scatter_ind.shape) print(pred_scatt.shape) print(pred_array.shape) # r_clipped_rois = clipped_rois.eval() r_pred_array = pred_array.eval() for i in range(200): # print() # print('input_ro: ', r0[0,i]) # print('original (clipped) : ', r_clipped_rois[0,i]) print('pred_array : ', r_pred_array[0,i]) np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) # with sess.as_default(): # print(pred_scores.eval()) # print(pred_classes.eval()) # print(scatter_ind.eval()[0]) # print(norm_score.eval()[0,9]) # print(pred_array.eval()[0,:200]) # print(scatter_ind.shape) # print(pred_scatt.shape) # print(pred_array.shape) # r_clipped_rois = clipped_rois.eval() # r_pred_array = pred_array.eval() # 
print(pred_scatt.eval()[0,1,0:200]) # print(normalizer.eval()[0,9]) ``` ### Some tests on the results #### Test that refined_rois is correctly working in `clip_to_window_tf` ``` with sess.as_default(): test_np = refined_rois.eval() test_tf = refined_rois_clipped.eval() window_np = np.array([0,0,128,128]) print(window_np.shape) for i in range(5): # print('Before', i) # print(test_np[i]) test_np[i] = clip_to_window(window_np, test_np[i]) # print('After', i) # print(test_np[i]) # print(' tensor flow') # print(test_tf[i]) for i in range(5): all_equal = np.all(test_np == refined_rois_clipped.eval()) print('i: ', i, '--- EQUAL : ', all_equal) ``` #### Test that pred_classes and pred_deltas have been properly selected when using tf.gather_nd () ``` with sess.as_default(): tmp0 = pred_classes.eval() tmp1 = mrcnn_bbox.eval() tmp2 = pred_deltas.eval() tmp4 = mrcnn_class.eval() tmp3 = pred_scores2.eval() tmp5 = pred_scores.eval() for i in range(5): for j in range(32): print('i: ', i, ' j :', j,'--- class: ',tmp0[i,j],'---------------') # print(tmp0[i,j]) print(tmp1[i,j]) print(' ===> ', tmp2[i,j]) print(' mrcnn_score: ', tmp4[i,j,tmp0[i,j]], ' pred_score:', tmp5[i,j,0], 'pred_score2: ', tmp3[i,j]) ``` #### Verify refined_rois generated by TF and NP are equal when using `apply_box_deltas_tf( )` ``` from mrcnn.utils import apply_box_deltas, apply_box_deltas_tf with sess.as_default(): refined_rois_tf = apply_box_deltas_3d(output_rois, pred_deltas).eval() print(' refined rois_tf: ', refined_rois_tf.shape, refined_rois_tf.dtype) tmp = [] bxs = output_rois.eval() dlt = pred_deltas.eval() for i in range(5): tmp.append(apply_box_deltas(bxs[i], dlt[i])) refined_rois_np = np.asarray(tmp) print(' refined rois_np: ', refined_rois_np.shape,refined_rois_np.dtype) print(' refined rois_np == refined rois_tf ?? 
:', np.all(refined_rois_tf[0,1] == refined_rois_np[0,1])) # for i in range(5): # for j in range(32): # all_eq = np.all(refined_rois_tf[0,1] == refined_rois_np[0,1]) # if ~all_eq: # print(' Not equal : ',i,'/',j) # print(refined_rois_tf[i,j]) # print(refined_rois_np[i,j]) # else: # print(' equal : ',i,'/',j) print(refined_rois_tf[0]) print(refined_rois_np[0]) ``` ### Prepare values to pass to build_heatmap ``` # def build_heatmap(in_tensor, config, names = None): np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) print(model_pred_tensor.shape) in_tensor = tf.identity(model_pred_tensor) # in_tensor = pred_tensor # in_array = pred_array sess = KB.get_session() config = mrcnn_model.config names = ['test'] ``` ### `build_heatmap()` - part 1 ``` with sess.as_default(): # def build_heatmap(in_tensor, config, names = None): num_detections = config.DETECTION_MAX_INSTANCES img_h, img_w = config.IMAGE_SHAPE[:2] batch_size = config.BATCH_SIZE num_classes = config.NUM_CLASSES heatmap_scale = config.HEATMAP_SCALE_FACTOR rois_per_image = (in_tensor.shape)[2] grid_h, grid_w = config.IMAGE_SHAPE[:2] // heatmap_scale # rois per image is determined by size of input tensor # detection mode: config.TRAIN_ROIS_PER_IMAGE # ground_truth : config.DETECTION_MAX_INSTANCES # strt_cls = 0 if rois_per_image == 32 else 1 print('\n ') print(' > NEW build_heatmap() for ', names ) print(' in_tensor shape : ', in_tensor.shape) print(' num bboxes per class : ', rois_per_image ) print(' heatmap scale : ', heatmap_scale, 'Dimensions: w:', grid_w,' h:', grid_h) #----------------------------------------------------------------------------- ## Stack non_zero bboxes from in_tensor into pt2_dense #----------------------------------------------------------------------------- # pt2_ind shape is [?, 3]. 
# pt2_ind[0] corresponds to image_index # pt2_ind[1] corresponds to class_index # pt2_ind[2] corresponds to roi row_index # pt2_dense shape is [?, 6] # pt2_dense[0] is image index # pt2_dense[1:4] roi cooridnaytes # pt2_dense[5] is class id #----------------------------------------------------------------------------- pt2_sum = tf.reduce_sum(tf.abs(in_tensor[:,:,:,0:4]), axis=-1) pt2_ind = tf.where(pt2_sum > 0) pt2_dense = tf.gather_nd( in_tensor, pt2_ind) print(' pt2_sum shape ',pt2_sum.shape) print(' pt2_ind shape :', pt2_ind.shape) print(' pt2_dense shape ',pt2_dense.get_shape()) #----------------------------------------------------------------------------- ## Build mesh-grid to hold pixel coordinates #----------------------------------------------------------------------------- X = tf.range(grid_w , dtype=tf.int32) Y = tf.range(grid_h , dtype=tf.int32) X, Y = tf.meshgrid(X, Y) # duplicate (repeat) X and Y into a batch_size x rois_per_image tensor print(' X/Y shapes :', X.get_shape(), Y.get_shape()) ones = tf.ones([tf.shape(pt2_dense)[0] , 1, 1], dtype = tf.int32) rep_X = ones * X rep_Y = ones * Y print(' Ones: ', ones.shape) print(' ones_exp * X', ones.shape, '*', X.shape, '= ',rep_X.shape) print(' ones_exp * Y', ones.shape, '*', Y.shape, '= ',rep_Y.shape) # # stack the X and Y grids pos_grid = tf.to_float(tf.stack([rep_X,rep_Y], axis = -1)) print(' before transpse ', pos_grid.get_shape()) pos_grid = tf.transpose(pos_grid,[1,2,0,3]) print(' after transpose ', pos_grid.get_shape()) pt2_dense_scaled = pt2_dense[:,:4]/heatmap_scale ##----------------------------------------------------------------------------- ## Build mean and convariance tensors for Multivariate Normal Distribution ##----------------------------------------------------------------------------- width = pt2_dense_scaled[:,3] - pt2_dense_scaled[:,1] # x2 - x1 height = pt2_dense_scaled[:,2] - pt2_dense_scaled[:,0] cx = pt2_dense_scaled[:,1] + ( width / 2.0) cy = pt2_dense_scaled[:,0] + ( height / 
2.0) means = tf.stack((cx,cy),axis = -1) covar = tf.stack((width * 0.5 , height * 0.5), axis = -1) covar = tf.sqrt(covar) ##----------------------------------------------------------------------------- ## Compute Normal Distribution for bounding boxes ##----------------------------------------------------------------------------- tfd = tf.contrib.distributions mvn = tfd.MultivariateNormalDiag(loc = means, scale_diag = covar) prob_grid = mvn.prob(pos_grid) print(' Prob_grid shape before tanspose: ',prob_grid.get_shape()) prob_grid = tf.transpose(prob_grid,[2,0,1]) print(' Prob_grid shape after tanspose : ',prob_grid.get_shape()) print(' >> input to MVN.PROB: pos_grid (meshgrid) shape: ', pos_grid.get_shape()) print(' << output probabilities shape:' , prob_grid.get_shape()) ##--------------------------------------------------------------------------------------------- ## (1) apply normalization per bbox heatmap instance ##--------------------------------------------------------------------------------------------- print('\n normalization ------------------------------------------------------') normalizer = tf.reduce_max(prob_grid, axis=[-2,-1], keepdims = True) normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer) print(' normalizer : ', normalizer.shape) prob_grid_norm = prob_grid / normalizer ##--------------------------------------------------------------------------------------------- ## (2) multiply normalized heatmap by normalized score in in_tensor/ (pt2_dense column 7) ## broadcasting : https://stackoverflow.com/questions/49705831/automatic-broadcasting-in-tensorflow ##--------------------------------------------------------------------------------------------- # Using the double tf.transpose, we dont need this any more # scr = tf.expand_dims(tf.expand_dims(pt2_dense[:,7],axis = -1), axis =-1) prob_grid_norm_scaled = tf.transpose(tf.transpose(prob_grid_norm) * pt2_dense[:,7]) print(' prob_grid_norm_scaled : ', 
prob_grid_norm_scaled.shape) # maxes2 = tf.reduce_max(prob_grid_norm_scaled, axis=[-2,-1], keepdims = True) # print(' shape of maxes2 : ', maxes2.shape) ``` #### Tests ``` with sess.as_default(): # print(prob_grid_norm.shape) # r_normalizer = normalizer.eval() # r_prob_grid = prob_grid.eval() # r_prob_grid_norm = prob_grid_norm.eval() # r_prob_grid_norm_scaled = prob_grid_norm_scaled.eval() r_maxes2 = maxes2.eval() r_score = pt2_dense[:,7].eval() # r_pt2_dense = pt2_dense.eval() # r_cx, r_cy = cx.eval(), cy.eval() # print(r_normalizer.shape) # print(r_prob_grid.shape, r_prob_grid_norm.shape) # print(r_maxes0.shape) # print(r_maxes1.shape) print(r_maxes2.shape) print(r_score.shape) # print(r_pt2_dense[:50]) for i in range(20): print(' ', r_score[i], ' ', r_maxes2[i],' ') # , r_pt2_dense[i],r_cx[i], r_cy[i]) for y in [111,112,113]: print(r_prob_grid[0,y,95:115]) print(r_prob_grid_norm[0,y,95:115]) print(r_prob_grid_norm_scaled[0,y,95:115]) print() from mrcnn.visualize import plot_2d_gaussian, plot_3d_gaussian box = 23 plot_3d_gaussian(r_prob_grid[box], zlim = 0.1) plot_3d_gaussian(r_prob_grid_norm[box]) plot_3d_gaussian(r_prob_grid_norm_scaled[box]) ``` ### `build_heatmap()` - part 2 - Calculate heatmap sum using old method ``` with sess.as_default(): ##-------------------------------------------------------------------------------- ## IMPORTANT: kill distributions of NaN boxes (resulting from bboxes with height/width of zero ## which cause singular sigma cov matrices ##-------------------------------------------------------------------------------- # prob_grid = tf.where(tf.is_nan(prob_grid), tf.zeros_like(prob_grid), prob_grid) ##------------------------------------------------------------------------------------- ## (3) scatter out the probability distributions based on class ##------------------------------------------------------------------------------------- print('\n Scatter out the probability distributions based on class --------------') gauss_scatt = 
tf.scatter_nd(pt2_ind, prob_grid_norm_scaled, [batch_size, num_classes, rois_per_image, grid_w, grid_h], name = 'gauss_scatter') print(' pt2_ind shape : ', pt2_ind.shape) print(' prob_grid shape : ', prob_grid.shape) print(' gauss_scatt : ', gauss_scatt.shape) # batch_sz , num_classes, num_rois, image_h, image_w ##------------------------------------------------------------------------------------- ## (4) SUM : Reduce and sum up gauss_scattered by class ##------------------------------------------------------------------------------------- print('\n Reduce sum based on class ---------------------------------------------') gauss_heatmap = tf.reduce_sum(gauss_scatt, axis=2, name='pred_heatmap2') # force small sums to zero - for now (09-11-18) commented out but could reintroduce based on test results # gauss_heatmap = tf.where(gauss_heatmap < 1e-12, gauss_heatmap, tf.zeros_like(gauss_heatmap), name='Where1') print(' gaussian_heatmap shape : ', gauss_heatmap.get_shape(), 'Keras tensor ', KB.is_keras_tensor(gauss_heatmap) ) ##--------------------------------------------------------------------------------------------- ## (5) heatmap normalization ## normalizer is set to one when the max of class is zero ## this prevents elements of gauss_heatmap_norm computing to nan ##--------------------------------------------------------------------------------------------- print('\n normalization ------------------------------------------------------') normalizer = tf.reduce_max(gauss_heatmap, axis=[-2,-1], keepdims = True) print(' normalizer shape : ', normalizer.shape) normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer) gauss_heatmap_norm = gauss_heatmap / normalizer print(' gauss norm : ', gauss_heatmap_norm.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap_norm) ) ``` ### Alternative method: use `scatter_nd_add` to build guassian sum requires definition of tf.variable ``` with sess.as_default(): # kvar = KB.variable(value = 
KB.zeros([batch_size, num_classes, grid_w, grid_h], dtype = 'float32')) # kvar = tf.scatter_nd_add(kvar, pt2_ind[:,:2],prob_grid) # kvar_norm = KB.variable(value = KB.zeros([batch_size, num_classes, grid_w, grid_h], dtype = 'float32')) # kvar_norm = tf.scatter_nd_add(kvar_norm, pt2_ind[:,:2],prob_grid) kvar_norm_scaled = KB.variable(value = KB.zeros([batch_size, num_classes, grid_w, grid_h], dtype = 'float32')) kvar_norm_scaled = KB.zeros([batch_size, num_classes, grid_w, grid_h]) kvar_norm_scaled = tf.scatter_nd_add(kvar_norm_scaled, pt2_ind[:,:2],prob_grid_norm_scaled) ##--------------------------------------------------------------------------------------------- ## heatmap normalization ## normalizer is set to one when the max of class is zero ## this prevents elements of gauss_heatmap_norm computing to nan ##--------------------------------------------------------------------------------------------- print('\n normalization ------------------------------------------------------') normalizer = tf.reduce_max(kvar_norm_scaled, axis=[-2,-1], keepdims = True) normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer) gaussian_heatmap_norm = kvar_norm_scaled / normalizer # gauss_heatmap_norm = gauss_heatmap / tf.reduce_max(gauss_heatmap, axis=[-2,-1], keepdims = True) # gauss_heatmap_norm = tf.where(tf.is_nan(gauss_heatmap_norm), tf.zeros_like(gauss_heatmap_norm), gauss_heatmap_norm, name = 'Where2') print(' gauss norm : ', gaussian_heatmap_norm.shape ) ``` #### Tests ``` with sess.as_default(): # r_ghm = gauss_heatmap.eval() r_kvar = KB.eval(kvar) r_kvar_norm = KB.eval(kvar_norm) r_kvar_norm_scaled = KB.eval(kvar_norm_scaled) r_kvar_final = kvar_final.eval() # r_kvar = kvar.eval() # r_kvar_norm = kvar_norm.eval() # r_kvar_norm_scaled = kvar_norm_scaled.eval() # r_kvar_final = kvar_final.eval() print(r_kvar.shape, r_kvar_norm.shape, r_kvar_norm_scaled.shape, r_kvar_final.shape) # np.set_printoptions(precision=4, threshold=30000, linewidth=260, 
suppress=True) # print(r_kvar.shape, r_ghm.shape) # print(kvar, gauss_heatmap) # for i in [9]: #range(81): # for j in range(256): # print(' Col: ', j, ': ',np.all(r_kvar[0,i,j] == r_ghm[0,i,j])) from mrcnn.visualize import plot_2d_gaussian, plot_3d_gaussian cls = 1 plot_3d_gaussian(r_ghm[0,cls]) plot_3d_gaussian(r_kvar[0,cls]) plot_3d_gaussian(r_kvar_norm[0,cls]) plot_3d_gaussian(r_kvar_final[0,cls]) # for i in range(81): # print(np.max(r_kvar[0,i]), np.max(r_ghm[0,i]), np.sum(r_kvar[0,i]),np.sum(r_ghm[0,i])) # plot_3d_gaussian(r_prob_grid_norm_scaled[box]) # np.set_printoptions(linewidth=150, precision=6) # # print('scatter shape is ', pred_scatt.get_shape()) # print('pt2_dense shape is ', pt2_dense.get_shape() ) # with sess.as_default(): # r_pt2_ind = pt2_ind.eval() # r_pt2_dense = pt2_dense.eval() # X1,Y1 = tf.meshgrid(tf.range(batch_size, dtype=tf.int32), tf.range(num_detections, dtype=tf.int32), indexing = 'ij') # r_X1 = X1.eval() # r_Y1 = Y1.eval() # print(r_X1.shape , Y1.shape) # print(r_X1) # print(r_Y1) # print(r_pt2_ind.shape) # where_to_go = np.stack([r_pt2_ind[:,0],r_pt2_dense[:,4], r_pt2_dense[:,6]],axis =-1) # print(where_to_go.shape) # print(where_to_go) # class_ids = np.unique(r_pt2_dense[:,4]).astype(int).tolist() # print('Classids: ', class_ids) # for box in range(r_pt2_ind.shape[0]): # print(r_pt2_ind[box],' ', r_pt2_dense[box,:]) # gauss_sum = tf.zeros([batch_size, num_classes, rois_per_image, img_w//scale, img_h//scale]) # print(' gaussian_sum shape : ', gauss_sum.get_shape(), 'Keras tensor ', KB.is_keras_tensor(gauss_sum) ) # counter = 0 # limit = batch_size * rois_per_image # c = lambda i, j, k,l: tf.less_equal(i, pt2_ind.get_shape()[0]) # b = lambda i, j, k,l: tf.add(j[k[i]], l[i]) # loop_vars = [counter, gauss_sum, pt2_ind, prob_grid] # tf.while_loop(c, b, loop_vars) # print('pt2_dense shape',pt2_dense.shape) # for i in range(pt2_dense.shape[0]): # print('i', i, 'pt2_ind[i]',pt2_ind[i].shape) # gauss_sum[pt2_ind[i,:]] += prob_grid[i] with 
sess.as_default(): # init_sum.initializer() r_ghm_norm = gauss_heatmap_norm.eval() from mrcnn.visualize import plot_2d_gaussian, plot_3d_gaussian box = 23 plot_3d_gaussian(r_ghm_norm[0,1]) plot_3d_gaussian(r_ghm[0,1]) # for i in range(81): # print(np.max(r_kvar[0,i]), np.max(r_ghm[0,i]), np.sum(r_kvar[0,i]),np.sum(r_ghm[0,i])) # plot_3d_gaussian(r_prob_grid_norm_scaled[box]) ``` ### `build_heatmap()` - part 3 ``` ##-------------------------------------------------------------------------------------------- ## Generate scores using prob_grid and pt2_dense - NEW METHOD ## added 09-21-2018 ##-------------------------------------------------------------------------------------------- scores_from_sum2 = tf.map_fn(build_hm_score, [prob_grid, pt2_dense_scaled, pt2_dense[:,7]], dtype = tf.float32, swap_memory = True) scores_scattered = tf.scatter_nd(pt2_ind, scores_from_sum2, [batch_size, num_classes, rois_per_image, 3], name = 'scores_scattered') gauss_scores = tf.concat([in_tensor, scores_scattered], axis = -1,name = names[0]+'_scores') print(' scores_scattered shape : ', scores_scattered.shape) print(' gauss_scores : ', gauss_scores.shape, ' Name: ', gauss_scores.name) print(' gauss_scores (FINAL) : ', gauss_scores.shape, ' Keras tensor ', KB.is_keras_tensor(gauss_scores) ) ##-------------------------------------------------------------------------------------------- ## Normalization is already perfored on the scores at a per_class leve, so we dont use this ## code below anympre ## ## This is a regular normalization that moves everything between [0, 1]. ## This causes negative values to move to -inf, which is a problem in FCN scoring. ## To address this a normalization between [-1 and +1] was introduced in FCN. ## Not sure how this will work with training tho. 
##-------------------------------------------------------------------------------------------- # normalizer = tf.reduce_max(scores_scatt[...,-1], axis = -1, keepdims=True) # print('norm',normalizer.shape) # normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer) # norm_score2 = tf.expand_dims(scores_scatt[...,-1]/normalizer, axis = -1) # print('norm_SCORE2',norm_score2.shape) #------------------------------------------------------------------------------------------------------------------- # Generate scores using GAUSS_SUM -- OLD METHOD # removed 09-21-2018 #------------------------------------------------------------------------------------------------------------------- # Generate scores : # ----------------- # NOTE: Score is generated on NORMALIZED gaussian distributions (GAUSS_NORM) # If want to do this on NON-NORMALIZED, we need to apply it on GAUSS_SUM # Testing demonstated that the NORMALIZED score generated from using GAUSS_SUM # and GAUSS_NORM are the same. # For now we will use GAUSS_SUM score and GAUSS_NORM heatmap. The reason being that # the raw score generated in GAUSS_SUM is much smaller. # We may need to change this base on the training results from FCN #--------------------------------------------------------------------------------------------- # duplicate GAUSS_NORM <num_roi> times to pass along with bboxes to map_fn function # # Here we have a choice to calculate scores using the GAUSS_SUM (unnormalized) or GAUSS_NORM (normalized) # after looking at the scores and ratios for each option, I decided to go with the normalized # as the numbers are larger # # Examples> # Using GAUSS_SUM # [ 3.660313 3.513489 54.475536 52.747402 1. 0.999997 4.998889 2450. 0.00204 0.444867] # [ 7.135149 1.310972 50.020126 44.779854 1. 0.999991 4.981591 1892. 0.002633 0.574077] # [ 13.401865 0. 62.258957 46.636948 1. 0.999971 4.957398 2303. 0.002153 0.469335] # [ 0. 0. 66.42349 56.123024 1. 0.999908 4.999996 3696. 0.001353 0.294958] # [ 0. 0. 
40.78952 60.404335 1. 0.999833 4.586552 2460. 0.001864 0.406513] # # Using GAUSS_NORM: class r-cnn scr # [ 3.660313 3.513489 54.475536 52.747402 1. 0.999997 1832.9218 2450. 0.748131 0.479411] # [ 7.135149 1.310972 50.020126 44.779854 1. 0.999991 1659.3965 1892. 0.877059 0.56203 ] # [ 13.401865 0. 62.258957 46.636948 1. 0.999971 1540.4974 2303. 0.668909 0.428645] # [ 0. 0. 66.42349 56.123024 1. 0.999908 1925.3267 3696. 0.520922 0.333813] # [ 0. 0. 40.78952 60.404335 1. 0.999833 1531.321 2460. 0.622488 0.398898] # # to change the source, change the following line gauss_heatmap_norm <--> gauss_heatmap #--------------------------------------------------------------------------------------------------------------------------- # flatten guassian scattered and input_tensor, and pass on to build_bbox_score routine # in_shape = tf.shape(in_tensor) # print(' shape of in_tensor is : ', KB.int_shape(in_tensor)) # in_tensor_flattened = tf.reshape(in_tensor, [-1, in_shape[-1]]) <-- not a good reshape style!! 
# replaced with following line: # in_tensor_flattened = tf.reshape(in_tensor, [-1, in_tensor.shape[-1]]) # # bboxes = tf.to_int32(tf.round(in_tensor_flattened[...,0:4])) # # print(' in_tensor : ', in_tensor.shape) # print(' in_tensor_flattened : ', in_tensor_flattened.shape) # print(' Rois per class : ', rois_per_image) # # print('\n Scores from gauss_heatmap ----------------------------------------------') # temp = tf.expand_dims(gauss_heatmap, axis =2) # print(' temp expanded : ', temp.shape) # temp = tf.tile(temp, [1,1, rois_per_image ,1,1]) # print(' temp tiled shape : ', temp.shape) # # temp = KB.reshape(temp, (-1, temp.shape[-2], temp.shape[-1])) # # print(' temp flattened : ', temp.shape) # print(' in_tensor_flattened : ', in_tensor_flattened.shape) # # scores_from_sum = tf.map_fn(build_hm_score, [temp, in_tensor_flattened], dtype=tf.float32) # scores_shape = [in_tensor.shape[0], in_tensor.shape[1], in_tensor.shape[2], -1] # scores_from_sum = tf.reshape(scores_from_sum, scores_shape) # print(' reshaped scores : ', scores_from_sum.shape) #-------------------------------------------------------------------------------------------- # tf.reduce_max(scores_from_sum[...,-1], axis = -1, keepdims=True) result is [num_imgs, num_class, 1] # # This is a regular normalization that moves everything between [0, 1]. # This causes negative values to move to -inf, which is a problem in FCN scoring. # To address this a normalization between [-1 and +1] was introduced in FCN. # Not sure how this will work with training tho. 
#-------------------------------------------------------------------------------------------- # normalizer = tf.reduce_max(scores_from_sum[...,-1], axis = -1, keepdims=True) # normalizer = tf.where(normalizer < 1.0e-15, tf.ones_like(normalizer), normalizer) # norm_score = tf.expand_dims(scores_from_sum[...,-1]/normalizer, axis = -1) #-------------------------------------------------------------------------------------------- # Append `in_tensor` and `scores_from_sum` to form `bbox_scores` #-------------------------------------------------------------------------------------------- # gauss_scores = tf.concat([in_tensor, scores_from_sum, norm_score], axis = -1,name = names[0]+'_scores') # print(' scores_from_sum final : ', scores_from_sum.shape) # print(' norm_score : ', norm_score.shape) # print(' gauss_scores : ', gauss_scores.shape, ' name: ', gauss_scores.name) # print(' gauss_scores (FINAL) : ', gauss_scores.shape, ' Keras tensor ', KB.is_keras_tensor(gauss_scores) ) #-------------------------------------------------------------------------------------------------------------------- ##-------------------------------------------------------------------------------------------- ## //create heatmap Append `in_tensor` and `scores_from_sum` to form `bbox_scores` ##-------------------------------------------------------------------------------------------- # gauss_heatmap = tf.transpose(gauss_heatmap,[0,2,3,1], name = names[0]) ### gauss_heatmap_norm = tf.transpose(gauss_heatmap_norm,[0,2,3,1], name = names[0]+'_norm') ### Use heatmap computed from KVAR gauss_heatmap_norm = tf.transpose(gaussian_heatmap_norm,[0,2,3,1], name = names[0]+'_norm') # print(' gauss_heatmap shape : ', gauss_heatmap.shape ,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap) ) # print(' gauss_heatmap_norm shape : ', gauss_heatmap_norm.shape,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap_norm) ) # print(' gauss_heatmap shape : ', gauss_heatmap.shape ,' Keras tensor ', 
KB.is_keras_tensor(gauss_heatmap) ) print(' gauss_heatmap_norm shape : ', gauss_heatmap_norm.shape,' Keras tensor ', KB.is_keras_tensor(gauss_heatmap_norm) ) print(' complete') # return gauss_heatmap_norm, gauss_scores # , gauss_heatmap gauss_heatmap_L2norm # [gauss_heatmap, gauss_scatt, means, covar] # with sess.as_default(): # print(pred_array.shape) # pt2_sum2 = tf.reduce_sum(tf.abs(pred_array[:,:,0:4]), axis=-1) # r_dense = pt2_dense.eval() # r_sum = pt2_sum.eval() # r_ind = pt2_ind.eval() # r_pred_scores = gauss_scores.eval() # print(r_dense.shape) # print(r_sum.shape) # print(r_ind.shape) # print(r_ind) ``` ### `build_hm_score()` ``` ##---------------------------------------------------------------------------------------------------------------------- ## build_hm_score() : sum a gaussian heatmap over one bbox region and weight the sum by the bbox's normalized score ##---------------------------------------------------------------------------------------------------------------------- def build_hm_score(input_list): ''' Compute per-bbox score components from a gaussian heatmap: mask the heatmap to the bbox extent, sum the masked values, and weight that sum by the bbox's normalized score. Inputs: ----------- heatmap_tensor : [ image height, image width ] input_bbox : [y1, x1, y2, x2] in absolute (non-normalized) scale input_norm_score: Normalized score from pred_tensor Returns ----------- gaussian_sum : sum of gaussian heatmap values over the area covered by the bounding box bbox_area : bounding box area (in pixels) weighted_norm_sum : gaussian_sum * input_norm_score ''' heatmap_tensor, input_bbox, input_norm_score = input_list with tf.variable_scope('mask_routine'): y_extent = tf.range(input_bbox[0], input_bbox[2]) x_extent = tf.range(input_bbox[1], input_bbox[3]) Y,X = tf.meshgrid(y_extent, x_extent) bbox_mask = tf.stack([Y,X],axis=2) mask_indices = tf.reshape(bbox_mask,[-1,2]) mask_indices = tf.to_int32(mask_indices) mask_size = tf.shape(mask_indices)[0] mask_updates = tf.ones([mask_size], dtype = tf.float32) mask = tf.scatter_nd(mask_indices, mask_updates, tf.shape(heatmap_tensor)) # mask_sum = tf.reduce_sum(mask) mask_applied = tf.multiply(heatmap_tensor, mask, name = 'mask_applied') bbox_area = tf.to_float((input_bbox[2]-input_bbox[0]) * (input_bbox[3]-input_bbox[1])) gaussian_sum = tf.reduce_sum(mask_applied) # Multiply gaussian_sum by score to obtain weighted sum # weighted_sum = gaussian_sum * input_row[5] # Replaced lines above with following lines 21-09-2018 # Multiply gaussian_sum by normalized score to obtain weighted_norm_sum weighted_norm_sum = gaussian_sum * input_norm_score # input_list[7] return tf.stack([gaussian_sum, bbox_area, weighted_norm_sum], axis = -1) ``` ### Evaluate results from `build_heatmap()` ``` np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) with sess.as_default(): # sess.run( tf.global_variables_initializer()) # gauss_scatt = gauss_scatt.eval() # pred_heatmap = gauss_sum.eval() r_normalizer = normalizer.eval() pred_heatmap_norm = gauss_heatmap_norm.eval() pred_heatmap_scores = gauss_scores.eval() # prob_grid = prob_grid.eval() # r_scores_from_sum2 = scores_from_sum2.eval() np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) img = 0 # print(gauss_sum.shape) # print(gauss_scatt.shape) # print(pred_heatmap.shape) print(r_normalizer.shape) print(pred_heatmap_norm.shape) print(model_pred_heatmap_norm.shape) # print(r_scores_from_sum2.shape) print(pred_heatmap_scores.shape) print(model_pred_heatmap_scores.shape) # print(pred_heatmap_scores[0,1]) np.set_printoptions(precision=4, threshold=None, linewidth=250, suppress=True) # np.set_printoptions(precision=4, threshold=4000, linewidth=210, suppress=True) for img in [0]: class_ids = np.unique(pred_heatmap_scores[img,:,:,4]).astype(int).tolist() print('\n Class ids for img', i, ':',class_ids, '\n') for i in class_ids: print('Image ', img , '/ Class ',i,' ------------ ') for j in range(25): print(' gt score : ', model_gt_heatmap_scores[img,i,j]) print(' pred score : ', pred_heatmap_scores[img,i,j]) print(' model score2: ', model_pred_heatmap_scores[img,i,j]) print() ``` #### compare results of `pred_heatmap_scores` from code above and program file ``` 
np.set_printoptions(linewidth=150, precision=6) print('pred_heatmap_scores shape is ', pred_heatmap_scores.shape ) print('pred_heatmap_scores from model is :', model_pred_heatmap_scores.shape) # with sess.as_default(): # r_pred_tensor = pred_tensor.eval() for img in [0]: class_ids = np.unique(pred_heatmap_scores[img,:,:,4]).astype(int).tolist() print('Classids: ', class_ids) for i in class_ids: print('Image ', img , '/ Class ',i,' ------------ normalizer:', r_normalizer[img,i]) for j in range(200): print(pred_heatmap_scores[img,i,j]) print(model_pred_heatmap_scores[img,i,j]) if (pred_heatmap_scores[img,i,j,-1] == model_pred_heatmap_scores[img,i,j,-1] == 0): break # print(pred_refined_tensor[img,i,j]) print() ``` ### Run TF graph ``` # with sess1.as_default(): # FeedList = [positive_ind_shuffled, positive_indices, positive_overlaps, roi_gt_class_ids, roi_gt_boxes, roi_gt_box_assignment ] # FeedList = [ rois, roi_gt_class_ids, roi_gt_deltas, roi_gt_boxes] Fetches = [ pred_heatmap, pred_heatmap_norm, pred_heatmap_scores] tt = sess.run(Fetches) print(type(tt), len(tt)) for i in tt: print(type(i), i.shape) ``` ## Plot heatmaps ### Plot 2D heatmap of one `pred_heatmap` distribution generated in `build_heatmap` ``` from mrcnn.visualize import plot_one_bbox_heatmap, plot_3d_heatmap, plot_3d_heatmap_all_classes, plot_2d_heatmap, plot_2d_heatmap_with_bboxes import matplotlib as plt %matplotlib notebook print('Image id: ',image_id) img_id = 0 class_names = dataset_train.class_names ``` #### 2D plot of `pred_heatmap_norm` returned from model : `model_pred_heatmap_norm` ``` for img_id in [0]: ## range(mrcnn_config.BATCH_SIZE): # print(pred_refined_heatmap_scores[img_id,:4]) print(pred_heatmap_norm.shape) class_ids = np.unique(pred_heatmap_scores[img_id,:,:,4]).astype(int).tolist() print('Image : {} ClassIds: {}'.format(img_id, class_ids)) # plot_2d_heatmap_with_bboxes(model_pred_heatmap_norm, model_pred_heatmap_scores, # img_id, [0], width=6, height=6, class_names = 
class_names, scale = 4) # plot_2d_heatmap_with_bboxes( pred_heatmap_norm, pred_heatmap_scores, # img_id, [0], width=6, height=6, class_names = class_names, scale = 4) # plot_2d_heatmap(pred_heatmap_norm, img_id, class_ids, width=6, height=6, class_names = class_names) ``` #### 2D plot of `pred_heatmap_norm` returned from model ``` for img_id in [1]: ##range(mrcnn_config.BATCH_SIZE): print(pred_heatmap_norm.shape) class_ids = np.unique(pred_heatmap_scores[img_id,:,:,4]).astype(int).tolist() print('Image : {} ClassIds: {}'.format(img_id, class_ids)) plot_2d_heatmap_with_bboxes( pred_heatmap_norm, pred_heatmap_scores, img_id, class_ids, width=6, height=6, class_names = class_names, scale = 4) # plot_2d_heatmap(model_pred_heatmap_norm, img_id, class_ids, width=6, height=6, class_names = class_names) ``` #### 2D plot of `gt_heatmap_norm` returned from model ``` for img_id in [1]: ##range(mrcnn_config.BATCH_SIZE): print(gt_heatmap_norm.shape) class_ids = np.unique(gt_heatmap_scores[img_id,:,:,4]).astype(int).tolist() print('Image : {} ClassIds: {}'.format(img_id, class_ids)) plot_2d_heatmap_with_bboxes(gt_heatmap_norm, gt_heatmap_scores, img_id, class_ids, width=6, height=6, class_names = class_names, scale = 4) ``` #### 3D plot of `model_pred_heatmap_norm` returned from model ``` for img_id in [0]: ##range(mrcnn_config.BATCH_SIZE): print(model_pred_heatmap_norm.shape) class_ids = np.unique(model_pred_heatmap_scores[img_id,:,:,4]).astype(int).tolist() print('Image : {} ClassIds: {}'.format(img_id, class_ids)) plot_3d_heatmap(model_pred_heatmap_norm, img_id, [37], width=6, height=6, class_names = class_names) ``` #### 3D plot of `pred_heatmap_norm` returned from code above ``` for img_id in [0]: ## range(mrcnn_config.BATCH_SIZE): print(pred_heatmap_norm.shape) class_ids = np.unique(pred_heatmap_scores[img_id,:,:,4]).astype(int).tolist() print('Image : {} ClassIds: {}'.format(img_id, class_ids)) plot_3d_heatmap(pred_heatmap_norm, img_id, class_ids, width=6, height=6, 
class_names = class_names) ``` #### 3D plot of `gt_heatmap_norm` returned from code above ``` for img_id in [1]: ## range(mrcnn_config.BATCH_SIZE): print(gt_heatmap_norm.shape) class_ids = np.unique(gt_heatmap_scores[img_id,:,:,4]).astype(int).tolist() print('Image : {} ClassIds: {}'.format(img_id, class_ids)) plot_3d_heatmap(gt_heatmap_norm, img_id, class_ids, width=6, height=6, class_names = class_names) ``` #### Verify max and min of gaussian heatmaps are 1.0 and 0.0, respectively ``` np.set_printoptions(precision=4, threshold=None, linewidth=200, suppress=True) print(pred_heatmap_norm.shape) hm_max = np.max(pred_heatmap_norm, axis = (1,2)) hm_min = np.min(pred_heatmap_norm, axis = (1,2)) print(hm_max.shape) for img_id in range(mrcnn_config.BATCH_SIZE): # print(pred_refined_heatmap_scores[img_id,:4]) class_ids = np.unique(pred_heatmap_scores[img_id,:,:,4]).astype(int).tolist() print('\n Image : {} ClassIds: {}'.format(img_id, class_ids)) print('-'*38) for cls in class_ids: print(' class: {} max: {} min: {}'.format(cls, hm_max[img_id,cls], hm_min[img_id,cls])) # print(pred_heatmap_scores[img_id, cls]) print(gt_heatmap_norm.shape) hm_max = np.max(gt_heatmap_norm, axis = (1,2)) hm_min = np.min(gt_heatmap_norm, axis = (1,2)) print(hm_max.shape) for img_id in range(mrcnn_config.BATCH_SIZE): # print(pred_refined_heatmap_scores[img_id,:4]) class_ids = np.unique(gt_heatmap_scores[img_id,:,:,4]).astype(int).tolist() print('\n Image : {} ClassIds: {}'.format(img_id, class_ids)) print('-'*38) for cls in class_ids: print(' class: {} max: {} min: {}'.format(cls, hm_max[img_id,cls], hm_min[img_id,cls])) # print(pred_heatmap_scores[img_id, cls]) ``` ### Display `pred_scatter` heatmaps for all bounding boxes of an image ``` %matplotlib notebook print('Image id: ',image_id , ' Classes (1: circle, 2: square, 3: triangle ): ') img = 1 print(pred_heatmap_scores[img,0,0]) plot_bbox_heatmaps(gauss_scatt[img], pred_tensor[img], width = 15, height=25, num_bboxes=12) ``` ### Display 
`gauss_heatmap` heatmap (not normalized, normalized, L2 normalized) ``` %matplotlib notebook print('Image id: ',image_id , ' Classes (1: circle, 2: square, 3: triangle ): ') img = 0 print(pred_heatmap_scores[img,0,0]) # plot_one_heatmap(pred_heatmap[img], pred_heatmap_scores[img], width=19, num_bboxes = 10, title='Non-normalized') plot_gaussian(pred_heatmap_norm[img,:,:,1],0, plot_one_heatmap(pred_heatmap_norm[img], pred_heatmap_scores[img], width=19, num_bboxes = 10, title='normalized') # plot_one_heatmap(pred_heatmap_L2norm[img], pred_heatmap_scores[img], width=19, num_bboxes = 10, title='L2-normalized') # plot_heatmaps(pred_heatmap, pred_heatmap_scores, width = 15, num_bboxes=12) ``` ### Display `gauss_heatmap` 3D heatmap (not normalized, normalized, L2 normalized) ``` from mrcnn.visualize import plot_3d_heatmap %matplotlib notebook print('Image id: ',image_id , ' Classes (1: circle, 2: square, 3: triangle ): ') img = 1 print(pred_heatmap_scores[img,cls,:10]) ttl = 'Non-normalized - image: {}'.format(img) plot_3d_heatmap(pred_heatmap[img], title = ttl, width = 20) plot_one_heatmap(pred_heatmap[img], pred_heatmap_scores[img], width=15, title=ttl) ttl = 'Normalized - image: {}'.format(img) plot_3d_heatmap(pred_heatmap[img], title = ttl, width = 20) plot_one_heatmap(pred_heatmap[img], pred_heatmap_scores[img], width=15, title=ttl) ``` ### Find maximum of gaussian distributions for the pred_heatmap Potentially use this as our heatmap scores. Found out that using MAX values from the class heatmap (currently generated from the pred_tensor that itself is generated from output_rois and mrcnn_class) is not a viable option, because multiple max values tend to congregate around the peak of the gaussian distribution. This is also the case for gt_heatmaps. This will probably also be the case for the FCN output. 
#### pred_heatmap ``` np.set_printoptions(linewidth=150, threshold=10000) print(pred_hm.shape) cls_hm = pred_hm[0,:,:,2] print(cls_hm.shape) print(np.unravel_index(np.argmax(cls_hm) , cls_hm.shape) ) print(np.max(cls_hm)) print(pred_hm_norm.shape) cls_hm_norm = pred_hm_norm[0,:,:,2] print(cls_hm_norm.shape) print(np.unravel_index(np.argmax(cls_hm_norm) , cls_hm_norm.shape) ) print(np.max(cls_hm_norm)) hm_ls =np.ravel(cls_hm) hm_ls_norm = np.ravel(cls_hm_norm) srtlst = np.argsort(hm_ls) srtlst_norm = np.argsort(hm_ls_norm) print(' Sortlist') print(srtlst[::-1]) print(srtlst.shape) print('---- norm ------') print(srtlst_norm[::-1]) print(srtlst_norm.shape) print(' Top scores') top_scores = srtlst[:-21:-1] print('---- norm ------') top_scores_norm = srtlst_norm[:-21:-1] print(len(top_scores),top_scores) print(' Top items ') for i in top_scores : print( i , ' ', np.unravel_index(i, cls_hm.shape)) print('---- norm ------') for i in top_scores_norm : print( i , ' ', np.unravel_index(i, cls_hm_norm.shape)) print(' Top scores ') print(hm_ls[top_scores]) print('---- norm ------') print(hm_ls_norm[top_scores_norm]) ``` #### gt_heatmap ``` np.set_printoptions(linewidth=150, threshold=10000) print(pred_hm.shape) cls_hm = gt_hm[0,:,:,2] print(cls_hm.shape) print(np.unravel_index(np.argmax(cls_hm) , cls_hm.shape) ) print(np.max(cls_hm)) print('---- norm -----') print(gt_hm_norm.shape) cls_hm_norm = gt_hm_norm[0,:,:,2] print(cls_hm_norm.shape) print(np.unravel_index(np.argmax(cls_hm_norm) , cls_hm_norm.shape) ) print(np.max(cls_hm_norm)) hm_ls =np.ravel(cls_hm) hm_ls_norm = np.ravel(cls_hm_norm) srtlst = np.argsort(hm_ls) srtlst_norm = np.argsort(hm_ls_norm) print(' Sortlist') print(srtlst[::-1]) print(srtlst.shape) print('---- norm ------') print(srtlst_norm[::-1]) print(srtlst_norm.shape) print(' Top scores') top_scores = srtlst[:-21:-1] print('---- norm ------') top_scores_norm = srtlst_norm[:-21:-1] print(len(top_scores),top_scores) print(' Top items ') for i in top_scores : 
print( i , ' ', np.unravel_index(i, cls_hm.shape)) print('---- norm ------') for i in top_scores_norm : print( i , ' ', np.unravel_index(i, cls_hm_norm.shape)) print(' Top scores ') print(hm_ls[top_scores]) print('---- norm ------') print(hm_ls_norm[top_scores_norm]) max_a = np.max(cls_pred_heatmap) print(max_a.shape) print(np.unravel_index(np.argmax(prob_a) , prob_a.shape) ) print() print(' covar ', covar_sqrd) print(prob_b[35:50, 45:54]) max_b = np.max(prob_b) print(np.unravel_index(np.argmax(prob_b) , prob_b.shape) ) print('max a , max_b ', max_a, max_b, max_a/max_b) ``` ## `development build_heatmap_tf ()` ### Generate Multivariate Normal Distribution from Pred_Tensor ### Prepare values to pass to build_gaussian_tf #### Display for visual check - `pred_tensor` is the final result which is passed on to `build_gaussian_tf()` #### Display for visual check - `gt_tensor` is the final result which is passed on to `build_gaussian_tf()` ### Plot heatmap produced by network `fcn_bilinear` and compare with `pred_gaussian` ``` from mrcnn.visualize import plot_gaussian, plot_gaussian_2d import matplotlib as plt %matplotlib notebook img = 2 cls = 2 image_id = input_image_meta[img,0] image = dataset_train.load_image(image_id) mask, class_ids = dataset_train.load_mask(image_id) visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names) Zout1 = pred_heatmap # gt_gaussiam Zout2 = pred_heatmap_norm # fcn_bilinear Zout3 = pred_heatmap_L2norm # fcn_bilinear print(Zout1.shape, Zout2.shape) num_images = config.IMAGES_PER_GPU num_classes = config.NUM_CLASSES print(pred_tensor[img,cls,:10]) print(pred_tensor.shape) print('Image id: ',image_id) print('Classes (1: circle, 2: square, 3: triangle ): ') width = 9 # for j in [cls] : #range(num_classes): print(pred_heatmap_scores[img,cls,:10]) ttl = 'Pred_hm - image : {} class: {} '.format(img,j) plot_gaussian_2d(Zout1[img,:,:,j], title = ttl, width = width) ttl = 'pred_norm - image : {} class: {} '.format(img,j) 
plot_gaussian_2d(Zout2[img,:,:,j], title = ttl, width = width) ttl = 'pred_norm_L2 - image : {} class: {} '.format(img,j) plot_gaussian_2d(Zout3[img,:,:,j], title = ttl, width = width) from mrcnn.visualize import display_gt_bboxes, display_roi_proposals model_info = [model, config, dataset_train, train_generator] display_roi_proposals(model_info, input_image_meta, pred_tensor, [cls], 0) %matplotlib notebook width = 12 plot_gaussian2([pred_heatmap_norm, fcn_heatmap_norm], image_idx = 0, title = ttl, width = width) ``` ### Test `means`, `covar`, `gauss_grid`, and `gauss_sum ` between development version and final version ``` print(means.get_shape(), means.get_shape()) tst1 = means.eval() tst2 = means2.eval() print(tst1.shape, tst2.shape) print(tst1[0,:10]) print() print(tst2[0,:10]) print(np.all(tst1 == tst2)) print() del tst1, tst2 tst1 = st.eval() tst2 = st2.eval() print(tst1.shape, tst2.shape) print(tst1[0,:10]) print() print(tst2[0,:10]) print(np.all(tst1 == tst2)) print() del tst1, tst2 tst1 = gauss_grid.eval() tst2 = gauss_grid2.eval() print(tst1.shape, tst2.shape) print(tst1[0,0,:10]) print() print(tst2[0,0,:10]) print(np.all(tst1 == tst2)) # print() del tst1, tst2 tst1 = gauss_sum.eval() tst2 = gauss_sum2.eval() print(tst1.shape, tst2.shape) # print(tst1[0,0,:10]) # print() # print(tst2[0,0,:10]) print(np.all(tst1 == tst2)) # print() del tst1, tst2 np.set_printoptions(linewidth=150, threshold=10000) from scipy.stats import multivariate_normal # Build mesh-grid to hold pixel coordinates ---------------------------------- XX = np.arange(0, img_w, 1) YY = np.arange(0, img_h, 1) XX, YY = np.meshgrid(XX, YY) print('XX shape', XX.shape) pos = np.empty(XX.shape + (2,)) # concatinate shape of x to make ( x.rows, x.cols, 2) pos[:,:,0] = XX; pos[:,:,1] = YY; print('XX') print(XX) print('YY') print(YY) print(pos[0,:,:]) print(pos[0]) print(grid[0].eval()) print(' pos type ', type(pos), type(grid)) print(' grid shape ', pos.shape, grid.shape) print(np.all(pos == 
grid.eval())) mean = np.array([1,2]) covar = np.array([[1,0],[0,1]]) print(' mean ', mean) print(' covar ', covar) mvna = multivariate_normal(mean, covar) prob_a = mvna.pdf(pos) # mvnb = multivariate_normal(mean, covar_sqrd) # prob_b = mvnb.pdf(pos) # print(prob_a[35:50, 45:54]) # max_a = np.max(prob_a) # print(np.unravel_index(np.argmax(prob_a) , prob_a.shape) ) # print() # print(' covar ', covar_sqrd) # print(prob_b[35:50, 45:54]) # max_b = np.max(prob_b) # print(np.unravel_index(np.argmax(prob_b) , prob_b.shape) ) # print('max a , max_b ', max_a, max_b, max_a/max_b) with sess.as_default(): #----------------------------------------------------------------------------- ## Build mesh-grid to hold pixel coordinates #----------------------------------------------------------------------------- X = tf.range(80, dtype=tf.int32) Y = tf.range(80, dtype=tf.int32) X, Y = tf.meshgrid(X, Y) # duplicate (repeat) X and Y into a batch_size x rois_per_image tensor print(' X/Y shapes :', X.get_shape(), Y.get_shape()) ones = tf.ones([1, 1, 1], dtype = tf.int32) rep_X = ones * X rep_Y = ones * Y print(' Ones: ', ones.shape) print(' ones_exp * X', ones.shape, '*', X.shape, '= ',rep_X.shape) print(' ones_exp * Y', ones.shape, '*', Y.shape, '= ',rep_Y.shape) # # stack the X and Y grids bef_pos = tf.to_float(tf.stack([rep_X,rep_Y], axis = -1)) print(' before transpse ', tf.shape(bef_pos).eval()) pos_grid = tf.transpose(bef_pos,[1,2,0,3]) print(' after transpose ', tf.shape(pos_grid).eval()) pt2_den = tf.constant([[10,10,30,70]], dtype = tf.float32) print(type(pt2_den)) #----------------------------------------------------------------------------- ## Build mean and convariance tensors for Multivariate Normal Distribution #----------------------------------------------------------------------------- width = pt2_den[:,3] - pt2_den[:,1] # x2 - x1 height = pt2_den[:,2] - pt2_den[:,0] print(width.eval(), type(width)) print(height.eval(), type(height)) cx = pt2_den[:,1] + tf.div( width , 2.0) 
cy = pt2_den[:,0] + ( height / 2.0) means = tf.stack((cx,cy),axis = -1) covar = tf.stack((width * 0.5 , height * 0.5), axis = -1) covar = tf.sqrt(covar) print(means.eval()) print(covar.eval()) tfd = tf.contrib.distributions mvn = tfd.MultivariateNormalDiag( loc = means, scale_diag = covar) prob_grid = mvn.prob(pos_grid) print(' Prob_grid shape before tanspose: ',prob_grid.get_shape()) prob_grid = tf.transpose(prob_grid,[2,0,1]) print(' Prob_grid shape after tanspose: ',prob_grid.get_shape()) print(' >> input to MVN.PROB: pos_grid (meshgrid) shape: ', pos_grid.get_shape()) print(' << output probabilities shape:' , prob_grid.get_shape()) ``` ## Plot Image with bounding boxes from `output_rois` ``` img_idx = 0 image_id = input_image_meta[img_idx,0] image = dataset_train.load_image(image_id) mask, class_ids = dataset_train.load_mask(image_id) bbox = utils.extract_bboxes(mask) log("image", image) log("mask", mask) log("class_ids", class_ids) log("bbox", bbox) # class_names = [str(dataset_train.class_names[class_id]) for class_id in class_ids] class_names = dataset_train.class_names # visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names) print('Classes : ', class_ids) print("image_id : ", image_id, ' Reference: ', dataset_train.image_reference(image_id)) print(' class_ids : ', class_ids.shape[0]) print(' bbox : ', bbox.shape[0]) print(' output_rois: : ', output_rois.shape) print(' Image id : ', image_id , ' Image meta', img_meta[img_idx,:10]) print(' Classes : ', [class_names[i] for i in class_ids]) print(' Image window : ', img_meta[0, 4:8]) print(' Image shape : ', image.shape) ``` #### Display `output_roi` without delta refinement ``` unormalized_rois = output_rois[img_idx] * [1024,1024,1024,1024] unrefined_rois = utils.boxes_to_image_domain(unormalized_rois, img_meta[0] ) visualize.draw_rois(image, unrefined_rois, target_class_ids[0], class_names, limit=5) #, random = True) ``` #### Display `output_rois` with after clipping to image 
boundaries ``` clipped_rois = utils.clip_to_window_np(img_meta[0, 4:8], unormalized_rois) clipped_rois = utils.boxes_to_image_domain(clipped_rois, img_meta[0] ) visualize.draw_rois(image, clipped_rois , target_class_ids[0], class_names, bbox_ids = [0,1,2]) # or , limit=2) ``` #### Displayt `output_rois` after applying `target_bbox_deltas` NOTE: MUST BE MULTIPLIED BY BBOX_STD_DEV ``` print(' Target_bbox_deltas: ',target_bbox_deltas.shape) ## 1- Apply Bounding Box Standard Deviation and apply to output_rois apply_deltas = target_bbox_deltas[img_idx] * mrcnn_config.BBOX_STD_DEV refined_rois = utils.apply_box_deltas_np(output_rois[img_idx], apply_deltas) print(' Refined ROIs shape: ',refined_rois.shape) # print(refined_rois[:20]) ## 3- Clip to image windoow boundaries: refined_rois = refined_rois * [1024,1024,1024,1024] refined_rois = utils.clip_to_window_np(img_meta[0, 4:8], refined_rois) ## 4- Transfer to image coordniates : refined_rois = utils.boxes_to_image_domain(refined_rois, img_meta[0] ) ## 5- Visualize visualize.draw_rois(image, refined_rois, target_class_ids[0], class_names,bbox_ids = [0,1,2], limit=5) ``` #### Apply predicted `mrcnn_bbox` delta refinements to `output_rois` and display ``` # Create un unormalized_rois = output_rois[img_idx] * [1024,1024,1024,1024] clipped_rois = utils.clip_to_window_np(img_meta[0, 4:8], unormalized_rois) unrefined_rois = utils.boxes_to_image_domain(clipped_rois, img_meta[0] ) ## 1- Extract predicted deltas from mrcnn_bbox classes, deltas = get_predicted_mrcnn_deltas(mrcnn_class, mrcnn_bbox, verbose = False) # print(classes.shape, deltas.shape) # print(classes[0,:20]) # print(deltas[0,:20]) ## 2- Apply Bounding Box Standard Deviation and apply to output_rois apply_deltas = deltas[0] * mrcnn_config.BBOX_STD_DEV refined_rois = utils.apply_box_deltas_np(output_rois[img_idx], apply_deltas) print(' Refined ROIs shape: ',refined_rois.shape) # print(refined_rois[:20]) ## 3- Clip to image windoow boundaries: refined_rois = 
refined_rois * [1024,1024,1024,1024] refined_rois = utils.clip_to_window_np(img_meta[0, 4:8], refined_rois) ## 4- Transfer to image coordniates : refined_rois = utils.boxes_to_image_domain(refined_rois, img_meta[0] ) # Visualize visualize.draw_rois(image, unrefined_rois, target_class_ids[0], class_names, limit=5) visualize.draw_rois(image, refined_rois, target_class_ids[0], class_names, limit=5) # visualize.draw_rois_with_refinements(image, unrefined_rois, refined_rois, target_class_ids[0], class_names, limit=10) ``` #### Display `image_gt_bboxes` provided by data generator ``` # Display image and instances # visualize.display_instances_with_mask(image, bbox, mask, class_ids, dataset_train.class_names) # print(input_gt_bboxes[0,:20]) gt_bboxes = utils.boxes_to_image_domain(input_gt_bboxes[0], img_meta[0] ) visualize.draw_rois(image, gt_bboxes[:20], input_gt_class_ids[0,:20], class_names, limit=10) ``` ## misc code #### sparse to dense ``` with sess.as_default(): tf_dense = tf.sparse_to_dense(pt2_ind), in_tensor.shape[:-1], 1,0) r_tf_dense = tf_dense.eval() print(r_tf_dense.shape) print(r_tf_dense[0]) ``` #### Display for visual check - `pred_heatmap_norm` is the final result from `build_heatmap()` ``` print(pred_heatmap_norm.shape) temp = pred_heatmap_norm np.set_printoptions(linewidth=150, threshold=10000, suppress=False) print(' Temp shape :', temp.shape) temp_sum = np.sum(temp,axis=(1,2)) print('temp_sum is ', temp_sum.shape) for i in range(5): for j in range(4): print('img ',i,' class ', j, ' sum:',temp_sum[i,j], ' max: ',np.max(temp[i,:,:,j]),' mean: ', np.mean(temp[i,:,:,j]),' min: ', np.min(temp[i,:,:,j])) # with sess.as_default(): np.set_printoptions(linewidth=150, precision=6, suppress=True) # print('scatter shape is ', pred_scatt.get_shape()) print('pred_heatmap_scores shape is ', pred_heatmap_scores.shape) for img in [0,1,2]: for k in range(4): print('Image ', img , '/ Class ',k,' ------------') print(np.min(pred_heatmap_scores[img,k,:,8])) 
print(pred_heatmap_scores[img,k]) np.set_printoptions(linewidth=150, precision=6) print('gt_heatmap_scores shape is ', gt_heatmap_scores.shape) img = 1 for k in range(4): print('Image ', img , '/ Class ',k,' ------------') print(gt_heatmap_scores[img,k]) sess = KB.get_session() with sess.as_default(): temp = fcn_heatmap np.set_printoptions(linewidth=150, threshold=10000) print(' output shapes :', temp.get_shape()) temp_sum = tf.reduce_sum(temp, [2,3]) temp_min = tf.reduce_min(temp, [2,3]) temp_max = tf.reduce_max(temp, [2,3]) temp_avg = tf.reduce_mean(temp, [2,3]) print('temp_sum is ', temp_sum.shape) for i in range(5): for j in range(4): print('img/cls ',i,'/', j,' sum:',temp_sum[i,j], 'min',temp_min[i,j] ,'max',temp_max[i,j] ,'avg',temp_avg[i,j]) ``` #### `byclas_to_byimage()` reshape tensor / numpy array from per_class to per image ``` def byclass_to_byimage_np(in_array, seqid_column): ''' convert a by class tensor shaped [batch_size, num_classes, num_bboxes, columns ] to a by image tensor shaped [batch_size, num_bboxes, columns] ''' # np_sum = np.sum(np.abs(model_gt_heatmap_scores[:,:,:,0:4]), axis=-1) # print(np_sum.shape) # a,b,c = np.where(np_sum > 0) a,b,c = np.where(in_array[...,seqid_column]>0) output = np.zeros((in_array.shape[0],in_array.shape[-2],in_array.shape[-1])) # print(' output shape is ',output.shape) # print(a.shape, b.shape,c.shape) for img, cls , box in zip(a, b,c): # print( img,cls, box, 200 - in_array[img, cls, box,6].astype(int)) output[img, 200 - in_array[img, cls, box,6].astype(int)] = in_array[img, cls, box] return output def byclass_to_byimage_tf(in_array, seqid_column): ''' convert a by class tensor shaped [batch_size, num_classes, num_bboxes, columns ] to a by image tensor shaped [batch_size, num_bboxes, columns] ''' aa = tf.reshape(in_array, [in_array.shape[0], -1, in_array.shape[-1]]) _ , sort_inds = tf.nn.top_k(tf.abs(aa[:,:,seqid_column]), k= in_array.shape[2]) batch_grid, bbox_grid = tf.meshgrid(tf.range(in_array.shape[0]), 
tf.range(in_array.shape[2]),indexing='ij') gather_inds = tf.stack([batch_grid, sort_inds],axis = -1) output = tf.gather_nd(aa, gather_inds ) return output ``` #### Try `byclass_to_byimage()` on `gt_heatmap_scores` ``` # with sess.as_default(): np.set_printoptions(linewidth=150, precision=6) # print(gt_heatmap_scores.shape) # outp = byclass_to_byimage_tf(gt_heatmap_scores,6) # with sess.as_default(): # r_outp = outp.eval() # print(r_outp.shape) # print(r_outp[0]) # print(r_outp[1]) # print(tf_model_pred_heatmap_scores.shape, tf_model_pred_heatmap_scores) # outp = byclass_to_byimage_tf(tf_model_pred_heatmap_scores,6) # with sess.as_default(): # r_outp = outp.eval() # print(r_outp.shape) # print(r_outp[0]) # print(r_outp[1]) ``` #### Try `byclass_to_byimage()` on `pred_heatmap_scores` ``` np.set_printoptions(linewidth=150, precision=6) tf_model_pred_heatmap_scores = tf.constant(model_pred_heatmap_scores) print('pred_heatmap_scores shape is ', pred_heatmap_scores.shape ) print('pred_heatmap_scores from model is :', tf_model_pred_heatmap_scores.shape,tf_model_pred_heatmap_scores) r_out2 = byclass_to_byimage_np(pred_heatmap_scores,6) with sess.as_default(): r_out1 = byclass_to_byimage_tf(tf_model_pred_heatmap_scores,6).eval() for img in range(2): class_ids = np.unique(pred_heatmap_scores[img,:,:,4]).astype(int).tolist() print('Classids: ', class_ids) print('Image ', img ,' ------------') for j in range(200): print('tf: ',r_out1[img,j]) print('np: ',r_out2[img,j]) print() ``` #### ground work for writing `byclass_to_by_image()` ``` # print(pred_heatmap_scores.shape) # gt_heatmap_scores = tf.identity(model_gt_heatmap_scores) # aa = tf.reshape(gt_heatmap_scores, [gt_heatmap_scores.shape[0], -1, gt_heatmap_scores.shape[-1]]) # _ , sort_inds = tf.nn.top_k(tf.abs(aa[:,:,6]), k=gt_heatmap_scores.shape[2]) # print(sort_inds.shape) # batch_grid, bbox_grid = tf.meshgrid(tf.range(batch_size), tf.range(gt_heatmap_scores.shape[2]),indexing='ij') # gather_inds = tf.stack([batch_grid, 
sort_inds],axis = -1) # print(aa.shape) # print(bb.shape) # cc = tf.gather_nd(aa, gather_inds ) # print('cc : ',cc.shape) # with sess.as_default(): # # r_pred_heatmap_scores = gauss_scores.eval() # r_aa = aa.eval() # r_sort_inds = sort_inds.eval() # r_gather_inds = gather_inds.eval() # # r_bb = bb.eval() # r_cc = cc.eval() # # r_dd = dd.eval() # # print(r_pred_heatmap_scores[0,1]) # print('cc: ',r_cc.shape) # print('bb: ',r_bb.shape) # print('aa: ',r_aa.shape) # # print(r_sort_inds) # print(r_gather_inds) # # print(r_bb) # print(r_cc[0]) print(r_cc[1]) ``` #### Convert `pred_heatmap_scores` using `byclass_to_byimage_np` ``` # with sess.as_default(): np.set_printoptions(linewidth=150, precision=6) # print('scatter shape is ', pred_scatt.get_shape()) print(model_pred_heatmap_scores.shape) print(model_pred_heatmap_scores[0,0,0]) outp = byclass_to_byimage_np(model_pred_heatmap_scores,6) print(outp[0]) ``` #### Convert `gt_heatmap_scores` using `byclass_to_byimage_np` ``` # with sess.as_default(): np.set_printoptions(linewidth=150, precision=6) # print('pred_heatmap_scores shape is ', gt_heatmap_scores.shape ) print('pred_heatmap_scores from model is :', model_gt_heatmap_scores.shape) print(model_gt_heatmap_scores[0,1]) # with sess.as_default(): # r_pred_tensor = pred_tensor.eval() # for img in range(2): # class_ids = np.unique(model_gt_heatmap_scores[img,:,:,4]).astype(int).tolist() # print('Classids: ', class_ids) # for i in class_ids: # print('Image ', img , '/ Class ',i,' ------------') # for j in range(200): # print(gt_heatmap_scores[img,i,j]) # print(model_gt_heatmap_scores[img,i,j]) # # print(pred_refined_tensor[img,i,j]) # print() outp = byclass_to_byimage_np(model_gt_heatmap_scores,6) print(outp[0]) ``` #### Display for visual check - `pred_heatmap_scores` ``` # with sess.as_default(): np.set_printoptions(linewidth=150, precision=6) # print('scatter shape is ', pred_scatt.get_shape()) print('pred_heatmap_scores shape is ', pred_heatmap_scores.shape) img = 0 for 
k in range(4): print('Image ', img , '/ Class ',k,' ------------') print(pred_heatmap_scores[img,k]) ``` #### Display for visual check - `pred_heatmap_norm` ``` print(pred_heatmap_norm.shape) temp = pred_heatmap_norm np.set_printoptions(linewidth=150, threshold=10000) print(' Temp shape :', temp.shape) temp_sum = np.sum(temp,axis=(1,2)) print('temp_sum is ', temp_sum.shape) for i in range(5): for j in range(4): print('img ',i,' class ', j, ' sum:',temp_sum[i,j]) ```
github_jupyter
# Autoregressive Moving Average (ARMA): Sunspots data ``` %matplotlib inline import numpy as np from scipy import stats import pandas as pd import matplotlib.pyplot as plt import statsmodels.api as sm from statsmodels.graphics.api import qqplot ``` ## Sunspots Data ``` print(sm.datasets.sunspots.NOTE) dta = sm.datasets.sunspots.load_pandas().data dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008')) del dta["YEAR"] dta.plot(figsize=(12,8)); fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2) arma_mod20 = sm.tsa.ARMA(dta, (2,0)).fit(disp=False) print(arma_mod20.params) arma_mod30 = sm.tsa.ARMA(dta, (3,0)).fit(disp=False) print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic) print(arma_mod30.params) print(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic) ``` * Does our model obey the theory? ``` sm.stats.durbin_watson(arma_mod30.resid.values) fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax = arma_mod30.resid.plot(ax=ax); resid = arma_mod30.resid stats.normaltest(resid) fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) fig = qqplot(resid, line='q', ax=ax, fit=True) fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(resid.values.squeeze(), lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2) r,q,p = sm.tsa.acf(resid.values.squeeze(), fft=True, qstat=True) data = np.c_[range(1,41), r[1:], q, p] table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"]) print(table.set_index('lag')) ``` * This indicates a lack of fit. * In-sample dynamic prediction. How good does our model do? 
``` predict_sunspots = arma_mod30.predict('1990', '2012', dynamic=True) print(predict_sunspots) fig, ax = plt.subplots(figsize=(12, 8)) ax = dta.loc['1950':].plot(ax=ax) fig = arma_mod30.plot_predict('1990', '2012', dynamic=True, ax=ax, plot_insample=False) def mean_forecast_err(y, yhat): return y.sub(yhat).mean() mean_forecast_err(dta.SUNACTIVITY, predict_sunspots) ``` ### Exercise: Can you obtain a better fit for the Sunspots model? (Hint: sm.tsa.AR has a method select_order) ### Simulated ARMA(4,1): Model Identification is Difficult ``` from statsmodels.tsa.arima_process import ArmaProcess np.random.seed(1234) # include zero-th lag arparams = np.array([1, .75, -.65, -.55, .9]) maparams = np.array([1, .65]) ``` Let's make sure this model is estimable. ``` arma_t = ArmaProcess(arparams, maparams) arma_t.isinvertible arma_t.isstationary ``` * What does this mean? ``` fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(arma_t.generate_sample(nsample=50)); arparams = np.array([1, .35, -.15, .55, .1]) maparams = np.array([1, .65]) arma_t = ArmaProcess(arparams, maparams) arma_t.isstationary arma_rvs = arma_t.generate_sample(nsample=500, burnin=250, scale=2.5) fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(arma_rvs, lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(arma_rvs, lags=40, ax=ax2) ``` * For mixed ARMA processes the Autocorrelation function is a mixture of exponentials and damped sine waves after (q-p) lags. * The partial autocorrelation function is a mixture of exponentials and dampened sine waves after (p-q) lags. 
``` arma11 = sm.tsa.ARMA(arma_rvs, (1,1)).fit(disp=False) resid = arma11.resid r,q,p = sm.tsa.acf(resid, fft=True, qstat=True) data = np.c_[range(1,41), r[1:], q, p] table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"]) print(table.set_index('lag')) arma41 = sm.tsa.ARMA(arma_rvs, (4,1)).fit(disp=False) resid = arma41.resid r,q,p = sm.tsa.acf(resid, fft=True, qstat=True) data = np.c_[range(1,41), r[1:], q, p] table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"]) print(table.set_index('lag')) ``` ### Exercise: How good of in-sample prediction can you do for another series, say, CPI ``` macrodta = sm.datasets.macrodata.load_pandas().data macrodta.index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3')) cpi = macrodta["cpi"] ``` #### Hint: ``` fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax = cpi.plot(ax=ax); ax.legend(); ``` P-value of the unit-root test, resoundingly rejects the null of a unit-root. ``` print(sm.tsa.adfuller(cpi)[1]) ```
github_jupyter
# 数据迭代 `Ascend` `GPU` `CPU` `数据准备` [![下载样例代码](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_download_code.png)](https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/master/programming_guide/zh_cn/mindspore_dataset_usage.py)&emsp;[![下载Notebook](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_notebook.png)](https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/master/programming_guide/zh_cn/mindspore_dataset_usage.ipynb)&emsp;[![查看源文件](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_source.png)](https://gitee.com/mindspore/docs/blob/master/docs/mindspore/programming_guide/source_zh_cn/dataset_usage.ipynb) ## 概述 原始数据集通过数据集加载接口读取到内存,再通过数据增强操作进行数据变换,得到的数据集对象有两种常规的数据迭代方法: - 创建迭代器进行数据迭代。 - 传入Model接口(如`model.train`、`model.eval`等)进行迭代训练或推理。 ## 创建迭代器进行数据迭代 数据集对象通常可以创建两种不同的迭代器来遍历数据,分别为元组迭代器和字典迭代器。 创建元组迭代器的接口为`create_tuple_iterator`,创建字典迭代器的接口为`create_dict_iterator`,具体使用方法如下。 首先,任意创建一个数据集对象作为演示说明。 ``` import mindspore.dataset as ds np_data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] dataset = ds.NumpySlicesDataset(np_data, column_names=["data"], shuffle=False) ``` 则可使用以下方法创建数据迭代器。 ``` # 创建元组迭代器 print("\n create tuple iterator") for item in dataset.create_tuple_iterator(): print("item:\n", item[0]) # 创建字典迭代器 print("\n create dict iterator") for item in dataset.create_dict_iterator(): print("item:\n", item["data"]) # 直接遍历数据集对象(等同于创建元组迭代器) print("\n iterate dataset object directly") for item in dataset: print("item:\n", item[0]) # 使用enumerate方式遍历(等同于创建元组迭代器) print("\n iterate dataset using enumerate") for index, item in enumerate(dataset): print("index: {}, item:\n {}".format(index, item[0])) ``` 此外,如果需要产生多个Epoch的数据,可以相应地调整入参`num_epochs`的取值。相比于多次调用迭代器接口,直接设置Epoch数可以提高数据迭代的性能。 ``` # 创建元组迭代器产生2个Epoch的数据 epoch = 2 iterator = dataset.create_tuple_iterator(num_epochs=epoch) for i in range(epoch): print("epoch: ", i) for item in iterator: print("item:\n", item[0]) ``` 
迭代器默认输出的数据类型为`mindspore.Tensor`,如果希望得到`numpy.ndarray`类型的数据,可以设置入参`output_numpy=True`。 ``` # 默认输出类型为mindspore.Tensor for item in dataset.create_tuple_iterator(): print("dtype: ", type(item[0]), "\nitem:", item[0]) # 设置输出类型为numpy.ndarray for item in dataset.create_tuple_iterator(output_numpy=True): print("dtype: ", type(item[0]), "\nitem:", item[0]) ``` 更详细的说明,请参考[create_tuple_iterator](https://www.mindspore.cn/docs/api/zh-CN/master/api_python/dataset/mindspore.dataset.NumpySlicesDataset.html#mindspore.dataset.NumpySlicesDataset.create_tuple_iterator) 和[create_dict_iterator](https://www.mindspore.cn/docs/api/zh-CN/master/api_python/dataset/mindspore.dataset.NumpySlicesDataset.html#mindspore.dataset.NumpySlicesDataset.create_dict_iterator)的API文档。 ## 传入Model接口进行迭代训练或推理 数据集对象创建后,可通过传入`Model`接口,由接口内部进行数据迭代,并送入网络执行训练或推理。 ``` import numpy as np from mindspore import ms_function from mindspore import context, nn, Model import mindspore.dataset as ds import mindspore.ops as ops def create_dataset(): np_data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] np_data = np.array(np_data, dtype=np.float16) dataset = ds.NumpySlicesDataset(np_data, column_names=["col1"], shuffle=False) return dataset class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.relu = ops.ReLU() self.print = ops.Print() @ms_function def construct(self, x): self.print(x) return self.relu(x) if __name__ == "__main__": # it is supported to run in CPU, GPU or Ascend context.set_context(mode=context.GRAPH_MODE) dataset = create_dataset() network = Net() model = Model(network) # do training, sink to device defaultly model.train(epoch=1, train_dataset=dataset, dataset_sink_mode=True) ``` Model接口中的`dataset_sink_mode`参数用于设置是否将数据下沉到Device。若设置为不下沉,则内部会创建上述迭代器,逐条遍历数据并送入网络;若设置为下沉,则内部会将数据直接发送给Device,并送入网络进行迭代训练或推理。 更加详细的使用方法,可参见[Model基本使用](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/model_use_guide.html#id3)。
github_jupyter
``` %load_ext autoreload %autoreload 2 import os from os import listdir from os.path import isfile, join import numpy as np import matplotlib.pyplot as plt import madmom import sys sys.path.append('../src') from preprocessing import get_dataset, load_rhythm_feature_db from models import OLSPatchRegressor from utils import cv import visualize import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Lambda from keras.layers import Conv2D, MaxPooling2D from keras import backend as K from keras.models import Model from sklearn.metrics import log_loss MUSIC = 1 SPEECH = 0 na = np.newaxis plt.rc('text', usetex=True) plt.rc('font', family='serif') # --------------- FLAGS ---------- DATA = "SPECTRO" # "SPECTRO" MODEL = "Linear" # "Linear" assert DATA in ["RHYTHM", "SPECTRO"] assert MODEL in ["CNN", "Linear"] music_dir = '../data/music_speech/music_wav/' speech_dir = '../data/music_speech/speech_wav/' def get_spectro_data(): max_samples = -1 X, Y = get_dataset(music_dir, speech_dir, hpool=0, wpool=0, num_samples=max_samples, shuffle=True, reload=False, window=np.hanning, fps=100, num_bands=3, fmin=30, fmax=17000, fft_sizes=[1024, 2048, 4096] ) print('Train Set Shape') print(X.shape, Y.shape) Y = (Y + 1) / 2 return X, Y def get_rhythm_data(): X, Y = load_rhythm_feature_db(music_dir, speech_dir, num_samples=-1) # change -1, 1 labels to 0,1 Y = (Y + 1) / 2 # X is in (N,L,D) format X = X[:,na,:,:] # dont conv over the number of models return X, Y X, Y = get_rhythm_data() if DATA == "RHYTHM" else get_spectro_data() batch_size = 8 num_classes = 2 epochs = 50 # -------------------------------------------- num_frequencies = X.shape[1] num_timesteps = X.shape[2] num_channels = X.shape[3] filter_time_size = 3 input_shape = num_frequencies, num_timesteps, num_channels def reset_weights(model): session = K.get_session() for layer in model.layers: if hasattr(layer, 'kernel_initializer'): layer.kernel.initializer.run(session=session) CNN = None 
def get_cnn(input_shape=(input_shape), reinit=False):
    """Build the CNN classifier once and cache it in the module-level `CNN`.

    On subsequent calls the cached model is returned with its weights
    re-initialized, which avoids rebuilding the graph for every CV fold.

    NOTE(review): the `input_shape` default binds the module-level value at
    definition time, and `reinit` is never read — re-initialization always
    happens on cache hits.
    """
    global CNN
    if CNN is None:
        # DEFINE MODEL
        model = Sequential()
        model.add(MaxPooling2D(pool_size=(1, 3), input_shape=input_shape))
        model.add(Conv2D(32, kernel_size=(num_frequencies, filter_time_size), activation='relu'))
        model.add(Conv2D(1, kernel_size=(1, 1), activation='sigmoid'))
        # Average the per-timestep sigmoid outputs into one score per sample.
        model.add(Lambda(lambda x: K.mean(x, axis=[1,2])))
        model.compile(loss=keras.losses.binary_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])
        CNN = model
        return model
    else:
        model = CNN
        reset_weights(model)
        return model

# Module-level cache for the linear model (built lazily in get_linear).
LINEAR = None

def get_linear(input_shape=(input_shape), reinit=False):
    """Build the linear (single-convolution) classifier once and cache it in
    the module-level `LINEAR`; cached calls return it with reset weights.

    NOTE(review): same caveats as get_cnn — early-bound `input_shape` default,
    unused `reinit` parameter.
    """
    global LINEAR
    if LINEAR is None:
        # DEFINE MODEL
        model = Sequential()
        model.add(Conv2D(1, kernel_size=(num_frequencies, filter_time_size),
                         activation='sigmoid', input_shape=input_shape))
        model.add(Lambda(lambda x: K.mean(x, axis=[1,2])))
        model.compile(loss=keras.losses.binary_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])
        LINEAR = model
        return model
    else:
        model = LINEAR
        reset_weights(model)
        return model

# Select the model factory according to the MODEL flag.
get_model = get_cnn if MODEL == "CNN" else get_linear

# cross validation
train_model = lambda model, X, Y: model.fit(X, Y, batch_size=batch_size, epochs=epochs, verbose=0)

# evaluate using cross-validation on training set
cvacc = cv(X, Y, get_model, train_model, nfolds=5, nrepetitions=1)
print('CV loss:', cvacc[0])
print('CV accuracy:', cvacc[1])

split = 100
Xtrain, Ytrain, Xtest, Ytest = X[:split], Y[:split], X[split:], Y[split:]

# evaluate using train-test split
model = get_model()
train_model(model, Xtrain, Ytrain)
score = model.evaluate(Xtest, Ytest, verbose=0)

# Persist the trained model, tagging the filename with its test accuracy.
model_path = '../models/keras/'
os.makedirs(model_path, exist_ok=True)
model.save(os.path.join(model_path, '{}_on_{}-{:2.2f}.h5'.format(MODEL, DATA, score[1])))

# Pick one music and one speech example from the test set for visualization.
music = Xtest[Ytest == MUSIC][0]
speech = Xtest[Ytest == SPEECH][1]
visualize.prediction_over_time(music, speech, model)

# For the original model, estimate the probability of a correct
# classification using Chebyshev's inequality.
X_p, Y_p, X_n, Y_n = Xtest[Ytest>0], Ytest[Ytest>0], Xtest[Ytest<=0], Ytest[Ytest<=0]

# Tap the model just before the final averaging layer to get per-timestep scores.
time_model = Model(inputs=model.input, outputs=model.layers[-2].output)
prediction_p = time_model.predict(X_p)[:,0,:,0]
prediction_n = time_model.predict(X_n)[:,0,:,0]

# Mean score and mean per-sample variance for each class.
mean_p, var_p = np.mean(prediction_p), np.mean(np.var(prediction_p, axis=1))
mean_n, var_n = np.mean(prediction_n), np.mean(np.var(prediction_n, axis=1))

# Bessel correction for the sample variances.
var_p *= (len(Y_p)/(len(Y_p)-1))
var_n *= (len(Y_n)/(len(Y_n)-1))

print("var p, var n", var_p, ", ", var_n)
print("mean of positive samples", mean_p)
print("mean of negative samples", mean_n)

def p_wrong_negative(num_timestamps):
    """Chebyshev upper bound (capped at 0.5) on the probability that the
    averaged score of a positive sample falls below the 0.5 threshold,
    given `num_timestamps` averaged timesteps."""
    # p(prediction is negative but sample is positive)
    diff = mean_p - 0.5
    std = np.sqrt(var_p/num_timestamps)
    k = diff/std
    return min(1/k/k, 0.5)

def p_wrong_positive(num_timestamps):
    """Chebyshev upper bound (capped at 0.5) on the probability that a
    negative sample's averaged score exceeds the 0.5 threshold."""
    diff = 0.5 - mean_n
    std = np.sqrt(var_n/num_timestamps)
    k = diff/std
    return min(1/k/k, 0.5)

# Convert timestep counts to seconds (clips are 30s long) and plot both bounds.
s_per_timestamp = 30/X.shape[2]
time = np.arange(0, 10, s_per_timestamp*5)
max_t = len(time)
plt.plot(time, [p_wrong_negative(i*5) for i in range(max_t)], label="Wrong negative")
plt.plot(time, [p_wrong_positive(i*5) for i in range(max_t)], ".", label="Wrong positive")
plt.legend()
plt.show()

""" We see that the predicted accuracy goes down way too fast (should have
almost 100% accuracy for 10s audio files). The difference may be explained by
the fact that timestamps label estimates of one sample are not iid drawn from
the distribution of timestamps of the respective class.
"""

# Visualize
if MODEL == "LINEAR":
    if DATA == "SPECTRO":
        # visualize filters
        pass
    else:
        # plot histogram of sum(abs(weights)) for different channels
        pass

class TimestampAggregator():
    """Wraps the base model and aggregates its per-timestep predictions.

    Instead of plain averaging, it computes the (mean, variance) of the
    per-timestep scores for each sample and fits a least-squares weight
    vector `w` mapping those two statistics to the label.
    """

    def __init__(self):
        # Underlying classifier plus a tap before its final averaging layer.
        self.model = get_model()
        self.time_model = Model(inputs=self.model.input, outputs=self.model.layers[-2].output)

    def fit(self, X, Y, *args, **kwargs):
        """Train the base model, then fit the (mean, var) -> label weights."""
        self.model.fit(X, Y, *args, **kwargs)
        p = self.time_model.predict(X)[:,0,:,0] # (samples, timestamps)
        mean = np.mean(p, axis=1)
        var = np.var(p, axis=1)
        a = np.array([mean, var]).T # (samples, 2)
        # Least-squares solve of a @ w ~= Y, i.e. w = (a^T a)^-1 a^T Y.
        self.w = np.linalg.lstsq(a, Y)[0]
        # for analysis purpose
        self.avg_mean_pos = np.mean(mean[Y>0])
        self.avg_mean_neg = np.mean(mean[Y<=0])
        self.avg_var_pos = np.mean(var[Y>0])
        self.avg_var_neg = np.mean(var[Y<=0])

    def predict(self, X):
        """Return the weighted combination of per-sample (mean, var) scores."""
        p = self.time_model.predict(X)[:,0,:,0] # (samples, timestamps)
        mean = np.mean(p, axis=1)
        var = np.var(p, axis=1)
        a = np.array([mean, var]).T # (samples, 2)
        return a.dot(self.w)

    def evaluate(self, X, Y, *args, **kwargs):
        """Return (log loss, accuracy at the 0.5 threshold)."""
        Y_ = self.predict(X)
        return log_loss(Y, Y_), np.mean((Y_>0.5)==(Y>0.5))

def init_mv_model():
    """Factory for cross-validation: a fresh mean/variance aggregator."""
    return TimestampAggregator()

# Cross-validate the aggregated model.
cvacc = cv(X, Y, init_mv_model, train_model, nfolds=5, nrepetitions=1)
print('CV loss:', cvacc[0])
print('CV accuracy:', cvacc[1])

# Train once on a fixed split to inspect the fitted weights.
mv_model = init_mv_model()
split = 100
Xtrain, Ytrain, Xtest, Ytest = X[:split], Y[:split], X[split:], Y[split:]
mv_model.fit(Xtrain, Ytrain, batch_size=batch_size, epochs=epochs)

# NOTE(review): "constributes" typos below are in runtime strings, left as-is.
print('Weight of mean over timestamps', mv_model.w[0])
print('Weight of var over timestamps', mv_model.w[1])
print('Positive samples: mean, var = ', mv_model.avg_mean_pos, ",", mv_model.avg_var_pos)
print('Negative samples: mean, var = ', mv_model.avg_mean_neg, ",", mv_model.avg_var_neg)
print('For positive audios, mean constributes on average', mv_model.avg_mean_pos*mv_model.w[0])
print('For positive audios, var constributes on average', mv_model.avg_var_pos*mv_model.w[1])
print('For negative audios, mean constributes on average', mv_model.avg_mean_neg*mv_model.w[0])
print('For negative audios, var constributes on average', mv_model.avg_var_neg*mv_model.w[1])
```
github_jupyter
CER003 - Upload existing Root CA certificate
============================================

Use this notebook to upload a Root CA certificate to a cluster that was downloaded to this machine using:

- [CER002 - Download existing Root CA certificate](../cert-management/cer002-download-existing-root-ca.ipynb)

If needed, use these notebooks to view and set the Kubernetes configuration context appropriately to enable downloading the Root CA from a Big Data Cluster in one Kubernetes cluster, and to upload it to a Big Data Cluster in another Kubernetes cluster.

- [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb)
- [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb)

Steps
-----

### Parameters

```
local_folder_name = "mssql-cluster-root-ca"
test_cert_store_root = "/var/opt/secrets/test-certificates"
```

### Common functions

Define helper functions used in this notebook.

```
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime

from subprocess import Popen, PIPE
from IPython.display import Markdown

retry_hints = {}   # Output in stderr known to be transient, therefore automatically retry
error_hints = {}   # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {}  # The SOP to help install the executable if it cannot be found

first_run = True       # guards one-time loading of the expert rules
rules = None           # expert rules loaded from this notebook's metadata
debug_logging = False

def run(cmd, return_output=False, no_output=False, retry_count=0):
    """Run shell command, stream stdout, print stderr and optionally return output.

    Retries (recursively, up to MAX_RETRIES) when stderr matches a known
    transient-fault hint, and displays HINT links to follow-on notebooks when
    stderr matches a known error hint or expert rule.

    NOTES:

    1. Commands that need this kind of ' quoting on Windows e.g.:

           kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}

       Need to actually pass in as '"':

           kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}

       The ' quote approach, although correct when pasting into Windows cmd,
       will hang at the line: `iter(p.stdout.readline, b'')`

       The shlex.split call does the right thing for each platform, just use
       the '"' pattern for a '
    """
    MAX_RETRIES = 5
    output = ""
    retry = False

    global first_run
    global rules

    if first_run:
        first_run = False
        rules = load_rules()

    # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
    #
    #   ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
    #
    if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
        cmd = cmd.replace("\n", " ")

    # shlex.split is required on bash and for Windows paths with spaces
    #
    cmd_actual = shlex.split(cmd)

    # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
    #
    user_provided_exe_name = cmd_actual[0].lower()

    # When running python, use the python in the ADS sandbox ({sys.executable})
    #
    if cmd.startswith("python "):
        cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)

        # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
        # with:
        #
        #   UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
        #
        # Setting it to a default value of "en_US.UTF-8" enables pip install to complete
        #
        if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
            os.environ["LC_ALL"] = "en_US.UTF-8"

    # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
    #
    if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
        cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")

    # To aid supportability, determine which binary file will actually be executed on the machine
    #
    which_binary = None

    # Special case for CURL on Windows.  The version of CURL in Windows System32 does not work to
    # get JWT tokens, it returns "(56) Failure when receiving data from the peer".  If another instance
    # of CURL exists on the machine use that one.  (Unfortunately the curl.exe in System32 is almost
    # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
    # look for the 2nd installation of CURL in the path)
    if platform.system() == "Windows" and cmd.startswith("curl "):
        path = os.getenv('PATH')
        for p in path.split(os.path.pathsep):
            p = os.path.join(p, "curl.exe")
            if os.path.exists(p) and os.access(p, os.X_OK):
                if p.lower().find("system32") == -1:
                    cmd_actual[0] = p
                    which_binary = p
                    break

    # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
    # seems to be required for .msi installs of azdata.cmd/az.cmd.  (otherwise Popen returns FileNotFound)
    #
    # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
    #
    if which_binary == None:
        which_binary = shutil.which(cmd_actual[0])

    if which_binary == None:
        if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
            display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))

        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
    else:
        cmd_actual[0] = which_binary

    start_time = datetime.datetime.now().replace(microsecond=0)

    print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
    print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
    print(f" cwd: {os.getcwd()}")

    # Command-line tools such as CURL and AZDATA HDFS commands output
    # scrolling progress bars, which causes Jupyter to hang forever, to
    # workaround this, use no_output=True
    #
    # Work around an infinite hang when a notebook generates a non-zero return code, break out, and do not wait
    #
    wait = True

    try:
        if no_output:
            p = Popen(cmd_actual)
        else:
            p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
            with p.stdout:
                for line in iter(p.stdout.readline, b''):
                    line = line.decode()
                    if return_output:
                        output = output + line
                    else:
                        if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
                            regex = re.compile(' "(.*)"\: "(.*)"')
                            match = regex.match(line)
                            if match:
                                if match.group(1).find("HTML") != -1:
                                    display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
                                else:
                                    display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))

                                    wait = False
                                    break # otherwise infinite hang, have not worked out why yet.
                        else:
                            print(line, end='')
                            if rules is not None:
                                apply_expert_rules(line)

        if wait:
            p.wait()
    except FileNotFoundError as e:
        if install_hint is not None:
            display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))

        raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e

    exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()

    if not no_output:
        for line in iter(p.stderr.readline, b''):
            try:
                line_decoded = line.decode()
            except UnicodeDecodeError:
                # NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
                #
                #   \xa0
                #
                # For example see this in the response from `az group create`:
                #
                # ERROR: Get Token request returned http error: 400 and server
                # response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
                # The refresh token has expired due to inactivity.\xa0The token was
                # issued on 2018-10-25T23:35:11.9832872Z
                #
                # which generates the exception:
                #
                # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
                #
                print("WARNING: Unable to decode stderr line, printing raw bytes:")
                print(line)
                line_decoded = ""
                pass
            else:
                # azdata emits a single empty line to stderr when doing an hdfs cp, don't
                # print this empty "ERR:" as it confuses.
                #
                if line_decoded == "":
                    continue

                print(f"STDERR: {line_decoded}", end='')

                if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
                    exit_code_workaround = 1

                # inject HINTs to next TSG/SOP based on output in stderr
                #
                if user_provided_exe_name in error_hints:
                    for error_hint in error_hints[user_provided_exe_name]:
                        if line_decoded.find(error_hint[0]) != -1:
                            display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))

                # apply expert rules (to run follow-on notebooks), based on output
                #
                if rules is not None:
                    apply_expert_rules(line_decoded)

                # Verify if a transient error, if so automatically retry (recursive)
                #
                if user_provided_exe_name in retry_hints:
                    for retry_hint in retry_hints[user_provided_exe_name]:
                        if line_decoded.find(retry_hint) != -1:
                            if retry_count < MAX_RETRIES:
                                print(f"RETRY: {retry_count} (due to: {retry_hint})")
                                retry_count = retry_count + 1
                                output = run(cmd, return_output=return_output, retry_count=retry_count)

                                if return_output:
                                    return output
                                else:
                                    return

    elapsed = datetime.datetime.now().replace(microsecond=0) - start_time

    # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
    # don't wait here, if success known above
    #
    if wait:
        if p.returncode != 0:
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
    else:
        if exit_code_workaround !=0 :
            raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')

    print(f'\nSUCCESS: {elapsed}s elapsed.\n')

    if return_output:
        return output

def load_json(filename):
    """Load a json file from disk and return the contents"""
    with open(filename, encoding="utf8") as json_file:
        return json.load(json_file)

def load_rules():
    """Load any 'expert rules' from the metadata of this notebook (.ipynb) that
    should be applied to the stderr of the running executable"""
    try:
        # Load this notebook as json to get access to the expert rules in the notebook metadata.
        #
        j = load_json("cer003-upload-existing-root-ca.ipynb")
    except:
        pass # If the user has renamed the book, we can't load ourself.  NOTE: Is there a way in Jupyter, to know your own filename?
    else:
        if "metadata" in j and \
           "azdata" in j["metadata"] and \
           "expert" in j["metadata"]["azdata"] and \
           "rules" in j["metadata"]["azdata"]["expert"]:
            rules = j["metadata"]["azdata"]["expert"]["rules"]
            rules.sort() # Sort rules, so they run in priority order (the [0] element).  Lowest value first.
            # print (f"EXPERT: There are {len(rules)} rules to evaluate.")
            return rules

def apply_expert_rules(line):
    """Determine if the stderr line passed in, matches the regular expressions for
    any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run"""
    global rules

    for rule in rules:
        # rules that have 9 elements are the injected (output) rules (the ones we want).  Rules
        # with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029,
        # not ../repair/tsg029-nb-name.ipynb)
        if len(rule) == 9:
            notebook = rule[1]
            cell_type = rule[2]
            output_type = rule[3]        # i.e. stream or error
            output_type_name = rule[4]   # i.e. ename or name
            output_type_value = rule[5]  # i.e. SystemExit or stdout
            details_name = rule[6]       # i.e. evalue or text
            expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!

            if debug_logging:
                print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")

            if re.match(expression, line, re.DOTALL):
                if debug_logging:
                    print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))

                # NOTE(review): match_found is assigned but never read.
                match_found = True

                display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))

print('Common functions defined successfully.')

# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['no such host', 'TSG011 - Restart sparkhistory server', '../repair/tsg011-restart-sparkhistory-server.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}
```

### Get the Kubernetes namespace for the big data cluster

Get the namespace of the Big Data Cluster use the kubectl command line interface .

**NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either:

- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
    namespace = os.environ["AZDATA_NAMESPACE"]
else:
    try:
        # First BDC namespace found wins; see the NOTE above for multi-cluster setups.
        namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
    except:
        from IPython.display import Markdown
        print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
        display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
        raise

print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
```

### Get name of the ‘Running’ `controller` `pod`

```
# Place the name of the 'Running' controller pod in variable `controller`
controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True)

print(f"Controller pod name: {controller}")
```

### Set temporary folder to hold Root CA certificate

```
import os
import tempfile

# Local staging folder the CA files were downloaded to (by CER002).
path = os.path.join(tempfile.gettempdir(), local_folder_name)
```

### Create folder on `controller` to hold Root CA certificate

```
run(f'kubectl exec {controller} -n {namespace} -c controller -- bash -c "mkdir -p {test_cert_store_root}" ')
```

### Copy Root CA certificate to `controller` `pod`

```
import os

cwd = os.getcwd()
os.chdir(path) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line

# Upload both the certificate and its private key into the controller's test cert store.
run(f'kubectl cp cacert.pem {controller}:{test_cert_store_root}/cacert.pem -c controller -n {namespace}')
run(f'kubectl cp cakey.pem {controller}:{test_cert_store_root}/cakey.pem -c controller -n {namespace}')

os.chdir(cwd)
```

### Delete the temporary folder holding the Root CA certificate

```
import shutil

# Remove the local copies now that they are on the controller.
shutil.rmtree(path)

print('Notebook execution complete.')
```

Related
-------

- [CER001 - Generate a Root CA certificate](../cert-management/cer001-create-root-ca.ipynb)
- [CER002 - Download existing Root CA certificate](../cert-management/cer002-download-existing-root-ca.ipynb)
- [CER010 - Install generated Root CA locally](../cert-management/cer010-install-generated-root-ca-locally.ipynb)
github_jupyter
This notebook demonstrates the result of the first round of data collection, collected in the San Francisco Bay Area by @shankari. The round had several shortcomings, some of which were addressed during the data collection and some of which were fixed before starting the second round of data collection. ## Import all the dependencies ``` # for reading and validating data import emeval.input.spec_details as eisd import emeval.input.phone_view as eipv import emeval.input.eval_view as eiev # Visualization helpers import emeval.viz.phone_view as ezpv import emeval.viz.eval_view as ezev # For plots import matplotlib.pyplot as plt %matplotlib notebook # For maps import branca.element as bre ``` ## Load and validate data The first issue to note is that we actually have two specs here. The first spec is the checked in `evaluation.spec.sample`, which defines calibration for both stationary and moving instances, and some evaluation trips. However, while starting with the calibration, we noticed some inconsistencies between the power curves. So in order to be more consistent, I defined a second, calibration-only spec `examples/calibration.only.json`, which essentially repeats the calibration experiments multiple times. After that, I returned to the first set of experiments for the moving calibration and the evaluation. ``` DATASTORE_URL = "http://cardshark.cs.berkeley.edu" AUTHOR_EMAIL = "shankari@eecs.berkeley.edu" sdt3 = eisd.SpecDetails(DATASTORE_URL, AUTHOR_EMAIL, "many_unimodal_trips_sb") pvt3 = eipv.PhoneView(sdt3) ``` ### Issue #1: Identical transition timestamps While exploring the data after the collection was done, there were many inconsistencies with the way in which the transitions and configurations were pushed to the server. In particular, because I save the timestamps as integer unix timestamps (using arrow.get().unix()), it is possible for elements stored in quick succession to have identical write timestamps and to not be retrieved correctly. 
And sometimes, due to races, the transitions were not even stored correctly (https://github.com/e-mission/e-mission-docs/issues/415) I resolved these manually for the most part so that we could get preliminary results but I did not resolve this since it is only for validation. The validation check fails because there were no modified sensor configs detected during the medium accuracy calibration on android. ``` About to retrieve messages using {'user': 'ucb-sdb-android-1', 'key_list': ['config/sensor_config'], 'start_time': 1561132633, 'end_time': 1561135735} response = <Response [200]> Found 0 entries medium_accuracy_train_AO -> [] ``` ``` # Commented out because this fails pvt3.validate() import importlib importlib.reload(eipv) evt3 = eiev.EvaluationView() evt3.from_view_eval_trips(pvt3, "", "") ``` ## Now for the results (calibration, phone view)! ### Battery drain over time (stationary) #### First experiment (single run) The figures below show the battery drain over time for both the stationary and moving calibrations The first set of figures are the initial stationary data collected with the first spec. As we can see, the android curves are almost identical, but the iOS curves show a clear difference between two pairs of phones. Phones (1,4) and phones (2,3) are almost identical with each other but noticeably different from the other pair. ``` (ifig, [android_ax, ios_ax]) = plt.subplots(ncols=2, nrows=1, figsize=(25,6)) ezpv.plot_all_power_drain(ios_ax, pvt3.map()["ios"], "calibration", "stationary") # ios_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ezpv.plot_all_power_drain(android_ax, pvt3.map()["android"], "calibration", "stationary") # android_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ``` #### Second experiment (multiple runs) Since this was surprising, I decided to run the experiments multiple times to try and avoid noise. The results are shown below. 
There is clearly a greater variation in the iOS case than the android case; I am not sure if it can be controlled any better. We may just need to work with higher tolerances on iOS. This also indicates several issues that need to be addressed in the next round.

##### Issue #2: Medium accuracy on iOS

The iOS accuracy levels are defined as [CLLocationAccuracy constants](https://developer.apple.com/documentation/corelocation/cllocationaccuracy?language=objc). Based on the list, I picked high accuracy = `kCLLocationAccuracyBest` and medium accuracy = `kCLLocationAccuracyNearestTenMeters`. However, at least in our testing, there was no significant difference in power drain between the two options. We will see later that there doesn't appear to be a significant difference in accuracy either. The option which really separated from the curve was `kCLLocationAccuracyHundredMeters`, which I had mapped to low accuracy. In the next round, I need to switch medium accuracy to `kCLLocationAccuracyHundredMeters` and low accuracy to `kCLLocationAccuracyKilometer`.

##### Issue #3: Built-in duty cycling on android

It appears that android has some form of built-in duty cycling in high accuracy mode, where the power drain slope abruptly changes around 2 hours. We will see some additional evidence of this later. After 2.5 hours, the slope appears to be more similar to medium accuracy. There does not appear to be such a knee during medium accuracy collection.

##### Issue #4: Unexpected and unexplained move out of duty cycling on android

This only happened once, but it looks like one phone moved back into the active state during one run, causing a second clear increase in slope at around 12.5 hours. We will see additional evidence for this later as well. It is not clear what caused this to happen, and it is also not clear why the others did not follow suit. Such idiosyncrasies could complicate efforts to observe power drain during evaluation.
##### Issue #5: Representing multiple runs This is more of a UI issue, but the current version of the UI did not allow for more than one full screen of calibration options. This meant that we could only see one low accuracy option, which is why we have limited low accuracy data. We need to figure out how best to represent this - allow the UI to display more options? separate the run from the calibration option? both? ``` (ifig, [android_ax, ios_ax]) = plt.subplots(ncols=1, nrows=2, figsize=(10,10)) ezpv.plot_all_power_drain(ios_ax, pv_ca_only.map()["ios"], "calibration", "stationary") ios_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), ncol=2) ezpv.plot_all_power_drain(android_ax, pv_ca_only.map()["android"], "calibration", "stationary") android_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), ncol=2) ``` #### Recap of the issues with another view This other view displays the plots for each phone over multiple runs. This highlights the previous issues again: - medium accuracy and high accuracy on iOS are almost identical, low accuracy is significantly different - the duty cycling for `high-accuracy-stationary-4` on `ucb-sdb-android-3` is very clear and is different from the others - for `high-accuracy-stationary-0` on `ucb-sdb-android-1`, there are two discontinuities - the second one, around 12.5 hours sharply increases the power drain - the `high-accuracy-stationary-0` run on `ucb-sdb-ios-3`, the `medium-accuracy-stationary-0` run on `ucb-sdb-ios-4` are significantly different from the others. The first is an outlier even in the aggregate (see above), the second is only an outlier for this phone. 
``` (ifig, ax) = plt.subplots(figsize=(12,3), nrows=0, ncols=0) ezpv.plot_separate_power_drain(ifig, pv_ca_only.map()["ios"], 4, "calibration", "stationary") (ifig, ax) = plt.subplots(figsize=(12,3), nrows=0, ncols=0) ezpv.plot_separate_power_drain(ifig, pv_ca_only.map()["android"], 4, "calibration", "stationary") ``` ### Battery drain over time (moving calibration) The moving calibration runs were not very useful in terms of battery drain, since there were too few points to be useful. Part of this is inherent in the definition of moving calibration, since it is unlikely that we will move for 10-15 hours at a time to collect the kind of data we have in the stationary case. And if our trip lasts for an hour, but we only read the battery level once an hour, we will end up with close to no data. ``` (ifig, [android_ax, ios_ax]) = plt.subplots(ncols=1, nrows=2, figsize=(10,10)) ezpv.plot_all_power_drain(ios_ax, pvt3.map()["ios"], "calibration", "AO") ios_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ezpv.plot_all_power_drain(android_ax, pvt3.map()["android"], "calibration", "AO") android_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ``` ### Checking counts (stationary) We now check the number of data points collected during calibration and their distribution in an effort to validate the duty cycling. Observations from this are: ##### on android: more points = more power drain As we would expect, the number of points across the various phones and the various runs is almost identical. In the cases where it is significantly different (e.g. `high-accuracy-stationary-0` on `ucb-sdb-android-1` and `high-accuracy-stationary-3` on `ucb-sdb-android-3`), we have see significant differences in the power drain as well. However, we do not understand why these two runs behave differently from the other runs. 
##### on iOS: almost no points

Since iOS has a distance filter, and not a time filter, and this calibration was stationary, almost no points are generated for high accuracy sensing. However, with low accuracy sensing (which is actually medium accuracy), we do get a significant number of points (an order of magnitude more), although nowhere near the number of entries on android.

##### on android: medium accuracy = almost no points

On android, medium accuracy sensing generates two orders of magnitude fewer points than high accuracy. So the additional power drain on android probably reflects not just the sensing cost but also the processing cost. This also indicates that the medium accuracy sensing, which relies on WiFi and cellular signal strengths, is likely to be suspended when the phone is in doze mode, and is consistent with prior observed behavior.

```
count_df = ezpv.get_count_df(pv_ca_only); count_df
(ifig, ax) = plt.subplots(nrows=1, ncols=3, figsize=(16,8))
count_df.filter(like="high_accuracy").filter(like="android", axis=0).plot(ax=ax[0],kind="bar")
count_df.filter(like="ios", axis=0).plot(ax=ax[1],kind="bar")
count_df.filter(like="medium_accuracy").filter(like="android", axis=0).plot(ax=ax[2],kind="bar")
```

### Checking counts (moving)

Although the battery drain is not significant while moving, the counts are likely to be much more relevant, especially in the iOS case, with the distance filter.

##### on iOS: significant number of points

Since iOS has a distance filter, we finally have a reasonable set of location points for both platforms. The number of points on iOS is still consistently lower than the corresponding count on android.

##### on iOS: medium accuracy is consistently lower than high accuracy

Recall that the "medium" accuracy here is `kCLLocationAccuracyNearestTenMeters` which did not have a significantly different power drain than `kCLLocationAccuracyBest`. However, the number of points is much lower when this medium accuracy is selected.
##### on android: medium accuracy = significant number of points, but lower On android, medium accuracy sensing now generates ~ 0.5 * the number of points with high accuracy, but the medium accuracy numbers are consistently lower than the high accuracy. ``` count_df = ezpv.get_count_df(pvt3); count_df.filter(like="AO") (ifig, ax) = plt.subplots(nrows=1, ncols=2, figsize=(16,8), sharey=True) count_df.filter(like="AO").filter(like="android", axis=0).plot(ax=ax[0],kind="bar") count_df.filter(like="AO").filter(like="ios", axis=0).plot(ax=ax[1],kind="bar") ``` ### Checking densities (stationary) Density checks don't make as much sense on iOS, since there are so few entries, so we will focus mainly on android. ##### on android: duty cycling = density variation In general, most of the android points are sensed right after the calibration starts, at around zero. There are also a couple of minor bumps around hours 2, 6 and 15. This seems consistent with the explanation of doze mode, in which the phone goes into a lower power state when not in use and wakes up at increasing intervals. The exceptions are `high-accuracy-stationary-1` on `ucb-sdb-android-1`, which corresponds to the abrupt increase in power drain seen in the power curves. There is also a somewhat unusual bump related to `low-accuracy-stationary-4` on `ucb-sdb-android-4` but probably because the accuracy is already low, and the bump is small, we do not see a visible difference in slope for that curve. 
``` android_density_df = ezpv.get_location_density_df(pv_ca_only.map()["android"]) nRows = ezpv.get_row_count(len(android_density_df), 2) print(nRows) android_ax = android_density_df.plot(kind='density', subplots=False, layout=(nRows, 2), figsize=(10,10), sharex=True, sharey=True) android_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) (ifig, ax) = plt.subplots(figsize=(16,4), nrows=0, ncols=0) ezpv.plot_separate_density_curves(ifig, pv_ca_only.map()["android"], 4, "calibration", "stationary") ``` ### Checking densities (moving) As expected, when moving, while the densities do vary, they do not show the kind of spiky behavior that we see while stationary. Instead, we get points pretty much throughout the travel time. ``` android_density_df = ezpv.get_location_density_df(pvt3.map()["android"]) nRows = ezpv.get_row_count(len(android_density_df), 2) print(nRows) android_ax = android_density_df.filter(like="AO").plot(kind='density', subplots=False, layout=(nRows, 2), figsize=(10,10), sharex=True, sharey=True) android_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ios_density_df = ezpv.get_location_density_df(pvt3.map()["ios"]) nRows = ezpv.get_row_count(len(ios_density_df), 2) print(nRows) ios_ax = ios_density_df.filter(like="AO").plot(kind='density', subplots=False, layout=(nRows, 2), figsize=(10,10), sharex=True, sharey=True) ios_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ``` ### Checking trajectories (stationary) While checking the counts and densities, we looked at the location sensing data **over time**. We can also look at it **over space**, by displaying it on a map. At this point, stationary data is less interesting because we basically expect it to be concenrated around a single location. However, on visualizing it, we can see some unexpected behavior. ##### on all phones: there are unexpected jumps Even in the case of high accuracy sensing, on both android and iOS, we see jumps from the stationary location. 
These jumps are particularly pronounced in `ucb-sdb-android-2_medium_accuracy_stationary_2`, `ucb-sdb-ios-1_low_accuracy_stationary_4`, where they cover 5-6 blocks, but we can see at least one block displacements in a bunch of other maps (e.g. `ucb-sdb-ios-2_high_accuracy_stationary_1`) ##### on android: low accuracy really sucks The low accuracy option on android jumps all over the map in a very distinctive zig-zag pattern ``` ha_map_list = ezpv.get_map_list(pv_ca_only, "calibration", "") ha_map_list.extend(ezpv.get_map_list(pvt3, "calibration", "stationary")) rows = ezpv.get_row_count(len(ha_map_list), 8) evaluation_maps = bre.Figure(ratio="{}%".format((rows/4) * 100)) for i, curr_map in enumerate(ha_map_list): evaluation_maps.add_subplot(rows, 8, i+1).add_child(curr_map) evaluation_maps ``` ### Checking trajectories (moving) As expected, these are more interesting than the stationary trajectories. Some observations: - the high accuracy trajectories look reasonably good, but the medium accuracy trajectories on android have significant zig zags - the iOS medium accuracy trajectories look really good in comparison, but note that in this run, "medium accuracy" seems to incur a power drain close to high accuracy. We need to retry with the medium accuracy set to low accuracy (issue already identified) ``` ha_map_list = ezpv.get_map_list(pvt3, "calibration", "AO") rows = ezpv.get_row_count(len(ha_map_list), 8) evaluation_maps = bre.Figure(ratio="{}%".format((rows/4) * 100)) for i, curr_map in enumerate(ha_map_list): evaluation_maps.add_subplot(rows, 8, i+1).add_child(curr_map) evaluation_maps ``` ## Now for the results (calibration, evaluation view)! ### Trajectory matching In the phone view, we were able to compare phone results against each other (e.g. `ucb-sdb-android-1` v/s `ucb-sdb-android-2` for the same run) by plotting them on the same graph. 
We need something similar for trajectories, so that we can get a better direct comparison against various configurations. To make this easier, we want to switch the view so that the calibration ranges are first grouped by the settings and then by the phone. Once we do that, we can compare trajectories from different phones for the same experiment in the same map. ##### Issue #1: No matching with ground truth Zooming into the maps, we can see that even in the high accuracy case, there are mismatches between the trajectories. For example, the iOS high accuracy maps between South San Francisco and San Francisco, android medium accuracy maps between SF and the Easy Bay. Even if the trajectories match, they don't necessarily match with the ground truth, for example, the android high accuracy maps between 22nd street and 4th and King, iOS medium accuracy right after reaching Oakland. We should extend the spec to support this. ``` map_list = ezev.get_map_list_single_run(evt3, "calibration", "AO") rows = ezpv.get_row_count(len(map_list), 2) evaluation_maps = bre.Figure(ratio="{}%".format((rows/4) * 100)) for i, curr_map in enumerate(map_list): evaluation_maps.add_subplot(rows, 2, i+1).add_child(curr_map) evaluation_maps ``` ### Battery drain (stationary) This is less important since the plots with all curves do allow for direct comparisons between the battery drain curves across multiple phones. But just for the record, let us generate subplots that are grouped by run instead of by phone. ``` (ifig, ax) = plt.subplots(figsize=(16,6),nrows=0,ncols=0) ezev.plot_separate_power_drain_multiple_runs(ifig, 3, ev_ca_only.map("calibration")["android"], "") ``` ## Now for the results (evaluation, evaluation view)! ### Trajectory matching Finally, we get to the evaluation, in which we run different regimes across the different phones. We also have pre-determined ground truth for the trips. 
Since our entire goal is to compare the trips against each other, we will go directly to the evaluation view. ##### Issue #1: Tracking not turned off for the power control We can see that the power control also has location entries. This is because, even in the case of the power control, although we were setting the accuracy to the lowest possible and also sampling at a very low rate, we were not turning tracking off. We need to fix this. ``` evt3.map("evaluation")["android"]["HAHFDC v/s HAMFDC"]["short_walk_suburb"]["power_control"]["location_df"] ``` ### Other observations include: - The trajectory lines all match up pretty well, but that is not surprising since this was a high accuracy v/s high accuracy comparison, with only the filter being different - The android evaluation phones ran out of battery before the second set of trips, so we only have the accuracy control for the `short_car_suburb` and `short_car_suburb_freeway` - There is a clear zigzag in the android `short_bike_suburb` case - The gap between the actual start of the trip and the detected start of the trip is much larger on iOS (~ 3-4 blocks) than android (~ 1-2 blocks) ``` map_list = ezev.get_map_list_eval_trips(evt3, "evaluation", "AO") rows = ezpv.get_row_count(len(map_list), 2) evaluation_maps = bre.Figure(ratio="{}%".format((rows/4) * 100)) for i, curr_map in enumerate(map_list): evaluation_maps.add_subplot(rows, 2, i+1).add_child(curr_map) evaluation_maps ``` ### Investigating android evaluation power drain The android evaluation power drain is surprising. We would expect power drain of the evaluation (which duty cycles the sensing) to be much lower than the accuracy control, which senses continuously. However, both the evaluation phones ran out of battery before the second trip, and the accuracy control did not. Let's verify this from the battery drain. We know that individual trip power drains will not tell us much because of the short durations. 
But the range-specific tracking should have some values... Aha! We can see that the duty cycling works as expected on iOS. The power drain of both regimes is almost identical to the power control, although we would expect the power control to get even lower when we actually stop tracking. However, on android, the evaluation regimes are in fact almost identical and with a much higher drain than the accuracy control. ``` (ifig, [android_ax, ios_ax]) = plt.subplots(ncols=1, nrows=2, figsize=(10,10)) ezpv.plot_all_power_drain(ios_ax, pvt3.map()["ios"], "evaluation", "") ios_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), ncol=1) ezpv.plot_all_power_drain(android_ax, pvt3.map()["android"], "evaluation", "") android_ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), ncol=1) ``` ##### Issue #1: Milliseconds? Investigating this further by viewing the logs on the phone, we realize that we configure the android filter_time with the value of the filter directly. e.g. if filter = 1, then filter_time = 1. However, the API expects the time in milliseconds, so we are effectively setting this to 1 millisecond, not one second. Since the minimum filter_time is 1 second, this ensures that: - we get data every second in all the regimes (see plots below) - the HAHFDC and HAMFDC are effectively the same - the number of entries that we need to find before we detect the trip end is in the tens of thousands. We don't actually achieve this number, so we keep sensing anyway, so we never actually duty cycle (see transition list below) But why are they **worse** than the accuracy control? Given that the sensing is largely identical, this **must be** due to the additional processing of iterating over all the entries to determine whether the trip has ended. So there really does appear to be a tradeoff between lower sensing and more local computation in the duty cycling case, specially for CPU-hungry phones. We might want to experiment further with this. 
``` test_eval_range = evt3.map("evaluation")["android"]["HAHFDC v/s HAMFDC"] (ifig, ax_list) = plt.subplots(ncols=3, nrows=1, figsize=(12,4)) for i, (regime, regime_map) in enumerate(test_eval_range["short_walk_suburb"].items()): if i == 3: continue regime_map["location_df"].ts.diff().hist(ax=ax_list[i], label=regime) ax_list[i].set_title(regime) import arrow test_transition_phone = "ucb-sdb-android-2" test_eval_range = pvt3.map()["android"][test_transition_phone]["evaluation_ranges"][0] transition_entries = pvt3.spec_details.retrieve_data_from_server(test_transition_phone, ["statemachine/transition"], test_eval_range["start_ts"], test_eval_range["end_ts"]) print("\n".join([str((t["data"]["transition"], t["data"]["ts"], arrow.get(t["data"]["ts"]).to(pvt3.spec_details.eval_tz))) for t in transition_entries])) ``` ### Checking the motion activity In addition to location data, we also read the motion_activity data from the closed source phone APIs. Let's quickly check how accurate the raw motion activity is. The medium accuracy runs seem to be much more noisy wrt motion activity transitions. We should really not get a lot of transitions since we essentially took various trains for the entire route. The high accuracy sensing seems to be largely stable, except for one extraneous transition in the middle of the `ucb-sdb-ios-1` run. ``` (ifig, ax) = plt.subplots(nrows=2, ncols=2, figsize=(12,8), sharex=True) ezpv.display_unprocessed_android_activity_transitions(pvt3, ax[0][0], "calibration", "medium_accuracy_train_AO") ezpv.display_unprocessed_android_activity_transitions(pvt3, ax[0][1], "calibration", "high_accuracy_train_AO") ezpv.display_unprocessed_ios_activity_transitions(pvt3, ax[1][0], "calibration", "medium_accuracy_train_AO") ezpv.display_unprocessed_ios_activity_transitions(pvt3, ax[1][1], "calibration", "high_accuracy_train_AO") plt.legend() ```
github_jupyter
# Intro to programming using python ## Objectives * To get a brief overview of what Python is * To understand computer basics and programs * To write a small python program using: https://repl.it/languages/python3 ### Python has been increasingly popular in the last few years. ![Graph of popularity from tiobe.com](images/tiobe_index.png) ### In particular due to the adoption of python by the data science community which is illustrated by [this study of stackoverflow](https://stackoverflow.blog/2017/09/14/python-growing-quickly/): ![Graph which shows that pandas is the 1st python related search, followed by web related terms](images/stackoverflow_study.jpeg) ![Who uses python](images/who-uses-python.jpg) ## What is python used for? * Web development * Data analysis * Web scraping * Gaming * Robotics * Machine learning * Testing * ... ## Before going more into details... What is a computer? (1/2) An electronic device that is receiving data input, storing (in RAM) and processing (in the CPU) them and producing information in output. ![](images/what_is_computer.png) ## What is a computer? (2/2) ![](images/what_is_computer_example.png) ## What is a program? * Computer programs, known as software, are instructions to the computer. * You tell a computer what to do through programs. Without programs, a computer is an empty machine. Computers do not understand human languages, so you need to use computer languages to communicate with them. * Programs are written using programming languages. ## Different types of Programming Languages * Machine language is a set of primitive instructions built into every computer. The instructions are in the form of binary code. The programs in machine language are highly difficult to read and modify. For example, to add two numbers, you might write an instruction in binary like this: `1101101010011010` * The high-level languages are English-like and easy to learn and program. 
For example, the following is a high-level language statement that multiplies two numbers:
``` name_of_the_user = input("Enter your name: ") print(f"Hello {name_of_the_user}") ``` ## Exercice: computing the age of the user Ask a user to enter the year he was born (use `input()`), compute his age and tell him how old he will turn this current year (use `print()`). ## Indentation The indentation is the increase or decrease of space between the left margin and the first character of the line. The code need to be properly indented, else python will raise an error. For example, what is wrong here? ``` if True: print("indented properly ") ``` ## Function definition (1) Aside the built-in functions such as `print` and `input` that are already implemented, we can also define our own functions. We use the keyword __def__, the name of the function, the brackets, and the colon Then the body of the function needs to be indented ```python def name_of_the_function(): # body of the function ``` When we define a function, we just make python see that the function exist but it is not executed ```python def my_function(): print("THIS IS MY FUNCTION") ``` ## Function definition vs execution (2) To call or execute or run a function, we use the name of the function AND the brackets, without the brackets, the function is not called. 
`name_of_the_function()` Notice the difference between defining and calling a function ```python def my_function(): print("THIS IS MY FUNCTION") my_function() ``` ## Conditions: Controlling the flow of our programs We can represent the flow of execution with a flow chart ![](images/flow_chart.png) ## Structure of a simple if statement Pseudo code: ```python if condition: # statement (mind the indentation) ``` Example, representation of the flow chart example in python code: ```python if name=='Alice': print('Hi Alice') ``` ## The two-way if statement Pseudo code: ```python if condition: # statement (mind the indentation) else: # statement executed when the condition is False ``` Example, representation of the flow chart example in python code with an else statement: ```python if name=='Alice': print('Hi Alice') else: print('Hi') ``` ## Difference between '==' and '=' * The sign = is the sign of __assignment__, it is used for assigning a value to a variable * The sign == is the sign of __comparison__, it compares 2 values and return a boolean (True or False) ## Exercise: password Create a program that ask the user for a password. * Have the password defined in "clear" (i.e. not encrypted as you would do in standard application) in your program, in a variable called "PASSWORD" * Use input() to receive the password entered by the user * If the word entered by the user matches the password, display "Access Granted", else, "Forbidden" ``` # This is a comment # Replace the comment with the code # You begin by setting the password in a variable PASSWORD (remember that the sign '=' is the assignment sign) # Use input and retrieve the value of input in an other variable # use conditions to check if the password is correct and display if it is correct or not ``` ## Extra resources * [Automate the boring stuff with python](https://automatetheboringstuff.com/)
github_jupyter
``` # Initialize Otter Grader import otter grader = otter.Notebook() ``` ![data-x](https://raw.githubusercontent.com/afo/data-x-plaksha/master/imgsource/dx_logo.png) # In-class Assignment (Feb 9) Run the following two cells to load the required modules and read the data. ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error df = pd.read_csv("Video_Games_Sales_cleaned_sampled.csv") df.head(5) ``` ## Exploring Data with Pandas ### Q1: How many data points (rows) are there in this dataset? Store it in ```num_rows```. <!-- BEGIN QUESTION name: q1 manual: false --> ``` # your code here num_rows = ... print(num_rows) ``` ### Q2 What are the max and min values in Global Sales? What about the quartiles (25%, 50%, and 75%)? Can you answer this question with a one-liner code? <!-- BEGIN QUESTION name: q2 manual: false --> ``` # your code here ... ``` ### Q3 What are the unique genres and consoles that the dataset contains? Store them in ```genre_unique``` and ```console_unique```. <!-- BEGIN QUESTION name: q3 manual: false --> ``` # your code here genre_unique = ... console_unique = ... print("All genres:", genre_unique) print("All consoles:", console_unique) ``` ### Q4 What are the top five games with the most global sales? <!-- BEGIN QUESTION name: q4 manual: false --> ``` # your code here ... ``` ### Q5 (Optional: Do it if you had enough time) How many games in the dataset are developed by Nintendo? What are their names? <!-- BEGIN QUESTION name: q5 manual: false --> ``` # your code here ... ``` ## Linear Regression Suppose that you want to regress the global sales on four features: Critic_Score, Critic_Count, User_Score, and User_Count. The input matrix $X$ and the output $y$ are given to you below. 
``` ## No need for modification, just run this cell X = df[['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count']].values y = df[['Global_Sales']].values ``` ### Q6 Use train_test_split function in sklearn to split the dataset into training and test sets. Set 80% of the dataset aside for training and use the rest for testing. (set random_state=0) <!-- BEGIN QUESTION name: q6 manual: false --> ``` # your code here ... ``` ### Q7 Train your linear regression model using the training set you obtained above. Then, store the coefficients and the intercept of your model in ```coefs``` and ```intercept```, respectively. <!-- BEGIN QUESTION name: q7 manual: false --> ``` # your code here coefs = ... intercept = ... print("Coefficients:", coefs) print("Intercept:", intercept) ``` ### Q8 (Optional: Do it if you had enough time.) Compute the mean-squared-error of your model's prediction on the training and test sets and store them in ```train_error``` and ```test_error```, respectively. <!-- BEGIN QUESTION name: q8 manual: false --> ``` # your code here train_error = ... test_error = ... print(train_error) print(test_error) ``` # Submit Make sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. **Please save before submitting!** ``` # Save your notebook first, then run this cell to create a pdf for your reference. ```
github_jupyter
# Comparing Collaborative Filtering Systems According to studies done by the article "Comparing State-of-the-Art Collaborative Filtering Systems" by Laurent Candillier, Frank Meyer, and Marc Boulle, the __best user based approach__ is based on __pearson similarity and 1500 neighbors__. The __best item based approach__ is based on __probabilistic similarity and 400 neighbors__. The __best model based approach__ is using __K-means with euclidean distance, 4 clusters and prediction scheme based on the nearest cluster and Bayes model minimizing MAE__. Lastly, the __best default approach__ is based on __Bayes rule minimizing MAE__. We will try to implement the studies done by this article and see if we will achieve the same results. What we have implemented so far: * Bayes * Bayes MAP * Bayes MSE * Bayes MAE * Pearson Correlation (partially) Table of Contents: ``` 0. Last updated 1. Install and import libraries 2. Load dataset 3. Convert dataset to DataFrame (optional) 4. Determine characteristics of data (optional) 5. Splitting the data (optional) 6. Calculate similarities and find nearest neighbors 7. Develop Model 8. Evaluate Metrics 9. Compare different CF systems ``` Explanation of Bayes: * https://www.countbayesie.com/blog/2015/2/18/bayes-theorem-with-lego * http://www.analyticsvidhya.com/blog/2015/09/naive-bayes-explained/ ## 0. Last updated ``` import datetime, time # timestamp is not correct; it is 8 hours ahead print (datetime.datetime.now() - datetime.timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S') ``` ## 1. 
Install and import libraries ``` import importlib import pip def _install(package): pip.main(['install', package]) def _import(package): importlib.import_module(package) def install_and_import(package): try: _import(package) except ImportError: _install(package) # install PyMC install_and_import("git+git://github.com/pymc-devs/pymc.git") # zip up source_dir located in GitHub remote_url's remote_branch and add it to Spark's source context remote_url = "https://github.com/lab41/hermes.git" remote_branch = "master" source_dir = "src" debug = True # helper functions import os import functools def _list_all_in_dir(dir_path): for path, subdirs, files in os.walk(dir_path): for filename in files: print os.path.join(path, filename) def _zip_dir(srcdir_path, zipfile_handler): try: zipfile_handler.writepy(srcdir_path) finally: zipfile_handler.close() def trackcalls(func): @functools.wraps(func) def wrapper(*args, **kwargs): wrapper.has_been_called = True return func(*args, **kwargs) wrapper.has_been_called = False return wrapper @trackcalls def _add_zipfile_to_sc(zipfile_path): sc.addPyFile(zipfile_path) import git import os import tempfile import shutil import zipfile # create a temporary directory tmpdir_path = tempfile.mkdtemp() if debug: print "temporary directory: %s\n" % tmpdir_path # ensure file is read/write by creator only saved_umask = os.umask(0077) # create a zipfile handler to zip the necessary files ziptmpdir_path = tempfile.mkdtemp() if debug: print "temporary directory for zip file: %s\n" % ziptmpdir_path zipfile_path = ziptmpdir_path + "/hermes_src_2.zip" if debug: print "zip file's path: %s\n" % zipfile_path zipfile_handler = zipfile.PyZipFile(zipfile_path, "w") # make zipfile handler verbose for debugging zipfile_handler.debug = 3 try: # clone "framework" branch from GitHub into temporary directory local_branch = git.Repo.clone_from(remote_url, tmpdir_path, branch=remote_branch) if debug: print "current branch: %s\n" % local_branch.head.ref if debug: print 
"list all in %s:" % tmpdir_path; _list_all_in_dir(tmpdir_path); print "\n" # zip "hermes" directory if debug: print "zipping: %s\n" % os.path.join(tmpdir_path, source_dir) _zip_dir(os.path.join(tmpdir_path, source_dir), zipfile_handler) # check zip file if debug: print "Is zip file %s valid? %s\n" % (zipfile_path, zipfile.is_zipfile(zipfile_path)) # add zip to SparkContext # note: you can only add zip to SparkContext one time if not _add_zipfile_to_sc.has_been_called: if debug: print "add zip file %s into spark context\n" % zipfile_path _add_zipfile_to_sc(zipfile_path) else: if debug: print "zip file %s is already added into spark context; will not re-add\n" % zipfile_path except IOError as e: raise e else: os.remove(zipfile_path) finally: os.umask(saved_umask) shutil.rmtree(tmpdir_path) shutil.rmtree(ziptmpdir_path) # import the required modules from Hermes from src.algorithms import performance_metrics as pm from src.data_prep import movieLens_vectorize as mv from src.utils import save_load as sl # import other modules import os import time class Timer(object): """ To time how long a particular function runs. Example: import Timer with Timer() as t: somefunction() print("somefunction() takes %s seconds" % t.secs) print("somefunction() takes %s milliseconds" % t.msecs) """ def __enter__(self): self.start = time.time() return self def __exit__(self, *args): self.end = time.time() self.secs = self.end - self.start self.msecs = self.secs * 1000 ``` ## 2. Load dataset, in this case MovieLens data We are going to use MovieLens's 1M data. ``` # ratings_json_path # movies_json_path ``` ## 3. Convert dataset to Dataframe Run this only when you load datasets from your home directory. 
``` def convert_dataset_to_dataframe(dataset_path): df = sqlCtx.read.json(dataset_path, None) df = df.repartition(sc.defaultParallelism * 3) return df # obtaining ratings dataframe ratingsdf = convert_dataset_to_dataframe(ratings_json_path) # obtaining movies dataframe moviesdf = convert_dataset_to_dataframe(movies_json_path) ``` ## 4. Determine characteristics of the MovieLens data (optional) Run this only when you load datasets from your home directory. Format: * ratings = [user_id, movie_id, rating, timestamp] * movies = [movie_id, title, genres] ``` # extract most commonly used vectors to be used later on # 1. using ratingsdf # a. [(user_id, movie_id, rating)] umr = ratingsdf.map(lambda row: (row.user_id, row.movie_id, row.rating)) # b. [(user_id, movie_id, rating)] where rating >= 3 umr_weighted = umr.filter(lambda (user_id, movie_id, rating): rating >= 3) print "-" * 80 print "format: [(user_id, movie_id, rating)]\n" print "umr:\n", umr.take(2) print "umr_weighted:\n", umr_weighted.take(2) print "-" * 80 print "\nTo identify user-to-user similarity:" print "format: [(movie_id, (user_id, rating))]\n" # c. [(movie_id, (user_id, rating)] -> to identify user-to-user similarity m_ur = ratingsdf.map(lambda row: (row.movie_id, (row.user_id, row.rating))) # d. [(movie_id, (user_id, rating)] where rating >= 3 m_ur_weighted = m_ur.filter(lambda (movie_id, (user_id, rating)): rating >= 3) print "m_ur:\n", m_ur.take(2) print "m_ur_weighted (aka rating >=3):\n", m_ur_weighted.take(2) print "-" * 80 print "\nTo identify movie-to-movie similarity:" print "format: [(user_id, (movie_id, rating))]\n" # e. [(user_id, (movie_id, rating))] -> to identify movie-to-movie similarity u_mr = ratingsdf.map(lambda row: (row.user_id, (row.movie_id, row.rating))) # f. 
[(user_id, (movie_id, rating))] where rating >= 3 u_mr_weighted = u_mr.filter(lambda (user_id, (movie_id, rating)): rating >= 3) print "um_r:\n", u_mr.take(2) print "um_r_weighted (aka rating >=3):\n", u_mr_weighted.take(2) print "-" * 80 # total number of distinct users num_distinct_users = ratingsdf.map(lambda row: row.user_id).distinct().count() num_users = ratingsdf.map(lambda row: row.user_id).count() print "total number of distinct users = ", num_distinct_users print "total number of users = ", num_users # total number of ratings # should be the same as num_users num_ratings = ratingsdf.map(lambda row: row.rating).count() print "total number of ratings = ", num_ratings # total number of distinct movies num_distinct_movies = moviesdf.map(lambda row: row.movie_id).distinct().count() num_movies = moviesdf.map(lambda row: row.movie_id).count() print "total number of distinct movies = ", num_distinct_movies print "total number of movies = ", num_movies # what is the average number of ratings a user rates = number of ratings / number of users # round it to the fourth digit avg_num_ratings_per_user = round(float(num_ratings) / float(num_distinct_users), 4) print "average number of ratings a user rates = ", avg_num_ratings_per_user # what is the average number of ratings a movie receives = number of ratings / number of movies avg_num_ratings_per_movie = round(float(num_ratings) / float(num_distinct_movies), 4) print "average number of ratings a movie receives = ", avg_num_ratings_per_movie # completeness = number of ratings / (number of users * number of movies) completeness = round(float(num_ratings) / (float(num_distinct_users) * float(num_distinct_movies)), 4) print "completeness = ", completeness # mean rating mean_rating = ratingsdf.map(lambda row: row.rating).mean() print "mean rating = ", mean_rating # mean rating per movie # [(movie_id, rating)] movie_rating_pair = ratingsdf.map(lambda row: (row.movie_id, row.rating)) """ combineByKey() requires 3 functions: 
* createCombiner: first aggregation step for each key -> lambda first_rating: (first_rating, 1) * mergeValue: what to do when a combiner is given a new value -> lambda x, first_rating: x[0] + first_rating, x[1] + 1 -> lambda thisNewRating_thisNumRating, firstRating: thisNewRating + firstRating, thisNumRating + 1 * mergeCombiner: how to merge two combiners -> lambda x, y: (x[0] + y[0], x[1] + y[1]) -> lambda sumRating1_numRating1, sumRating2_numRating2: (sumRating1 + sumRating2, numRating1 + numRating2) """ # [(movie_id, (sum_rating, num_rating))] movie_sumRating_numRating_pair = movie_rating_pair.combineByKey( lambda first_rating: (first_rating, 1), lambda x, first_rating: (x[0] + first_rating, x[1] + 1), lambda x, y: (x[0] + y[0], x[1] + y[1])) # [(movie_id, mean_rating)] movie_meanRating_pair = movie_sumRating_numRating_pair.map(lambda (movie_id, (sum_rating, num_rating)): (movie_id, sum_rating/num_rating)) movie_meanRating_pair.take(3) # meanRating_numRating_pair will be used in plotting in the next cell # where _1 = mean rating of the movie # _2 = number of users who review the movie # [(mean_rating, num_rating)] meanRating_numRating_pair = movie_sumRating_numRating_pair.map(lambda (movie_id, (sum_rating, num_rating)): (sum_rating/num_rating, num_rating)) meanRating_numRating_pair_df = meanRating_numRating_pair.toDF() meanRating_numRating_pair_df.show() # plot mean rating per movie %matplotlib inline import matplotlib.pyplot as plt import pandas as pd meanRating_numRating_pair = movie_sumRating_numRating_pair.map(lambda (movie_id, (sum_rating, num_rating)): (sum_rating/num_rating, num_rating)) meanRating_numRating_pair_df = meanRating_numRating_pair.toDF() meanRating_numRating_pair_panda_df = meanRating_numRating_pair_df.toPandas() plot = meanRating_numRating_pair_panda_df.plot( x="_2", \ y="_1", \ kind="hexbin", \ xscale="log", \ cmap="YlGnBu", \ gridsize=12, \ mincnt=1, \ title="Mean vs Number of Reviewers") plot.set_xlabel("Number of Reviewers Per Movie") 
plot.set_ylabel("Mean Rating Per Movie") plt.show() ``` This figure shows that the average rating of a movie is actually slightly higher than 3. __Hypothesis__: * We can safely predict the mean rating after 100 reviews. * After 100 reviews, the average rating is approximately in between 3.0 and 4.0. ## 5. Splitting the data (optional) Run this only when you load datasets from your home directory. Default split data into: * 90% training * 10% test * 0% validation * seed = 41 Remember that calling randomSplit when you restart the kernel will provide you with a different training, test, and validation data even though the weights and the seed are the same. ``` weights = [0.9, 0.1, 0] seed = 41 # 1. using ratingsdf # a. [(user_id, movie_id, rating)] umr_train, umr_test, umr_validation = umr.randomSplit(weights, seed) # b. [(user_id, movie_id, rating)] where rating >= 3 umr_weighted_train, umr_weighted_test, umr_weighted_validation = umr_weighted.randomSplit(weights, seed) # c. [(movie_id, (user_id, rating)] m_ur_train, m_ur_test, m_ur_validation = m_ur.randomSplit(weights, seed) # d. [(movie_id, (user_id, rating)] where rating >= 3 m_ur_weighted_train, m_ur_weighted_test, m_ur_weighted_validation = m_ur_weighted.randomSplit(weights, seed) # e. [(user_id, (movie_id, rating)] u_mr_train, u_mr_test, u_mr_validation = u_mr.randomSplit(weights, seed) # f. [(user_id, (movie_id, rating)] where rating >= 3 u_mr_weighted_train, u_mr_weighted_test, u_mr_weighted_validation = u_mr_weighted.randomSplit(weights, seed) ``` ## 6. 
Calculate similarity and find nearest neighbors These are the different similarity measurement implemented in the article: * pearson * cosine * constraint pearson: in the case of MovieLens data, it means any ratings greater than 3 (aka positive ratings) * adjusted cosine * probabilistic "When implementing a user- or item-based approach, one may choose: * a similarity measure: pearson, cosine, constraint pearson, adjusted cosine, or probabilistic * a neighborhood size * and how to compute predictions: using a weighted sum of rating values or using a weighted sum of deviations from the mean." Table of Contents: ``` 6.A.1. Calculate Pearson Correlation a. user-based: DONE except for prediction b. item-based 6.A.2. Calculate Weighted Pearson Correlation a. user-based b. item-based 6.A.3. Calculate Pearson Deviation a. user-based b. item-based 6.B.1. Calculate Probabilistic Similarity a. user-based b. item-based 6.B.2. Calculate Probabilistic Deviation a. user-based b. item-based 6.C.1. Calculate Cosine Similarity a. user-based b. item-based 6.C.2. Calculate Adjusted Cosine Similarity a. user-based b. item-based 6.D. Comparing Similarities' Measurement ``` ## 6.A.1. 
Calculate Pearson Correlation ``` # helper functions from scipy.stats import pearsonr import math # filter out duplicate pairs (keeps each unordered pair exactly once, and drops self-pairs) # user-based approach: # input and output: [( movie_id, ((user_id_1, rating_1), (user_id_2, rating_2)) )] # item-based approach: # input and output: [( user_id, ((movie_id_1, rating_1), (movie_id_2, rating_2)) )] def removeDuplicates((key_id, ratings)): (value_id_1, rating_1) = ratings[0] (value_id_2, rating_2) = ratings[1] return value_id_1 < value_id_2 # rearrange so that it will be in the format of pairs # NOTE: used with map(), so the leading key (movie_id / user_id) is dropped from the output # user-based approach: # input: [( movie_id, ((user_id_1, rating_1), (user_id_2, rating_2)) )] # output: [( (user_id_1, user_id_2), (rating_1, rating_2) )] # item-based approach: # input: [( user_id, ((movie_id_1, rating_1), (movie_id_2, rating_2)) )] # output: [( (movie_id_1, movie_id_2), (rating_1, rating_2) )] def createPairs((key_id, ratings)): (value_id_1, rating_1) = ratings[0] (value_id_2, rating_2) = ratings[1] return ((value_id_1, value_id_2), (rating_1, rating_2)) # aggregate pairs using combineByKey() instead of groupByKey() # [( test_user_id, train_user_id), (test_rating_1, train_rating_1), (test_rating_2, train_rating_2), ...]
def aggregatePairs(keyPairs): return keyPairs.combineByKey( lambda firstRatingPair: ((firstRatingPair),), lambda newRatingPair, firstRatingPair: newRatingPair + ((firstRatingPair),), lambda tupleRatingPairs1, tupleRatingPairs2: tupleRatingPairs1 + tupleRatingPairs2) # calculate pearson correlation when you passed in the values of # user-based approach: # input: values of [(user_id_1, user_id_2), ((rating_1, rating_2), (rating_1, rating_2)...)] # output: values of [(user_id_1, user_id_2), (pearson_correlation, num_rating_pairs)] # item-based approach: # input: values of [(movie_id_1, movie_id_2), ((rating_1, rating_2), (rating_1, rating_2)...)] # output: values of [(movie_id_1, movie_id_2), (pearson_correlation, num_rating_pairs)] # NOTE: the p_value returned by scipy's pearsonr is discarded; only the # correlation coefficient and the number of co-rated pairs are kept def calculatePearson(ratingPairs): rating1s = [rating1 for (rating1, _) in ratingPairs] rating2s = [rating2 for (_, rating2) in ratingPairs] pearson_correlation, p_value = pearsonr(rating1s, rating2s) return (pearson_correlation, len(ratingPairs)) ``` ### 6.A.1. Pearson's User-Based Approach: comparing USER similarities According to the article, this is supposed to be the best user-based approach.
``` #((user_id, movie_id), rating) a = sc.parallelize([ ((1, 2), 3), ((2, 2), 4) ]) #((user_id, movie_id), predicted_rating) b = sc.parallelize([ ((1, 2), 2), ((2, 2), 5) ]) #((user_id, movie_id), (rating, predicted_rating) c = a.join(b) c.collect() # combine test and train together so that # [movie_id, ( (test_user_id, test_rating), (train_user_id, train_rating) )] M_testUR_trainUR = m_ur_test.join(m_ur_train) print M_testUR_trainUR.count() M_testUR_trainUR.take(5) # remove duplicates M_testUR_trainUR = M_testUR_trainUR.filter(removeDuplicates) print M_testUR_trainUR.count() M_testUR_trainUR.take(2) # rearrange so that it will be in the format # [(test_user_id, train_user_id), (test_rating, train_rating)] userPairs = M_testUR_trainUR.map(createPairs) print userPairs.count() userPairs.take(2) # congregate all ratings for each user pair so that it will be in the format of: # [( test_user_id, train_user_id), (test_rating_1, train_rating_1), (test_rating_2, train_rating_2), ...] # instead of using groupByKey(), use combineByKey() instead. 
""" # Implemented using groupByKey(): with Timer() as t: aggUserPairs = userPairs.groupByKey() print "aggregate user pairs approach #1: %s seconds" % t.secs print aggUserPairs.count() aggUserPairs.take(5) ----------------------------------------------------------------- # Output: aggregate user pairs: 0.0353801250458 seconds 10728120 Out[20]: [((1274, 2736), <pyspark.resultiterable.ResultIterable at 0x7f180eb55350>), ((2117, 5393), <pyspark.resultiterable.ResultIterable at 0x7f180eb55510>), ((1422, 3892), <pyspark.resultiterable.ResultIterable at 0x7f180eb55550>), ((1902, 5636), <pyspark.resultiterable.ResultIterable at 0x7f180eb55590>), ((3679, 5555), <pyspark.resultiterable.ResultIterable at 0x7f180eb555d0>)] ----------------------------------------------------------------- output = aggUserPairs.mapValues(lambda iterable: tuple(iterable)) output.take(2) ----------------------------------------------------------------- # Output: [((3848, 4390), ((5.0, 5.0),)), ((897, 2621), ((4.0, 5.0), (4.0, 4.0), (2.0, 2.0)))] ----------------------------------------------------------------- """ with Timer() as t: aggUserPairs = aggregatePairs(userPairs) print "aggregate user pairs: %s seconds" % t.secs print aggUserPairs.count() aggUserPairs.take(2) # calculate pearson correlation to figure out user-to-user similarity in the format of: # [( (test_user_id, train_user_id), (pearson_correlation, num_rating_pairs) )] userPairSimilarities = aggUserPairs.mapValues(calculatePearson) userPairSimilarities.sortByKey() print userPairSimilarities.count() userPairSimilarities.take(5) ``` find nearest neighbors 1. select neighbors whose similarity correlation is greater than the threshold of 0.5 2. select top n neighbors with the highest correlation ``` # 1. # a. select neighbors whose similarity correlation is greater than the threshold of 0.5 # b. 
remove user pairs that do not share a minimum of 5 reviews # output: number of user pairs that passes minPearson = 1692207 # number of user pairs that passes both minPearson and minSimilarReviews = 533407 minPearson = 0.5 minSimilarReviews = 5 userPairPassThreshold = userPairSimilarities.filter( lambda (userPair, (pearson_correlation, num_rating_pairs)): pearson_correlation > minPearson and num_rating_pairs >= minSimilarReviews ) print userPairPassThreshold.count() userPairPassThreshold.take(5) # 2. select top n neighbors for each test user from pyspark.rdd import RDD import heapq def takeOrderedByKey(self, topN, sortValueFn=None, ascending=False): def base(a): return [a] def combiner(agg, a): agg.append(a) return getTopN(agg) def merger(x, y): agg = x + y return getTopN(agg) def getTopN(agg): if ascending == True: return heapq.nsmallest(topN, agg, sortValueFn) else: return heapq.nlargest(topN, agg, sortValueFn) return self.combineByKey(base, combiner, merger) # add takeOrderedByKey() function to RDD class RDD.takeOrderedByKey = takeOrderedByKey # convert # [( (test_user_id, train_user_id), (pearson_correlation, num_rating_pairs) )] # to # [( test_user_id, [(test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)] )] # so that you can sort by test_user_id after sorting the highest pearson correlation per test_user_id testU_testUtrainU_sim = userPairPassThreshold.map( lambda ((test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)): (test_user_id, ((test_user_id, train_user_id), (pearson_correlation, num_rating_pairs))) ) print testU_testUtrainU_sim.count() testU_testUtrainU_sim.take(5) # for each test user, take the top N neighbors and ordering with the highest pearson correlation first # [( test_user_id, [(test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)] )] topN = 20 testUserTopNeighbors = testU_testUtrainU_sim.takeOrderedByKey( topN, sortValueFn=lambda ((test_user_id, train_user_id), (pearson_correlation, 
num_rating_pairs)): (pearson_correlation, num_rating_pairs), ascending=False) # note: testUserTopNeighbors.count() should be less than the number of users print testUserTopNeighbors.count() testUserTopNeighbors.take(5) num_distinct_test_users = m_ur_test.map(lambda (movie_id, (user_id, rating)): user_id).distinct().count() num_distinct_test_users_pass_threshold = userPairPassThreshold.map(lambda ((test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)): test_user_id).distinct().count() num_test_users_in_top_neighbors = testUserTopNeighbors.count() print "num_distinct_test_users = ", num_distinct_test_users print "num_distinct_test_users that passes the threshold check (aka pearson > 0.5, minReviews >= 5) = ", num_distinct_test_users_pass_threshold print "num_test_users in testUserTopNeighbors = ", num_test_users_in_top_neighbors # flattened version, meaning # convert # [( test_user_id, [(test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)] )] # to # [( (test_user_id, train_user_id), (pearson_correlation, num_rating_pairs) )] testUserTopNeighborsFlattened = testUserTopNeighbors.flatMap(lambda (test_user_id, rest): rest) print testUserTopNeighborsFlattened.count() testUserTopNeighborsFlattened.take(5) ``` ### Compute Predictions \#1: using weighted sum of rating values __!!!!!! TODO !!!!!!!__ ### Compute Predictions \#2: using a weighted sum of deviations from the mean ``` P = predicted rating of user a on movie i M = mean rating of user a S = sum of ((pearson correlation of user a and each user u who rates movie i) * (rating of each user u on movie i - mean rating of user u)) D = sum of (absolute value of pearson correlation of user a and each user u who rates movie i) P = M + S/D ``` __!!!!!! 
TODO !!!!!!!__ ``` # determine mean rating of each test user (aka find M) # output: [(user_id, mean_rating)] # convert to [(user_id, rating)] ur = m_ur.map(lambda (movie_id, (user_id, rating)): (user_id, rating)) # [(user_id, (sum_rating, num_rating))] u_sumRating_numRating = ur.combineByKey( lambda first_rating: (first_rating, 1), lambda x, first_rating: (x[0] + first_rating, x[1] + 1), lambda x, y: (x[0] + y[0], x[1] + y[1])) # [(test_user_id, mean_rating)] u_meanRating = u_sumRating_numRating.map( lambda (user_id, (sum_rating, num_rating)): (user_id, sum_rating/num_rating)) u_meanRating.take(5) # for each movie i, # determine pearson correlation of user a and all other users who rates movie i # determine rating of each user u on movie i - mean rating of user u # testUserTopNeighborsFlattened == [( (test_user_id, train_user_id), (pearson_correlation, num_rating_pairs) )] # M_testUR_trainUR == # [movie_id, ( (test_user_id, test_rating), (train_user_id, train_rating) )] # movie_id, (for every users who rate movie_id, add all pearson correlation * rating of user u on movie i - mean rating of user u) # compute predictions #2 # using a weighted sum of deviations from the mean """ sum of user u has rated(pearson correlation of user a and user u) * (rating of user u on movie i - mean rating of user u) divided by """ ``` ### 6.A.1. 
Pearson's Item-Based Approach: comparing MOVIES similarities ``` # list all ratings in the format: # [user_id, (movie_id, rating)] print u_mr.count() u_mr.take(5) # list all combinations of movies rated by the same user in the format: # [user_id, ( (movie_id_1, rating_1), (movie_id_2, rating_2) )] # this is to find movie's similarity with each other sameUserRatingsCombo = u_mr.join(u_mr) print sameUserRatingsCombo.count() sameUserRatingsCombo.take(5) # filter out duplicate pairs def removeDuplicates((user_id, ratings)): (movie_id_1, rating_1) = ratings[0] (movie_id_2, rating_2) = ratings[1] return movie_id_1 < movie_id_2 sameUserRatingsCombo = sameUserRatingsCombo.filter(removeDuplicates) print sameUserRatingsCombo.count() sameUserRatingsCombo.take(5) # rearrange so that it will be in the format of movie pairs: # [(movie_id_1, movie_id_2), (rating_1, rating2)] def createMoviePairs((user_id, ratings)): (movie_id_1, rating_1) = ratings[0] (movie_id_2, rating_2) = ratings[1] return ((movie_id_1, movie_id_2), (rating_1, rating_2)) moviePairs = sameUserRatingsCombo.map(createMoviePairs) print moviePairs.count() moviePairs.take(5) # congregate all ratings for each movie pair so that it will be in the format of: # [( movie_id_1, movie_id_2), (rating_1, rating_2), (rating_1, rating_2), ...] 
moviePairRatings = moviePairs.groupByKey() print moviePairRatings.count() moviePairRatings.take(5) # calculate pearson correlation approach #1 # using udemy's approach # I prefer approach #2 import math def computePearsonCorrelationCoefficient(ratingPairs): numPairs = 0 if not ratingPairs: return (0, 0) muX = sum(1.*ratingX for (ratingX, _) in ratingPairs)/len(ratingPairs) muY = sum(1.*ratingY for (_, ratingY) in ratingPairs)/len(ratingPairs) cov = sum_sqdev_x = sum_sqdev_y = 0 for ratingX, ratingY in ratingPairs: dev_x = ratingX - muX dev_y = ratingY - muY cov += dev_x * dev_y sum_sqdev_x += dev_x**2 sum_sqdev_y += dev_y**2 numPairs += 1 numerator = cov denominator = math.sqrt(sum_sqdev_x) * math.sqrt(sum_sqdev_y) score = 0 if (denominator): score = (numerator / (float(denominator))) return (score, numPairs) moviePairSimilarities = moviePairRatings.mapValues(computePearsonCorrelationCoefficient).cache() moviePairSimilarities.sortByKey() moviePairSimilarities.take(5) print moviePairRatings.count() print moviePairSimilarities.count() # calculate pearson correlation approach #2 # using scipy # note: you cannot use pyspark.mllib.stat.Statistics's corr() function within the map function from scipy.stats import pearsonr def calculatePearson(ratingPairsPerMoviePairResultIterable): ratingPairsPerMoviePair = tuple(ratingPairsPerMoviePairResultIterable) rating1s = [rating1 for (rating1, _) in ratingPairsPerMoviePair] rating2s = [rating2 for (_, rating2) in ratingPairsPerMoviePair] pearson_correlation, p_value = pearsonr(rating1s, rating2s) return (pearson_correlation, len(ratingPairsPerMoviePair)) moviePairSimilarities2 = moviePairRatings.mapValues(calculatePearson).cache() moviePairSimilarities2.sortByKey() moviePairSimilarities2.take(5) print moviePairRatings.count() print moviePairSimilarities2.count() ``` ## 6.A.2. Calculate Constraint Pearson Correlation In the case of MovieLens data, it means any ratings greater than 3 (aka positive ratings). ### 6.A.2. 
Constraint Pearson's User-Based Approach: comparing USERS similarities This is the same as Pearson's User-Based Approach with the exception that it filters out ratings that are 2 or less. ### 6.A.2. Constraint Pearson's Item-Based Approach: comparing MOVIES similarities This is the same as Pearson's Item-Based Approach with the exception that it filters out ratings that are 2 or less. ## 6.B. Calculate Probabilistic Similarity ### 6.B. Probabilistic's Item-Based Approach: comparing MOVIES similarity According to the article, this is supposed to be the best item-based approach. ## 6.C.1. Calculate Cosine Similarity ## 6.C.2. Calculate Adjusted Cosine Similarity ## 6.D. Comparing Similarities' Measurement Graph user-based approaches using the deviation prediction scheme (MAE) and different neighborhood sizes (K) ## 7. Develop Model Comparing distance measures for model-based approaches using the mean item rating prediction scheme and different number of clusters (K) * Manhattan * Euclidean Comparing prediction schemes for model-based approaches using the Euclidean distance and different numbers of clusters (K) * Mean Item * Bayes MAE Comparing different clustering algorithms for model-based approaches using the Euclidean distance, the mean item rating prediction scheme, and different numbers of clusters (K) * K-Means * Bisecting * LAC * SSC ``` # divide movielens data into 10 parts to perform 10-fold cross-validation # training model using 9 parts # test model using last part # results are better when default ratings are based on item information than when they are based on user information # using mean rating is better than using majority rating ``` ### 7.A. Implement Bayes What is Bayes? 1. We have a prior belief in A 2. We observe evidence X (for example, the number of tests A passes) 3.
Bayesian inference merely uses the evidence X to connect prior probabilities P(A) with the updated posterior probability P(A|X) ``` P(A|X) = P(X|A) * P(A) / P(X) P(A|X) = Posterior Probability: the posterior probability of class (A, target) given predictor(X, attributes) P(X|A) = Likelihood: the likelihood which is the probability of predictor given class P(A) = Class Prior Probability: prior probability of class P(X) = Predictor Prior Probability: prior probability of predictor ``` Types of Bayes: 1. Maximum A Posteriori (MAP) : predict the most probable rating 2. Mean Squared Error (MSE): compute the weighted sum of ratings that corresponds to minimizing the expectation of MSE 3. Mean Absolute Error (MAE): select the rating that minimizes the expectation of Mean Absolute Error Table of Contents: ``` 7.A. Implement Bayes 7.A.1. Implement Naive Bayes using PySpark: DONE 7.A.2. Implement Naive Bayes using PyMC 7.A.3. Implement Naive Bayes manually: DONE * Implement Bayes MAP: DONE * Implement Bayes MSE: DONE * Implement Bayes MAE: DONE ``` #### 7.A.1. Implementing Naive Bayes using PySpark It does not support computation for Bayes MAP, MSE, and MAE because it does not provide a probability distribution over labels (aka rating) for the given featureset (aka user_id, movie_id).
``` from pyspark.mllib.classification import NaiveBayes from pyspark.mllib.regression import LabeledPoint # To use MLlib's Naive Bayes model, it requires the input to be in a format of a LabeledPoint # therefore, convert dataset so that it will be in the following format: # [(rating, (user_id, movie_id))] r_um = ratingsdf.map(lambda row: LabeledPoint(row.rating, (row.user_id, row.movie_id))) # split the data r_um_train, r_um_test, r_um_validation = r_um.randomSplit(weights, seed) # train a Naive Bayes model naiveBayesModel = NaiveBayes.train(r_um_train, lambda_=1.0) # save this Naive Bayes model #naiveBayesModel.save(sc, "NaiveBayes_MovieLens1M_UserUser") # load this Naive Bayes model into the SparkContext #sameNaiveBayesModel = NaiveBayesModel.load(sc, "NaiveBayes_MovieLens1M_UserUser") # make prediction # [((test_user_id, test_movie_id), (predicted_rating, actual_rating))] r_um_predicted = r_um_test.map( lambda p: ( (p.features[0], p.features[1]), (naiveBayesModel.predict(p.features), p.label) ) ) print r_um_predicted.take(5) ``` [((2.0, 593.0), (1.0, 5.0)), ((2.0, 1955.0), (1.0, 4.0)), ((5.0, 3476.0), (1.0, 3.0)), ((5.0, 1093.0), (1.0, 2.0)), ((6.0, 3508.0), (1.0, 3.0))] ``` # test accuracy sameRating = r_um_predicted.filter( lambda ((test_user_id, test_movie_id), (predicted_rating, actual_rating)): predicted_rating == actual_rating) accuracy = 1.0 * sameRating.count() / r_um_test.count() print "accuracy = (predicted_rating == actual_rating)/total_num_ratings = ", accuracy ``` accuracy = (predicted_rating == actual_rating)/total_num_ratings = 0.162442085039 ``` # calculate RMSE and MAE # convert into two vectors where # one vector describes the actual ratings in the format [(user_id, movie_id, actual_rating)] # second vector describes the predicted ratings in the format [(user_id, movie_id, predicted_rating)] actual = r_um_predicted.map( lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating) ) 
predicted = r_um_predicted.map( lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, predicted_rating) ) print "actual:\n", actual.take(5) print "predicted:\n", predicted.take(5) rmse = pm.calculate_rmse_using_rdd(actual, predicted) print "rmse = ", rmse mae = pm.calculate_mae_using_rdd(actual, predicted) print "mae = ", mae ``` actual: [(7.0, 3793.0, 3.0), (8.0, 2490.0, 2.0), (15.0, 1343.0, 3.0), (16.0, 2713.0, 2.0), (17.0, 457.0, 5.0)] predicted: [(7.0, 3793.0, 1.0), (8.0, 2490.0, 1.0), (15.0, 1343.0, 1.0), (16.0, 2713.0, 1.0), (17.0, 457.0, 1.0)] rmse = 2.26584476437 mae = 1.88503067116 #### Implementing Naive Bayes using PyMC #### Implementing Naive Bayes manually Probability of rating r for a given user u on a given item i can be defined as follows: $$P(r|u, i) = \frac{P(r|u) * P(r|i)}{P(r)}*\frac{P(u) * P(i)}{P(u, i)}$$ We make the **assumption** that this is the same as: $$P(r|u, i) = \frac{P(r|u) * P(r|i)}{P(r)}$$ The last three probabilities P(u), P(i), and P(u, i) can be ignored since they are the same for all users and items. We will compute P(r|u), P(r|i), and P(r) individually before congregating them in a final computation. 
``` # determine min and max of ratings minRating = ratingsdf.map(lambda row: row.rating).min() maxRating = ratingsdf.map(lambda row: row.rating).max() print "minRating = ", minRating print "maxRating = ", maxRating ``` Output example: ``` minRating = 1.0 maxRating = 5.0 ``` ``` # create RDD for the range of ratings # [(1, 2, 3, 4, 5)] rangeOfRatings = sc.parallelize( list(range(int(minRating), int(maxRating + 1))) ) print rangeOfRatings.collect() print rangeOfRatings.count() ``` Output example: ``` [1, 2, 3, 4, 5] 5 ``` ``` # [(user_id, movie_id, rating)] umr = ratingsdf.map(lambda row: (row.user_id, row.movie_id, row.rating)) umr.count() ``` 1000209 ``` # since we have to determine the probability of rating r for each user_id and movie_id, # we have to create a RDD with [(rating, (user_id, movie_id))] for each rating # ie. (rating_1, (user_id, movie_id)), (rating_2, (user_id, movie_id)), ..., (rating_5, (user_id, movie_id)) um = umr.map(lambda (user_id, movie_id, rating): (user_id, movie_id)) rCombo_um = rangeOfRatings.cartesian(um).map(lambda (rating, (user_id, movie_id)): (float(rating), (user_id, movie_id))) print rCombo_um.take(2) print rCombo_um.count() # == umr.count() * 5 ``` [(1.0, (1, 1197)), (1.0, (1, 938))] 5001045 ``` umrCombo = rCombo_um.map(lambda (rating, (user_id, movie_id)): (user_id, movie_id, rating)) print umrCombo.take(2) print umrCombo.count() ``` [(1, 1197, 1.0), (1, 938, 1.0)] 5001045 ``` # since we have to determine the probability of rating r for each user_id and movie_id, # we have to create a RDD with [(rating, (user_id, movie_id))] for each rating # ie. 
(rating_1, (user_id, movie_id)), (rating_2, (user_id, movie_id)), ..., (rating_5, (user_id, movie_id)) um_test = umr_test.map(lambda (user_id, movie_id, rating): (user_id, movie_id)) rCombo_um_test = rangeOfRatings.cartesian(um_test).map(lambda (rating, (user_id, movie_id)): (float(rating), (user_id, movie_id))) print rCombo_um_test.take(2) print rCombo_um_test.count() # == umr.count() * 5 ``` [(1.0, (2, 593)), (1.0, (2, 1955))] 501170 ``` umrCombo_test = rCombo_um_test.map(lambda (rating, (user_id, movie_id)): (user_id, movie_id, rating)) print umrCombo_test.take(2) print umrCombo_test.count() ``` [(2, 593, 1.0), (2, 1955, 1.0)] 501170 ##### Calculating P(r|u) , probability of rating r for user u $$ P(r|u) = { numberOfParticularRatingThatUserGives \over totalNumberOfRatingsThatUserGives }$$ ``` P(r|u) = (number of ratings r that user u gives) / (total number of ratings that user u gives) For example: r == 1 P(r|u) = (number of ratings r == 1 that user u gives) / (total number of ratings that user u gives) ``` ``` # [((user_id, rating), 1)] ur_1 = umr.map(lambda (user_id, movie_id, rating): ((user_id, rating), 1)) ur_1.take(2) ``` [((1, 3.0), 1), ((1, 4.0), 1)] ``` ur_1.count() ``` 1000209 ``` # [(((user_id, rating_1), 0), ((user_id, rating_2), 0), ..., ((user_id, rating_5), 0))] urCombo_0 = umrCombo.map(lambda (user_id, movie_id, rating): ((user_id, rating), 0)).distinct() #print urCombo_0.sortByKey().collect() print urCombo_0.count() ``` 30200 ``` ur_1Or0 = ur_1.union(urCombo_0) print ur_1Or0.take(2) print ur_1Or0.count() # ur_1Or0.count() == ur_1.count() + urCombo_0.count() # 1000209 + 30200 # 1030409 ``` [((1, 3.0), 1), ((1, 4.0), 1)] 1030409 ``` ur_1Or0.sortByKey().collect() from operator import add # [(user_id, rating), (num_rating)] ur_numRating = ur_1Or0.reduceByKey(add) print ur_numRating.take(2) print ur_numRating.count() ``` [((3577, 5.0), 29), ((1260, 2.0), 13)] 30200 ``` # [(user_id, (rating, num_rating))] u_r_numRating = ur_numRating.map(lambda 
((user_id, rating), num_rating): (user_id, (rating, num_rating))) print u_r_numRating.take(2) print u_r_numRating.count() ``` [(3577, (5.0, 29)), (1260, (2.0, 13))] 30200 ``` # [(user_id, total_rating)] u_totalRating = sc.parallelize(umr.map(lambda (user_id, movie_id, rating): (user_id, rating)).countByKey().items()) print u_totalRating.take(2) print u_totalRating.count() ``` [(1, 53), (2, 129)] 6040 ``` # [(user_id, (total_rating, (rating, num_rating)))] u_componentsOfProb = u_totalRating.join(u_r_numRating) print u_componentsOfProb.take(2) print u_componentsOfProb.count() ``` [(2850, (43, (4.0, 12))), (2850, (43, (1.0, 5)))] 30200 ``` # [(user_id, rating, probRU)] probRU = u_componentsOfProb.map(lambda (user_id, (total_rating, (rating, num_rating))): (user_id, rating, float(num_rating)/float(total_rating)) ) print probRU.take(2) print probRU.count() ``` [(2850, 1.0, 0.11627906976744186), (2850, 3.0, 0.18604651162790697)] 30200 ##### Calculating P(r|i) $$ P(r|i) = { numberOfParticularRatingThatItemReceives \over totalNumberOfRatingsThatItemReceives }$$ ``` P(r|i) = (number of ratings r that item i receives) / (total number of ratings that item i receives) For example: r == 1 P(r|i) = (number of ratings r == 1 that movie i receives) / (total number of ratings that movie i receives) ``` ``` # [((movie_id, rating), 1)] mr_1 = umr.map(lambda (user_id, movie_id, rating): ((movie_id, rating), 1)) mr_1.take(2) ``` [((1197, 3.0), 1), ((938, 4.0), 1)] ``` mr_1.count() ``` 1000209 ``` # [(((user_id, rating_1), 0), ((user_id, rating_2), 0), ..., ((user_id, rating_5), 0))] mrCombo_0 = umrCombo.map(lambda (user_id, movie_id, rating): ((movie_id, rating), 0)).distinct() #print mrCombo_0.sortByKey().collect() print mrCombo_0.count() ``` 18530 ``` mr_1Or0 = mr_1.union(mrCombo_0) print mr_1Or0.take(2) print mr_1Or0.count() # ur_1Or0.count() == ur_1.count() + urCombo_0.count() # 1000209 + 18530 # 1018739 ``` [((1197, 3.0), 1), ((938, 4.0), 1)] 1018739 ``` # [(movie_id, rating), 
(num_rating)] mr_numRating = mr_1Or0.reduceByKey(add) print mr_numRating.take(2) print mr_numRating.count() ``` [((3577, 5.0), 3), ((1260, 2.0), 6)] 18530 ``` # OPTION instead of using union() and then reduceByKey() """ mr_1Or0 = mr_1.reduceByKey(add).rightOuterJoin(mrCombo_0) print mr_1Or0.take(2) print mr_1Or0.count() """ """ [((2001, 5.0), (129, 0)), ((3654, 4.0), (266, 0))] 18530 """ # [(movie_id, (rating, num_rating))] m_r_numRating = mr_numRating.map(lambda ((movie_id, rating), num_rating): (movie_id, (rating, num_rating))) print m_r_numRating.take(2) print m_r_numRating.count() ``` [(391, (3.0, 18)), (518, (4.0, 22))] 18530 ``` # [(movie_id, total_rating)] m_totalRating = sc.parallelize(umr.map(lambda (user_id, movie_id, rating): (movie_id, rating)).countByKey().items()) print m_totalRating.take(2) print m_totalRating.count() ``` [(1, 2077), (2, 701)] 3706 ``` # [(user_id, (total_rating, (rating, num_rating)))] m_componentsOfProb = m_totalRating.join(m_r_numRating) print m_componentsOfProb.take(2) print m_componentsOfProb.count() ``` [(3808, (44, (5.0, 17))), (3808, (44, (3.0, 8)))] 18530 ``` # [(movie_id, rating, probRI)] probRI = m_componentsOfProb.map(lambda (movie_id, (total_rating, (rating, num_rating))): (movie_id, rating, float(num_rating)/float(total_rating)) ) print probRI.take(2) print probRI.count() ``` [(3808, 5.0, 0.38636363636363635), (3808, 4.0, 0.36363636363636365)] 18530 #####P(r) = numRating / totalRatings ie. 
rating = 1 P(r) = (number of rating == 1) / (total number of ratings) ``` totalRatings = umr.count() print totalRatings ``` 1000209 ``` # [(rating, 1)] r_1 = umr.map(lambda (user_id, movie_id, rating): (rating, 1)) # [(rating, num_rating)] r_numRating = r_1.reduceByKey(add) # [(rating, probR)] probR = r_numRating.mapValues(lambda num_rating: float(num_rating)/float(totalRatings)) probR.take(2) ``` [(1.0, 0.05616226208722377), (2.0, 0.1075345252842156)] ##### P(r | a, i) = (P(r|u) * P(r|i) / P(r)) * (P(u) * P(i) / P(u, i)) = P(r|u) * P(r|i) / P(r) ``` # add probR to user_id, movie_id, rating components = rCombo_um.join(probR) print components.take(2) print components.count() ``` [(1.0, ((1, 914), 0.05616226208722377)), (1.0, ((1, 594), 0.05616226208722377))] 5001045 ``` # add probRU to user_id, movie_id, rating, probR tmp_a = components.map(lambda (rating, ((user_id, movie_id), prob_r)): ((user_id, rating), (movie_id, prob_r))) tmp_b = probRU.map(lambda (user_id, rating, prob_ru): ((user_id, rating), prob_ru)) components = tmp_a.join(tmp_b) print components.take(2) print components.count() ``` [((327, 1.0), ((1248, 0.05616226208722377), 0.038135593220338986)), ((327, 1.0), ((1254, 0.05616226208722377), 0.038135593220338986))] 5001045 ``` # add probRI to user_id, movie_id, rating, probR, probRU tmp_a = components.map(lambda ( (user_id, rating), ((movie_id, prob_r), prob_ru) ): ( (movie_id, rating), (user_id, prob_r, prob_ru) ) ) tmp_b = probRI.map(lambda (movie_id, rating, prob_ri): ((movie_id, rating), prob_ri)) components = tmp_a.join(tmp_b) print components.take(2) print components.count() ``` [((1644, 5.0), ((1605, 0.22626271109338147, 0.038381742738589214), 0.056842105263157895)), ((1644, 5.0), ((1451, 0.22626271109338147, 0.3022636484687084), 0.056842105263157895))] 5001045 ``` # re-format # [((user_id, movie_id, rating), bayes_probability)] componentsReformat = components.map(lambda ((movie_id, rating), ((user_id, prob_r, prob_ru), prob_ri)): ((user_id, 
movie_id, rating), (prob_r, prob_ru, prob_ri)) ) # calculate bayes probability bayesProb = componentsReformat.mapValues(lambda (prob_r, prob_ru, prob_ri): prob_ru * prob_ri / prob_r) print bayesProb.take(2) ``` [((2168, 135, 4.0), 0.13697613242692502), ((4808, 135, 4.0), 0.12445827900425674)] ``` print "umr = ", umr.count() print "probR = ", probR.count() print "probRU = ", probRU.count() print "probRI = ", probRI.count() print "bayesProb = ", bayesProb.count() # note: bayesProb.count() = umr.count() * 5 # bayesProb = umr_train * 5 1000209 * 5 # extract only user_id, movie_id in umr_test from bayes_prob # remember that we have to extract the bayes_prob for each rating too # [(user_id, movie_id, rating)] print "umrCombo_test.count() = ", umrCombo_test.count() # [((user_id, movie_id, rating), bayes_prob)] print "bayesProb.count() = ", bayesProb.count() # [((user_id, movie_id), (rating, bayes_prob))] tmp_a = umrCombo_test.map(lambda (user_id, movie_id, rating): ((user_id, movie_id, rating), 1)) tmp_b = bayesProb bayesProb_test = tmp_a.join(tmp_b).map( lambda ((user_id, movie_id, rating), (_, bayes_prob)): ((user_id, movie_id), (rating, bayes_prob))) print bayesProb_test.take(2) print bayesProb_test.count() # == umrCombo_test.count() ``` umrCombo_test.count() = 501170 bayesProb.count() = 5001045 [((5522, 2157), (3.0, 0.2209227724078584)), ((5786, 3210), (2.0, 0.08545729298368235))] 501170 ``` # [((user_id, movie_id), [(rating_1, bayes_prob_1), (rating_2, bayes_prob_2), ..., (rating_5, bayes_prob_5)])] um_allBayesProb = bayesProb_test.mapValues(lambda value: [value]).reduceByKey(lambda a, b: a + b) print um_allBayesProb.take(2) print um_allBayesProb.count() # == bayesProb_test.count()/5 == umr_test.count() == 100234 ``` [((4335, 1588), [(3.0, 0.5498862085999521), (1.0, 0.016548382705956422), (2.0, 0.13615664002520045), (4.0, 0.32236074697317796), (5.0, 0.025783030822306607)]), ((4728, 1894), [(5.0, 0.01634723322124617), (3.0, 0.7342812664378788), (4.0, 
def calculate_bayes_map(value):
    """Maximum A Posteriori (MAP) prediction: return the rating with the
    highest Bayes probability.

    Parameters
    ----------
    value : list of (rating, bayes_prob) tuples, one per candidate rating
        (1.0 .. 5.0) for a single (user, movie) pair.

    Returns
    -------
    The rating whose posterior probability is largest.

    Bug fix: the original returned the 0-based *index* of the maximum
    probability (the notebook output shows predictions of 0), which was
    then compared against actual ratings in the 1-5 range, inflating the
    downstream RMSE/MAE.
    """
    best_rating, _best_prob = max(value, key=lambda pair: pair[1])
    return best_rating
def calculate_bayes_mse(value):
    """Minimum-MSE prediction: the expected rating under the Bayes
    posterior, i.e. sum(r * P(r|a,i)) over the candidate ratings.

    Parameters
    ----------
    value : list of (rating, bayes_prob) tuples for one (user, movie) pair.

    Returns
    -------
    float
        The probability-weighted sum of the candidate ratings (0.0 for an
        empty list, matching the original accumulator's starting value).
    """
    return sum((rating * prob for rating, prob in value), 0.0)
def calculate_bayes_mae(value):
    """Minimum-MAE prediction: the rating r in 1..5 that minimizes the
    expected absolute error  sum_n P(n|a,i) * |r - n|  (the formula stated
    in the cell above this one).

    Parameters
    ----------
    value : list of (rating, bayes_prob) tuples, one per candidate rating
        for a single (user, movie) pair.

    Returns
    -------
    The candidate rating (1..5) with the smallest expected absolute error.

    Bug fix (per the original's own "# TODO: fix this the same as argmax"):
    the original looped over the observed (rating, prob) pairs on the
    *outer* loop — computing sum_r P(n)*|n-r| for each n instead of
    sum_n P(n)*|r-n| for each candidate r — and then returned a 0-based
    list index rather than the minimizing rating itself.
    """
    candidates = list(range(1, 6))
    # Expected absolute error for each candidate prediction r.
    expected_error = [
        sum(prob * abs(r - rating) for rating, prob in value)
        for r in candidates
    ]
    return candidates[expected_error.index(min(expected_error))]
(user_id, movie_id, rating): ((user_id, movie_id), rating)) tmp_b = predicted_bayes_mae um_trainBayesMae = tmp_a.join(tmp_b) y_train = um_trainBayesMae.map( lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating) ) # y_predicted y_predicted = um_testBayesMae.map( lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, predicted_rating) ) pm_results_bayes_mae = get_results(y_test, y_train, y_predicted, content_array, sqlCtx) pprint(pm_results_bayes_mae) ```
github_jupyter
# Sentiment Analysis - SQLAlchemy
Firm_ID = Column(Integer, primary_key=True) Firm_Name = Column(String()) class Staff(Base): __tablename__ = Table('staff', Base.metadata, autoload=True, autoload_with=engine) ID = Column(Integer, primary_key=True) Emp_ID = Column(Integer) Name_Prefix = Column(String()) First_Name = Column(String()) Middle_Initial = Column(String()) Last_Name = Column(String()) Gender = Column(String()) E_Mail = Column(String()) Father_Name = Column(String()) Mother_Name = Column(String()) Mother_Maiden_Name = Column(String()) Date_of_Birth = Column(String()) Time_of_Birth = Column(String) Age_in_Years = Column(Float()) Weight_in_Kgs = Column(Integer()) Date_of_Joining = Column(String) Quarter_of_Joining = Column(String()) Half_of_Joining = Column(String()) Year_of_Joining = Column(Integer()) Month_of_Joining = Column(Integer()) Month_Name_of_Joining = Column(String()) Short_Month = Column(String()) DOW_of_Joining = Column(String()) Short_DOW = Column(String()) Day_of_Joining = Column(Integer()) Age_in_Company_Years = Column(Float()) Salary = Column(Float()) SSN = Column(String()) Phone_No = Column(String()) Place_Name = Column(String()) County = Column(String()) City = Column(String()) State = Column(String()) Zip = Column(Integer) Region = Column(String()) User_Name = Column(String()) Password = Column(String()) Firm_ID = Column(Integer) class Sales(Base): __tablename__ = Table('sales', Base.metadata, autoload=True, autoload_with=engine) ID = Column(Integer, primary_key=True) Order_Number = Column(Integer) Quantity_Ordered = Column(Integer) Price_of_Each = Column(Float) Order_Line_Number = Column(Integer) Sales = Column(Float) Revenue = Column(Float) Order_Date = Column(String) Status = Column(String) Quarter_ID = Column(Integer) Month_ID = Column(Integer) Year_ID = Column(Integer) Product_Line = Column(String) MSRP = Column(Integer) Product_Code = Column(String) Customer_Name = Column(String) Phone = Column(String) Address_Line_1 = Column(String) Address_Line_2 = Column(String) 
City = Column(String) State = Column(String) Postal_Code = Column(Integer) Country = Column(String) Territory = Column(String) Contact_Last_Name = Column(String) Contact_First_Name = Column(String) Deal_Size = Column(String) Firm_ID = Column(Integer) # Mapping classes with tables in TablePlus's databases # Should not create tables by Python but TablePlus from sqlalchemy.orm import sessionmaker #Session = sessionmaker() #Session.configure(bind=engine) Session = sessionmaker(bind=engine) # writing queries requires session before executing queries session = Session() # object #Base.metadata.create_all(engine) ``` ## E. Insert all rows of each dataframe to database's tables in TablePlus's MySQL Database New Method: inserting directly from data frames Inserting takes long time means that selecting or filtering will take less time, and in reverse, due to adding IDX for a column or different columns depending on purposes of saving data into relational database only or reading the data. I will insert dataframes in batches into session (relational database), then commit to finalize saving. If an error happen, that batch will be stopped inserting and still stay in the session and other batches will not be entered into the session as well if flush() is placed outside 'for loop'. Therefore, if flush() is placed inside the for loop, batches will be flushed into the session regarless any error might occur. But we have to set rollback() in the except case to delete any existing batches in the session causing an error. 
### Insert 'df_Firm' dataframe into 'firm' table in the MySQL database
text: string type to fix error and replace all string texts into being wrapped by unicode except Exception as er: print('Error at index {}: '.format(i)) print(traceback.format_exc()) # print error(s) print('-' * 20) DBSession.rollback() # rollback here to delete all rows of a batch/batches causing errors to avoid being flooded or stuck with new batches coming in and then getting stuck as well error_i_list.append(i) # append into array the index of batch(es) causing errors # DBSession.commit() # 1st attempt: place commit() here, outside of 'for loop' # faster but will stop other batches coming in if errors happen print( "Firm's SQLAlchemy ORM: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") if __name__ == '__main__': firm_sqlalchemy_orm(df_Firm.shape[0]) ``` ### Insert 'df_Sales' dataframe into 'sales' table in the MySQL database NaN values appeared in the column State because many countries don't have states like the US so I had replaced them to 'None'. nan values appeared in the column Territory because of 'NA' standing for 'North America' so I had replaced it with 'N.A'. Both the Sales and Staff tables didn't have their own ID columns so I had to created one for each as the Primary Key. However, there were some rows in Sales were collapsed. Then I went to Data > Ungroup > Clear Outline to expand all collapsed rows. After that, I fill auto-increment values for ID column by rename the ID column by ROW() for cell A1. Cell A2 will be =ROW(A1) and I copied A2's formula for the rest of cells to get all ID rows filled with unique numbers. I had set up the DateTime typed columns in both files by timestamp in the MySQL Database. However, there was a problem with doing so although this method is correct with other datasets with the same value. Then I had to change all timestamp type in MySQL into varchar and DateTime(timezone=True)) in SQLAlchemy into String(). 
``` import time #import mysql.connector # as below mysql, not sqlite3 for this case import traceback from tqdm import tqdm from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine # use sqlalchemy with connection string for mysql from sqlalchemy.orm import scoped_session, sessionmaker Base = declarative_base() DBSession = scoped_session(sessionmaker()) # the scoped_session() function is provided which produces a thread-managed registry of Session objects. It is commonly used in web applications so that a single global variable can be used to safely represent transactional sessions with sets of objects, localized to a single thread. engine = None def init_sqlalchemy(dbname='mysql+mysqldb://phuongdaingo:0505@localhost:3306/customerintention?charset=utf8mb4'): global engine engine = create_engine(dbname, echo=False) DBSession.remove() DBSession.configure(bind=engine, autoflush=False, expire_on_commit=False) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) def sales_sqlalchemy_orm(n=100000): init_sqlalchemy() t0 = time.time() error_i_list = [] # a new list containing i(s) of batch(es) causing errors # Index column must match with ID column of df_Firm > indexing to the row 10th iso using loop with iterows (time consuming), but by using range(df.rows) > take out the 10th row for i in tqdm(range(n)): # use tqdm to track progress try: # create custome, then add into session sales = Sales() sales.ID = df_Sales['ID'].iloc[i] sales.Order_Number = df_Sales['Order_Number'].iloc[i] sales.Quantity_Ordered = df_Sales['Quantity_Ordered'].iloc[i] sales.Price_of_Each = df_Sales['Price_of_Each'].iloc[i] sales.Order_Line_Number = df_Sales['Order_Line_Number'].iloc[i] sales.Sales = df_Sales['Sales'].iloc[i] sales.Revenue = df_Sales['Revenue'].iloc[i] sales.Order_Date = df_Sales['Order_Date'].iloc[i] sales.Status = df_Sales['Status'].iloc[i] sales.Quarter_ID = df_Sales['Quarter_ID'].iloc[i] sales.Month_ID = 
df_Sales['Month_ID'].iloc[i] sales.Year_ID = df_Sales['Year_ID'].iloc[i] sales.Product_Line = df_Sales['Product_Line'].iloc[i] sales.MSRP = df_Sales['MSRP'].iloc[i] sales.Product_Code = df_Sales['Product_Code'].iloc[i] sales.Customer_Name = df_Sales['Customer_Name'].iloc[i] sales.Phone = df_Sales['Phone'].iloc[i] sales.Address_Line_1 = df_Sales['Address_Line_1'].iloc[i] sales.Address_Line_2 = df_Sales['Address_Line_2'].iloc[i] sales.City = df_Sales['City'].iloc[i] sales.State = df_Sales['State'].iloc[i] sales.Postal_Code = df_Sales['Postal_Code'].iloc[i] sales.Country = df_Sales['Country'].iloc[i] sales.Territory = df_Sales['Territory'].iloc[i] sales.Contact_Last_Name = df_Sales['Contact_Last_Name'].iloc[i] sales.Contact_First_Name = df_Sales['Contact_First_Name'].iloc[i] sales.Deal_Size = df_Sales['Deal_Size'].iloc[i] sales.Firm_ID = df_Sales['Firm_ID'].iloc[i] DBSession.add(sales) # error might happen here or below if i % 100 == 0: # when i reachs 100 rows, it will execute by flush() to insert the batch of 100 rows into the session of the relational database DBSession.flush() # should use try, except inside each 'for loop' to wrap i # error might happen here DBSession.commit() #2nd attempt: place commit() here, then compare the progress # commit here to insert batch without affecting other batch(es) with errors except Exception as er: print('Error at index {}: '.format(i)) print(traceback.format_exc()) # print error(s) print('-' * 20) DBSession.rollback() # rollback here to delete all rows of a batch/batches causing errors to avoid being flooded or stuck with new batches coming in and then getting stuck as well error_i_list.append(i) # append into array the index of batch(es) causing errors # DBSession.commit() # 1st attempt: place commit() here, outside of 'for loop' # faster but will stop other batches coming in if errors happen print( "Sales's SQLAlchemy ORM: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") if __name__ == '__main__': 
sales_sqlalchemy_orm(df_Sales.shape[0]) ``` ### Insert 'df_Staff' dataframe into 'staff' table in the MySQL database ``` import time #import mysql.connector # as below mysql, not sqlite3 for this case import traceback from tqdm import tqdm from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine # use sqlalchemy with connection string for mysql from sqlalchemy.orm import scoped_session, sessionmaker import unicodedata Base = declarative_base() DBSession = scoped_session(sessionmaker()) # the scoped_session() function is provided which produces a thread-managed registry of Session objects. It is commonly used in web applications so that a single global variable can be used to safely represent transactional sessions with sets of objects, localized to a single thread. engine = None def init_sqlalchemy(dbname='mysql+mysqldb://phuongdaingo:0505@localhost:3306/customerintention?charset=utf8mb4'): global engine engine = create_engine(dbname, echo=False) DBSession.remove() DBSession.configure(bind=engine, autoflush=False, expire_on_commit=False) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) def staff_sqlalchemy_orm(n=100000): init_sqlalchemy() t0 = time.time() error_i_list = [] # a new list containing i(s) of batch(es) causing errors # Index column must match with ID column of df_Firm > indexing to the row 10th iso using loop with iterows (time consuming), but by using range(df.rows) > take out the 10th row for i in tqdm(range(n)): # use tqdm to track progress try: staff = Staff() staff.ID = df_Staff['ID'].iloc[i] staff.Emp_ID = df_Staff['Emp_ID'].iloc[i] staff.Name_Prefix = df_Staff['Name_Prefix'].iloc[i] staff.First_Name = df_Staff['First_Name'].iloc[i] staff.Middle_Initial = df_Staff['Middle_Initial'].iloc[i] staff.Last_Name = df_Staff['Last_Name'].iloc[i] staff.Gender = df_Staff['Gender'].iloc[i] staff.E_Mail = df_Staff['E_Mail'].iloc[i] staff.Father_Name = df_Staff['Father_Name'].iloc[i] 
staff.Mother_Name = df_Staff['Mother_Name'].iloc[i] staff.Mother_Maiden_Name = df_Staff['Mother_Maiden_Name'].iloc[i] staff.Date_of_Birth = df_Staff['Date_of_Birth'].iloc[i] staff.Time_of_Birth = df_Staff['Time_of_Birth'].iloc[i] staff.Age_in_Years = df_Staff['Age_in_Years'].iloc[i] staff.Weight_in_Kgs = df_Staff['Weight_in_Kgs'].iloc[i] staff.Date_of_Joining = df_Staff['Date_of_Joining'].iloc[i] staff.Quarter_of_Joining = df_Staff['Quarter_of_Joining'].iloc[i] staff.Half_of_Joining = df_Staff['Half_of_Joining'].iloc[i] staff.Year_of_Joining = df_Staff['Year_of_Joining'].iloc[i] staff.Month_of_Joining = df_Staff['Month_of_Joining'].iloc[i] staff.Month_Name_of_Joining = df_Staff['Month_Name_of_Joining'].iloc[i] staff.Short_Month = df_Staff['Short_Month'].iloc[i] staff.DOW_of_Joining = df_Staff['DOW_of_Joining'].iloc[i] staff.Short_DOW = df_Staff['Short_DOW'].iloc[i] staff.Day_of_Joining = df_Staff['Day_of_Joining'].iloc[i] staff.Age_in_Company_Years = df_Staff['Age_in_Company_Years'].iloc[i] staff.Salary = df_Staff['Salary'].iloc[i] staff.SSN = df_Staff['SSN'].iloc[i] staff.Phone_No = df_Staff['Phone_No'].iloc[i] staff.Place_Name = df_Staff['Place_Name'].iloc[i] staff.County = df_Staff['County'].iloc[i] staff.City = df_Staff['City'].iloc[i] staff.State = df_Staff['State'].iloc[i] staff.Zip = df_Staff['Zip'].iloc[i] staff.Region = df_Staff['Region'].iloc[i] staff.User_Name = df_Staff['User_Name'].iloc[i] staff.Password = df_Staff['Password'].iloc[i] staff.Firm_ID = df_Staff['Firm_ID'].iloc[i] DBSession.add(staff) # error might happen here or below if i % 100 == 0: # when i reachs 1000 rows, it will execute by flush() to insert the batch of 1000 rows into the session of the relational database DBSession.flush() # should use try, except inside each 'for loop' to wrap i # error might happen here DBSession.commit() #2nd attempt: place commit() here, then compare the progress # commit here to insert batch without affecting other batch(es) with errors except Exception as 
er: print('Error at index {}: '.format(i)) print(traceback.format_exc()) # print error(s) print('-' * 20) DBSession.rollback() # rollback here to delete all rows of a batch/batches causing errors to avoid being flooded or stuck with new batches coming in and then getting stuck as well error_i_list.append(i) # append into array the index of batch(es) causing errors # DBSession.commit() # 1st attempt: place commit() here, outside of 'for loop' # faster but will stop other batches coming in if errors happen print( "Staff's SQLAlchemy ORM: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") if __name__ == '__main__': staff_sqlalchemy_orm(df_Staff.shape[0]) # number of rows of df as I want --> customized function name ```
github_jupyter
def get_mape(actuals, forecasts):
    """Mean Absolute Percentage Error between two aligned pd.Series.

    MAPE = (1/n) * sum(|F_t - A_t| / A_t), per the formula given above.

    Parameters
    ----------
    actuals : pd.Series of observed values (assumed non-zero — division
        by an actual of 0 yields inf, a known MAPE limitation).
    forecasts : pd.Series of forecast values, index-aligned with actuals.

    Returns
    -------
    float
        The MAPE as a decimal rounded to 4 decimal places (e.g. 0.1032
        means 10.32%).
    """
    mape = (forecasts - actuals).abs().div(actuals).mean()
    return round(float(mape), 4)
Use Triple Exponential Smoothing to create weekly avocado sales forecasts for June 2018 and onward (95 weeks into the future). Fit a model with parameter values recommended by the model and train on data collected from May 2018 and prior.** - `trend` = [FROM PART A] - `seasonal` = [FROM PART A] - `seasonal_periods` = ? - `smoothing_level` = Recommended by model - `smoothing_slope` = Recommended by model - `smoothing_seasonal` = Recommended by model Hint: Remember we can let the model recommend parameters by passing no inputs into the `fit` method: `ExponentialSmoothing(...).fit()` ``` from statsmodels.tsa.holtwinters import ExponentialSmoothing train = avocado[avocado.index <= '2018-05-31'] test = avocado[avocado.index >= '2018-06-01'] ### YOUR CODE HERE model = ... # Fit model fit = ... # Forecast 95 months out pred = ... # Plot the training set, forecast, and test set #plt.plot(pred, label = 'forecast') #plt.plot(train['Units Sold'], label = 'train') #plt.plot(test['Units Sold'], label = 'test') #plt.legend(); ``` **C. Now use Triple Exponential Smoothing to create weekly avocado sales forecasts for June 2018 and onward (95 weeks into the future) but use the following model parameters. Again fit a model on the training data collected from May 2018 and prior. Is the forecast better when the model recommends parameters or with these values?** - `trend` = [FROM PART A] - `seasonal` = [FROM PART A] - `seasonal_periods` = ? - `smoothing_level` = 0.2 - `smoothing_slope` = 0.2 - `smoothing_seasonal` = 0.2 ``` from statsmodels.tsa.holtwinters import ExponentialSmoothing train = avocado[avocado.index <= '2018-05-31'] test = avocado[avocado.index >= '2018-06-01'] ### YOUR CODE HERE ### model = ... # Fit models fit = ... # Forecast 95 months out pred = ... # Plot the training set, forecast, and test set #plt.plot(pred, label = 'forecast') #plt.plot(train['Units Sold'], label = 'train') #plt.plot(test['Units Sold'], label = 'test') #plt.legend(); ``` **D. 
Calculate the MAPE as the `smoothing_slope` parameter changes from 0.01 to 1 in intervals of 0.01. Train your model on all data from May 2018 and prior. Calculate the MAPE by comparing the 95 weeks of forecasts to the test set (June 2018 and onward). Record the MAPE values in a list called `mapes` where the first element is calculated with beta = 0.01 and the last value is calculated with beta = 1.** ``` train = avocado[avocado.index <= '2018-05-31'] test = avocado[avocado.index >= '2018-06-01'] ### YOUR CODE HERE ### def score_train_model(model, beta): # Fit model fit = ... # Forecast 95 months out pred = ... return get_mape(test['Units Sold'], pred) model = ... mapes = [] betas = np.arange(0.1, 1.0, 0.01) for b in betas: score = ... mapes.append(score) ``` We'll plot the error below. We should see that the error is minimized when beta is between about 0.2 and 0.4. A similar searching method can be used to select the other parameter values. ``` # Plot MAPE against Betas #plt.plot(betas, mapes) #plt.xlabel('Betas') #plt.ylabel('MAPE') #plt.title('Mean Absolute Percentage Error (MAPE) vs. Betas'); ``` ## 3. Comparing ARIMA and SARIMA Models **A. Use an ARIMA model to forecast weekly avocado sales for June 2018 and onward (95 weeks into the future). Train the model on the data from May 2018 and prior. Use the following parameters. Then plot the ARIMA forecast, test set, and training set.** - Differencing order = 1 - Autoregressive order = 1 - Moving average order = 1 ``` from statsmodels.tsa.arima_model import ARIMA ### YOUR CODE HERE ### # Define model model = ... # Fit model model_fit = ... # Create forecasts #output = model_fit.forecast(95) #arima_pred = output[0] # Plot forecast, test set, and training set ... ``` **B. Use a SARIMA model to forecast weekly avocado sales for June 2018 and onward (95 weeks into the future). Train the model on the data from May 2018 and before. Use the following parameters. 
Then plot the SARIMA forecast, test set, and training set.**

- Differencing order = 1
- Autoregressive order = 1
- Moving average order = 1
- Seasonal differencing order = 1
- Seasonal autoregressive order = 1
- Seasonal moving average order = 1

```
from statsmodels.tsa.statespace.sarimax import SARIMAX

### YOUR CODE HERE ###
# Define model
model = ...

# Fit model
model_fit = ...

# Create forecasts
sarima_pred = ...

# Plot forecast, test set, and training set
...
```

**C. Calculate the MAPE of the ARIMA and SARIMA forecasts by comparing the 95 weeks of forecasts to the test set. How much did the MAPE improve when seasonality was accounted for?**

```
### YOUR CODE HERE ###
arima_mape = ...
sarima_mape = ...

print('ARIMA MAPE: ', arima_mape)
print('SARIMA MAPE: ', sarima_mape)
#print('Improvement: ', arima_mape - sarima_mape)
```

## 4. Holiday Effects with Facebook's Prophet library

**A. Use Prophet to forecast weekly avocado sales for June 2018 and onward (95 weeks into the future). Train the model on the data from May 2018 and before. Use Prophet's default parameters. Report the forecasts as `prophet_forecast`, a series with 95 forecasts where the first row is the forecast for the first week in June 2018.**

```
from fbprophet import Prophet

# Prophet requires the time series to be a 2 column data series with the Date as 'ds' and the values as 'y'.
avocado_prophet = avocado.reset_index().rename(columns = {'Date':'ds', 'Units Sold':'y'})

### YOUR CODE HERE ###
...

# Fit the model on the time series.
...

# Create a DataFrame of future dates to create forecasts for.
...

# Create forecasts
prophet_forecast = ...
```

**B. Use Prophet to forecast avocado sales again for June 2018 and onward (95 weeks into the future) but now add the Super Bowl and Fourth of July holidays to the model. Train the model on the data from May 2018 and before. Use Prophet's default parameters for all other model features.
Report the new forecasts as `prophet_forecast_holidays`, a series with 95 forecasts where the first row is the forecast for the first week in June 2018.**

```
### YOUR CODE HERE ###
prophet_forecast_holidays = ...
```

**C. Calculate the MAPE of the Prophet model before accounting for holidays and after adding holidays. The MAPE should be calculated by comparing the 95 weeks of forecasts to the test set. How much did the MAPE improve when holidays were accounted for?**

```
### YOUR CODE HERE ###
prophet_mape = ...
prophet_holiday_mape = ...

print('Original Prophet MAPE: ', prophet_mape)
print('Prophet with Holiday MAPE: ', prophet_holiday_mape)
#print('Improvement: ', prophet_mape - prophet_holiday_mape)
```
github_jupyter
# Linear Regression ##### Examining the relationship between a player's pass volume and completion percentage --- ``` import requests import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt ``` Same as in previous Notebook, but we're adding: - `matplotlib.pyplot as plt`. which is the commonly-used convention for importing `matplotlib` --- ``` base_url = "https://raw.githubusercontent.com/statsbomb/open-data/master/data/" comp_url = base_url + "matches/{}/{}.json" match_url = base_url + "events/{}.json" def parse_data(competition_id, season_id): matches = requests.get(url=comp_url.format(competition_id, season_id)).json() match_ids = [m['match_id'] for m in matches] all_events = [] for match_id in tqdm(match_ids): events = requests.get(url=match_url.format(match_id)).json() passes = [x for x in events if x['type']['name'] == "Pass"] for a in passes: attributes = { "player_id": a['player']['id'], "outcome": 0 if 'outcome' in a['pass'].keys() else 1, } all_events.append(attributes) return pd.DataFrame(all_events) ``` The `parse_data` function has been adjusted such that `player_id` and `outcome` are the only attributes being collected. The StatsBomb data has this weird quirk of only presenting an `outcome` key on event objects that are incomplete. This bit of code handles that: `"outcome": 0 if 'outcome' in a['pass'].keys() else 1` --- ``` competition_id = 43 season_id = 3 df = parse_data(competition_id, season_id) df.head(15) total_passes = df.groupby('player_id')['outcome'].sum() percentage = df.groupby('player_id')['outcome'].mean() ``` In `Pandas` DataFrames, you can do some basic grouping and aggregation. Here, we're grouping on `player_id`, and applying a `sum()` or a `mean()` to the `outcome` attribute. --- ``` plt.scatter(total_passes, percentage, alpha=0.8) plt.show() ``` This is a basic scatter plot via `Matplotlib`, with the x and y axes set to `total_passes` and `percentage` `alpha=0.8` sets the opacity of each scatter point. 
--- ``` from sklearn.linear_model import LinearRegression ``` This imports LinearRegression from `scikit-learn`'s `linear_model` module. --- ``` model = LinearRegression() fit = model.fit([[x] for x in total_passes], percentage) print("Coefficients: {}".format(fit.coef_)) print("Intercept: {}".format(fit.intercept_)) ``` This builds a LinearRegression model, and attempts to predict `percentage` with the features in the `total_passes` variable. The list comprehension (`[[x] for x in total_passes]`) that surrounds `total_passes` is worth an explanation. Since `model.fit()` allows for multiple features, it requires a nested list as the first argument. --- ``` xfit = [0, 500] # This is the x-axis range of the chart yfit = model.predict([[x] for x in xfit]) ``` This builds the regression line such that it can be plotted in the next step. --- ``` plt.scatter(total_passes, percentage, alpha=0.3) plt.plot(xfit, yfit, 'r') plt.show() ``` This plots the previous chart, but also overlays the calculated regression line in red. The color is adjusted with the `'r'` in the `plt.plot()` function. --- Devin Pleuler 2020
github_jupyter
# TensorFlow Distributions: A Gentle Introduction >[TensorFlow Distributions: A Gentle Introduction](#scrollTo=DcriL2xPrG3_) >>[Basic Univariate Distributions](#scrollTo=QD5lzFZerG4H) >>[Multivariate Distributions](#scrollTo=ztM2d-N9nNX2) >>[Multiple Distributions](#scrollTo=57lLzC7MQV-9) >>[Using Independent To Aggregate Batches to Events](#scrollTo=t52ptQXvUO07) >>[Batches of Multivariate Distirbutions](#scrollTo=INu1viAVXz93) >>[Broadcasting, aka Why Is This So Confusing?](#scrollTo=72uiME85SmEH) >>[Going Farther](#scrollTo=JpjjIGThrj8Q) In this notebook, we'll explore TensorFlow Distributions (TFD for short). The goal of this notebook is to get you gently up the learning curve, including understanding TFD's handling of tensor shapes. This notebook tries to present examples before rather than abstract concepts. We'll present canonical easy ways to do things first, and save the most general abstract view until the end. If you're the type who prefers a more abstract and reference-style tutorial, check out [Understanding TensorFlow Distributions Shapes](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb). If you have any questions about the material here, don't hesitate to contact (or join) [the TensorFlow Probability mailing list](https://groups.google.com/a/tensorflow.org/forum/#!forum/tfprobability). We're happy to help. Before we start, we need to import the appropriate libraries. Our overall library is `tensorflow_probability`. By convention, we generally refer to the distributions library as `tfd`. [Tensorflow Eager](https://www.tensorflow.org/guide/eager) is an imperative execution environment for TensorFlow. In TensorFlow eager, every TF operation is immediately evaluated and produces a result. This is in contrast to TensorFlow's standard "graph" mode, in which TF operations add nodes to a graph which is later executed. 
This entire notebook is written using TF Eager, although none of the concepts presented here rely on that, and TFP can be used in graph mode. ``` import collections import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions tfe = tf.contrib.eager tfe.enable_eager_execution() import matplotlib.pyplot as plt from __future__ import print_function ``` ## Basic Univariate Distributions Let's dive right in and create a normal distribution: ``` n = tfd.Normal(loc=0., scale=1.) n ``` We can draw a sample from it: ``` n.sample() ``` We can draw multiple samples: ``` n.sample(3) ``` We can evaluate a log prob: ``` n.log_prob(0.) ``` We can evaluate multiple log probabilities: ``` n.log_prob([0., 2., 4.]) ``` We have a wide range of distributions. Let's try a Bernoulli: ``` b = tfd.Bernoulli(probs=0.7) b b.sample() b.sample(8) b.log_prob(1) b.log_prob([1, 0, 1, 0]) ``` ## Multivariate Distributions We'll create a multivariate normal with a diagonal covariance: ``` nd = tfd.MultivariateNormalDiag(loc=[0., 10.], scale_diag=[1., 4.]) nd ``` Comparing this to the univariate normal we created earlier, what's different? ``` tfd.Normal(loc=0., scale=1.) ``` We see that the univariate normal has an `event_shape` of `()`, indicating it's a scalar distribution. The multivariate normal has an `event_shape` of `2`, indicating the basic [event space](https://en.wikipedia.org/wiki/Event_(probability_theory&#41;) of this distribution is two-dimensional. Sampling works just as before: ``` nd.sample() nd.sample(5) nd.log_prob([0., 10]) ``` Multivariate normals do not in general have diagonal covariance. TFD offers multiple ways to create multivariate normals, including a full-covariance specification, which we use here. 
``` nd = tfd.MultivariateNormalFullCovariance( loc = [0., 5], covariance_matrix = [[1., .7], [.7, 1.]]) data = nd.sample(200) plt.scatter(data[:, 0], data[:, 1], color='blue', alpha=0.4) plt.axis([-5, 5, 0, 10]) plt.title("Data set") plt.show() ``` ## Multiple Distributions Our first Bernoulli distribution represented a flip of a single fair coin. We can also create a batch of independent Bernoulli distributions, each with their own parameters, in a single `Distribution` object: ``` b3 = tfd.Bernoulli(probs=[.3, .5, .7]) b3 ``` It's important to be clear on what this means. The above call defines three independent Bernoulli distributions, which happen to be contained in the same Python `Distribution` object. The three distributions cannot be manipulated individually. Note how the `batch_shape` is `(3,)`, indicating a batch of three distributions, and the `event_shape` is `()`, indicating the individual distributions have a univariate event space. If we call `sample`, we get a sample from all three: ``` b3.sample() b3.sample(6) ``` If we call `prob`, (this has the same shape semantics as `log_prob`; we use `prob` with these small Bernoulli examples for clarity, although `log_prob` is usually preferred in applications) we can pass it a vector and evaluate the probability of each coin yielding that value: ``` b3.prob([1, 1, 0]) ``` Why does the API include batch shape? Semantically, one could perform the same computations by creating a list of distributions and iterating over them with a `for` loop (at least in Eager mode, in TF graph mode you'd need a `tf.while` loop). However, having a (potentially large) set of identically parameterized distributions is extremely common, and the use of vectorized computations whenever possible is a key ingredient in being able to perform fast computations using hardware accelerators. 
## Using Independent To Aggregate Batches to Events In the previous section, we created `b3`, a single `Distribution` object that represented three coin flips. If we called `b3.prob` on a vector $v$, the $i$'th entry was the probability that the $i$th coin takes value $v[i]$. Suppose we'd instead like to specify a "joint" distribution over independent random variables from the same underlying family. This is a different object mathematically, in that for this new distribution, `prob` on a vector $v$ will return a single value representing the probability that the entire set of coins matches the vector $v$. How do we accomplish this? We use a "higher-order" distribution called `Independent`, which takes a distribution and yields a new distribution with the batch shape moved to the event shape: ``` b3_joint = tfd.Independent(b3, reinterpreted_batch_ndims=1) b3_joint ``` Compare the shape to that of the original `b3`: ``` b3 ``` As promised, we see that that `Independent` has moved the batch shape into the event shape: `b3_joint` is a single distribution (`batch_shape = ()`) over a three-dimensional event space (`event_shape = (3,)`). Let's check the semantics: ``` b3_joint.prob([1, 1, 0]) ``` An alternate way to get the same result would be to compute probabilities using `b3` and do the reduction manually by multiplying (or, in the more usual case where log probabilities are used, summing): ``` tf.reduce_prod(b3.prob([1, 1, 0])) ``` `Indpendent` allows the user to more explicitly represent the desired concept. We view this as extremely useful, although it's not strictly necessary. Fun facts: * `b3.sample` and `b3_joint.sample` have different conceptual implementations, but indistinguishable outputs: the difference between a batch of independent distributions and a single distribution created from the batch using `Independent` shows up when computing probabilites, not when sampling. 
* `MultivariateNormalDiag` could be trivially implemented using the scalar `Normal` and `Independent` distributions (it isn't actually implemented this way, but it could be). ## Batches of Multivariate Distirbutions Let's create a batch of three full-covariance two-dimensional multivariate normals: ``` ndb = tfd.MultivariateNormalFullCovariance( loc = [[0., 0.], [1., 1.], [2., 2.]], covariance_matrix = [[[1., .1], [.1, 1.]], [[1., .3], [.3, 1.]], [[1., .5], [.5, 1.]]]) nd_batch ``` We see `batch_shape = (3,)`, so there are three independent multivariate normals, and `event_shape = (2,)`, so each multivariate normal is two-dimensional. In this example, the individual distributions do not have independent elements. Sampling works: ``` ndb.sample(4) ``` Since `batch_shape = (3,)` and `event_shape = (2,)`, we pass a tensor of shape `(3, 2)` to `log_prob`: ``` nd_batch.log_prob([[0., 0.], [1., 1.], [2., 2.]]) ``` ## Broadcasting, aka Why Is This So Confusing? Abstracting out what we've done so far, every distribution has an batch shape `B` and an event shape `E`. Let `BE` be the concatenation of the event shapes: * For the univariate scalar distributions `n` and `b`, `BE = ().`. * For the two-dimensional multivariate normals `nd`. `BE = (2).` * For both `b3` and `b3_joint`, `BE = (3).` * For the batch of multivariate normals `ndb`, `BE = (3, 2).` The "evaluation rules" we've been using so far are: * Sample with no argument returns a tensor with shape `BE`; sampling with a scalar n returns an "n by `BE`" tensor. * `prob` and `log_prob` take a tensor of shape `BE` and return a result of shape `B`. The actual "evaluation rule" for `prob` and `log_prob` is more complicated, in a way that offers potential power and speed but also complexity and challenges. 
The actual rule is (essentially) that **the argument to `log_prob` *must* be [broadcastable](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) against `BE`; any "extra" dimensions are preserved in the output.** Let's explore the implications. For the univariate normal `n`, `BE = ()`, so `log_prob` expects a scalar. If we pass `log_prob` a tensor with non-empty shape, those show up as batch dimensions in the output: ``` n = tfd.Normal(loc=0., scale=1.) n n.log_prob(0.) n.log_prob([0.]) n.log_prob([[0., 1.], [-1., 2.]]) ``` Let's turn to the two-dimensional multivariate normal `nd` (parameters changed for illustrative purposes): ``` nd = tfd.MultivariateNormalDiag(loc=[0., 1.], scale_diag=[1., 1.]) nd ``` `log_prob` "expects" an argument with shape `(2,)`, but it will accept any argument that broadcasts against this shape: ``` nd.log_prob([0., 0.]) ``` But we can pass in "more" examples, and evaluate all their `log_prob`'s at once: ``` nd.log_prob([[0., 0.], [1., 1.], [2., 2.]]) ``` Perhaps less appealingly, we can broadcast over the event dimensions: ``` nd.log_prob([0.]) nd.log_prob([[0.], [1.], [2.]]) ``` Broadcasting this way is a consequence of our "enable broadcasting whenever possible" design; this usage is somewhat controversial and could potentially be removed in a future version of TFP. Now let's look at the three coins example again: ``` b3 = tfd.Bernoulli(probs=[.3, .5, .7]) ``` Here, using broadcasting to represent the probability that *each* coin comes up heads is quite intuitive: ``` b3.prob([1]) ``` (Compare this to `b3.prob([1., 1., 1.])`, which we would have used back where `b3` was introduced.) Now suppose we want to know, for each coin, the probability the coin comes up heads *and* the probability it comes up tails. We could imagine trying: `b3.log_prob([0, 1])` Unfortunately, this produces an error with a long and not-very-readable stack trace. `b3` has `BE = (3)`, so we must pass `b3.prob` something broadcastable against `(3,)`. 
`[0, 1]` has shape `(2)`, so it doesn't broadcast and creates an error. Instead, we have to say: ``` b3.prob([[0], [1]]) ``` Why? `[[0], [1]]` has shape `(2, 1)`, so it broadcasts against shape `(3)` to make a broadcast shape of `(2, 3)`. Broadcasting is quite powerful: there are cases where it allows order-of-magnitude reduction in the amount of memory used, and it often makes user code shorter. However, it can be challenging to program with. If you call `log_prob` and get an error, a failure to broadcast is nearly always the problem. ## Going Farther In this tutorial, we've (hopefully) provided a simple introduction. A few pointers for going further: * `event_shape`, `batch_shape` and `sample_shape` can be arbitrary rank (in this tutorial they are always either scalar or rank 1). This increases the power but again can lead to programming challenges, especially when broadcasting is involved. For an additional deep dive into shape manipulation, see the [Understanding TensorFlow Distributions Shapes](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb). * TFP includes a powerful abstraction known as `Bijectors`, which in conjunction with `TransformedDistribution`, yields a flexible, compositional way to easily create new distributions that are invertible transformations of existing distributions. We'll try to write a tutorial on this soon, but in the meantime, check out [the documentation](https://www.tensorflow.org/api_docs/python/tf/contrib/distributions/TransformedDistribution)
github_jupyter
# Homework 1 # In this problem you will explore some consequences of the ocean's nonlinear equation of state. Then you will make some calculations regarding air-sea fluxes. Each question is worth 25%. There is an _optional_ bonus question at the end which is worth 10 points towards any future homework. You will need to calculate thermodynamic quantities from [TEOS-10](http://www.teos-10.org/software.htm). For this you will need to use the TEOS-10 Gibbs Sea Water software. There are versions of this available for your preferred programming environment: * [TEOS-10 MATLAB](http://www.teos-10.org/software.htm) * [TEOS-10 GSW for Python](https://github.com/TEOS-10/GSW-Python) (Can be installed with `conda install -c conda-forge gsw`) * [TEOS-10 GSW for R](https://github.com/TEOS-10/GSW-R) If you choose to use python for these calculations, then you can [download this assignment](https://github.com/rabernat/intro_to_physical_oceanography/blob/master/homework/01_thermodynamics.ipynb) from github and just fill in the blanks. If you _don't_ go this route, you should still use a computer to type your responses, carefully identifying each question. ## 1) Cabbeling ## You make two measurements of seawater with a CTD. | |T (in-situ temperature, $^\circ$C) |S$_p$ (Practical Salinity, PSU)| |--|--------|-----------------------| |Measurement 1| 0.0 | 31.0 | |Measurement 2 | 16.45 | 34.0 | The measurements are taken at the surface ($p=0$ dbar) in the North Atlantic Ocean, coordinates 45N latitude, 30E longitude. First convert the measurments to absolute salinity and conservative temperature. Now calculate the density of each water parcel? Which water mass is denser? What is their average density? Now allow the two water masses to mix. When they mix, they homogenize their conservative temperature and absolute salinity. What is the density of the new water mass? Discuss your result. How does the density of the mixed seawater compare to that of the two source water masses? 
## 2) Stratification and Thermobaricity ##

You are on a cruise in the Antarctic (65S latitude, 20E longitude). You make two measurements:

| |T (in-situ temperature, $^\circ$C) |S$_p$ (Practical Salinity, PSU)| Pressure (dbar) |
|--|--------|-----------------------|-----------------|
|Measurement 1| -1.8 | 33.0 | 0 |
|Measurement 2 | 0.0 | 33.2 | 20 |

Assess the stability of the water column by comparing the densities of the two water masses referenced to the same pressure (i.e. use potential density). Is the water column stably stratified in this region?

Now imagine that ocean circulation transports the same two water masses to pressures of 4990 dbar and 5010 dbar respectively. (One is still approx 20 m deeper than the other.) Compare the two potential densities using the mid-point reference pressure of 5000 dbar. How does the stratification differ?

## 3) Sensible heat flux

_(NOTE: the following problems mostly do not involve ``gsw``. You can still use a computer as a calculator if you wish.)_

Let's assume that initially the ocean and atmosphere are in a balanced state, with no turbulent heat exchange and $T = T_{10} = 20^\circ$C. Suddenly a cold front comes through and the air temperature drops to 18$^\circ$C. Assume there is no ocean current and that the atmospheric winds are blowing at 10 m/s. You can also assume that $C_H = 10^{-3}$.

1. Calculate the sensible heat flux immediately after this drop in air temperature.
1. Assume the mixed layer is 50 m deep. Calculate the instantaneous rate of mixed layer cooling immediately after the drop in air temperature. Express your answer in degrees per day.
1. This cooling rate cannot be maintained. As the mixed layer cools and the ocean temperature approaches the air temperature, the sensible heat flux will become weaker. Derive an equation describing the time evolution of the air-sea temperature difference $T' = T - T_{10}$, assuming $T_{10}$ remains fixed. ($T'$ is positive when the water is warmer than the air.)
You should find an equation of the form

$$ \frac{dT'}{dt} = - \lambda T' $$

The constant $\lambda$ is an inverse timescale. Express $\lambda$ first in terms of the other parameters and then find its value. Give your answer in the units days$^{-1}$. In your own words, what does $\lambda$ represent? How does it depend on the mixed layer depth? The wind speed?
1. This equation is a simple linear, first-order ordinary differential equation. Write the solution for $T'(t)$. How long does it take the initial air-sea difference of 2$^\circ$C to be reduced by half? How does this compare with the initial cooling rate you calculated in part (b)?

## 4) Evaporation and Latent Heat Flux

We will now calculate the evaporation and latent heat flux due to the same event. Cold air can hold much less water vapor than warm air. The Clausius-Clapeyron equation describes the maximum amount of water vapor that air can hold. The humidity of air is often expressed as _relative humidity_, i.e. $q$ as a percentage of $q_{sat}(T)$.

1. What is $q_{sat}$, the specific humidity of the air right at the sea surface? (The water temperature is 20$^\circ$C, and the air right at the surface is always 100% saturated.)
1. Assume that the air in the cold front has 50% relative humidity at 10 m above the sea surface. What is $q_{10}$? (Recall that $T_{10} = 18^\circ$C.)
1. Calculate the evaporation rate immediately after the drop in air temperature. What are the units of $E$? Rainfall is often measured in units of mm per day. Can you convert your answer to these units?
1. Calculate the instantaneous rate of change of the mixed layer salinity due to evaporation immediately after the drop in air temperature using the virtual salt flux approximation. Assume that the average salinity of the mixed layer $S_{A0} = 35$ g/kg.
1. Calculate the latent heat flux corresponding with this evaporation rate.
1.
Sensible heat fluxes can be both positive and negative over the ocean, depending on whether the air temperature is warmer or cooler than the water. As a result, large cancellations occur, and sensible heat fluxes do not contribute very much to the global heat budget. In contrast, latent heat loss is only negative, and strongly cools the ocean as a whole. Why is latent heat flux always negative? ## 5) OPTIONAL Bonus Question: Total Density Flux All of these processes (sensible heat loss, evaporation, and latent heat loss) are making the mixed layer denser. Let's compare their effect 1. Combine the equations for the rate of change of $T$ and $S_A$ together to form an expression for the rate of change of density $\rho$. (You may assume for simplicity that $T=\Theta$.) You will need to use the equation of state. (Since the pressure changes over the mixed layer are small, you should neglect the dependence of density on pressure.) 1. Quantify the densification rate of the mixed layer due to each process immediately following the change in air temperature. Use ``gsw`` to calculate the appropriate thermal expansion and haline contraction coefficients. Express your answer in kg m$^{-3}$ per day. Which is the dominant process? 1. The mixed layer is the lightest (least dense) water in the water column. It overlies the stratified pycnocline. What happens when air-sea fluxes cause the mixed layer to become denser than the water below it?
github_jupyter
# Compare data to predictions In the previous [notebook](https://github.com/hundredblocks/ml-powered-applications/blob/master/notebooks/train_simple_model.ipynb), we trained a simple model and looked at its accuracy, precision, recall, and f1-score. These are fine aggregate metrics, but we'd like to gain a deeper understanding of our model's performance, and its shortcomings. In order to do that, we will start by looking at a confusion matrix, then plot an ROC curve, and finally a claibration curve. First, we load data. ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt from pathlib import Path from sklearn.externals import joblib import sys sys.path.append("..") import warnings warnings.filterwarnings('ignore') %matplotlib inline %load_ext autoreload %autoreload 2 from ml_editor.data_processing import ( format_raw_df, get_split_by_author, add_text_features_to_df, get_vectorized_series, get_feature_vector_and_label, ) from ml_editor.model_evaluation import get_confusion_matrix_plot, get_roc_plot, get_calibration_plot data_path = Path('../data/writers.csv') df = pd.read_csv(data_path) df = format_raw_df(df.copy()) ``` Then, we add features and split the dataset ``` df = add_text_features_to_df(df.loc[df["is_question"]].copy()) train_df, test_df = get_split_by_author(df, test_size=0.2, random_state=40) ``` We then load the trained model and vectorizer ``` model_path = Path("../models/model_1.pkl") clf = joblib.load(model_path) vectorizer_path = Path("../models/vectorizer_1.pkl") vectorizer = joblib.load(vectorizer_path) ``` We use the vectorizer on our data to produce features that are ready for the model ``` train_df["vectors"] = get_vectorized_series(train_df["full_text"].copy(), vectorizer) test_df["vectors"] = get_vectorized_series(test_df["full_text"].copy(), vectorizer) features = [ "action_verb_full", "question_mark_full", "text_len", "language_question", ] X_train, y_train = get_feature_vector_and_label(train_df, features) X_test, 
y_test = get_feature_vector_and_label(test_df, features) ``` Finally, we get the model's predictions on a held out test set ``` y_predicted = clf.predict(X_test) y_predicted_proba = clf.predict_proba(X_test) ``` ## Confusion matrix First, I'll plot a confusion matrix, which gives us a clear way to view which class our model performs well on. ``` get_confusion_matrix_plot(y_predicted, y_test, figsize=(9, 9)) plt.show() ``` Looking at this confusion matrix, we can clearly see that the model seems better at predicting low score questions (it has higher recall for low score questions): - Proportion of low score questions correctly predicted: 537/(537+244) = 68% - Proportion of high score questions correctly predicted: 402/(402+346) = 53% ## ROC Curve Next, I'll plot an ROC curve, which shows a model's True Positive Rate as a function of it's False Positive Rate. ``` get_roc_plot(y_predicted_proba[:,1], y_test, figsize=(10,10)) plt.show() ``` ROC curves can help judge the overall performance of a model, but most models are designed with practical applications in mind. These applications often come with specific performance requirements, such as maintaining false positives below a certain rate. To understand model performance better, it can be helpful to visually highlight the relevant part of an ROC curve, as shown below. ``` get_roc_plot(y_predicted_proba[:,1], y_test, fpr_bar=.1, figsize=(10,10)) plt.show() ``` For the chosen FPR requirement of `.1` , our model reached around `.2` true positive rate. In an application where maintaining this FPR constraint is important, this is the metric we should track in following experiments. ## Calibration curve Finally, we'll plot a calibration curve, which plots the fraction of actual positive examples as a function of a model's probability score. Such a curve allows us to measure the quality of a model's probability estimate (when a model says a question has a `60%` chance to be good, is that actually the case?) 
``` get_calibration_plot(y_predicted_proba[:,1], y_test, figsize=(9,9)) ``` On this plot, we can see that the model is well calibrated between .2 and .7, and poorly outside of that. This is in part due to the fact that the model mostly outputs scores between .2 and .7 as you can see on the histogram of scores below. We also display the [Brier score](https://en.wikipedia.org/wiki/Brier_score) which is a measure of calibration ranging from 0 to 1, with a value of 0 being reached for a perfectly calibrated model. To gain an even better understanding of success and failure modes of our model, we can examine individual examples to identify error trends (which we will do in the top-k [notebook](https://github.com/hundredblocks/ml-powered-applications/blob/master/notebooks/top_k.ipynb) ), and dig into the trained model's parameters.
github_jupyter
``` import numpy as np
import keras
from matplotlib.pyplot import imshow
import h5py
import cv2
from zipfile import ZipFile

keras.backend.set_image_data_format('channels_last')

from keras.layers import Input, Activation, Conv2D
from keras.models import Model

zipped_images = ZipFile('train_images.zip')
# Show the archive's directory structure.
zipped_images.printdir()

# Extract the images from the archive.
data = []   # raw image data
lens = []   # number of images in each of the three categories
label = []  # class labels (0 = off_edge, 1 = on_edge, 2 = on_corner)
mid_name = ['off_edge', 'on_edge', 'on_corner']
for i in range(3):
    # Each category's image file names are listed in a .txt file inside the archive.
    txt_file_name = 'train_images/edge/' + mid_name[i] + '.txt'
    # Read the listing as a raw byte stream.
    txtstr = zipped_images.read(txt_file_name)
    print(type(txtstr))
    # Split on whitespace/newlines to get the individual file names.
    txtlist = txtstr.split()
    # print(type(image_name)), type(image_name) = bytes
    for image_name in txtlist:
        # image_name is utf-8 encoded bytes while str is unicode, so decode it first.
        bmpname = 'train_images/edge/' + mid_name[i] + '/4/' + str(image_name, encoding='utf-8')
        # Read the image's byte stream.
        bmp_byte_str = zipped_images.read(bmpname)
        # Convert to a numpy array.
        bmp_byte_array = np.frombuffer(bmp_byte_str, dtype=np.uint8)
        # Decode as a grayscale image.
        image = cv2.imdecode(bmp_byte_array, cv2.IMREAD_GRAYSCALE)
        data.append(image)
    lens.append(len(txtlist))
    label.extend([i] * lens[-1])

print(data[110000])
print(label[110000])
print(lens)

data = np.array(data)
label = np.array(label)
data = data.reshape((data.shape[0], -1))
label = label.reshape((label.shape[0], -1))
print(data.shape, label.shape)

# Stack features and labels horizontally so they are shuffled together.
overall_data = np.hstack((data, label))
# Keep only the on_edge and off_edge parts.
# NOTE(review): 100390 and 110910 look like hard-coded sizes of the first two
# categories -- confirm they match the printed `lens` values.
overall_data = overall_data[0:100390 + 110910]
print(overall_data.shape)
np.random.shuffle(overall_data)
div_num = int(overall_data.shape[0] * 0.7)
print(div_num)
# Split into training and test sets (70/30).
X_train = overall_data[0:div_num, 0:25]
Y_train = overall_data[0:div_num, 25]
X_test = overall_data[div_num:, 0:25]
Y_test = overall_data[div_num:, 25]
# Normalize pixel values to [0, 1].
X_train = X_train / 255
X_test = X_test / 255
# Add the channel dimension (inputs are 5x5 grayscale patches; labels are scalars).
X_train = X_train.reshape(X_train.shape[0], 5, 5, 1)
Y_train = Y_train.reshape(Y_train.shape[0], 1, 1, 1)
X_test = X_test.reshape(X_test.shape[0], 5, 5, 1)
Y_test = Y_test.reshape(Y_test.shape[0], 1, 1, 1)
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)

# Define the network structure.
# Being fully convolutional, the network accepts inputs of any size
# without changing its structure.
def EdgeDetectModel(input_shape):
    """Build a small fully-convolutional edge-detection network.

    Parameters:
        input_shape: (height, width, channels); use None for variable sizes.

    Returns:
        An uncompiled keras Model mapping an image to a single-channel map.
    """
    X_input = Input(input_shape)  # (5, 5) / (240, 240)
    # Conv2D(filter_number, filter_size, strides, name)
    X = Conv2D(8, (3, 3), strides = (1, 1), name = 'conv0')(X_input)  # (3, 3, 8) / (238, 238, 8)
    X = Activation('relu')(X)
    X = Conv2D(1, (3, 3), strides = (1, 1), name = 'conv1')(X)  # (1, 1, 1) / (236, 236, 1)
    X = Activation('relu')(X)
    model = Model(inputs = X_input, outputs = X, name='EdgeDetectModel')
    return model

# None means the image size is not fixed.
model = EdgeDetectModel((None, None, 1))
# Choose the optimizer and loss function.
model.compile(optimizer = 'Adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Start training.
model.fit(x = X_train, y = Y_train, epochs = 10, batch_size = 16)
# Evaluate on the held-out test set.
preds = model.evaluate(x = X_test, y = Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))

def load_test_images():
    """Load the 40 synthetic character bitmaps used for a visual sanity check.

    Returns:
        numpy array of shape (40, H, W, 1) of grayscale images.
    """
    data = []
    pre_path = './synthetic_characters/'
    for i in range(4):
        for j in range(10):
            image_path = pre_path + '0-0-0-' + str(i) + '-' + str(j) + '.bmp'
            img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
            data.append(img)
    data = np.array(data)
    data = np.expand_dims(data, axis=3)
    return data

test_images = load_test_images()
print(test_images.shape)

import matplotlib.pyplot as plt
for i in range(test_images.shape[0]):
    test = test_images[i]
    # Add the batch dimension expected by model.predict.
    test = np.expand_dims(test, axis=0)
    pred = model.predict(test)
    # print(pred.shape)
    # A 240x240 input shrinks to 236x236 after two unpadded 3x3 convolutions.
    pred = pred.reshape((236, 236))
    # Binarize the prediction and scale to 8-bit range for saving.
    pred = (pred > 0.5)
    pred = pred * 255
    # plt.imshow(pred, cmap='gray')
    # plt.show()
    plt.imsave('edge_detection_test/' + str(i) + '.png', pred, cmap='gray') ```
github_jupyter
<img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
When the circuit width $m$ is odd, one of the qubits is idle in each layer. More precisely, a **QV circuit** with **depth $d$** and **width $m$**, is a sequence $U = U^{(d)}...U^{(2)}U^{(1)}$ of $d$ layers: $$ U^{(t)} = U^{(t)}_{\pi_t(m'-1),\pi_t(m)} \otimes ... \otimes U^{(t)}_{\pi_t(1),\pi_t(2)} $$ each labeled by times $t = 1 ... d$ and acting on $m' = 2 \lfloor n/2 \rfloor$ qubits. Each layer is specified by choosing a uniformly random permutation $\pi_t \in S_m$ of the $m$ qubit indices and sampling each $U^{(t)}_{a,b}$, acting on qubits $a$ and $b$, from the Haar measure on $SU(4)$. In the following example we have 6 qubits Q0,Q1,Q3,Q5,Q7,Q10. We are going to look at subsets up to the full set (each volume circuit will be depth equal to the number of qubits in the subset) ``` # qubit_lists: list of list of qubit subsets to generate QV circuits qubit_lists = [[0,1,3],[0,1,3,5],[0,1,3,5,7],[0,1,3,5,7,10]] # ntrials: Number of random circuits to create for each subset ntrials = 50 ``` We generate the quantum volume sequences. We start with a small example (so it doesn't take too long to run). ``` qv_circs, qv_circs_nomeas = qv.qv_circuits(qubit_lists, ntrials) ``` As an example, we print the circuit corresponding to the first QV sequence. Note that the ideal circuits are run on the first n qubits (where n is the number of qubits in the subset). ``` #pass the first trial of the nomeas through the transpiler to illustrate the circuit qv_circs_nomeas[0] = qiskit.compiler.transpile(qv_circs_nomeas[0], basis_gates=['u1','u2','u3','cx']) print(qv_circs_nomeas[0][0]) ``` ### Step 2: Simulate the ideal QV circuits The quantum volume method requires that we know the ideal output for each circuit, so we use the statevector simulator in Aer to get the ideal result. 
``` #The Unitary is an identity (with a global phase) backend = qiskit.Aer.get_backend('statevector_simulator') ideal_results = [] for trial in range(ntrials): print('Simulating trial %d'%trial) ideal_results.append(qiskit.execute(qv_circs_nomeas[trial], backend=backend).result()) ``` Next, we load the ideal results into a quantum volume fitter ``` qv_fitter = qv.QVFitter(qubit_lists=qubit_lists) qv_fitter.add_statevectors(ideal_results) ``` ### Step 3: Calculate the heavy outputs To define when a model circuit $U$ has been successfully implemented in practice, we use the *heavy output* generation problem. The ideal output distribution is $p_U(x) = |\langle x|U|0 \rangle|^2$, where $x \in \{0,1\}^m$ is an observable bit-string. Consider the set of output probabilities given by the range of $p_U(x)$ sorted in ascending order $p_0 \leq p_1 \leq \dots \leq p_{2^m-1}$. The median of the set of probabilities is $p_{med} = (p_{2^{m-1}} + p_{2^{m-1}-1})/2$, and the *heavy outputs* are $$ H_U = \{ x \in \{0,1\}^m \text{ such that } p_U(x)>p_{med} \}.$$ The heavy output generation problem is to produce a set of output strings such that more than two-thirds are heavy. As an illustration, we print the heavy outputs from various depths and their probabilities (for trial 0): ``` for qubit_list in qubit_lists: l = len(qubit_list) print ('qv_depth_'+str(l)+'_trial_0:', qv_fitter._heavy_outputs['qv_depth_'+str(l)+'_trial_0']) for qubit_list in qubit_lists: l = len(qubit_list) print ('qv_depth_'+str(l)+'_trial_0:', qv_fitter._heavy_output_prob_ideal['qv_depth_'+str(l)+'_trial_0']) ``` ### Step 4: Define the noise model We define a noise model for the simulator. To simulate decay, we add depolarizing error probabilities to the CNOT and U gates. 
``` noise_model = NoiseModel() p1Q = 0.002 p2Q = 0.02 noise_model.add_all_qubit_quantum_error(depolarizing_error(p1Q, 1), 'u2') noise_model.add_all_qubit_quantum_error(depolarizing_error(2*p1Q, 1), 'u3') noise_model.add_all_qubit_quantum_error(depolarizing_error(p2Q, 2), 'cx') #noise_model = None ``` We can execute the QV sequences either using Qiskit Aer Simulator (with some noise model) or using IBMQ provider, and obtain a list of exp_results. ``` backend = qiskit.Aer.get_backend('qasm_simulator') basis_gates = ['u1','u2','u3','cx'] # use U,CX for now shots = 1024 exp_results = [] for trial in range(ntrials): print('Running trial %d'%trial) exp_results.append(qiskit.execute(qv_circs[trial], basis_gates=basis_gates, backend=backend, noise_model=noise_model, backend_options={'max_parallel_experiments': 0}).result()) ``` ### Step 5: Calculate the average gate fidelity The *average gate fidelity* between the $m$-qubit ideal unitaries $U$ and the executed $U'$ is: $$ F_{avg}(U,U') = \frac{|Tr(U^{\dagger}U')|^2/2^m+1}{2^m+1}$$ The observed distribution for an implementation $U'$ of model circuit $U$ is $q_U(x)$, and the probability of sampling a heavy output is: $$ h_U = \sum_{x \in H_U} q_U(x)$$ As an illustration, we print the heavy output counts from various depths (for trial 0): ``` qv_fitter.add_data(exp_results) for qubit_list in qubit_lists: l = len(qubit_list) #print (qv_fitter._heavy_output_counts) print ('qv_depth_'+str(l)+'_trial_0:', qv_fitter._heavy_output_counts['qv_depth_'+str(l)+'_trial_0']) ``` ### Step 6: Calculate the achievable depth The probability of observing a heavy output by implementing a randomly selected depth $d$ model circuit is: $$h_d = \int_U h_U dU$$ The *achievable depth* $d(m)$ is the largest $d$ such that we are confident that $h_d > 2/3$. 
In other words, $$ h_1,h_2,\dots,h_{d(m)}>2/3 \text{ and } h_{d(m+1)} \leq 2/3$$ We now convert the heavy outputs in the different trials and calculate the mean $h_d$ and the error for plotting the graph. ``` plt.figure(figsize=(10, 6)) ax = plt.gca() # Plot the essence by calling plot_rb_data qv_fitter.plot_qv_data(ax=ax, show_plt=False) # Add title and label ax.set_title('Quantum Volume for up to %d Qubits \n and %d Trials'%(len(qubit_lists[-1]), ntrials), fontsize=18) plt.show() ``` ### Step 7: Calculate the Quantum Volume The quantum volume treats the width and depth of a model circuit with equal importance and measures the largest squareshaped (i.e., $m = d$) model circuit a quantum computer can implement successfully on average. The *quantum volume* $V_Q$ is defined as $$\log_2 V_Q = \arg\max_{m} \min (m, d(m))$$ We list the statistics for each depth. For each depth we list if the depth was successful or not and with what confidence interval. For a depth to be sucessful the confidence interval must be > 97.5%. ``` qv_success_list = qv_fitter.qv_success() qv_list = qv_fitter.ydata QV = 1 for qidx, qubit_list in enumerate(qubit_lists): if qv_list[0][qidx]>2/3: if qv_success_list[qidx][0]: print("Width/depth %d greater than 2/3 (%f) with confidence %f (successful). Quantum volume %d"% (len(qubit_list),qv_list[0][qidx],qv_success_list[qidx][1],qv_fitter.quantum_volume()[qidx])) QV = qv_fitter.quantum_volume()[qidx] else: print("Width/depth %d greater than 2/3 (%f) with confidence %f (unsuccessful)."% (len(qubit_list),qv_list[0][qidx],qv_success_list[qidx][1])) else: print("Width/depth %d less than 2/3 (unsuccessful)."%len(qubit_list)) print ("The Quantum Volume is:", QV) ```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Gena/contrib/palettes-test-crameri-dem.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Gena/contrib/palettes-test-crameri-dem.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Gena/contrib/palettes-test-crameri-dem.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. 
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. 
``` Map = emap.Map(center=[40,-100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset from ee_plugin.contrib import utils, palettes Map.setCenter(4.408241, 52.177595, 18) dem = ee.Image("AHN/AHN2_05M_RUW") \ .resample('bicubic') \ .convolve(ee.Kernel.gaussian(0.5, 0.25, 'meters')) # palette = palettes.crameri['lisbon'][50] palette = palettes.crameri['oleron'][50] # palette = palettes.crameri['roma'][50].slice(0).reverse() demRGB = dem.visualize(**{ 'min': -5, 'max': 15, 'palette': palette }) weight = 0.4 # wegith of Hillshade vs RGB intensity (0 - flat, 1 - HS) exaggeration = 5 # vertical exaggeration azimuth = 315 # Sun azimuth zenith = 20 # Sun elevation brightness = -0.05 # 0 - default contrast = 0.05 # 0 - default saturation = 0.8 # 1 - default castShadows = False # no shadows rgb = utils.hillshadeRGB(demRGB, dem, weight, exaggeration, azimuth, zenith, contrast, brightness, saturation, castShadows) Map.addLayer(rgb, {}, 'DEM (no shadows)', False) # with shadows castShadows = True rgb = utils.hillshadeRGB(demRGB, dem, weight, exaggeration, azimuth, zenith, contrast, brightness, saturation, castShadows) Map.addLayer(rgb, {}, 'DEM') Map.addLayer(dem, {}, 'DEM (raw)', False) ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
github_jupyter
# WeatherPy ---- #### Note * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. ``` # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time from scipy.stats import linregress from datetime import datetime # Import API key from api_keys import weather_api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) ``` ## Generate Cities List ``` # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(lat_range[0], lat_range[1], size=1500) lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) url = "http://api.openweathermap.org/data/2.5/weather?" units = "metric" # Build partial query URL query_url = f"{url}appid={weather_api_key}&units={units}&q=" print(query_url) ``` ### Perform API Calls * Perform a weather check on each city using a series of successive API calls. * Include a print log of each city as it'sbeing processed (with the city number and city name). 
``` #cities = ["Paris", "London", "Oslo", "Beijing", "Mumbai", "Manila", "New York","Vaitupu" ,"Seattle", "Dallas", "Taipei"] # set up lists to hold reponse info lat = [] lon = [] mtem = [] hum = [] clo = [] win = [] cou = [] dat = [] cit=[] i=1 print("Beginning data Retrieval") # Loop through the list of cities and perform a request for data on each for city in cities: try: response = requests.get(query_url + city).json() lat.append(response['coord']['lat']) lon.append(response['coord']['lon']) mtem.append(response['main']['temp_max']) hum.append(response['main']['humidity']) clo.append(response['clouds']['all']) win.append(response['wind']['speed']) cou.append(response['sys']['country']) dat.append(datetime.utcfromtimestamp(response['dt']).strftime('%Y-%m-%d %H:%M:%S')) cit.append(response['name']) print("Processing Record", i ," ", city) i=i+1 except KeyError: print("City not found. Skipping", i,city) #cities found print(len(cit)) ``` ### Convert Raw Data to DataFrame * Export the city data into a .csv. * Display the DataFrame ``` # create a data frame from cities cities_dict = { "City": cit, "Lat": lat, "Lng": lon, "Max Temp": mtem, "Humidity": hum, "Cloudiness": clo, "Wind Speed": win, "Country": cou, "Date": dat } cities_data = pd.DataFrame(cities_dict) cities_data.to_csv("Output/cities_weather.csv", index=False) cities_data #display stadistics summary cities_data.describe() ``` ## Inspect the data and remove the cities where the humidity > 100%. ---- Skip this step if there are no cities that have humidity > 100%. ``` clean_city_data=cities_data.loc[cities_data['Humidity']<=100] clean_city_data.head() # Get the indices of cities that have humidity over 100%. #over100=[] #over100 = next(iter((cities_data['Humidity']>=100).index), 'no match') # Make a new DataFrame equal to the city data to drop all humidity outliers by index. # Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data". 
col_hum = clean_city_data['Humidity'] outliers = col_hum.between(col_hum.quantile(.05), col_hum.quantile(.95)) hum_to_drop = clean_city_data[~outliers].index print(str(len(hum_to_drop)) + " data drop of " + str(len(clean_city_data)) + " total cities.") clean_city_data=clean_city_data.drop(hum_to_drop) clean_city_data ``` ## Plotting the Data * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels. * Save the plotted figures as .pngs. ## Latitude vs. Temperature Plot ``` plt.scatter(clean_city_data['Lat'], clean_city_data['Max Temp'], marker="o", facecolors="red", edgecolors="black", alpha=0.75) plt.title("City Latitude vs. Max temperature") plt.ylabel("Max Temperature (F)") plt.xlabel("Latitude") plt.savefig("Images/latvsmmaxtemp.png") plt.show() ``` ## Latitude vs. Humidity Plot ``` clean_city_data.plot(kind="scatter", x="Lat", y="Humidity", title="City Latitude vs. Humidity") plt.ylabel("Humidity (%)") plt.xlabel("Latitude") plt.savefig("Images/latvshum.png") plt.show() ``` ## Latitude vs. Cloudiness Plot ``` clean_city_data.plot(kind="scatter", x="Lat", y="Cloudiness", title="City Latitude vs. Cloudiness") plt.ylabel("Cloudiness (%)") plt.xlabel("Latitude") plt.savefig("Images/latvsclou.png") plt.show() ``` ## Latitude vs. Wind Speed Plot ``` plt.scatter(clean_city_data['Lat'], clean_city_data['Wind Speed'], marker="o", facecolors="red", edgecolors="black", alpha=0.75) plt.title("City Latitude vs. Wind Speed") plt.ylabel("Wind Speed (mph)") plt.xlabel("Latitude") plt.savefig("Images/latvswin.png") plt.show() ``` ## Linear Regression ``` northern_hemi_data=clean_city_data.loc[clean_city_data['Lat']>=0] northern_hemi_data southern_hemi_data=clean_city_data.loc[clean_city_data['Lat']<0] southern_hemi_data ``` #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression ``` northern_hemi_data.plot(kind="scatter", x="Lat", y="Max Temp", title="Northern Hemisphere - Max Temperature(F) vs. 
Latitude") # Add the linear regression equation and line to plot x_values = northern_hemi_data['Lat'] y_values = northern_hemi_data['Max Temp'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.plot(x_values,regress_values,"r-") #take values to print the line eq x=x_values.min() y=y_values.min() plt.annotate(line_eq,(x,y),fontsize=15,color="red") print(f"The r-squared is: {rvalue**2}") #save image plt.savefig("Images/norlatvsmaxtem.png") plt.ylabel("Maximiun Temperature(F)") plt.xlabel("Latitude") plt.show() ``` #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression ``` southern_hemi_data.plot(kind="scatter", x="Lat", y="Max Temp", title="Southern Hemisphere - Max Temperature(F) vs. Latitude") # Add the linear regression equation and line to plot x_values = southern_hemi_data['Lat'] y_values = southern_hemi_data['Max Temp'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.plot(x_values,regress_values,"r-") x=x_values.min() y=y_values.min() plt.annotate(line_eq,(x,y),fontsize=15,color="red") plt.ylabel("Max Temperature(F)") plt.xlabel("Latitude") print(f"The r-squared is: {rvalue**2}") #save image plt.savefig("Images/soulatvsmaxtem.png") plt.show() ``` #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression ``` northern_hemi_data.plot(kind="scatter", x="Lat", y="Humidity", title="Northern Hemisphere - Humidity vs. 
Latitude") # Add the linear regression equation and line to plot x_values = northern_hemi_data['Lat'] y_values = northern_hemi_data['Humidity'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.plot(x_values,regress_values,"r-") #take values to print the line eq x=x_values.min() y=y_values.min() plt.annotate(line_eq,(x,y),fontsize=15,color="red") print(f"The r-squared is: {rvalue**2}") #save image plt.savefig("Images/norlatvshum.png") plt.ylabel("Humidity") plt.xlabel("Latitude") plt.show() ``` #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression ``` southern_hemi_data.plot(kind="scatter", x="Lat", y="Humidity", title="Southern Hemisphere - Humidity vs. Latitude") # Add the linear regression equation and line to plot x_values = southern_hemi_data['Lat'] y_values = southern_hemi_data['Humidity'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.plot(x_values,regress_values,"r-") x=x_values.min() y=y_values.min() plt.annotate(line_eq,(x,y),fontsize=15,color="red") plt.ylabel("Humidity (%)") plt.xlabel("Latitude") print(f"The r-squared is: {rvalue**2}") #save image plt.savefig("Images/soulatvshum.png") plt.show() ``` #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression ``` northern_hemi_data.plot(kind="scatter", x="Lat", y="Cloudiness", title="Northern Hemisphere - Cloudiness vs. 
Latitude") # Add the linear regression equation and line to plot x_values = northern_hemi_data['Lat'] y_values = northern_hemi_data['Cloudiness'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.plot(x_values,regress_values,"r-") x=x_values.min() y=y_values.min() plt.annotate(line_eq,(x,y),fontsize=15,color="red") print(f"The r-squared is: {rvalue**2}") #save image plt.savefig("Images/norlatvsclou.png") plt.ylabel("Cloudiness") plt.xlabel("Latitude") plt.show() ``` #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression ``` southern_hemi_data.plot(kind="scatter", x="Lat", y="Cloudiness", title=" Southern Hemisphere - Cloudiness vs. Latitude") # Add the linear regression equation and line to plot x_values = southern_hemi_data['Lat'] y_values = southern_hemi_data['Cloudiness'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.plot(x_values,regress_values,"r-") x=x_values.min() y=y_values.min() plt.annotate(line_eq,(x,y),fontsize=15,color="red") plt.ylabel("Cloudiness (%)") plt.xlabel("Latitude") print(f"The r-squared is: {rvalue**2}") #save image plt.savefig("Images/soulatvsclou.png") plt.show() ``` #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression ``` northern_hemi_data.plot(kind="scatter", x="Lat", y="Wind Speed", title="Northern Hemisphere - Wind Speed vs. 
Latitude") # Add the linear regression equation and line to plot x_values = northern_hemi_data['Lat'] y_values = northern_hemi_data['Wind Speed'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.plot(x_values,regress_values,"r-") x=x_values.min() y=y_values.min() plt.annotate(line_eq,(x,y),fontsize=15,color="red") print(f"The r-squared is: {rvalue**2}") #save image plt.savefig("Images/norlatvswin.png") plt.ylabel("Wind Speed (mph)") plt.xlabel("Latitude") plt.show() ``` #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression ``` southern_hemi_data.plot(kind="scatter", x="Lat", y="Wind Speed", title="Southern Hemisphere - City Wind Speed vs. Latitude") # Add the linear regression equation and line to plot x_values = southern_hemi_data['Lat'] y_values = southern_hemi_data['Wind Speed'] (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values) regress_values = x_values * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.plot(x_values,regress_values,"r-") x=x_values.min() y=y_values.min() plt.annotate(line_eq,(x,y),fontsize=15,color="red") plt.xlabel("Latitude") plt.ylabel("Wind Speed (mph)") print(f"The r-squared is: {rvalue**2}") #save image plt.savefig("Images/soulatvswin.png") plt.show() ```
github_jupyter
``` """
This is a python notebook to create a stock screener. The finished product will accept a set of
parameters and output the stocks that meet those requirements. Currently this scanner follows the
"Swing Traders Checklist" @ https://www.swing-trade-stocks.com/swing-traders-checklist.html.
The goal is to deploy this with a suite of Azure Functions and use it in a suite of other related
financial-python projects, such as twitter sentiment and FinViz sentiment analysis, using
ML-clustering to define levels of support and resistance, and other ideas I might think of at
2am on a Sunday.
"""
import yahoo_fin.stock_info as si
import pandas as pd
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import talib
import requests
import json
import openpyxl
import psycopg2 as pg
# NOTE(review): importing a .ipynb file as a module raises ModuleNotFoundError;
# this needs nbimporter (or the notebook converted to a .py module) -- confirm.
import StockSentiment_FinViz.ipynb

#Get connected
# NOTE(review): database credentials are hard-coded here; consider moving them
# into local_settings.txt next to the finnhub token.
conn = pg.connect("dbname=StonksGoUp user=postgres host=localhost password=admin")
cur = conn.cursor()

#read in the finnhub.io token
# This will be used until the ML aspect is tested and complete
with open('local_settings.txt') as f:
    json_local = json.load(f)
finn_token = json_local["finn_token"]

#define scanner parameters:
low = float(2.5)    # minimum share price
high = float(25.0)  # maximum share price
# Unix timestamps at UTC midnight: today, 300 days back, and 5 days ahead (earnings window).
to = int(datetime.strptime(datetime.today().strftime("%d/%m/%Y") + " +0000", "%d/%m/%Y %z").timestamp())
fro = int((datetime.strptime(datetime.today().strftime("%d/%m/%Y") + " +0000", "%d/%m/%Y %z")-relativedelta(days=300)).timestamp())
earnings_period = int((datetime.strptime(datetime.today().strftime("%d/%m/%Y") + " +0000", "%d/%m/%Y %z")+relativedelta(days=5)).timestamp())
capital = 100000  # account size in dollars
risk = 0.05       # fraction of capital risked per position

# Tickers that have price history rows in the database.
get_tickers = """
SELECT ticker from stockdata
WHERE ticker IN (SELECT ticker from tickers)
GROUP BY ticker
"""
cur.execute(get_tickers, conn)
scanner_list = list([i[0] for i in cur.fetchall()])
print(scanner_list[:20])

# get sentiment table (average score per ticker over the last week)
get_sentiment = """
SELECT ticker, round(avg(score), 2) as avg_sentiment
FROM sentiment
WHERE timestamp > current_date - interval '1 week'
GROUP BY ticker
"""
df_sentiment = pd.read_sql(get_sentiment, conn)
print(df_sentiment)

# Functions for Technical Analysis

def get_hist(ticker, conn):
    """Return daily OHLCV history for `ticker` from the stockdata table."""
    # Get data from web-scraping
    # req = f"https://query1.finance.yahoo.com/v7/finance/download/{ticker}?period1={fro}&period2={to}&interval=1d&events=history"
    # data = pd.read_csv(req)
    # data.index = data["Date"].apply(lambda x: pd.Timestamp(x))
    # data.drop("Date", axis=1, inplace=True)
    # Get data from database
    get_data = f"""SELECT ticker
        ,quotedate as "Date"
        ,open as "Open"
        ,high as "High"
        ,low as "Low"
        ,close as "Close"
        ,adjclose as "Adj Close"
        ,volume as "Volume"
        FROM stockdata
        WHERE ticker = '{ticker}'
        ORDER BY quotedate ASC"""
    data = pd.read_sql(get_data, conn)
    return data


def get_indicators(data):
    """Append MACD, SMA10/30/200 and RSI columns to the price DataFrame in place."""
    # Get MACD
    data["macd"], data["macd_signal"], data["macd_hist"] = talib.MACD(data['Close'])
    # Get SMA10 and SMA30
    data["sma10"] = talib.SMA(data["Close"], timeperiod=10)
    data["sma30"] = talib.SMA(data["Close"], timeperiod=30)
    # Get MA200
    data["sma200"] = talib.SMA(data["Close"], timeperiod=200)
    # Get RSI
    data["rsi"] = talib.RSI(data["Close"])
    return data


def analyze_chart(indicated_data, df_analyzed):
    """Score one ticker's chart and append a row of findings to df_analyzed.

    NOTE(review): this function relies on the global loop variable `ticker`
    rather than taking it as a parameter, and the appended row references an
    undefined name `sentiment` -- this will raise NameError unless `sentiment`
    is defined elsewhere (e.g. in the imported sentiment notebook). Confirm.
    """
    #quote_price = indicated_data.loc[:,'Adj Close'].iloc[-1]
    # Check RSI
    if indicated_data.loc[:,'rsi'].iloc[-1] < 35:
        rsi = "Oversold"
    elif indicated_data.loc[:,'rsi'].iloc[-1] > 65:
        rsi = "Overbought"
    else:
        rsi = None
    # Check SMA Trend
    if indicated_data.loc[:,'sma30'].iloc[-1]<indicated_data.loc[:,'sma10'].iloc[-1]:
        trend = "Uptrend"
    elif indicated_data.loc[:,'sma30'].iloc[-1]>indicated_data.loc[:,'sma10'].iloc[-1]:
        trend = "Downtrend"
    else:
        trend = None
    # Check 200SMA
    if indicated_data.loc[:,'Open'].iloc[-1]>indicated_data.loc[:,'sma200'].iloc[-1]:
        above200 = True
    else:
        above200 = None
    # Check for Earnings
    # NOTE(review): the bare except silently turns any scraping/parsing error
    # into "no earnings date", and the truthy elif makes the final else
    # unreachable -- confirm this fallback is intended.
    try:
        if pd.isnull(si.get_quote_table(ticker)['Earnings Date']):
            earnings_date = None
        elif datetime.strptime(si.get_quote_table(ticker)['Earnings Date'].split(' - ')[0], '%b %d, %Y'):
            earnings_date = datetime.strptime(si.get_quote_table(ticker)['Earnings Date'].split(' - ')[0], '%b %d, %Y')
        else:
            earnings_date = datetime.strptime(si.get_quote_table(ticker)['Earnings Date'], '%b %d, %Y')
    except:
        earnings_date = None
    # Check for support or resistance (levels within +/-10% of the last open)
    req = requests.get(f'https://finnhub.io/api/v1/scan/support-resistance?symbol={ticker}&resolution=D&token={finn_token}')
    supp_res = None
    supp_res_price = float()
    for level in req.json()['levels']:
        if float(level)*0.90 < indicated_data.loc[:,'Open'].iloc[-1] < float(level)*1.10:
            if indicated_data.loc[:,'Open'].iloc[-1] >= float(level):
                supp_res = "support"
                supp_res_price = round(level, 2)
            elif indicated_data.loc[:,'Open'].iloc[-1] <= float(level):
                supp_res = "resistance"
                supp_res_price = round(level, 2)
            else:
                supp_res = "Indeterminant"
                supp_res_price = None
        else:
            pass
    # Check TAZ
    # Check for Pullback (three consecutive non-increasing adjusted closes)
    if indicated_data.loc[:,'Adj Close'].iloc[-1]<= indicated_data.loc[:,'Adj Close'].iloc[-2]<= indicated_data.loc[:,'Adj Close'].iloc[-3]:
        pullback = True
    else:
        pullback = None
    # Add latest sentiment
    df_analyzed = df_analyzed.append({'Ticker' : ticker,
                                      'Open' : round(indicated_data.loc[:,'Open'].iloc[-1]),
                                      'Quote' : round(indicated_data.loc[:,'Adj Close'].iloc[-1]),
                                      'RSI' : rsi,
                                      'Trend' : trend,
                                      'Above200' : above200,
                                      'Earnings' : earnings_date,
                                      'Supp/Res' : supp_res,
                                      'S/R Price' : supp_res_price,
                                      'Pullback' : pullback,
                                      'Sentiment' : sentiment
                                      }, ignore_index=True)
    return df_analyzed


def analyze_position(df_analyzed, capital, risk):
    """Add entry, stoploss and position-size columns based on the risk budget."""
    # Dollar amount at risk per position.
    position_risk = capital*risk
    df_analyzed['Entry'] = df_analyzed['S/R Price']
    # Stoploss 5% below the support/resistance price.
    df_analyzed['Stoploss'] = df_analyzed['S/R Price'].astype(float).apply(lambda x: x * float(0.95))
    df_analyzed['risk_per_share'] = df_analyzed['Entry'] - df_analyzed['Stoploss']
    df_analyzed['position_size'] = round(position_risk/df_analyzed['risk_per_share'])
    return df_analyzed


# Temporary short list for testing; overrides the database-driven list above.
scanner_list = ['AAL', 'AES', 'AMCR', 'APA', 'BAC']
df_analyzed = pd.DataFrame(columns=['Ticker', 'Open', 'Quote', 'RSI',
                                    'Trend', 'Above200', 'Earnings',
                                    'Supp/Res', 'S/R Price', 'Pullback'])
for ticker in scanner_list:
    print(ticker)
    # Get historical data
    data = get_hist(ticker, conn)
    # Add indicator data
    indicated_data = get_indicators(data)
    # Analyze stonks:
    df_analyzed = analyze_chart(indicated_data, df_analyzed)

df_analyzed = analyze_position(df_analyzed, capital, risk)
# Keep only rows that pass every check.
# NOTE(review): `!= None` comparisons are element-wise in pandas and do not
# drop missing values the way .notna() would -- confirm intended behavior.
df_analyzed = df_analyzed[df_analyzed['Above200'] == True]
df_analyzed = df_analyzed[df_analyzed['RSI'] != None]
df_analyzed = df_analyzed[df_analyzed['Trend'] != None]
df_analyzed = df_analyzed[df_analyzed['Earnings'] != None]
df_analyzed = df_analyzed[df_analyzed['Supp/Res'] != None]
df_analyzed = df_analyzed[df_analyzed['Pullback'] != None]
print(df_analyzed)
#df_analyzed.to_excel('Output.xlsx', ignore_index=True) ```
github_jupyter
# Introduction/Business Problem Car accidents are one of the most common and severe issues found across the globe. Accidents may happen because of our own negligence, because of natural causes, or for other reasons. Sometimes we are too careless or distracted while driving, endangering our lives as well as the lives of others. At other times, heavy rain, strong gales, or similar conditions can unknowingly draw us into a collision with another car. Whatever the reason may be, car accidents not only lead to property damage but also cause injuries and sometimes even people's deaths. In this project we examine how these accidents relate to weather conditions. So the main questions arising in this depressing situation are: "What is the severity of these car accidents? What are their causes? And how can they be curbed or reduced?" # Data section We have several attributes in our dataset which tell us about the severity of these accidents. Attributes like WEATHER, ROADCOND, LIGHTCOND, and JUNCTIONTYPE can tell us about the accidents which happen naturally, and attributes like SEVERITYDESC and COLLISIONTYPE help us determine how these accidents take place. Our predictor or target variable will be 'SEVERITYCODE' because it is used to measure the severity of an accident from 0 to 5 within the dataset. Attributes used to weigh the severity of an accident are 'WEATHER', 'ROADCOND' and 'LIGHTCOND'. 
* 0 : Little to no Probability (Clear Weather Conditions) * 1 : Very Low Probability - Chance of Property Damage * 2 : Low Probability - Chance of Injury * 3 : Mild Probability - Chance of Serious Injury * 4 : High Probability - Chance of Fatality So depending on these severity codes, we decide the extent of severity of accidents due to these weather conditions # Methodology UK Road Safety data: Total accident counts with accident severity as Slight, Serious and Fatal Normalized accident counts each month for slight and (Serious and Fatal clubbed) Plotting importance of each feature for considered features Data Pre-processing techniques: The dataset is imputed by replacing NaN and missing values with the most frequent values of the corresponding column. All the categorical values have been labeled by integers from 0 to n for each column. Time has been converted to a categorical feature with 2 values, i.e., daytime and night time. The data is visualized for correlation. Negatively correlated features are selected to be dropped. Feature importance is plotted to visualize and only features with high importance are taken into consideration for predicting accident severity. The multi class label is converted to binary class by merging “Serious” and “Fatal” into the Serious class. Feature Selection: The dataset has 34 attributes describing the incident of an accident. There are mixed types of data such as continuous and categorical. A few columns were manually dropped due to inconsistency in their values, such as Accident ID and Location ID. For selecting the best features, the functions below are used from the sklearn library. * 1. SelectKBest: SelectKBest is a scikit-learn function that provides the k best features by performing statistical tests, i.e., chi-squared computation between two non-negative features. Using the chi-squared function filters out the features which are independent of the target attribute. * 2. 
Recursive Feature Elimination (RFE): RFE runs the defined model by trying out different possible combinations of features, and it removes the features recursively which are not impacting the class label. Logistic regression algorithm is used as a parameter for RFE to decide on features. ``` import pandas as pd import numpy as np from sklearn import metrics from sklearn.metrics import classification_report, confusion_matrix df = pd.read_csv('Accident_Information.csv', sep=',') encoding = { "Carriageway_Hazards": {"None": 0, "Other object on road": 1, "Any animal in carriageway (except ridden horse)": 1, "Pedestrian in carriageway - not injured": 1, "Previous accident": 1, "Vehicle load on road": 1, "Data missing or out of range": 0 } } df.replace(encoding, inplace=True) print(df['Carriageway_Hazards'].value_counts()) print(df['Light_Conditions'].value_counts()) encoding_light = {"Light_Conditions": {"Daylight": 0, "Darkness - lights lit": 1, "Darkness - no lighting": 1, "Darkness - lighting unknown": 1, "Darkness - lights unlit": 1, "Data missing or out of range": 0}} df.replace(encoding_light, inplace=True) print(df['Light_Conditions'].value_counts()) print(df['Day_of_Week'].value_counts()) encoding_day_of_week = {"Day_of_Week": {"Saturday": 1, "Sunday": 1, "Monday": 0, "Tuesday": 0, "Wednesday": 0, "Thursday": 0, "Friday": 0}} df.replace(encoding_day_of_week, inplace=True) print(df['Day_of_Week'].value_counts()) print(df['Special_Conditions_at_Site'].value_counts()) encoding_Special_Conditions_at_Site = {"Special_Conditions_at_Site": {"None": 0, "Roadworks": 1, "Oil or diesel": 1, "Mud": 1, "Road surface defective": 1, "Auto traffic signal - out": 1, "Road sign or marking defective or obscured": 1, "Auto signal part defective": 1, "Data missing or out of range": 0}} df.replace(encoding_Special_Conditions_at_Site, inplace=True) print(df['Special_Conditions_at_Site'].value_counts()) encoding_1st_road_class = {"1st_Road_Class": {"A": 1, "A(M)": 1, "B": 2, "C": 3, 
"Motorway": 4, "Unclassified": 1}} df.replace(encoding_1st_road_class, inplace=True) df['1st_Road_Class'].value_counts() #replacing 'Data missing or out of range' with most occured value 'Give way or uncontrolled' df['Junction_Control'] = df['Junction_Control'].replace(['Data missing or out of range'], 'Give way or uncontrolled') df['Junction_Control'].value_counts() encoding_junction_detail = {"Junction_Control": {"Give way or uncontrolled": 1, "Auto traffic signal": 2, "Not at junction or within 20 metres": 3, "Stop sign": 4, "Authorised person": 5, }} df.replace(encoding_junction_detail, inplace=True) df['Junction_Control'].value_counts() encoding_junction_detail = {"Junction_Detail": {"Not at junction or within 20 metres": 1, "T or staggered junction": 2, "Crossroads": 3, "Roundabout": 4, "Private drive or entrance": 5, "Other junction": 6, "Slip road": 7, "More than 4 arms (not roundabout)": 8, "Mini-roundabout": 9, "Data missing or out of range": 1 }} df.replace(encoding_junction_detail, inplace=True) df['Junction_Detail'].value_counts() encoding_road_surface_cond = {"Road_Surface_Conditions": {"Dry": 1, "Wet or damp": 2, "Frost or ice": 3, "Snow": 4, "Flood over 3cm. 
deep": 5, "Data missing or out of range": 1 }} df.replace(encoding_road_surface_cond, inplace=True) df['Road_Surface_Conditions'].value_counts() encoding_road_type = {"Road_Type": {"Single carriageway": 1, "Dual carriageway": 2, "Roundabout": 3, "One way street": 4, "Slip road": 5, "Unknown": 0, "Data missing or out of range": 1 }} df.replace(encoding_road_type, inplace=True) df['Road_Type'].value_counts() encoding_urban_rural = {"Urban_or_Rural_Area": {"Urban": 1, "Rural": 2, "Unallocated": 1 }} df.replace(encoding_urban_rural, inplace=True) df['Urban_or_Rural_Area'].value_counts() encoding_weather = {"Weather_Conditions": {"Fine no high winds": 1, "Raining no high winds": 2, "Raining + high winds": 3, "Fine + high winds": 4, "Snowing no high winds": 5, "Fog or mist": 6, "Snowing + high winds": 7, "Unknown": 1, "Other": 1, "Data missing or out of range": 1 }} df.replace(encoding_weather, inplace=True) df['Weather_Conditions'].value_counts() np.where(np.isnan(df['Speed_limit'])) df['Speed_limit'].fillna((df['Speed_limit'].mean()), inplace=True) df['Time'].fillna(0, inplace=True) def period(row): rdf = [] if(type(row) == float): row = str(row) rdf = row.split(".") else: rdf = str(row).split(":"); # day -- 8am-8pm hr = rdf[0] if int(hr) > 8 and int(hr) < 20: return 1; else: return 2; df['Time'] = df['Time'].apply(period) df_train1 = df[['1st_Road_Class','Carriageway_Hazards','Junction_Control','Day_of_Week','Junction_Detail','Light_Conditions','Road_Surface_Conditions','Road_Type','Special_Conditions_at_Site','Speed_limit','Time','Urban_or_Rural_Area','Weather_Conditions','Accident_Severity']] df_slight = df_train1[df_train1['Accident_Severity']=='Slight'] df_serious = df_train1[df_train1['Accident_Severity']=='Serious'] df_fatal = df_train1[df_train1['Accident_Severity']=='Fatal'] df_serious['Accident_Severity'].value_counts() random_subset = df_slight.sample(n=3) random_subset.head() df_fatal['Accident_Severity'].value_counts() df_slight_sampling = 
df_slight.sample(n=45000) #Matched the combined number of records for Fatal and Serious(As we are going to club fatal&serious to Serious) df_serious_sampling = df_serious.sample(n=24693) #Matched number of records with the rarer class (Fatal#24693) df_final_sampling = pd.concat([df_serious_sampling,df_slight_sampling,df_fatal]) df_final_sampling.head() df_test = df_final_sampling[['Accident_Severity']] #replacing 'Data missing or out of range' with most occured value 'None' df_test['Accident_Severity'] = df_test['Accident_Severity'].replace(['Fatal'], 'Serious') df_train = df_final_sampling[['1st_Road_Class','Carriageway_Hazards','Junction_Control','Day_of_Week','Junction_Detail','Light_Conditions','Road_Surface_Conditions','Road_Type','Special_Conditions_at_Site','Speed_limit','Time','Urban_or_Rural_Area','Weather_Conditions']] df_test['Accident_Severity'].value_counts() ``` # Results ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(df_train, df_test, test_size=0.2) from sklearn.ensemble import RandomForestClassifier #class_weight = dict({2:1, 1:15, 0:50}) rdf = RandomForestClassifier(n_estimators=300,random_state=35) rdf.fit(X_train,y_train) y_pred=rdf.predict(X_test) #Import scikit-learn metrics module for accuracy calculation from sklearn import metrics # Model Accuracy, how often is the classifier correct? 
print("Accuracy:",metrics.accuracy_score(y_test, y_pred)) from sklearn import metrics from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) from sklearn.ensemble import RandomForestClassifier #class_weight = dict({2:1, 1:15, 0:50}) rdf = RandomForestClassifier(bootstrap=True, class_weight="balanced_subsample", criterion='gini', max_depth=8, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=4, min_samples_split=10, min_weight_fraction_leaf=0.0, n_estimators=300, oob_score=True, random_state=35, verbose=0, warm_start=False) rdf.fit(X_train,y_train) y_pred=rdf.predict(X_test) #Import scikit-learn metrics module for accuracy calculation from sklearn import metrics # Model Accuracy, how often is the classifier correct? print("Accuracy:",metrics.accuracy_score(y_test, y_pred)) from sklearn import metrics from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) pip install xgboost from xgboost import XGBClassifier model = XGBClassifier(learning_rate =0.07, n_estimators=300, class_weight="balanced_subsample", max_depth=8, min_child_weight=1, scale_pos_weight=7, seed=27,subsample=0.8,colsample_bytree=0.8) model.fit(X_train,y_train) y_pred=model.predict(X_test) print("Accuracy:",metrics.accuracy_score(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) # import the class from sklearn.neighbors import KNeighborsClassifier # instantiate the model (with the default parameters) knn = KNeighborsClassifier(n_neighbors=3,weights='distance') # fit the model with data (occurs in-place) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) from sklearn.linear_model import LogisticRegression 
logisticRegr = LogisticRegression() logisticRegr.fit(X_train, y_train) y_pred = logisticRegr.predict(X_test) print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) from sklearn.naive_bayes import GaussianNB from sklearn.metrics import accuracy_score gnb = GaussianNB() y_pred = gnb.fit(X_train, y_train).predict(X_test) print(accuracy_score(y_test, y_pred)) confusion_matrix(y_test, y_pred) print(format(classification_report(y_test, y_pred))) from sklearn.ensemble import GradientBoostingClassifier gbc = GradientBoostingClassifier(loss="deviance", learning_rate=0.1, n_estimators=100, subsample=1.0, criterion="friedman_mse", min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, min_impurity_split=None, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False, presort="auto") y_pred = gbc.fit(X_train, y_train.values.ravel()).predict(X_test) print(format(classification_report(y_test, y_pred))) print(accuracy_score(y_test, y_pred)) ``` # Discussion Our main aim was to predict the severity of the accident when it is “serious” and “fatal”. It was very difficult to handle this large-sized data. Using HPC we were able to run most of our algorithms. Data is highly imbalanced so even though most of our algorithms were giving > 89% accuracies, it was of no use. It was predicting all the accidents as slight accidents. After checking on all these algorithms, the team even tried dimensionality reduction techniques and but the results were not improved. Then the team decided to use the undersampled dataset as it was giving better results in predicting the severe/fatal accidents. This decision was made on trying out oversampling, undersampling, test and train data with an equal ratio of classification classes. # Conclusion In conclusion, most of the algorithms are biased towards most frequent class. 
However, efficient pre-processing and corresponding imbalanced-data techniques should give optimal results. Based on the currently known conditions of weather, light, traffic signal, road surface, speed limit, etc., accident severity can be classified. But there is no single feature that alone determines the accident severity.
github_jupyter
문장을 입력해서 이진분류하는 모델에 대해서 알아보겠습니다. 언어가 시계열적인 의미가 있으므로, 이 언어를 문자로 표현한 문장도 시계열적인 의미가 있습니다. 모델에 입력하기 위해서 문장을 시계열수치로 인코딩하는 방법과 여러가지 이진분류 모델을 구성해보고, 학습 결과를 살펴보겠습니다. 이 모델들은 문장 혹은 시계열수치로 양성/음성을 분류하거나 이벤트 발생 유무를 감지하는 문제를 풀 수 있습니다. --- ### 데이터셋 준비 IMDB에서 제공하는 영화 리뷰 데이터셋을 이용하겠습니다. 이 데이터셋은 훈련셋 25,000개, 시험셋 25,000개의 샘플을 제공합니다. 라벨은 1과 0으로 좋아요/싫어요로 지정되어 있습니다. 케라스에서 제공하는 imdb의 load_data() 함수을 이용하면 데이터셋을 쉽게 얻을 수 있습니다. 데이터셋은 이미 정수로 인코딩되어 있으며, 정수값은 단어의 빈도수를 나타냅니다. 모든 단어를 고려할 수 없으므로 빈도수가 높은 단어를 위주로 데이터셋을 생성합니다. 20,000번째로 많이 사용하는 단어까지만 데이터셋으로 만들고 싶다면, num_words 인자에 20000이라고 지정하면 됩니다. ``` from keras.datasets import imdb (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=20000) ``` 훈련셋의 데이터가 어떻게 구성되어 있는 지 살펴보겠습니다. x_train을 출력하면 다음과 같습니다. ``` print(x_train) ``` 총 25000개의 샘플이 있으며, 각 샘플은 영화 리뷰 한 건을 의미하며, 단어의 인덱스로 구성되어 있습니다. 'num_words=20000'으로 지정했기 때문에 빈도수가 20,000을 넘는 단어는 보이지가 않습니다. 훈련셋 25,000개를 다시 훈련셋 20,000개와 검증셋 5,000개로 분리합니다. ``` x_val = x_train[20000:] y_val = y_train[20000:] x_train = x_train[:20000] y_train = y_train[:20000] ``` 리뷰의 길이가 다르니 각 샘플의 길이가 다르겠죠? 적개는 수십 단어로 많게는 천 개 이상의 단어로 구성되어 있습니다. 모델의 입력으로 사용하려면 고정된 길이로 만들어야 하므로 케라스에서 제공되는 전처리 함수인 sequence의 pad_sequences() 함수를 사용합니다. 이 함수는 두 가지 역할을 수행합니다. * 문장을 maxlen 인자로 지정된 길이로 맞춰줍니다. 예를 들어 200으로 지정했다면 200보다 짧은 문장은 0으로 채워서 200단어로 맞춰주고 200보다 긴 문장은 200단어까지만 잘라냅니다. * (num_samples, num_timesteps)으로 2차원의 numpy 배열로 만들어줍니다. maxlen을 200으로 지정하였다면, num_timesteps도 200이 됩니다. ``` from keras.preprocessing import sequence x_train = sequence.pad_sequences(x_train, maxlen=200) x_val = sequence.pad_sequences(x_val, maxlen=200) x_test = sequence.pad_sequences(x_test, maxlen=200) ``` --- ### 레이어 준비 본 장에서 새롭게 소개되는 블록들은 다음과 같습니다. 
|블록|이름|설명| |:-:|:-:|:-| |![img](http://tykimos.github.io/warehouse/DeepBrick/Model_Recipe_Part_Embedding_s.png)|Embedding|단어를 의미론적 기하공간에 매핑할 수 있도록 벡터화시킵니다.| |![img](http://tykimos.github.io/warehouse/DeepBrick/Model_Recipe_Part_Conv2D_s.png)|Conv2D|필터를 이용하여 영상 특징을 추출하는 컨볼루션 레이어입니다.| |![img](http://tykimos.github.io/warehouse/DeepBrick/Model_Recipe_Part_MaxPooling2D_s.png)|MaxPooling2D|영상에서 사소한 변화가 특징 추출에 크게 영향을 미치지 않도록 해주는 맥스풀링 레이어입니다.| |![img](http://tykimos.github.io/warehouse/DeepBrick/Model_Recipe_Part_Flatten_s.png)|Flatten|2차원의 특징맵을 전결합층으로 전달하기 위해서 1차원 형식으로 바꿔줍니다. |![img](http://tykimos.github.io/warehouse/DeepBrick/Model_Recipe_Part_Activation_relu_2D_s.png)|relu|활성화 함수로 주로 Conv2D 은닉층에 사용됩니다.| --- ### 모델 준비 영상을 입력하여 다중클래스분류를 하기 위해 `다층퍼셉트론 신경망 모델`, `컨볼루션 신경망 모델`, `깊은 컨볼루션 신경망 모델`을 준비했습니다. #### 다층퍼셉트론 신경망 모델 model = Sequential() model.add(Embedding(20000, 128, input_length=200)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dense(1, activation='sigmoid')) 임베딩 레이어 인자의 의미는 다음과 같습니다. * 첫번째 인자(input_dim) : 단어 사전의 크기를 말하며 총 20,000개의 단어 종류가 있다는 의미입니다. 이 값은 앞서 imdb.load_data() 함수의 num_words 인자값과 동일해야 합니다. * 두번째 인자(output_dim) : 단어를 인코딩 한 후 나오는 벡터 크기 입니다. 이 값이 128이라면 단어를 128차원의 의미론적 기하공간에 나타낸다는 의미입니다. 단순하게 빈도수만으로 단어를 표시한다면, 10과 11은 빈도수는 비슷하지만 단어로 볼 때는 전혀 다른 의미를 가지고 있습니다. 하지만 의미론적 기하공간에서는 거리가 가까운 두 단어는 의미도 유사합니다. 즉 임베딩 레이어는 입력되는 단어를 의미론적으로 잘 설계된 공간에 위치시켜 벡터로 수치화 시킨다고 볼 수 있습니다. * input_length : 단어의 수 즉 문장의 길이를 나타냅니다. 임베딩 레이어의 출력 크기는 샘플 수 * output_dim * input_lenth가 됩니다. 임베딩 레이어 다음에 Flatten 레이어가 온다면 반드시 input_lenth를 지정해야 합니다. 플래튼 레이어인 경우 입력 크기가 알아야 이를 1차원으로 만들어서 Dense 레이어에 전달할 수 있기 때문입니다. ![img](http://tykimos.github.io/warehouse/2017-8-17-Text_Input_Binary_Classification_Model_Recipe_1m.png) #### 순환 신경망 모델 model = Sequential() model.add(Embedding(max_features, 128)) model.add(LSTM(128)) model.add(Dense(1, activation='sigmoid')) ``` # 0. 
사용할 패키지 불러오기 from keras.datasets import imdb from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense, Embedding from keras.layers import Flatten max_features = 20000 text_max_words = 200 # 2. 모델 구성하기 model = Sequential() model.add(Embedding(max_features, 128, input_length=text_max_words)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dense(1, activation='sigmoid')) # 3. 모델 학습과정 설정하기 model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from IPython.display import SVG from keras.utils.vis_utils import model_to_dot %matplotlib inline SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg')) # 0. 사용할 패키지 불러오기 from keras.datasets import imdb from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense, Embedding, LSTM from keras.layers import Flatten max_features = 20000 text_max_words = 200 # 1. 데이터셋 생성하기 # 2. 모델 구성하기 model = Sequential() model.add(Embedding(max_features, 128)) model.add(LSTM(128)) model.add(Dense(1, activation='sigmoid')) # 3. 모델 학습과정 설정하기 model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from IPython.display import SVG from keras.utils.vis_utils import model_to_dot %matplotlib inline SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg')) # 0. 사용할 패키지 불러오기 from keras.datasets import imdb from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense, Embedding, LSTM from keras.layers import Flatten max_features = 20000 text_max_words = 200 # 1. 데이터셋 생성하기 # 2. 모델 구성하기 model = Sequential() model.add(Embedding(max_features, 128)) model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(1, activation='sigmoid')) # 3. 
모델 학습과정 설정하기 model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from IPython.display import SVG from keras.utils.vis_utils import model_to_dot %matplotlib inline SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg')) # 0. 사용할 패키지 불러오기 from keras.datasets import imdb from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense, Embedding, LSTM from keras.layers import Flatten, Dropout from keras.layers import Conv1D, GlobalMaxPooling1D max_features = 20000 text_max_words = 200 # 2. 모델 구성하기 model = Sequential() model.add(Embedding(max_features, 128, input_length=text_max_words)) model.add(Dropout(0.2)) model.add(Conv1D(256, 3, padding='valid', activation='relu', strides=1)) model.add(GlobalMaxPooling1D()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(1, activation='sigmoid')) # 3. 모델 학습과정 설정하기 model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from IPython.display import SVG from keras.utils.vis_utils import model_to_dot %matplotlib inline SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg')) # 0. 사용할 패키지 불러오기 from keras.datasets import imdb from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense, Embedding, LSTM from keras.layers import Flatten, Dropout from keras.layers import Conv1D, MaxPooling1D max_features = 20000 text_max_words = 200 # 1. 데이터셋 생성하기 # 2. 모델 구성하기 model = Sequential() model.add(Embedding(max_features, 128, input_length=text_max_words)) model.add(Dropout(0.2)) model.add(Conv1D(256, 3, padding='valid', activation='relu', strides=1)) model.add(MaxPooling1D(pool_size=4)) model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(1, activation='sigmoid')) # 3. 
모델 학습과정 설정하기 model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) from IPython.display import SVG from keras.utils.vis_utils import model_to_dot %matplotlib inline SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg')) ``` --- ### 데이터셋 준비 입력 x에 대해 2를 곱해 두 배 정도 값을 갖는 출력 y가 되도록 데이터셋을 생성해봤습니다. 선형회귀 모델을 사용한다면 Y = w * X + b 일 때, w가 2에 가깝고, b가 0.16에 가깝게 되도록 학습시키는 것이 목표입니다. ``` import numpy as np # 데이터셋 생성 x_train = np.random.random((1000, 1)) y_train = x_train * 2 + np.random.random((1000, 1)) / 3.0 x_test = np.random.random((100, 1)) y_test = x_test * 2 + np.random.random((100, 1)) / 3.0 # 데이터셋 확인 %matplotlib inline import matplotlib.pyplot as plt plt.plot(x_train, y_train, 'ro') plt.plot(x_test, y_test, 'bo') plt.legend(['train', 'test'], loc='upper left') plt.show() ``` ![img](http://tykimos.github.io/warehouse/2017-8-12-Numerical_Prediction_Model_Recipe_5.png) --- ### 레이어 준비 수치예측 모델에 사용할 레이어는 `Dense`와 `Activation`입니다. `Activation`에는 은닉층(hidden layer)에 사용할 `relu`를 준비했습니다. 데이터셋은 일차원 벡터만 다루도록 하겠습니다. |종류|구분|상세구분|브릭| |:-:|:-:|:-:|:-:| |데이터셋|Vector|-|![img](http://tykimos.github.io/warehouse/DeepBrick/Model_Recipe_Part_Dataset_Vector_s.png)| |레이어|Dense||![img](http://tykimos.github.io/warehouse/DeepBrick/Model_Recipe_Part_Dense_s.png)| |레이어|Activation|relu|![img](http://tykimos.github.io/warehouse/DeepBrick/Model_Recipe_Part_Activation_Relu_s.png)| --- ### 모델 준비 수치예측을 하기 위해 `선형회귀 모델`, `퍼셉트론 모델`, `다층퍼셉트론 모델`, `깊은 다층퍼셉트론 모델`을 준비했습니다. #### 선형회귀 모델 가장 간단한 1차 선형회귀 모델로 수치예측을 해보겠습니다. 아래 식에서 x, y는 우리가 만든 데이터셋이고, 회귀분석을 통해서, w와 b값을 구하는 것이 목표입니다. Y = w * X + b w와 b값을 구하게 되면, 임의의 입력 x에 대해서 출력 y가 나오는 데 이것이 예측 값입니다. w, b 값은 분산, 공분산, 평균을 이용하여 쉽게 구할 수 있습니다. w = np.cov(X, Y, bias=1)[0,1] / np.var(X) b = np.average(Y) - w * np.average(X) 간단한 수식이지만 이 수식을 도출하기란 꽤나 복잡습니다. 오차를 최소화하는 극대값을 구하기 위해 편미분을 수행하고, 다시 식을 전개하는 등등의 과정이 필요합니다. 
![img](http://tykimos.github.io/warehouse/2017-8-12-Numerical_Prediction_Model_Recipe_0.png) #### 퍼셉트론 모델 Dense 레이어가 하나이고, 뉴런의 수도 하나인 가장 기본적인 퍼셉트론 모델입니다. 즉 웨이트(w) 하나, 바이어스(b) 하나로 전형적인 Y = w * X + b를 풀기 위한 모델입니다. 수치 예측을 하기 위해서 출력 레이어에 별도의 활성화 함수를 사용하지 않았습니다. w, b 값이 손으로 푼 선형회귀 최적해에 근접하려면 경우에 따라 만번이상의 에포크가 필요합니다. 실제로 사용하지는 않는 모델이지만 선형회귀부터 공부하시는 분들에게는 입문 모델로 나쁘지 않습니다. model = Sequential() model.add(Dense(1, input_dim=1)) ![img](http://tykimos.github.io/warehouse/2017-8-12-Numerical_Prediction_Model_Recipe_1m.png) #### 다층퍼셉트론 모델 Dense 레이어가 두 개인 다층퍼셉트론 모델입니다. 첫 번째 레이어는 64개의 뉴런을 가진 Dense 레이어이고 오류역전파가 용이한 `relu` 활성화 함수를 사용하였습니다. 출력 레이어인 두 번째 레이어는 하나의 수치값을 예측을 하기 위해서 1개의 뉴런을 가지며, 별도의 활성화 함수를 사용하지 않았습니다. model = Sequential() model.add(Dense(64, input_dim=1, activation='relu')) model.add(Dense(1)) ![img](http://tykimos.github.io/warehouse/2017-8-12-Numerical_Prediction_Model_Recipe_2m.png) #### 깊은 다층퍼셉트론 모델 Dense 레이어가 총 세 개인 다층퍼셉트론 모델입니다. 첫 번째, 두 번째 레이어는 64개의 뉴런을 가진 Dense 레이어이고 오류역전파가 용이한 `relu` 활성화 함수를 사용하였습니다. 출력 레이어인 세 번째 레이어는 하나의 수치값을 예측을 하기 위해서 1개의 뉴런을 가지며, 별도의 활성화 함수를 사용하지 않았습니다. model = Sequential() model.add(Dense(64, input_dim=1, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dense(1)) ![img](http://tykimos.github.io/warehouse/2017-8-12-Numerical_Prediction_Model_Recipe_3m.png) --- ### 전체 소스 앞서 살펴본 `선형회귀 모델`, `퍼셉트론 모델`, `다층퍼셉트론 모델`, `깊은 다층퍼셉트론 모델`의 전체 소스는 다음과 같습니다. #### 다층퍼셉트론 모델 ``` # 다층퍼셉트론 모델로 수치예측하기 import numpy as np from keras.models import Sequential from keras.layers import Dense import random # 1. 데이터셋 준비하기 x_train = np.random.random((1000, 1)) y_train = x_train * 2 + np.random.random((1000, 1)) / 3.0 x_test = np.random.random((100, 1)) y_test = x_test * 2 + np.random.random((100, 1)) / 3.0 # 2. 모델 구성하기 model = Sequential() model.add(Dense(64, input_dim=1, activation='relu')) model.add(Dense(1)) # 3. 모델 학습과정 설정하기 model.compile(optimizer='rmsprop', loss='mse') # 4. 
모델 학습시키기 hist = model.fit(x_train, y_train, epochs=50, batch_size=64) # 5. 모델 평가하기 loss = model.evaluate(x_test, y_test, batch_size=32) print('loss : ' + str(loss)) # 6. 학습과정 확인하기 %matplotlib inline import matplotlib.pyplot as plt plt.plot(hist.history['loss']) plt.ylim(0.0, 1.5) plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train'], loc='upper left') plt.show() ``` #### 깊은 다층퍼셉트론 모델 ``` # 깊은 다층퍼셉트론 모델로 수치예측하기 import numpy as np from keras.models import Sequential from keras.layers import Dense import random # 1. 데이터셋 준비하기 x_train = np.random.random((1000, 1)) y_train = x_train * 2 + np.random.random((1000, 1)) / 3.0 x_test = np.random.random((100, 1)) y_test = x_test * 2 + np.random.random((100, 1)) / 3.0 # 2. 모델 구성하기 model = Sequential() model.add(Dense(64, input_dim=1, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dense(1)) # 3. 모델 학습과정 설정하기 model.compile(optimizer='rmsprop', loss='mse') # 4. 모델 학습시키기 hist = model.fit(x_train, y_train, epochs=50, batch_size=64) # 5. 모델 평가하기 loss = model.evaluate(x_test, y_test, batch_size=32) print('loss : ' + str(loss)) # 6. 학습과정 확인하기 %matplotlib inline import matplotlib.pyplot as plt plt.plot(hist.history['loss']) plt.ylim(0.0, 1.5) plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train'], loc='upper left') plt.show() ``` --- ### 학습결과 비교 퍼셉트론 > 다층퍼셉트론 > 깊은 다층퍼셉트론 순으로 학습이 좀 더 빨리 되는 것을 확인할 수 있습니다. |퍼셉트론|다층퍼셉트론|깊은 다층퍼셉트론| |:-:|:-:|:-:| |![img](http://tykimos.github.io/warehouse/2017-8-12-Numerical_Prediction_Model_Recipe_6.png)|![img](http://tykimos.github.io/warehouse/2017-8-12-Numerical_Prediction_Model_Recipe_7.png)|![img](http://tykimos.github.io/warehouse/2017-8-12-Numerical_Prediction_Model_Recipe_8.png)| --- ### 결론 수치예측을 위한 퍼셉트론, 다층퍼셉트론, 깊은 다층퍼셉트론 모델을 살펴보고, 그 성능을 확인 해봤습니다. ![img](http://tykimos.github.io/warehouse/2017-8-12-Numerical_Prediction_Model_Recipe_4m.png) --- ### 같이 보기 * [강좌 목차](https://tykimos.github.io/lecture/)
github_jupyter
# Scalable Batch GP Classification in 1D (w/ SVGP) This example shows how to use grid interpolation based variational classification with an `ApproximateGP` using a `VariationalStrategy` module while learning the inducing point locations. **Note:** The performance of this notebook is substantially improved by using a GPU and casting your tensors with `.cuda()`. See our other GPU example notebooks for how to do this.
```
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
from math import exp  # NOTE(review): unused import

%matplotlib inline
%load_ext autoreload
%autoreload 2

# Build two noisy 1D tasks (a cosine and a sine) over the same 260 inputs.
train_x = torch.linspace(0, 1, 260).unsqueeze(-1)
train_y_cos = torch.cos(train_x * (2 * math.pi)).squeeze() + 0.1 * torch.randn(260)
train_y_sin = torch.sin(train_x * (2 * math.pi)).squeeze() + 0.1 * torch.randn(260)

# Make train_x (2 x 260 x 1) and train_y (2 x 260) — batch dimension first.
train_x = torch.cat([train_x, train_x], dim=1).transpose(-2, 1).unsqueeze(-1)
train_y = torch.cat([train_y_cos.unsqueeze(-1), train_y_sin.unsqueeze(-1)], dim=1).transpose(-2, -1)

from gpytorch.models import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution
from gpytorch.variational import VariationalStrategy


class SVGPRegressionModel(ApproximateGP):
    """Batched sparse variational GP with learned inducing point locations."""

    def __init__(self, inducing_points):
        # NOTE(review): ``batch_size`` is the older gpytorch keyword; newer
        # releases expect ``batch_shape=torch.Size([2])`` — confirm against
        # the installed gpytorch version.
        variational_distribution = CholeskyVariationalDistribution(inducing_points.size(-2), batch_size=2)
        variational_strategy = VariationalStrategy(self, inducing_points, variational_distribution, learn_inducing_locations=True)
        super(SVGPRegressionModel, self).__init__(variational_strategy)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self,x):
        # Prior over latent function values at x.
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
        return latent_pred


# We'll initialize the inducing points to evenly span the space of train_x
inducing_points = torch.linspace(0, 1, 25).unsqueeze(-1).repeat(2, 1, 1)
model = SVGPRegressionModel(inducing_points)
# NOTE(review): despite the "Classification" title, the GaussianLikelihood
# makes this a regression example.
likelihood = gpytorch.likelihoods.GaussianLikelihood()

from gpytorch.mlls.variational_elbo import VariationalELBO

# Find optimal model hyperparameters
model.train()
likelihood.train()

# Use the adam optimizer
optimizer = torch.optim.Adam([
    {'params': model.parameters()},
    {'params': likelihood.parameters()}
], lr=0.01)

# "Loss" for GPs - the marginal log likelihood
# n_data refers to the number of training datapoints
mll = VariationalELBO(likelihood, model, train_y.size(-1), combine_terms=False)


def train():
    """Optimize the ELBO for a fixed number of Adam steps."""
    num_iter = 200
    for i in range(num_iter):
        optimizer.zero_grad()
        output = model(train_x)
        # Calc loss and backprop gradients; combine_terms=False returns the
        # three ELBO components separately so they can be logged.
        log_lik, kl_div, log_prior = mll(output, train_y)
        loss = -(log_lik - kl_div + log_prior)
        loss = loss.sum()
        loss.backward()
        if i % 50 == 0:
            print('Iter %d - Loss: %.3f [%.3f, %.3f, %.3f]' % (i + 1, loss.item(), log_lik.sum().item(), kl_div.sum().item(), log_prior.sum().item()))
        optimizer.step()


# Get clock time
%time train()

# Set into eval mode
model.eval()
likelihood.eval()

# Initialize plots
f, (y1_ax, y2_ax) = plt.subplots(2, 1, figsize=(8, 8))

# Test points every 0.02 in [0,1]
# Make predictions
with torch.no_grad():
    test_x = torch.linspace(0, 1, 51).view(1, -1, 1).repeat(2, 1, 1)
    observed_pred = likelihood(model(test_x))
    # Get mean
    mean = observed_pred.mean
    # Get lower and upper confidence bounds
    lower, upper = observed_pred.confidence_region()

# Plot training data as black stars
y1_ax.plot(train_x[0].detach().numpy(), train_y[0].detach().numpy(), 'k*')
# Predictive mean as blue line
y1_ax.plot(test_x[0].squeeze().numpy(), mean[0, :].numpy(), 'b')
# Shade in confidence
y1_ax.fill_between(test_x[0].squeeze().numpy(), lower[0, :].squeeze().numpy(), upper[0, :].squeeze().numpy(), alpha=0.5)
y1_ax.set_ylim([-3, 3])
y1_ax.legend(['Observed Data', 'Mean', 'Confidence'])
y1_ax.set_title('Observed Values (Likelihood)')

# Second task: same plot for the sine data.
y2_ax.plot(train_x[1].detach().numpy(), train_y[1].detach().numpy(), 'k*')
y2_ax.plot(test_x[1].squeeze().numpy(), mean[1, :].numpy(), 'b')
y2_ax.fill_between(test_x[1].squeeze().numpy(), lower[1, :].squeeze().numpy(), upper[1, :].squeeze().numpy(), alpha=0.5)
y2_ax.set_ylim([-3, 3])
y2_ax.legend(['Observed Data', 'Mean', 'Confidence'])
y2_ax.set_title('Observed Values (Likelihood)')
```
github_jupyter
# Списки Списками в Python называются массивы. Они могут содержать данные различных типов. Для создания списка автоматически можно использовать метод `list()`: ``` list('Lambda') ``` Также можно это сделать напрямую, присвоив переменной значение типа `list`: ``` # Пустой список s = [] # список с данными разных типов l = ['mai', 'l', ['lambda'], 228] print(s) print(l) ``` Также можно использовать генераторы списков: ``` a = ['L', 'a', 'm', 'b', 'd', 'a'] b = [i * 3 for i in a] b ``` Количество элементов в списке можно узнать с помощью функции `len()`: ``` a = ['L', 'a', 'm', 'b', 'd', 'a'] len(a) ``` В списках, так же как и в строках, можно обратиться к элементу через индекс `s[2]` или `s[3]`, для сравнение или вывода на печать: ``` s = ['L', 'a', 'm', 'b', 'd', 'a'] print(s[2], s[3]) ``` Генераторы списков позволяют создавать и заполнять списки. Генератор списков предполагает наличие итерируемого объекта или итератора, на основании которого будет создаваться новый список, а также выражения, которое будет производить манипуляции с извлеченными из последовательности элементами перед тем как добавить их в формируемый список. ## Методы для работы со списками Методы списков вызываются по схеме: `list.method()`. 
Ниже будут перечислены полезные методы для работы со списками: - `append(a)` - добавляет элемент a в конец списка ``` var = ['L', 'a', 'm', 'b', 'd'] var.append('a') print(var) ``` - `extend(L)` - расширяет список, добавляя к концу все элементы списка `L` ``` var = ['L', 'a', 'm', 'b', 'd', 'a'] var.extend(['L', 'a', 'm', 'b', 'd', 'a']) print(var) ``` - `insert(i, a)` - вставляет на `i` позицию элемент `a` ``` var = ['L', 'a', 'b', 'd', 'a'] var.insert(2,'m') print(var) ``` - `remove(a)` - удаляет первое найденное значение элемента в списке со значением `a`, возвращает ошибку, если такого элемента не существует ``` var = ['L', 'a', 'm', 'b', 'd', 'a'] var.remove('a') print(var) ``` - `pop(i)` - удаляет `i`-ый элемент и возвращает его, если индекс не указан, удаляет последний элемент ``` var = ['L', 'a', 'm', 'b', 'd', 'a'] var.pop(0) print(var) ``` - `index(a)` возвращает индекс элемента `a` (индексация начинается с `0`) ``` var = ['L', 'a', 'm', 'b', 'd', 'a'] var.index('L') ``` - `count(a)` - возвращает количество элементов со значением `a` ``` var = ['L', 'a', 'm', 'b', 'd', 'a'] var.count('a') ``` - `sort([key = функция])` - сортирует список на основе функции, можно не прописывать функцию, тогда сортировка будет происходить по встроенному алгоритму ``` var = ['L', 'a', 'm', 'b', 'd', 'a'] var.sort() print(var) ``` - `reverse()` - разворачивает список ``` var = ['L', 'a', 'm', 'b', 'd', 'a'] var.reverse() print(var) ``` - `copy()` - поверхностная копия списка, при присвоении переменной копии списка, значение данного списка не изменяется в случае изменения первого. 
Если переменной присвоить список через `"="`, тогда значение этой переменной будет меняться при изменении оригинала ``` var = ['L', 'a', 'm', 'b', 'd', 'a'] asd = var.copy() print(asd) var = ['L', 'a', 'm', 'b', 'd', 'a'] asd = var print(asd) print(var) var.reverse() print(asd) print(var) var = ['L', 'a', 'm', 'b', 'd', 'a'] asd = var.copy() print(asd) print(var) var.reverse() print(asd) print(var) ``` - `clear()` - очищает список ``` var = ['L', 'a', 'm', 'b', 'd', 'a'] var.clear() print(var) ```
github_jupyter
``` # data retrieval import requests # data storage and manipulation import numpy as np import pandas as pd import matplotlib.pyplot as plt # functional tools to allow for model fine tuning from functools import partial, update_wrapper # modeling and validation from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score, \ precision_score, \ recall_score, \ confusion_matrix, \ f1_score # from my modeling repo from indoorplants.analysis import exploratory from indoorplants.validation import crossvalidate, curves %matplotlib inline ``` ### -------------------------------------------------------------- # Census Data Analysis ### -------------------------------------------------------------- # 0. Overview ### Prerequisites This notebook contains the results of a quick exercise in data analysis and prediction, with the goal being to develop a model to predict whether a given person, based on the provided data, makes more than $50K per year. Depending on which version of Jupyter Notebook you are running, you may have to increase your data transmission limits in order to be able to download the dataset within the confines of the notebook, which is what I have done here. This can be achieved through passing a new limit to the *NotebookApp.iopub_data_rate_limit* option while launching Jupyter Notebook from the command line: jupyter notebook --NotebookApp.iopub_data_rate_limit=10000000 The original dataset and description can be found at: https://archive.ics.uci.edu/ml/datasets/Adult ### Analysis Description I put together this analysis in an effort to showcase some analysis & ML-validation modules that I have been working on. 
The Notebook has 3 parts: 1) Get Data - Pull dataset from the UC Irvine website - Check for and deal with duplicates and nulls - Carry out a slight reworking of table 2) Exploratory Data Analysis - Investigate certain features, looking for relationships with income level - The approach taken here is visual (and also pretty casual) - Additionally, provide some descriptions of the dataset 3) Modeling - Fit a Random Forest Classifier to the data - Leverage cross-validation to tune the depth parameter to reduce overfitting - Introduce example business logic and develop custom score function # 1. Get Data ### Retrieve ``` def get_census_data(): cols = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country', 'over_fifty_k'] url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data' with requests.get(url, stream=True) as r: results = [l.decode().split(',') for l in r.iter_lines()] return pd.DataFrame(results, columns=cols) table = get_census_data() table.head() table.to_csv("cencus_data.csv") len(table) ``` ### Check for problems #### Duplicates ``` len(table[table.duplicated()]) table = table.drop_duplicates() ``` #### Nulls ``` table.isnull().sum() table[table.workclass.isnull()] table[~table.index.isin((32561,))].isnull().sum() table = table[~table.index.isin((32561,))].reset_index(drop=True) ``` ### Set up data types ``` table.dtypes int_cols = ['age', 'fnlwgt', 'capital_gain', 'capital_loss', 'hours_per_week'] table.loc[:, int_cols] = table.loc[:, int_cols].astype(int) ``` ### Set up response column ``` table.over_fifty_k.value_counts() table.over_fifty_k.unique() table.over_fifty_k = table.over_fifty_k.map(lambda _: 1 if _ == ' >50K' else 0) ``` # 2. 
Exploratory Data Analysis ``` table.over_fifty_k.value_counts() / len(table) ``` #### Notes The classes are imbalanced, appearing here at a ratio of roughly 3: 1 negative: positive. ### Age ``` table.age.head() table.age.isnull().sum() table.age = table.age.astype(int) table.age.nunique() ax = table.age.value_counts().sort_index().plot.bar(figsize=(15, 8), color="steelblue") ax.title.set_text('age: counts by age') ``` #### Notes The *age* data looks to be positively skewed, and has a floor of 17. We can look, in more detail, at the distribution of the *age* data, this time with overlays for the data's mean and standard deviation, using the `center_scale_plot` function from the **exploratory** module. ``` ax = exploratory.center_scale_plot(table.age, np.mean, np.std, bins=73) len(table[table.age > 79]) / len(table) ``` #### Notes A couple of observations: - roughly 99.6% of the data falls within 3 standard deviations of the mean, which is 38.6 years - the concentration of data with age 90 might indicate that age was cut off at 90 ``` ax = exploratory.qq_plot(table.age.astype(int)) ``` #### Notes We can use a Q-Q, or quantile-quantile, plot (which in this case plots the quantiles of our data against the quantiles of a Normal distribution with the same mean and standard deviation as the data) to assess whether the data is Normally distributed. We have an $R^2$ of over 98%, as the vast majority of the data, particularly the data close to the mean, fits the Normal distribution (the straight red line) quite well. However, we can see clearly the deviation from the Normal distribution that occurs in the tails, particularly in the left tail. The positive skewness makes sense given that the data does not seem to include those under the age of 17. ``` ax = exploratory.feature_value_counts_by_class(table, 'over_fifty_k', 'age', figsize=(15, 8)) ``` #### Notes Breaking down age by income, we can see that the bulk of the $50K-or-higher earners are middle-aged. 
### Education ``` table.education.nunique() ed = table[['education', 'education_num'] ].drop_duplicates().set_index('education', drop=True) ed['education_num'] = ed['education_num'].astype(int) ed.sort_values('education_num') ``` #### Notes There is a one-to-one mapping of *education* to *education_num*. We can retain the ordering here for future analysis of the *education* field. ``` table.education = pd.Categorical(table.education, ed.sort_values('education_num').index) ax = table['education'].value_counts( ).sort_index( ).plot.bar(figsize=(11, 8), rot=40, color="steelblue") ax.title.set_text('education: counts') ax = exploratory.feature_value_counts_by_class(table, 'over_fifty_k','education', rot=40) table['education_num'] = table.education_num.astype(int) ``` #### Notes High income is much more prevalent amongst those who went at least as far as to complete high school. ### Education vs. age, by income class ``` ax = exploratory.scatter_by_class(table, 'over_fifty_k', 'age', 'education_num') ``` #### Notes The more brownish regions in the above plot indicate an overlap between the classes. The bright orange here could indicate some pockets of predominantly over-$50K earners. Let us continue examining other variables to see if a decision boundary becomes more clear. ### Work class ``` table.workclass.nunique() ax = table['workclass'].value_counts( ).sort_index( ).plot.bar(figsize=(11, 8), rot=40, color="steelblue") t = ax.title.set_text('workclass: histogram') ax = exploratory.feature_value_counts_by_class(table, 'over_fifty_k', 'workclass', rot=40) ``` #### Notes The *Private* employment bucket seem to hold the bulk of the high earners, though most of the people in this bucket in fact do not eclipse $50K in income. ### Final weight #### Notes The UCI documentation describes this column as reflecting a by-state population demographic weight. Let's take a look at how this data is distributed. 
``` table.fnlwgt.nunique() table.fnlwgt = table.fnlwgt.astype(int) ax = exploratory.center_scale_plot(table.fnlwgt, np.mean, np.std, bins=100) len(table[table.fnlwgt > 506445]) / len(table) ``` #### Notes The final weight data exhibits strong positive skewness. ``` ax = exploratory.center_scale_plot(np.log(table.fnlwgt), np.mean, np.std, bins=100) ``` #### Notes Taking the log transform has yielded a more symmetrical dataset that sits almost entirely within 3 standard deviation bands. ``` ax = exploratory.qq_plot(np.log(table.fnlwgt)) ``` #### Notes Plotting the ordered final weight data against the quantiles of a Normal distribution with the same location and center as the data, we achieve a high goodness-of-fit measure. The unusual lump in the data's left tail stands out on this QQ plot as it did on the above histogram. ``` table['fnlwgt_log'] = np.log(table.fnlwgt) ``` ### Final weight vs. education, by income class ``` ax = exploratory.scatter_by_class(table, 'over_fifty_k', 'education_num', 'fnlwgt_log') ``` # 3. Modeling #### Notes I am going to keep it simple: one-hot encode the categorical variables, leave the rest in their original states, and see what kind of results can be achieved using a Random Forest Classifier. A Decision Tree works by repeatedly splitting a dataset, 20-questions-style, in an effort to find feature values by which the dataset can be separated into its proper classes. At a high-level, a Random Forest works by creating many different Decision Trees and then averaging its trees' predictions to obtain a single value. RFCs are capable of finding complex decision boundaries, require little with respect to data prep and hyperparameter tuning, and can handle class imbalances in a straightforward manner, through a weighting of the class counts when evaluating splits and making predictions. We will in this case opt for a _balanced subsample_ class weighting. 
See sklearn [docs](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) for more detail. ### Function to get modeling data ``` def get_model_data(table, features, dont_dummy): cols = ['over_fifty_k'] + features for_modeling = table[cols] to_dummy = list(filter(lambda _: _ not in dont_dummy, cols)) return pd.get_dummies(for_modeling, columns=to_dummy) ``` ### Prep ``` table.columns table.dtypes table[['capital_gain', 'capital_loss', 'hours_per_week']] = table[['capital_gain', 'capital_loss', 'hours_per_week'] ].astype(int) int_cols = list(table.dtypes[table.dtypes=='int64'].index.values) int_cols X = ['age', 'workclass', 'fnlwgt', 'education_num', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country'] ``` ### Get data ``` for_modeling = get_model_data(table, X, int_cols) len(for_modeling.columns) X_model = [col for col in for_modeling.columns if col != 'over_fifty_k'] cv, final = train_test_split(for_modeling.index, test_size=.20, stratify=for_modeling.over_fifty_k) final = for_modeling.loc[final, :] for_modeling = for_modeling.loc[cv, :] ``` ### Cross validate #### Notes Cross validation involves slicing the data into *k* pieces (in this case 5) and, for each of the *k* folds, holding that fold out as a test set (which the model will not be trained on) and training on the rest. The results for the tests on each of the folds are then averaged. 
This methodology solves 2 common problems in model selection: - performance figures that stem from predicting the data the model is trained on will be overly optimistic - the particular subset of the data the model is trained on can bias the hyper-parameter tuning process We will perform cross validation on 80% of the data (saved into the `for_modeling` table) and we will perform our final tests on the remaining 20% (`final`) so as to avoid reporting final test results based on the data that is used for model selection. ``` results = crossvalidate.cv_score(model_obj=RandomForestClassifier(n_estimators=100, class_weight='balanced_subsample'), X=for_modeling[X_model], y=for_modeling.over_fifty_k, score_funcs=[accuracy_score, precision_score, recall_score]) results ``` #### Analysis We've trained and tested a Random Forest classifier with 2 hyper parameters passed: - `n_estimators` determines the number of Decision Trees built - `class_weight` of *balanced_subsample* tells the algorithm to weight class counts, within each tree, inversely to the prevalence of the classes in the subsample of the data that the tree was built on Our test *accuracy* of ~85% is on par with the results posted by this dataset's curator. But there are a couple of issues with these results: - there is a large disparity between train and test results for all 3 of the scores we've calculated - *accuracy*, especially when dealing with imbalanced classes, can be very misleading I've included *accuracy* as a sort of quick-n-dirty guide, but the *precision* and *recall* scores here are more important as they are based off of the results for the positive (over-$50K annual income) class, which is less prevalent in the dataset. *Precision* tells us how reckless the model was in making its positive predictions. The higher the score, the more careful the model. *Recall* tells us how much money was left on the table, so to speak. 
The higher the *recall*, the greater the percent of all positive class instances identified. We can introduce some prepruning, which means limiting the growth of the RFC's internal Decision Trees, so as to curb the overfitting. ``` curves.validation_curve(model_type=RandomForestClassifier, X=for_modeling[X_model], y=for_modeling.over_fifty_k, param_name='max_depth', param_range=np.linspace(4, 16, 7), other_params={'class_weight':'balanced_subsample', 'n_estimators':100}, score=f1_score) curves.learning_curve(model_type=RandomForestClassifier, X=for_modeling[X_model], y=for_modeling.over_fifty_k, model_params={'class_weight':'balanced_subsample', 'n_estimators':100, "max_depth": 14}, score=f1_score) curves.calibration_curve(model_type=RandomForestClassifier, X=for_modeling[X_model], y=for_modeling.over_fifty_k, model_params={'class_weight':'balanced_subsample', 'n_estimators':100, "max_depth": 14}) ``` #### Analysis The above validation curve plots cross validation performance for the model across different levels of prepruning. The lower `max_depth` is, the stronger the prepruning. As `max_depth` increases, the train and test scores diverge, which is a good sign of overfitting. The performance metric being used here is called *f1 score*. f1 is the harmonic mean of precision and recall, which means that if you have to choose one number, this can be a good choice as it accounts for both care and thoroughness, so to speak. Additionally, this measure is calculated on the positive class, which means that we will know if our model is simply dumping all predictions into the negative class bucket, a common "gotcha" when dealing with imbalanced classes. I am going to continue validation with a `max_depth` of 14, as the train and test divergence is still pretty tame (within 5%), and there does appear to be a gain in validation performance between a `max_depth` of 12 and 14. 
``` r = crossvalidate.cv_conf_mat(model_obj=RandomForestClassifier(n_estimators=100, class_weight='balanced_subsample', max_depth=14), X=for_modeling[X_model], y=for_modeling.over_fifty_k, splits=3) r ``` #### Analysis To really come to an opinion on how a model's working, it is good to take a look at what's being put where, which is what the above confusion matrices show us (1 for each of 3 trials). It doesn't seem like anything crazy is happening - these numbers reflect what we'd expect from the aggregate scores we looked at above. ### Test situation Taking the confusion matrix further, one can put a value on each type of correct or incorrect prediction. This can make a performance score less abstract and more aligned with a business objective. Example: you are selling insurance, and a potential client will buy your insurance if the following conditions hold: 1. potential client makes at least $50K annually 2. potential client is exposed to your insurance product via advertising It will cost you \$100 to advertise this insurance to a potential client, and you will generate $1000 in revenue if a potential client converts. You need to build a model that will help you decide who to advertise to in order to maximize your simplified gross income of *sales revenue* - *advertising costs*. 
#### Set up cost funcs ``` def cost(true, pred): # true negative, no cost if true == 0 and pred == 0: return 0 # false positive, lose $100 to advertising elif true == 0 and pred == 1: return 100 # false negative, miss out on a grand elif true == 1 and pred == 0: return 1000 # true positive, spend $100 to get $1000 elif true == 1 and pred == 1: return -900 def model_cost(y_true, y_pred): func = np.vectorize(cost) return func(y_true, y_pred).sum() ``` #### Cross validate ``` results = crossvalidate.cv_score(model_obj=RandomForestClassifier( n_estimators=100, class_weight='balanced_subsample', max_depth=14), X=for_modeling[X_model], y=for_modeling.over_fifty_k, score_funcs=[model_cost]) results ``` #### Analysis We've set up an aggressive cost function here where poor *recall* is heavily penalized, which makes sense given the business model. Fortunately, our model is coming in on the right side of zero (this is a measurement of cost so negative is good). It's worth noting that there's a large disparity between train and test error. This could be an indication of overfitting. That said, the standard deviation, proportionately speaking, is quite small. Both of these qualities - overfitting but also consistency - were revealed to us in our learning curve analysis above. #### Train / test disparity I wonder if the disparity between train and test costs may have something to do with the larger size of the training set? (this is an absolute, not %, cost) ``` results.loc[('model_cost', 'test'), 'mean' ] / results.loc[('model_cost', 'train'), 'mean'] ``` Our train data sets used in the CV contained 4 times as much data as our test set did (5-fold validation), and our train score is about 5 times our test score. So we are overfitting, but only by, let's say, 5 or so percent. #### Next steps One thing we can do to optimize our cost here is tweak our decision boundary. **sklearn's** models default to a boundary of .5 for binary classification. 
Since we are much more interested in *recall* than *precision* (we want to nail as many positives as possible and are OK with some false positives) we can lower that decision bounday a bit. In other words, we're going to tell the model to be a little more aggressive in looking for positive classifications. We will start with a decision boundary of 40%. ### Adjust boundaries #### New RF Classifier that predicts probabilities To do this, we are going to have to put a wrapper around the **sklearn** `RandomForestClassifier` class. These cross-validation tools expect models with an API similar to that of the **sklearn** models, which means they are looking for every model to have a predict method. However, we can change the functionality of a model's predict method so that it predicts class probabilities instead of labels. *Note*: I have left this boundary analysis broken out into pieces here in the notebook, but the **indoorplants.validation.boundaries** module provides tools for evaluating binary classifier performance using adjusted decision boundaries. ``` class RFProb(RandomForestClassifier): def predict(self, X): return self.predict_proba(X) ``` #### Modify cost mechanisms to convert probability to label ``` def prob_to_class(func): def convert(y_true, y_pred): pos_class = y_pred[:, 1] conv = np.vectorize(lambda _: 1 if _ > .4 else 0) return func(y_true, conv(pos_class)) return convert @prob_to_class def model_cost(y_true, y_pred): func = np.vectorize(cost) return func(y_true, y_pred).sum() ``` #### Cross validate ``` results40 = crossvalidate.cv_score(model_obj=RFProb(n_estimators=100, class_weight='balanced_subsample', max_depth=14), X=for_modeling[X_model], y=for_modeling.over_fifty_k, score_funcs=[model_cost]) results40 results40.loc[('convert', 'test'), 'mean' ] / results.loc[('model_cost', 'test'), 'mean'] ``` #### Notes We have seen an improvement here, subtracting roughly 16% from our test cost. We can push this boundary even further if we'd like. 
#### Rework functions to allow for testing across passed thresholds ``` def cost(true, pred): if true == 0 and pred == 0: return 0 elif true == 0 and pred == 1: return 100 elif true == 1 and pred == 0: return 1000 elif true == 1 and pred == 1: return -900 def model_cost(y_true, y_pred): func = np.vectorize(cost) return func(y_true, y_pred).sum() def prob_to_class(t, func): def convert(t, y_true, y_pred): pos_class = y_pred[:, 1] conv = np.vectorize(lambda _: 1 if _ > t else 0) return func(y_true, conv(pos_class)) def threshold(t): partial_func = partial(convert, t) update_wrapper(partial_func, convert) return partial_func return threshold(t) def boundary_cost(t): return prob_to_class(t, model_cost) ``` #### Function to cross validate over multiple boundaries: 5% to 55% ``` def test_boundaries(): i, results = .55, {} while i > .05: results[i] = crossvalidate.cv_score( model_obj=RFProb(n_estimators=100, class_weight='balanced_subsample', max_depth=14), X=for_modeling[X_model], y=for_modeling.over_fifty_k, score_funcs=[boundary_cost(i)]) i -= .05 return pd.concat(results) ``` #### Run cross validation ``` results = test_boundaries() results ``` #### Analysis The minimum cost (test) was produced with a decision boundary of 15%. However, we see a drastically reduced standard deviation for a decision boundary of 10%. Of course, whether or not we'd actually want a model this aggressive would depend upon our real-world use case. Seeing as how this is not a real business model, I will continue with a decision threshold of 10%. This means that we will advertise to anyone to whom the model assigns at least a 10% probability of earning at least $50K per year. 
### Train and test final model #### Instantiate model ``` model = RFProb(n_estimators=100, class_weight='balanced_subsample', max_depth=14) ``` #### Train ``` model = model.fit(for_modeling[X_model], for_modeling.over_fifty_k) ``` #### Predict ``` y_pred = model.predict(final[X_model]) ``` #### Threshold ``` def convert(t, y_pred): pos_class = y_pred[:, 1] conv = np.vectorize(lambda _: 1 if _ > t else 0) return conv(pos_class) y_pred_class = convert(.1, y_pred) ``` #### Analysis ``` conf_mat = confusion_matrix(final.over_fifty_k, y_pred_class) conf_mat boundary_cost(.1)(final.over_fifty_k, y_pred) 'recall: {}'.format(conf_mat[1][1] / conf_mat[1, :].sum()) 'precision: {}'.format(conf_mat[1][1] / conf_mat[:, 1].sum()) ``` Testing on our held-out 20% (stratified & randomly selected) of the dataset, we've achieved a positive-class *recall* score of roughly 99%, and a positive-class *precision* of roughly 37%. To put it differently, we can see (looking at the above confusion matrix) that we had almost no false negatives, with a whole lot of false positives - about as many (a bit more actually) as there were true negatives. In a real business situation, you would have to think carefully about how you wanted to score your model's performance. However, we've seen in this exercise that, depending on your situation, it can be worth thinking outside the box (or in the case of the confusion matrix - reinterpreting the box).
github_jupyter
# Steam Data Cleaning - Optimising Cleaning of the Release Date Column *This forms part of a larger series of posts for my [blog](http://nik-davis.github.io) on downloading, processing and analysing data from the steam store. [See all posts here](http://nik-davis.github.io/tag/steam).* ``` # view software version information # http://raw.github.com/jrjohansson/version_information/master/version_information.py %load_ext version_information # %reload_ext version_information %version_information numpy, pandas ``` <!-- PELICAN_BEGIN_SUMMARY --> In my [previous post](https://nik-davis.github.io/posts/2019/steam-data-cleaning/), we took an in-depth look at cleaning data downloaded from the Steam Store. We followed the process from start to finish, omitting just one column, which we will look at today. The final column to clean, `release_date`, provides some interesting optimisation and learning challenges. We encountered columns with a similar structure previously, so we can use what we learned there, but now we will also have dates to handle. We're going to approach this problem with the goal of optimisation in mind - we'll start by figuring out how to solve the task, getting to the point of a functional solution, then we'll test parts of the code to see where the major slowdowns are, using this to develop a framework for improving the efficiency of the code. By iteratively testing, rewriting and rerunning sections of code, we can gradually move towards a more efficienct solution. <!-- PELICAN_END_SUMMARY --> ## Importing Local Functions When cleaning the data, we wrote a `print_steam_links` function to easily create links from a dataframe. To use it again, we could copy the code and define it here, but instead we will use a handy trick in jupyter notebook. 
If we place the function in a separate python (.py) file inside a folder at the root of the project directory (in this case, the 'src' folder), we can tell python to look there for local modules using `sys.path.append`. Then we can import the function directly, where the file name (datacleaning) is the module name, as seen below. ``` import sys sys.path.append('../src/') from datacleaning import print_steam_links ``` ## Import and Inspect Data We begin by importing the necessary libraries and inspecting the data, with every column cleaned except release date. ``` # standard library imports from ast import literal_eval import time import re import sys sys.path.append('../src/') # third-party imports import numpy as np import pandas as pd # local imports from datacleaning import print_steam_links # customisations pd.set_option("max_columns", 100) imported_steam_data = pd.read_csv('../data/exports/steam_partially_clean.csv') print('Rows:', imported_steam_data.shape[0]) print('Columns:', imported_steam_data.shape[1]) imported_steam_data.head() ``` Checking the null counts, we see there are no columns with missing values. This means we did our job properly previously, and we should just be able to focus on cleaning and formatting the column. ``` imported_steam_data.isnull().sum() ``` ## Checking the Format First we shall inspect the raw format of the column. As we can see below, it is stored as a dictionary-like string object containing values for `coming_soon` and `date`. From the first few rows it would appear that the dates are stored in a uniform format - day as an integer, month as a 3-character string abbreviation, a comma, then the year as a four-digit number. We can parse this either using the python built-in datetime module, or as we already have pandas imported, we can use the [pd.to_datetime](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html) function. 
Also, as our analysis will involve looking at ownership and sales data, looking at games that are not released yet will not be useful to us. Intuitively, we can drop any titles which are marked as coming soon, presumably having this value set to true. As a side note, once parsed it may be worth checking that no release dates in our data are beyond the current date, just to make doubly sure none slip through. ``` display(imported_steam_data['release_date'][0]) imported_steam_data[['name', 'release_date']].head() ``` We can explore the data a little further using the `value_counts` method. Whilst it looks like most dates are in the format `dd mmm, yyyy`, there at least a couple of rows in the format `mmm yyyy`, such as 'Jun 2009'. This means that all the dates aren't stored uniformly, so we will have to take care when parsing them. ``` print(imported_steam_data['release_date'].value_counts().head()) imported_steam_data.loc[imported_steam_data['release_date'].str.contains('Jun 2009'), 'release_date'] ``` There are also a number of rows that have a blank string for the date value. We'll have to treat these as missing values, and decide if we want to remove them from the dataset. We can use the imported `print_steam_links` function to inspect some of the rows, using `sample` to randomly select a few. ``` no_release_date = imported_steam_data[imported_steam_data['release_date'].str.contains("'date': ''")] print('Rows with no release date:', no_release_date.shape[0], '\n') print_steam_links(no_release_date.sample(5, random_state=0)) ``` It looks like some are special re-releases, like anniversary or game of the year editions, some are early access and not officially released yet, and others simply have a missing date. Apart from that there don't appear to be any clear patterns emerging, so as there are only 26 rows it may be best to remove them. ## Parsing the dates Taking a look at the format of the column, we'll need to be using `literal_eval` as we did before. 
Apart from that it should be straightforward enough to extract the date. ``` print(type(imported_steam_data['release_date'].iloc[0])) imported_steam_data['release_date'].iloc[0] print(type(literal_eval(imported_steam_data['release_date'].iloc[0]))) literal_eval(imported_steam_data['release_date'].iloc[0])['date'] ``` Once extracted, we can use the `pd.to_datetime` functon to interpret and store dates as `datetime` objects. This is useful as it will allow us to search and sort the dataset by year when it comes to performing analysis. Say for example we only wish to examine games released in 2010, by converting our dates to a python-recognisable format this will be very easy to achieve. As seen below, we can supply the `to_datetime` function with a date and pandas will automatically interpret the format. We can then inspect it or print an attribute like the year. We can also provide pandas with the format explicitly, so it knows what to look for and how to parse it, which may be [quicker for large sets of data](https://stackoverflow.com/questions/32034689/why-is-pandas-to-datetime-slow-for-non-standard-time-format-such-as-2014-12-31). ``` timestamp = pd.to_datetime(literal_eval(imported_steam_data['release_date'].iloc[0])['date']) print(timestamp) print(timestamp.year) pd.to_datetime(literal_eval(imported_steam_data['release_date'].iloc[0])['date'], format='%d %b, %Y') ``` ## Initial Function Definition Now we are ready to begin defining a function. As we only want to keep unreleased games, we first evaluate values from the `coming_soon` key, and keep only those where the value is `False`. Next we extract the release date, and set missing dates to np.nan, the default way of storing null values in pandas. Then, using the formats we learned previously, we interpret those dates using the `to_datetime` function. Once complete we pass over the dataframe once more with a general call to `to_datetime`, catching any dates we missed. 
Finally we drop the columns we no longer need and return the dataframe. ``` def process_release_date(df): df = df.copy() df['coming_soon'] = df['release_date'].apply(lambda x: literal_eval(x)['coming_soon']) # Only want to keep released games df = df[df['coming_soon'] == False].copy() # extract release date and set missing dates to null df['date'] = df['release_date'].apply(lambda x: literal_eval(x)['date']) df.loc[df['date'] == '', 'date'] = np.nan # Parse the date formats we have discovered df['datetime'] = pd.to_datetime(df['date'], format='%d %b, %Y', errors='ignore') df['datetime'] = pd.to_datetime(df['datetime'], format='%b %Y', errors='ignore') # Parse the rest of the date formats df['release_date'] = pd.to_datetime(df['datetime']) df = df.drop(['coming_soon', 'date', 'datetime'], axis=1) return df result = process_release_date(imported_steam_data) result[['steam_appid', 'release_date']].head() ``` Whilst functional, the process is quite slow. The easiest way to measure the efficiency of code is by timing how long it takes to run, and that is the method we'll use here. By running this code inside of jupyter notebook, we can take advantage of IPython magics, and use the [%timeit](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-timeit) magic to easily test how long it takes to run the function. ``` %timeit process_release_date(imported_steam_data) ``` We can see that, on average, it takes about 3.5 seconds. Whilst manageable, we could certainly benefit from optimising our code, as this could quickly add up in larger data sets, where increasing efficiency can prove invaluable. There are a few areas we can investigate to make improvements. When initially parsing the date, we end up calling `literal_eval` twice, which may be a source of slowdown. We also loop over the entire dataset multiple times when calling the `to_datetime` function.
We can be pretty confident that reducing the number of traversals over the dataset will provide some gains, but first, let's find out which part is causing the greatest slowdown. Targetting the slowest part of the code and improving it will lead to the most noticeable gains, and beyond that we can tweak other parts until we're happy. We just used the %timeit magic to time our function - the function is run multiple times and the average execution time is given - but we can also use the built-in `time` module of python to easily inspect specific sections of code. ``` # begin timing start_time = time.time() # do something x = 1 for i in range(1000): x += 1 for j in range(1000): x += 1 # stop timing end_time = time.time() # calculate time difference execution_time = end_time - start_time print(execution_time) ``` We'll break down the function into different sections which we think may be causing slowdown, and see which takes the longest to execute. ``` def process_release_date(df): df = df.copy() # first section eval_start = time.time() df['coming_soon'] = df['release_date'].apply(lambda x: literal_eval(x)['coming_soon']) df = df[df['coming_soon'] == False].copy() df['date'] = df['release_date'].apply(lambda x: literal_eval(x)['date']) print('Evaluation run-time:', time.time() - eval_start) df.loc[df['date'] == '', 'date'] = None # second section first_parse_start = time.time() df['datetime'] = pd.to_datetime(df['date'], format='%d %b, %Y', errors='ignore') df['datetime'] = pd.to_datetime(df['datetime'], format='%b %Y', errors='ignore') print('First parse run-time:', time.time() - first_parse_start) # third section second_parse_start = time.time() df['release_date'] = pd.to_datetime(df['datetime']) print('Second parse run-time:', time.time() - second_parse_start) df = df.drop(['coming_soon', 'date', 'datetime'], axis=1) return df function_start = time.time() process_release_date(imported_steam_data) print('\nTotal run-time:', time.time() - function_start) ``` 
Immediately we can see that the majority of run-time is taken up by the second call to `pd.to_datetime`. This suggests that the first two calls are not functioning as expected - they are possibly terminating after the first error instead of skipping over it as desired - and most of the work is being done by the final call. Now it makes sense why it is slow - pandas has to figure out how each date is formatted, and since we know we have some variations this may be slowing it down considerably. Whilst the evaluation run-time is much shorter, multiple calls to `literal_eval` may be slowing the function as well, so we may wish to investigate that. As we know the biggest slowdown, we will begin there. We now know that handling our dates in their current form is slow, and we know that we have some different formats mixed in there. Whilst there are likely many possible solutions to this problem, using regular expressions (or regex) comes to mind as they tend to excel at pattern matching in strings. We know for sure two of the patterns, so let's build a regex for each of those. Then we can iteratively add more as we discover any other patterns. A powerful and useful tool for building and testing regex can be found at [regexr.com](https://regexr.com/). ``` pattern = r'[\d]{1,2} [A-Za-z]{3}, [\d]{4}' string = '13 Jul, 2018' print(re.search(pattern, string)) pattern = r'[A-Za-z]{3} [\d]{4}' string = 'Apr 2016' print(re.search(pattern, string)) ``` Using these two patterns we can start building out a function. We're going to apply a function to the date column which searches for each pattern, returning a standardised date string which we will then feed into the `to_datetime` function. The first search matches the 'mmm yyyy' pattern, like 'Apr 2019'. As we don't know the particular day for these matches we will assume it is the first of the month, returning '1 Apr 2019' in this example. If we don't match this, we'll check for the second case. 
The second match will be the 'dd mmm, yyyy' pattern, like '13 Jul, 2018'. In this case we will simply return the match with the comma removed, to become '13 Jul 2018'. If neither of these match, we'll check for the empty string, and return it as it is for now. For anything else we'll simply print the string so we know what else we should be searching for. ``` def process_release_date(df): df = df.copy() df['coming_soon'] = df['release_date'].apply(lambda x: literal_eval(x)['coming_soon']) df = df[df['coming_soon'] == False].copy() df['date'] = df['release_date'].apply(lambda x: literal_eval(x)['date']) def parse_date(x): if re.search(r'[A-Za-z]{3} [\d]{4}', x): return '1 ' + x elif re.search(r'[\d]{1,2} [A-Za-z]{3}, [\d]{4}', x): return x.replace(',', '') elif x == '': return x else: print(x) df['date'] = df['date'].apply(parse_date) df['release_date'] = pd.to_datetime(df['date'], infer_datetime_format=True) df = df.drop(['coming_soon', 'date'], axis=1) return df result = process_release_date(imported_steam_data) ``` As no other rows we're printed out, we can be confident that we caught all of the patterns, and don't have any extra to take care of. We just used the `infer_datetime_format` parameter of `to_datetime`, which, according to the documentation, can speed up the process. However, as we now know the exact format the dates will be in, we can explicitly set it ourselves, and this should be the fastest way of doing things. We also need to decide how to handle the missing dates - those with the empty strings. For now let's set the way the function handles errors as `coerce`, which returns `NaT` (not a time). We can now rewrite the function and time it as we did before. 
``` def process_release_date_old(df): df = df.copy() df['coming_soon'] = df['release_date'].apply(lambda x: literal_eval(x)['coming_soon']) df = df[df['coming_soon'] == False].copy() df['date'] = df['release_date'].apply(lambda x: literal_eval(x)['date']) # Simple parsing df['release_date'] = pd.to_datetime(df['date']) df = df.drop(['coming_soon', 'date'], axis=1) return df def process_release_date_new(df): df = df.copy() df['coming_soon'] = df['release_date'].apply(lambda x: literal_eval(x)['coming_soon']) df = df[df['coming_soon'] == False].copy() df['date'] = df['release_date'].apply(lambda x: literal_eval(x)['date']) # Complex parsing def parse_date(x): if re.search(r'[A-Za-z]{3} [\d]{4}', x): return '1 ' + x elif re.search(r'[\d]{1,2} [A-Za-z]{3}, [\d]{4}', x): return x.replace(',', '') elif x == '': return x df['date'] = df['date'].apply(parse_date) df['release_date'] = pd.to_datetime(df['date'], format='%d %b %Y', errors='coerce') df = df.drop(['coming_soon', 'date'], axis=1) return df print('Testing date parsing:\n') %timeit process_release_date_old(imported_steam_data) %timeit process_release_date_new(imported_steam_data) ``` Our results show that the new method is almost four times faster, so we're on the right track. Another optimisation we can make here is checking which part of the if/else statements has the most matches. It makes sense to order our statements from most matches to least, so for the majority of rows we only have to search through once. To do this, instead of returning the date we'll return a number for each match. We can then print the value counts for the column and see which is the most frequent. 
``` def optimise_regex_order(df): df = df.copy() df['coming_soon'] = df['release_date'].apply(lambda x: literal_eval(x)['coming_soon']) df = df[df['coming_soon'] == False].copy() df['date'] = df['release_date'].apply(lambda x: literal_eval(x)['date']) def parse_date(x): if re.search(r'[A-Za-z]{3} [\d]{4}', x): return '0: mmm yyyy' # '1 ' + x elif re.search(r'[\d]{1,2} [A-Za-z]{3}, [\d]{4}', x): return '1: dd mmm, yyyy' # x.replace(',', '') elif x == '': return '2: empty' # pass df['release_date'] = df['date'].apply(parse_date) return df result = optimise_regex_order(imported_steam_data) result['release_date'].value_counts() ``` By far the majority of dates are in the 'dd mmm, yyyy' format, which is second in our if/else statements. This means that for all these rows we are unnecessarily searching the string twice. Simply by reordering our searches we should see a performance improvement. ``` def process_release_date_unordered(df): df = df.copy() df['coming_soon'] = df['release_date'].apply(lambda x: literal_eval(x)['coming_soon']) df = df[df['coming_soon'] == False].copy() df['date'] = df['release_date'].apply(lambda x: literal_eval(x)['date']) def parse_date(x): if re.search(r'[A-Za-z]{3} [\d]{4}', x): return '1 ' + x elif re.search(r'[\d]{1,2} [A-Za-z]{3}, [\d]{4}', x): return x.replace(',', '') elif x == '': return x df['release_date'] = df['date'].apply(parse_date) df['release_date'] = pd.to_datetime(df['date'], format='%d %b %Y', errors='coerce') df = df.drop(['coming_soon', 'date'], axis=1) return df def process_release_date_ordered(df): df = df.copy() df['coming_soon'] = df['release_date'].apply(lambda x: literal_eval(x)['coming_soon']) df = df[df['coming_soon'] == False].copy() df['date'] = df['release_date'].apply(lambda x: literal_eval(x)['date']) def parse_date(x): if re.search(r'[\d]{1,2} [A-Za-z]{3}, [\d]{4}', x): return x.replace(',', '') elif re.search(r'[A-Za-z]{3} [\d]{4}', x): return '1 ' + x elif x == '': return x df['release_date'] = 
df['date'].apply(parse_date) df['release_date'] = pd.to_datetime(df['date'], format='%d %b %Y', errors='coerce') df = df.drop(['coming_soon', 'date'], axis=1) return df %timeit process_release_date_unordered(imported_steam_data) %timeit process_release_date_ordered(imported_steam_data) ``` It's an improvement, if only slightly, so we'll keep it. If anything this goes to show how fast regex pattern matching is, as there was hardly any slowdown in searching most of the strings twice. Now parsing is well-optimised we can move on to the evaluation section. ``` # Testing evaluation methods def evaluation_method_original(df): df = df.copy() df['coming_soon'] = df['release_date'].apply(lambda x: literal_eval(x)['coming_soon']) df = df[df['coming_soon'] == False].copy() df['release_date'] = df['release_date'].apply(lambda x: literal_eval(x)['date']) return df def evaluation_method_1(df): df = df.copy() df['release_date'] = df['release_date'].apply(lambda x: literal_eval(x)) df['coming_soon'] = df['release_date'].apply(lambda x: x['coming_soon']) df = df[df['coming_soon'] == False].copy() df['release_date'] = df['release_date'].apply(lambda x: x['date']) return df def evaluation_method_2(df): df = df.copy() df['release_date'] = df['release_date'].apply(lambda x: literal_eval(x)) df_2 = df['release_date'].transform([lambda x: x['coming_soon'], lambda x: x['date']]) df = pd.concat([df, df_2], axis=1) return df def evaluation_method_3(df): df = df.copy() def eval_date(x): x = literal_eval(x) if x['coming_soon']: return np.nan else: return x['date'] df['release_date'] = df['release_date'].apply(eval_date) df = df[df['release_date'].notnull()] return df print('Original method:\n') %timeit evaluation_method_original(imported_steam_data) print('\nNew methods:\n') %timeit evaluation_method_1(imported_steam_data) %timeit evaluation_method_2(imported_steam_data) %timeit evaluation_method_3(imported_steam_data) ``` It looks like we may have been right in our assumption that multiple 
calls to `literal_eval` were slowing down the function - by calling it once instead of twice we almost halved the run-time. Of the new methods the final one was just about the fastest, which is useful because it contains flexible custom logic we can modify if needed. Let's put everything together into the final function, and time it once more to see the improvements we've made. We'll make a couple of changes so we can easily remove missing values at the end, which should mean we end up with clean release dates. ``` def process_release_date(df): df = df.copy() def eval_date(x): x = literal_eval(x) if x['coming_soon']: return '' # return blank string so can drop missing at end else: return x['date'] df['release_date'] = df['release_date'].apply(eval_date) def parse_date(x): if re.search(r'[\d]{1,2} [A-Za-z]{3}, [\d]{4}', x): return x.replace(',', '') elif re.search(r'[A-Za-z]{3} [\d]{4}', x): return '1 ' + x elif x == '': return np.nan df['release_date'] = df['release_date'].apply(parse_date) df['release_date'] = pd.to_datetime(df['release_date'], format='%d %b %Y', errors='coerce') df = df[df['release_date'].notnull()] return df %timeit process_release_date(imported_steam_data) ``` Referring back to the original time of 3.5s, we've achieved a 7x speed increase. That's pretty close to an order of magnitude improvement. With a dataset like this, we're only talking a matter of seconds, but in a much larger dataset with millions of rows, spending the time to improve code efficiency could shave hours off of run time. As I'm sure you're aware if you have some familiarity with writing code, for most tasks there are a million and one ways of approaching and solving the problem. Hopefully this helps lay out a simple methodology for testing, improving and thinking about code. Plus, it can be fun and interesting to figure out different ways of achieving the same output. Speaking of which, let's look at a final little challenge. 
## Bonus: Vanilla Python Solution In our final solution to cleaning the `release_date` column, we relied heavily on the pandas library. Often using libraries such as this is a good idea as it contains vectorised and optimised methods for dealing with data, plus it's generally quicker to develop a working solution. As a small challenge, let's have a look at performing the above cleaning process entirely with vanilla python functions, as in those available by default using python and its built-in packages. First we need to convert the data from a pandas dataframe into a native python format. We have a few options but let's store the data as a list of lists. We'll also only include the AppID and release date columns, for the sake of demonstration. ``` date_list = [] for i, row in imported_steam_data.iterrows(): date_list.append([row['steam_appid'], row['release_date']]) date_list[:5] ``` The process is actually very similar. We have to loop through the data, rather than using pandas `apply`, and we parse the dates using `strptime` from the `datetime` module. We can generate the output using regex pattern matching, as we did before, and we can store the results in a new list of lists. We also display the first few rows of the output, and time how long it takes to run so we can compare. 
``` from datetime import datetime as dt def python_only(ls): processed_rows = [] for i, date in ls: eval_row = literal_eval(date) if eval_row['coming_soon'] == False: if re.search(r'[\d]{1,2} [A-Za-z]{3}, [\d]{4}', eval_row['date']): output = dt.strptime(eval_row['date'], '%d %b, %Y') elif re.search(r'[A-Za-z]{3} [\d]{4}', eval_row['date']): output = dt.strptime(eval_row['date'], '%b %Y') elif eval_row['date'] == '': output = 'none' else: print('Not matched:', eval_row['date']) processed_rows.append([i, output]) else: processed_rows.append([i, 'none']) return processed_rows start = time.time() display(python_only(date_list)[:5]) end = time.time() - start print(f'\nTime taken: {end:.2f}s') ``` Impressively, this method only took twice as long as our optimised method using pandas. It would probably take a bit longer if we had to deal with all the columns in the dataset, but this is still a viable solution. Also, we didn't properly handle the missing values, and the data is populated with some amount of 'none' values.
github_jupyter
# Intro to TensorFlow ## Hello, Tensor World! ``` import tensorflow as tf # Create TensorFlow object called tensor hello_constant = tf.constant('Hello World!') with tf.Session() as sess: # Run the tf.constant operatin in the session output = sess.run(hello_constant) print(output) ``` ### Tensor In TensorFlow, data isn’t stored as integers, floats, or strings. These values are encapsulated in an object called a tensor. In the case of `hello_constant = tf.constant('Hello World!')`, hello_constant is a 0-dimensional string tensor, but tensors come in a variety of sizes as shown below: ``` # A is a 0-dimensional int32 tensor A = tf.constant(1234) # B is a 1-dimensional int32 tensor B = tf.constant([123,456,789]) # C is a 2-dimensional int32 tensor C = tf.constant([ [123,456,789], [222,333,444] ]) ``` The tensor returned by `tf.constant()` is called a constant tensor, because the value of the tensor never changes. ### Session TensorFlow’s api is built around the idea of a computational graph, a way of visualizing a mathematical process. Let’s take the TensorFlow code and turn that into a graph: ![Session](https://d17h27t6h515a5.cloudfront.net/topher/2016/October/580feadb_session/session.png) A "TensorFlow Session", as shown above, is an environment for running a graph. The session is in charge of allocating the operations to GPU(s) and/or CPU(s), including remote machines. Let’s see how you use it: ``` with tf.Session() as sess: output = sess.run(hello_constant) ``` The code has already created the tensor, `hello_constant`, from the previous lines. The next step is to evaluate the tensor in a session. The code creates a session instance, `sess`, using `tf.Session`. The `sess.run()` function then evaluates the tensor and returns the results. ## TensorFlow Input In the last section, a tensor was passed into a session and it returned the result. What if we want to use a non-constant? This is where `tf.placeholder()` and `feed_dict` come into place. 
In this section, we'll go over the basics of feeding data into TensorFlow. ### tf.placeholder() Sadly you can’t just set `x` to your dataset and put it in TensorFlow, because over time you'll want your TensorFlow model to take in different datasets with different parameters. You need `tf.placeholder()`! `tf.placeholder()` returns a tensor that gets its value from data passed to the `tf.session.run()` function, allowing you to set the input right before the session runs. ### Session's feed_dict ``` x = tf.placeholder(tf.string) with tf.Session() as sess: output = sess.run(x, feed_dict={x: 'Hello World'}) print(output) ``` Use the feed_dict parameter in `tf.session.run()` to set the placeholder tensor. The above example shows the tensor `x` being set to the string `"Hello, world"`. It's also possible to set more than one tensor using `feed_dict` as shown below: ``` x = tf.placeholder(tf.string) y = tf.placeholder(tf.int32) z = tf.placeholder(tf.float32) with tf.Session() as sess: output_x = sess.run(x, feed_dict={x: 'Test String', y: 123, z: 45.67}) output_y = sess.run(y, feed_dict={x: 'Test String', y: 123, z:45.67}) print(output_x) print(output_y) ``` **Note**: If the data passed to the `feed_dict` doesn’t match the tensor type and can’t be cast into the tensor type, you’ll get the error `“ValueError: invalid literal for...”`. ### Quiz ``` import tensorflow as tf def run(): output = None x = tf.placeholder(tf.int32) with tf.Session() as sess: # TODO: Feed the x tensor 123 output = sess.run(x, feed_dict={x: 123}) return output run() ``` ## TensorFlow Math Getting the input is great, but now you need to use it. We're going to use basic math functions that everyone knows and loves - add, subtract, multiply, and divide - with tensors. (There's many more math functions you can check out in the [documentation](https://www.tensorflow.org/api_docs/python/math_ops/).) ### Addition ``` x = tf.add(5, 2) # 7 ``` Let's start with the add function. 
The `tf.add()` function does exactly what you expect it to do. It takes in two numbers, two tensors, or one of each, and returns their sum as a tensor. ### Subtraction and Multiplication ``` x = tf.subtract(10, 4) # 6 y = tf.multiply(2, 5) # 10 ``` The `x` tensor will evaluate to `6`, because `10 - 4 = 6`. The `y` tensor will evaluate to `10`, because `2 * 5 = 10`. That was easy! ### Converting Types It may be necessary to convert between types to make certain operators work together. For example, if you tried the following, it would fail with an exception: ``` tf.subtract(tf.constant(2.0),tf.constant(1)) # Fails with ValueError: Tensor conversion requested dtype float32 for Tensor with dtype int32: ``` That's because the constant `1` is an integer but the constant `2.0` is a floating point value and subtract expects them to match. In cases like these, you can either make sure your data is all of the same type, or you can cast a value to another type. In this case, converting the `2.0` to an integer before subtracting, like so, will give the correct result: ``` tf.subtract(tf.cast(tf.constant(2.0), tf.int32), tf.constant(1)) # 1 ``` ### Quiz Let's apply what you learned to convert an algorithm to TensorFlow. The code below is a simple algorithm using division and subtraction. Convert the following algorithm in regular Python to TensorFlow and print the results of the session. You can use `tf.constant()` for the values `10`, `2`, and `1`. ``` import tensorflow as tf # TODO: Convert the following to TensorFlow: x = tf.constant(10) y = tf.constant(2) z = tf.subtract(tf.divide(x, y), 1) # TODO: Print z from a session with tf.Session() as sess: output = sess.run(z) print(output) ``` ## TensorFlow Linear Functions The most common operation in neural networks is calculating the linear combination of inputs, weights, and biases.
As a reminder, we can write the output of the linear operation as: ![](https://d17h27t6h515a5.cloudfront.net/topher/2017/February/58a4d8b3_linear-equation/linear-equation.gif) Here, **W** is a matrix of the weights connecting two layers. The output **y**, the input **x**, and the biases **b** are all vectors. ### Weights and Bias in TensorFlow The goal of training a neural network is to modify weights and biases to best predict the labels. In order to use weights and bias, you'll need a Tensor that can be modified. This leaves out `tf.placeholder()` and `tf.constant()`, since those Tensors can't be modified. This is where `tf.Variable` class comes in. ### tf.Variable() ``` x = tf.Variable(5) ``` The `tf.Variable` class creates a tensor with an initial value that can be modified, much like a normal Python variable. This tensor stores its state in the session, so you must initialize the state of the tensor manually. You'll use the `tf.global_variables_initializer()` function to initialize the state of all the Variable tensors: ``` init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) ``` The `tf.global_variables_initializer()` call returns an operation that will initialize all TensorFlow variables from the graph. You call the operation using a session to initialize all the variables as shown above. Using the `tf.Variable` class allows us to change the weights and bias, but an initial value needs to be chosen. Initializing the weights with random numbers from a normal distribution is good practice. Randomizing the weights helps the model from becoming stuck in the same place every time you train it. You'll learn more about this in the next lesson, gradient descent. Similarly, choosing weights from a normal distribution prevents any one weight from overwhelming other weights. We'll use the `tf.truncated_normal()` function to generate random numbers from a normal distribution. 
### tf.truncated_normal() ``` n_features = 120 n_labels = 5 weights = tf.Variable(tf.truncated_normal((n_features, n_labels))) ``` The `tf.truncated_normal()` function returns a tensor with random values from a normal distribution whose magnitude is no more than 2 standard deviations from the mean. Since the weights are already helping prevent the model from getting stuck, you don't need to randomize the bias. Let's use the simplest solution, setting the bias to 0. ### tf.zeros() ``` n_labels = 5 bias = tf.Variable(tf.zeros(n_labels)) ``` The `tf.zeros()` function returns a tensor with all zeros. ## TensorFlow Softmax In the Intro to TFLearn lesson we used the softmax function to calculate class probabilities as output from the network. The softmax function squashes it's inputs, typically called **logits** or **logit scores**, to be between 0 and 1 and also normalizes the outputs such that they all sum to 1. This means the output of the softmax function is equivalent to a categorical probability distribution. It's the perfect function to use as the output activation for a network predicting multiple classes. ![](https://d17h27t6h515a5.cloudfront.net/topher/2017/February/58950908_softmax-input-output/softmax-input-output.png) We're using TensorFlow to build neural networks and, appropriately, there's a function for calculating softmax. ``` x = tf.nn.softmax([2.0, 1.0, 0.2]) ``` Easy as that! `tf.nn.softmax()` implements the softmax function for you. It takes in logits and returns softmax activations. 
### Quiz ``` import tensorflow as tf def run_2(): output = None logit_data = [2.0, 1.0, 0.1] logits = tf.placeholder(tf.float32) # TODO: Calculate the softmax of the logits # Build the op from the placeholder so the feed_dict below actually supplies the input softmax = tf.nn.softmax(logits) with tf.Session() as sess: # TODO: Feed in the logit data output = sess.run(softmax, feed_dict={logits: logit_data}) return output print(run_2()) ``` ## One-Hot Encoding Transforming your labels into one-hot encoded vectors is pretty simple with scikit-learn using `LabelBinarizer`. Check it out below! ``` import numpy as np from sklearn import preprocessing # Example labels labels = np.array([1,5,3,2,1,4,2,1,3]) # Create the encoder lb = preprocessing.LabelBinarizer() # Here the encoder finds the classes and assigns one-hot vectors lb.fit(labels) # And finally, transform the labels into one-hot encoded vectors lb.transform(labels) ``` ## TensorFlow Cross Entropy In the Intro to TFLearn lesson we discussed using cross entropy as the cost function for classification with one-hot encoded labels. Again, TensorFlow has a function to do the cross entropy calculations for us. ![](https://d17h27t6h515a5.cloudfront.net/topher/2017/February/589b18f5_cross-entropy-diagram/cross-entropy-diagram.png) To create a cross entropy function in TensorFlow, you'll need to use two new functions: * `tf.reduce_sum()` * `tf.log()` ### Reduce Sum ``` x = tf.reduce_sum([1, 2, 3, 4, 5]) # 15 ``` The `tf.reduce_sum()` function takes an array of numbers and sums them together. ### Natural Log ``` l = tf.log(100.0) # 4.60517 ``` This function does exactly what you would expect it to do. `tf.log()` takes the natural log of a number. ### Quiz Print the cross entropy using `softmax_data` and `one_hot_data`.
``` import tensorflow as tf softmax_data = [0.7, 0.2, 0.1] one_hot_data = [1.0, 0.0, 0.0] softmax = tf.placeholder(tf.float32) one_hot = tf.placeholder(tf.float32) # TODO: Print cross entropy from session cross_entropy = -tf.reduce_sum(tf.multiply(one_hot_data, tf.log(softmax_data))) with tf.Session() as session: output = session.run(cross_entropy, feed_dict={one_hot: one_hot_data, softmax: softmax_data}) print(output) ``` ## Mini-batching In this section, you'll go over what mini-batching is and how to apply it in TensorFlow. Mini-batching is a technique for training on subsets of the dataset instead of all the data at one time. This provides the ability to train a model, even if a computer lacks the memory to store the entire dataset. Mini-batching is computationally inefficient, since you can't calculate the loss simultaneously across all samples. However, this is a small price to pay in order to be able to run the model at all. It's also quite useful combined with SGD. The idea is to randomly shuffle the data at the start of each epoch, then create the mini-batches. For each mini-batch, you train the network weights with gradient descent. Since these batches are random, you're performing SGD with each batch. ## Epochs An epoch is a single forward and backward pass of the whole dataset. This is used to increase the accuracy of the model without requiring more data.
github_jupyter
# Generates images from text prompts with CLIP guided diffusion. By Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). It uses a 512x512 unconditional ImageNet diffusion model fine-tuned from OpenAI's 512x512 class-conditional ImageNet diffusion model (https://github.com/openai/guided-diffusion) together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images. ``` # @title Licensed under the MIT License # Copyright (c) 2021 Katherine Crowson # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. ``` Note: This notebook requires 16 GB of GPU memory to work, if you are unable to get a 16 GB GPU consistently, try the [256x256 version](https://colab.research.google.com/drive/12a_Wrfi2_gwwAuN3VvMTwVMz9TfqctNj). 
``` # Check the GPU !nvidia-smi # Install dependencies !git clone https://github.com/openai/CLIP !git clone https://github.com/crowsonkb/guided-diffusion !pip install -e ./CLIP !pip install -e ./guided-diffusion !pip install lpips # Download the diffusion model !curl -OL --http1.1 'https://the-eye.eu/public/AI/models/512x512_diffusion_unconditional_ImageNet/512x512_diffusion_uncond_finetune_008100.pt' # Imports import gc import io import math import sys from IPython import display import lpips from PIL import Image import requests import torch from torch import nn from torch.nn import functional as F from torchvision import transforms from torchvision.transforms import functional as TF from tqdm.notebook import tqdm sys.path.append('./CLIP') sys.path.append('./guided-diffusion') import clip from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults # Define necessary functions def fetch(url_or_path): if str(url_or_path).startswith('http://') or str(url_or_path).startswith('https://'): r = requests.get(url_or_path) r.raise_for_status() fd = io.BytesIO() fd.write(r.content) fd.seek(0) return fd return open(url_or_path, 'rb') def parse_prompt(prompt): if prompt.startswith('http://') or prompt.startswith('https://'): vals = prompt.rsplit(':', 2) vals = [vals[0] + ':' + vals[1], *vals[2:]] else: vals = prompt.rsplit(':', 1) vals = vals + ['', '1'][len(vals):] return vals[0], float(vals[1]) class MakeCutouts(nn.Module): def __init__(self, cut_size, cutn, cut_pow=1.): super().__init__() self.cut_size = cut_size self.cutn = cutn self.cut_pow = cut_pow def forward(self, input): sideY, sideX = input.shape[2:4] max_size = min(sideX, sideY) min_size = min(sideX, sideY, self.cut_size) cutouts = [] for _ in range(self.cutn): size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size) offsetx = torch.randint(0, sideX - size + 1, ()) offsety = torch.randint(0, sideY - size + 1, ()) cutout = input[:, :, offsety:offsety + size, 
offsetx:offsetx + size] cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size)) return torch.cat(cutouts) def spherical_dist_loss(x, y): x = F.normalize(x, dim=-1) y = F.normalize(y, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def tv_loss(input): """L2 total variation loss, as in Mahendran et al.""" input = F.pad(input, (0, 1, 0, 1), 'replicate') x_diff = input[..., :-1, 1:] - input[..., :-1, :-1] y_diff = input[..., 1:, :-1] - input[..., :-1, :-1] return (x_diff**2 + y_diff**2).mean([1, 2, 3]) def range_loss(input): return (input - input.clamp(-1, 1)).pow(2).mean([1, 2, 3]) # Model settings model_config = model_and_diffusion_defaults() model_config.update({ 'attention_resolutions': '32, 16, 8', 'class_cond': False, 'diffusion_steps': 1000, 'rescale_timesteps': True, 'timestep_respacing': '1000', # Modify this value to decrease the number of # timesteps. 'image_size': 512, 'learn_sigma': True, 'noise_schedule': 'linear', 'num_channels': 256, 'num_head_channels': 64, 'num_res_blocks': 2, 'resblock_updown': True, 'use_fp16': True, 'use_scale_shift_norm': True, }) # Load models device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('Using device:', device) model, diffusion = create_model_and_diffusion(**model_config) model.load_state_dict(torch.load('512x512_diffusion_uncond_finetune_008100.pt', map_location='cpu')) model.requires_grad_(False).eval().to(device) for name, param in model.named_parameters(): if 'qkv' in name or 'norm' in name or 'proj' in name: param.requires_grad_() if model_config['use_fp16']: model.convert_to_fp16() clip_model = clip.load('ViT-B/16', jit=False)[0].eval().requires_grad_(False).to(device) clip_size = clip_model.visual.input_resolution normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]) lpips_model = lpips.LPIPS(net='vgg').to(device) ``` ## Settings for this run: ``` prompts = ['alien friend by Odilon Redon'] image_prompts = 
[] batch_size = 1 clip_guidance_scale = 1000 # Controls how much the image should look like the prompt. tv_scale = 150 # Controls the smoothness of the final output. range_scale = 50 # Controls how far out of range RGB values are allowed to be. cutn = 32 cut_pow = 0.5 n_batches = 1 init_image = None # This can be an URL or Colab local path and must be in quotes. skip_timesteps = 0 # This needs to be between approx. 200 and 500 when using an init image. # Higher values make the output look more like the init. init_scale = 0 # This enhances the effect of the init image, a good value is 1000. seed = 0 ``` ### Actually do the run... ``` def do_run(): if seed is not None: torch.manual_seed(seed) make_cutouts = MakeCutouts(clip_size, cutn, cut_pow) side_x = side_y = model_config['image_size'] target_embeds, weights = [], [] for prompt in prompts: txt, weight = parse_prompt(prompt) target_embeds.append(clip_model.encode_text(clip.tokenize(txt).to(device)).float()) weights.append(weight) for prompt in image_prompts: path, weight = parse_prompt(prompt) img = Image.open(fetch(path)).convert('RGB') img = TF.resize(img, min(side_x, side_y, *img.size), transforms.InterpolationMode.LANCZOS) batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device)) embed = clip_model.encode_image(normalize(batch)).float() target_embeds.append(embed) weights.extend([weight / cutn] * cutn) target_embeds = torch.cat(target_embeds) weights = torch.tensor(weights, device=device) if weights.sum().abs() < 1e-3: raise RuntimeError('The weights must not sum to 0.') weights /= weights.sum().abs() init = None if init_image is not None: init = Image.open(fetch(init_image)).convert('RGB') init = init.resize((side_x, side_y), Image.LANCZOS) init = TF.to_tensor(init).to(device).unsqueeze(0).mul(2).sub(1) cur_t = None def cond_fn(x, t, y=None): with torch.enable_grad(): x = x.detach().requires_grad_() n = x.shape[0] my_t = torch.ones([n], device=device, dtype=torch.long) * cur_t out = 
diffusion.p_mean_variance(model, x, my_t, clip_denoised=False, model_kwargs={'y': y}) fac = diffusion.sqrt_one_minus_alphas_cumprod[cur_t] x_in = out['pred_xstart'] * fac + x * (1 - fac) clip_in = normalize(make_cutouts(x_in.add(1).div(2))) image_embeds = clip_model.encode_image(clip_in).float() dists = spherical_dist_loss(image_embeds.unsqueeze(1), target_embeds.unsqueeze(0)) dists = dists.view([cutn, n, -1]) losses = dists.mul(weights).sum(2).mean(0) tv_losses = tv_loss(x_in) range_losses = range_loss(out['pred_xstart']) loss = losses.sum() * clip_guidance_scale + tv_losses.sum() * tv_scale + range_losses.sum() * range_scale if init is not None and init_scale: init_losses = lpips_model(x_in, init) loss = loss + init_losses.sum() * init_scale return -torch.autograd.grad(loss, x)[0] if model_config['timestep_respacing'].startswith('ddim'): sample_fn = diffusion.ddim_sample_loop_progressive else: sample_fn = diffusion.p_sample_loop_progressive for i in range(n_batches): cur_t = diffusion.num_timesteps - skip_timesteps - 1 samples = sample_fn( model, (batch_size, 3, side_y, side_x), clip_denoised=False, model_kwargs={}, cond_fn=cond_fn, progress=True, skip_timesteps=skip_timesteps, init_image=init, randomize_class=True, ) for j, sample in enumerate(samples): cur_t -= 1 if j % 100 == 0 or cur_t == -1: print() for k, image in enumerate(sample['pred_xstart']): filename = f'progress_{i * batch_size + k:05}.png' TF.to_pil_image(image.add(1).div(2).clamp(0, 1)).save(filename) tqdm.write(f'Batch {i}, step {j}, output {k}:') display.display(display.Image(filename)) gc.collect() do_run() ```
github_jupyter
## Chapter 2: Refresher of OOP concepts in Python ### Classes and Objects ``` class ClassName: '''attributes...''' '''methods...''' objName = ClassName() class Branch: '''attributes...''' '''methods...''' class Branch: '''attributes''' branchID = None branchStreet = None branchCity = None branchState = None branchZip = None '''methods''' def getProduct(self): return 'product' def getSales(self): return 'sales' def getInvoice(self): return 'invoice' branchAlbany = Branch() branchAlbany.branchID = 123 branchAlbany.branchStreet = '123 Main Street' branchAlbany.branchCity = 'Albany' branchAlbany.branchState = 'New York' branchAlbany.branchZip = 12084 branchAlbany.branchID branchAlbany.branchStreet branchAlbany.branchCity branchAlbany.branchState branchAlbany.branchZip branchAlbany.getInvoice() branchNevada = Branch() branchNevada.branchID branchNevada.branchID = 456 branchNevada.branchID class Branch: def __init__(self, branchID, branchStreet, branchCity, branchState, branchZip): self.branchID = branchID self.branchStreet = branchStreet self.branchCity = branchCity self.branchState = branchState self.branchZip = branchZip def getProduct(self): return 'product' def getSales(self): return 'sales' def getInvoice(self): return 'invoice' objectAlbany = Branch(101,'123 Main Street','Albany','New York', 12084) print (objectAlbany.branchID, objectAlbany.branchStreet,\ objectAlbany.branchCity,objectAlbany.branchState,\ objectAlbany.branchZip) ``` ### Methods ``` class Branch: def __init__(self, branchID, branchStreet, branchCity, branchState, branchZip): self.branchID = branchID self.branchStreet = branchStreet self.branchCity = branchCity self.branchState = branchState self.branchZip = branchZip def getProduct(self): return 'product' def getSales(self): return 'sales' def getInvoice(self): return 'invoice' objectAlbany = Branch(101,'123 Main Street','Albany','New York', 12084) objectAlbany.getInvoice() objectAlbany.getSales() objectAlbany.getProduct() class Branch: def 
setBranch(self, **branch): return branch def setSales(self, **sales): return sales def setProduct(self, **product): return product def calcTax(self): branch = self.branch product = self.product sales = self.sales pricebeforetax = sales['purchasePrice'] + sales['purchasePrice'] * sales['profitMargin'] finalsellingprice = pricebeforetax + (pricebeforetax * sales['taxRate']) sales['sellingPrice'] = finalsellingprice return branch, product, sales branchNyc = Branch() branchNyc.branch = branchNyc.setBranch(branchID = 202, branchStreet = '234 3rd Main Street', branchCity = 'New York City', branchState = 'New York', branchZip = 11005) branchNyc.branch branchNyc.product = branchNyc.setProduct( productId = 100001, productName = 'Refrigerator', productBrand = 'Whirlpool' ) branchNyc.product branchNyc.sales = branchNyc.setSales( purchasePrice = 300, profitMargin = 0.20, taxRate = 0.452 ) branchNyc.sales branchNyc.calcTax() ``` ### Inheritance ``` class Parent: '''attributes...''' '''methods...''' class Child(Parent): '''attributes...''' '''methods...''' class NYC(Branch): def setManagement(self, **intercitybranch): return intercitybranch def calcTaxNYC(self): branch = self.branch intercitybranch = self.intercitybranch product = self.product sales = self.sales pricebeforetax = sales['purchasePrice'] + sales['purchasePrice'] * sales['profitMargin'] finalsellingprice = pricebeforetax + (pricebeforetax * (sales['taxRate'] + sales['localRate'])) sales['sellingPrice'] = finalsellingprice return branch,intercitybranch, product, sales branchManhattan = NYC() branchManhattan.branch = branchManhattan.setBranch(branchID = 2021, branchStreet = '40097 5th Main Street', branchBorough = 'Manhattan', branchCity = 'New York City', branchState = 'New York', branchZip = 11007) branchManhattan.branch branchManhattan.intercitybranch = branchManhattan.setManagement( regionalManager = 'John M', branchManager = 'Tom H', subBranchID = '2021-01' ) branchManhattan.intercitybranch 
branchManhattan.product = branchManhattan.setProduct( productId = 100002, productName = 'WashingMachine', productBrand = 'Whirlpool' ) branchManhattan.product branchManhattan.sales = branchManhattan.setSales( purchasePrice = 450, profitMargin = 0.19, taxRate = 0.4, localRate = 0.055 ) branchManhattan.sales branchManhattan.calcTax() branchManhattan.calcTaxNYC() ``` ### Polymorphism ``` class NYC(Branch): def setManagement(self, **intercitybranch): return intercitybranch def calcTax(self): branch = self.branch intercitybranch = self.intercitybranch product = self.product sales = self.sales pricebeforetax = sales['purchasePrice'] + sales['purchasePrice'] * sales['profitMargin'] finalsellingprice = pricebeforetax + (pricebeforetax * (sales['taxRate'] + sales['localRate'])) sales['sellingPrice'] = finalsellingprice return branch,intercitybranch, product, sales branchManhattan = NYC() branchManhattan.branch = branchManhattan.setBranch(branchID = 2021, branchStreet = '40097 5th Main Street', branchBorough = 'Manhattan', branchCity = 'New York City', branchState = 'New York', branchZip = 11007) branchManhattan.intercitybranch = branchManhattan.setManagement( regionalManager = 'John M', branchManager = 'Tom H', subBranchID = '2021-01' ) branchManhattan.product = branchManhattan.setProduct( productId = 100002, productName = 'WashingMachine', productBrand = 'Whirlpool' ) branchManhattan.sales = branchManhattan.setSales( purchasePrice = 450, profitMargin = 0.19, taxRate = 0.4, localRate = 0.055 ) branchManhattan.calcTax() class Brooklyn: def maintenanceCost(self, productType, quantity): self.productType = productType self.quantity = quantity coldstorageCost = 100 if (productType == 'FMCG'): maintenanceCost = self.quantity * 0.25 + coldstorageCost return maintenanceCost else: return "We don't stock this product" class Queens: def maintenanceCost(self, productType, quantity): self.productType = productType self.quantity = quantity if (productType == 'Electronics'): 
maintenanceCost = self.quantity * 0.05 return maintenanceCost else: return "We don't stock this product" objectBrooklyn = Brooklyn() objectQueens = Queens() objectBrooklyn.maintenanceCost('FMCG', 2000) objectQueens.maintenanceCost('Electronics', 2000) ``` ### Multiple Inheritance ``` class Product: _productID = 100902 _productName = 'Iphone X' _productCategory = 'Electronics' _unitPrice = 700 def getProduct(self): return self._productID, self._productName, self._productCategory, self._unitPrice class Branch: _branchID = 2021 _branchStreet = '40097 5th Main Street' _branchBorough = 'Manhattan' _branchCity = 'New York City' _branchState = 'New York' _branchZip = 11007 def getBranch(self): return self._branchID, self._branchStreet, self._branchBorough, self._branchCity, self._branchState, self._branchZip class Sales(Product, Branch): date = '08/02/2021' def getSales(self): return self.date, Product.getProduct(self), Branch.getBranch(self) sales = Sales() sales.getSales() ``` ### Abstraction ``` class Branch(): def maintenanceCost(self): pass class Brooklyn(Branch): def maintenanceCost(self, productType, quantity): self.productType = productType self.quantity = quantity coldstorageCost = 100 if (productType == 'FMCG'): maintenanceCost = self.quantity * 0.25 + coldstorageCost return maintenanceCost else: return "We don't stock this product" class Queens(Branch): def maintenanceCost(self, productType, quantity): self.productType = productType self.quantity = quantity if (productType == 'Electronics'): maintenanceCost = self.quantity * 0.05 return maintenanceCost else: return "We don't stock this product" branch = Branch() branch.maintenanceCost() from abc import ABC,abstractmethod class Branch(ABC): @abstractmethod def maintenanceCost(self): pass class Brooklyn(Branch): def maintenanceCost(self, productType, quantity): self.productType = productType self.quantity = quantity coldstorageCost = 100 if (productType == 'FMCG'): maintenanceCost = self.quantity * 0.25 + 
coldstorageCost return maintenanceCost else: return "We don't stock this product" class Queens(Branch): def maintenanceCost(self, productType, quantity): self.productType = productType self.quantity = quantity if (productType == 'Electronics'): maintenanceCost = self.quantity * 0.05 return maintenanceCost else: return "We don't stock this product" branch = Branch() branchBrooklyn = Brooklyn() branchBrooklyn.maintenanceCost('FMCG', 5000) branchQueens = Queens() branchQueens.maintenanceCost('Electronics', 5000) ``` ### Encapsulation ``` class Branch(): branchID = 2021 regionalManager = 'John M' branchManager = 'Tom H' __productId = None __productName = None __productBrand = None __purchasePrice = None __profitMargin = None def __displayProductDetails(self): self.__productId = 100002 self.__productName = 'Washing Machine' self.__productBrand = 'Whirlpool' self.__purchasePrice = 450 self.__profitMargin = 0.19 print('Product ID: ' + str(self.__productId) + ', Product Name: ' + self.__productName + ', Product Brand: ' + self.__productBrand + ', Purchase Price: ' + str(self.__purchasePrice) + ', Profit Margin: ' + str(self.__profitMargin)) def __init__(self): self.__displayProductDetails() branch = Branch() branch.branchID branch.__profitMargin branch.__displayProductDetails() class Branch(): branchID = 2022 regionalManager = 'Ron D' __branchManager = 'Sam J' _productId = None _productName = None _productBrand = None _purchasePrice = None _profitMargin = None def _displayProductDetails(self): self._productId = 100003 self._productName = 'Washing Machine' self._productBrand = 'Samsung' self._purchasePrice = 430 self._profitMargin = 0.18 print('Product ID: ' + str(self._productId) + ', Product Name: ' + self._productName + ', Product Brand: ' + self._productBrand + ', Purchase Price: ' + str(self._purchasePrice) + ', Profit Margin: ' + str(self._profitMargin)) def __init__(self): self._displayProductDetails() branch = Branch() class Brooklyn(Branch): def __init__(self): 
print(self._productId) self._displayProductDetails() branchBrooklyn = Brooklyn() class Brooklyn(Branch): def __init__(self): print(self._productId) self._displayProductDetails() print(self.__branchManager) branchBrooklyn = Brooklyn() ``` ### These are all the examples covered in this chapter.
github_jupyter
Deep Learning ============= Assignment 5 ------------ The goal of this assignment is to train a Word2Vec skip-gram model over [Text8](http://mattmahoney.net/dc/textdata) data. ``` # These are all the modules we'll be using later. Make sure you can import them # before proceeding further. %matplotlib inline from __future__ import print_function import collections import math import numpy as np import os import random import tensorflow as tf import zipfile from matplotlib import pylab from six.moves import range from six.moves.urllib.request import urlretrieve from sklearn.manifold import TSNE ``` Download the data from the source website if necessary. ``` url = 'http://mattmahoney.net/dc/' def maybe_download(filename, expected_bytes): """Download a file if not present, and make sure it's the right size.""" if not os.path.exists(filename): filename, _ = urlretrieve(url + filename, filename) statinfo = os.stat(filename) if statinfo.st_size == expected_bytes: print('Found and verified %s' % filename) else: print(statinfo.st_size) raise Exception( 'Failed to verify ' + filename + '. Can you get to it with a browser?') return filename filename = maybe_download('text8.zip', 31344016) ``` Read the data into a string. ``` def read_data(filename): """Extract the first file enclosed in a zip file as a list of words""" with zipfile.ZipFile(filename) as f: data = tf.compat.as_str(f.read(f.namelist()[0])).split() return data words = read_data(filename) print('Data size %d' % len(words)) ``` Build the dictionary and replace rare words with UNK token. 
``` vocabulary_size = 50000 def build_dataset(words): count = [['UNK', -1]] count.extend(collections.Counter(words).most_common(vocabulary_size - 1)) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: if word in dictionary: index = dictionary[word] else: index = 0 # dictionary['UNK'] unk_count = unk_count + 1 data.append(index) count[0][1] = unk_count reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reverse_dictionary data, count, dictionary, reverse_dictionary = build_dataset(words) print('Most common words (+UNK)', count[:5]) print('Sample data', data[:10]) del words # Hint to reduce memory. ``` Function to generate a training batch for the skip-gram model. ``` data_index = 0 def generate_batch(batch_size, num_skips, skip_window): global data_index assert batch_size % num_skips == 0 assert num_skips <= 2 * skip_window batch = np.ndarray(shape=(batch_size), dtype=np.int32) labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32) span = 2 * skip_window + 1 # [ skip_window target skip_window ] buffer = collections.deque(maxlen=span) for _ in range(span): buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) for i in range(batch_size // num_skips): target = skip_window # target label at the center of the buffer targets_to_avoid = [ skip_window ] for j in range(num_skips): while target in targets_to_avoid: target = random.randint(0, span - 1) targets_to_avoid.append(target) batch[i * num_skips + j] = buffer[skip_window] labels[i * num_skips + j, 0] = buffer[target] buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) return batch, labels print('data:', [reverse_dictionary[di] for di in data[:8]]) for num_skips, skip_window in [(2, 1), (4, 2)]: data_index = 0 batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window) print('\nwith num_skips = %d and skip_window = %d:' % 
(num_skips, skip_window)) print(' batch:', [reverse_dictionary[bi] for bi in batch]) print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)]) ``` Train a skip-gram model. ``` batch_size = 128 embedding_size = 128 # Dimension of the embedding vector. skip_window = 1 # How many words to consider left and right. num_skips = 2 # How many times to reuse an input to generate a label. # We pick a random validation set to sample nearest neighbors. here we limit the # validation samples to the words that have a low numeric ID, which by # construction are also the most frequent. valid_size = 16 # Random set of words to evaluate similarity on. valid_window = 100 # Only pick dev samples in the head of the distribution. valid_examples = np.array(random.sample(range(valid_window), valid_size)) num_sampled = 64 # Number of negative examples to sample. graph = tf.Graph() with graph.as_default(), tf.device('/cpu:0'): # Input data. train_dataset = tf.placeholder(tf.int32, shape=[batch_size]) train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # Variables. embeddings = tf.Variable( tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) softmax_weights = tf.Variable( tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) softmax_biases = tf.Variable(tf.zeros([vocabulary_size])) # Model. # Look up embeddings for inputs. embed = tf.nn.embedding_lookup(embeddings, train_dataset) # Compute the softmax loss, using a sample of the negative labels each time. loss = tf.reduce_mean( tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=embed, labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size)) # Optimizer. # Note: The optimizer will optimize the softmax_weights AND the embeddings. 
# This is because the embeddings are defined as a variable quantity and the # optimizer's `minimize` method will by default modify all variable quantities # that contribute to the tensor it is passed. # See docs on `tf.train.Optimizer.minimize()` for more details. optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss) # Compute the similarity between minibatch examples and all embeddings. # We use the cosine distance: norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True)) normalized_embeddings = embeddings / norm valid_embeddings = tf.nn.embedding_lookup( normalized_embeddings, valid_dataset) similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings)) num_steps = 100001 with tf.Session(graph=graph) as session: tf.global_variables_initializer().run() print('Initialized') average_loss = 0 for step in range(num_steps): batch_data, batch_labels = generate_batch( batch_size, num_skips, skip_window) feed_dict = {train_dataset : batch_data, train_labels : batch_labels} _, l = session.run([optimizer, loss], feed_dict=feed_dict) average_loss += l if step % 2000 == 0: if step > 0: average_loss = average_loss / 2000 # The average loss is an estimate of the loss over the last 2000 batches. 
print('Average loss at step %d: %f' % (step, average_loss)) average_loss = 0 # note that this is expensive (~20% slowdown if computed every 500 steps) if step % 10000 == 0: sim = similarity.eval() for i in range(valid_size): valid_word = reverse_dictionary[valid_examples[i]] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k+1] log = 'Nearest to %s:' % valid_word for k in range(top_k): close_word = reverse_dictionary[nearest[k]] log = '%s %s,' % (log, close_word) print(log) final_embeddings = normalized_embeddings.eval() num_points = 400 tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact') two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :]) def plot(embeddings, labels): assert embeddings.shape[0] >= len(labels), 'More labels than embeddings' pylab.figure(figsize=(15,15)) # in inches for i, label in enumerate(labels): x, y = embeddings[i,:] pylab.scatter(x, y) pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom') pylab.show() words = [reverse_dictionary[i] for i in range(1, num_points+1)] plot(two_d_embeddings, words) ``` --- Problem ------- An alternative to skip-gram is another Word2Vec model called [CBOW](http://arxiv.org/abs/1301.3781) (Continuous Bag of Words). In the CBOW model, instead of predicting a context word from a word vector, you predict a word from the sum of all the word vectors in its context. Implement and evaluate a CBOW model trained on the text8 dataset. ---
github_jupyter
# VQGAN JAX Encoding for `webdataset` This notebook shows how to pre-encode images to token sequences using JAX, VQGAN and a dataset in the [`webdataset` format](https://webdataset.github.io/webdataset/). This example uses a small subset of YFCC100M we created for testing, but it should be easy to adapt to any other image/caption dataset in the `webdataset` format. ``` import numpy as np from tqdm import tqdm import torch import torchvision.transforms as T import torchvision.transforms.functional as TF from torchvision.transforms import InterpolationMode import math import webdataset as wds import jax from jax import pmap ``` ## Dataset and Parameters The following is the list of shards we'll process. We hardcode the length of data so that we can see nice progress bars using `tqdm`. ``` shards = 'https://huggingface.co/datasets/dalle-mini/YFCC100M_OpenAI_subset/resolve/main/data/shard-{0000..0008}.tar' length = 8320 ``` If we are extra cautious or our server is unreliable, we can enable retries by providing a custom `curl` retrieval command: ``` # Enable curl retries to try to work around temporary network / server errors. # This shouldn't be necessary when using reliable servers. # shards = f'pipe:curl -s --retry 5 --retry-delay 5 -L {shards} || true' from pathlib import Path # Output directory for encoded files encoded_output = Path.home()/'data'/'wds'/'encoded' batch_size = 128 # Per device num_workers = 8 # For parallel processing bs = batch_size * jax.device_count() # You can use a smaller size while testing batches = math.ceil(length / bs) ``` Image processing ``` def center_crop(image, max_size=256): # Note: we allow upscaling too. We should exclude small images. image = TF.resize(image, max_size, interpolation=InterpolationMode.LANCZOS) image = TF.center_crop(image, output_size=2 * [max_size]) return image preprocess_image = T.Compose([ center_crop, T.ToTensor(), lambda t: t.permute(1, 2, 0) # Reorder, we need dimensions last ]) ``` Caption preparation. 
Note that we receive the contents of the `json` structure, which will be replaced by the string we return. If we want to keep other fields inside `json`, we can add `caption` as a new field. ``` def create_caption(item): title = item['title_clean'].strip() description = item['description_clean'].strip() if len(title) > 0 and title[-1] not in '.!?': title += '.' return f'{title} {description}' ``` When an error occurs (a download is disconnected, an image cannot be decoded, etc) the process stops with an exception. We can use one of the exception handlers provided by the `webdataset` library, such as `wds.warn_and_continue` or `wds.ignore_and_continue` to ignore the offending entry and keep iterating. **IMPORTANT WARNING:** Do not use error handlers to ignore exceptions until you have tested that your processing pipeline works fine. Otherwise, the process will continue trying to find a valid entry, and it will consume your whole dataset without doing any work. We can also create our custom exception handler as demonstrated here: ``` # UNUSED - Log exceptions to a file def ignore_and_log(exn): with open('errors.txt', 'a') as f: f.write(f'{repr(exn)}\n') return True # Or simply use `wds.ignore_and_continue` exception_handler = wds.warn_and_continue dataset = wds.WebDataset(shards, length=batches, # Hint so `len` is implemented shardshuffle=False, # Keep same order for encoded files for easier bookkeeping. Set to `True` for training. handler=exception_handler, # Ignore read errors instead of failing. ) dataset = (dataset .decode('pil') # decode image with PIL # .map_dict(jpg=preprocess_image, json=create_caption, handler=exception_handler) # Process fields with functions defined above .map_dict(jpg=preprocess_image, json=create_caption) # Process fields with functions defined above .to_tuple('__key__', 'jpg', 'json') # filter to keep only key (for reference), image, caption. 
.batched(bs)) # better to batch in the dataset (but we could also do it in the dataloader) - this arg does not affect speed and we could remove it %%time keys, images, captions = next(iter(dataset)) images.shape T.ToPILImage()(images[0].permute(2, 0, 1)) ``` ### Torch DataLoader ``` dl = torch.utils.data.DataLoader(dataset, batch_size=None, num_workers=num_workers) ``` ## VQGAN-JAX model ``` from vqgan_jax.modeling_flax_vqgan import VQModel ``` We'll use a VQGAN trained with Taming Transformers and converted to a JAX model. ``` model = VQModel.from_pretrained("flax-community/vqgan_f16_16384") ``` ## Encoding Encoding is really simple using `shard` to automatically distribute "superbatches" across devices, and `pmap`. This is all it takes to create our encoding function, that will be jitted on first use. ``` from flax.training.common_utils import shard from functools import partial @partial(jax.pmap, axis_name="batch") def encode(batch): # Not sure if we should `replicate` params, does not seem to have any effect _, indices = model.encode(batch) return indices ``` ### Encoding loop ``` import os import pandas as pd def encode_captioned_dataset(dataloader, output_dir, save_every=14): output_dir.mkdir(parents=True, exist_ok=True) # Saving strategy: # - Create a new file every so often to prevent excessive file seeking. # - Save each batch after processing. # - Keep the file open until we are done with it. 
file = None for n, (keys, images, captions) in enumerate(tqdm(dataloader)): if (n % save_every == 0): if file is not None: file.close() split_num = n // save_every file = open(output_dir/f'split_{split_num:05x}.jsonl', 'w') images = shard(images.numpy().squeeze()) encoded = encode(images) encoded = encoded.reshape(-1, encoded.shape[-1]) encoded_as_string = list(map(lambda item: np.array2string(item, separator=',', max_line_width=50000, formatter={'int':lambda x: str(x)}), encoded)) batch_df = pd.DataFrame.from_dict({"key": keys, "caption": captions, "encoding": encoded_as_string}) batch_df.to_json(file, orient='records', lines=True) ``` Create a new file every 318 iterations. This should produce splits of ~500 MB each, when using a total batch size of 1024. ``` save_every = 318 encode_captioned_dataset(dl, encoded_output, save_every=save_every) ``` ----
github_jupyter
<a href="https://colab.research.google.com/github/ekramasif/Basic-Machine-Learning/blob/main/Classification/Random_Forest_Classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Random Forest Classification ## Importing the libraries ``` import numpy as np import matplotlib.pyplot as plt import pandas as pd ``` ## Importing the dataset ``` dataset = pd.read_csv('Social_Network_Ads.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values ``` ## Splitting the dataset into the Training set and Test set ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) print(X_train) print(y_train) print(X_test) print(y_test) ``` ## Feature Scaling ``` from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) print(X_train) print(X_test) ``` ## Training the Random Forest Classification model on the Training set ``` from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0) classifier.fit(X_train, y_train) ``` ## Predicting a new result ``` print(classifier.predict(sc.transform([[30,87000]]))) ``` ## Predicting the Test set results ``` y_pred = classifier.predict(X_test) print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)) ``` ## Making the Confusion Matrix ``` from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test, y_pred) print(cm) accuracy_score(y_test, y_pred) ``` ## Visualising the Training set results ``` from matplotlib.colors import ListedColormap X_set, y_set = sc.inverse_transform(X_train), y_train X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25), np.arange(start = X_set[:, 1].min() - 1000, stop = 
X_set[:, 1].max() + 1000, step = 0.25)) plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green'))) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j) plt.title('Random Forest Classification (Training set)') plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() plt.show() ``` ## Visualising the Test set results ``` from matplotlib.colors import ListedColormap X_set, y_set = sc.inverse_transform(X_test), y_test X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25), np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25)) plt.contourf(X1, X2, classifier.predict(sc.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green'))) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j) plt.title('Random Forest Classification (Test set)') plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() plt.show() ```
github_jupyter
# Introduction: How H1st.AI enables the Industrial AI Revolution This tutorial will teach you how H1st AI can help solve the Cold Start problem in domains where labeled data is not available or prohibitively expensive to obtain. One example of such a domain is cybersecurity, which is increasingly looking forward to adopting ML to detect intrusions. Another domain is predictive maintenance that tries to anticipate industrial machine failures before they happen. In both domains, labels are expensive because fundamentally these occurrences are rare and costly (as compared to NLP where e.g. sentiment are common and labels can be obtained i.g. via crowdsourcing or weak supervision). Yet this is a fundamental challenge of Industrial AI. <img src="http://docs.arimo.com/H1ST_AI_Tutorial/img/batman h1st.ai.jpg" alt="H1st.AI woke meme" style="float: left; margin-right: 20px; margin-bottom: 20px;" width=320px height=320px> Jurgen Schmidhuber, one of AI & deep learning's pioneer, [remarked in his 2020s outlook that](http://people.idsia.ch/~juergen/2010s-our-decade-of-deep-learning.html#Sec.%207) in the last decade AI "excelled in virtual worlds, e.g., in video games, board games, and especially on the major WWW platforms", but the main challenge for the next decades is for AI to be "driving industrial processes and machines and robots". As pioneers in Industrial AI who regularly work with massive global fleets of IoT equipment, Arimo & Panasonic whole-heartedly agrees with this outlook. Importantly, many industrial AI use cases with significant impact have become urgent and demand solutions now that requires a fresh approach. We will work on one such example in this tutorial: detection intrusion in automotive cybersecurity. We’ll learn that using H1st.AI we can tackle these problems and make it tractable by leveraging human experience and data-driven models in a harmonious way. 
Especially, we’ll learn how to: * Perform use-case analysis to decompose problems and adopt different models at the right level of abstractions * Encode human experience as a model * Combine human and ML models to work in tandem in a H1st.Graph Too many tutorials, esp data science ones, start out with some toy applications and the really basic stuff, and then stalls out on the more complex real-world scenario. This one is going to be different. So, grab a cup of coffee before you continue :) If you can't wait, go ahead and [star our Github repository](https://github.com/h1st-ai/h1st) and check out the "Quick Start" section. We're open-source!
github_jupyter
``` from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) ``` # Data Ingestion From External Sources - Spark * Generic Format * Special Format - Need Drivers * Avro * S3 * Relational Database * Postgres * MySQL * SQLServer * NoN-Relational Database * Cassandra ``` from pyspark.sql import SparkSession import os os.environ["PYSPARK_PYTHON"]="/usr/bin/python3" os.environ["PYSPARK_DRIVER_PYTHON"]="/usr/bin/python3" os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.hadoop:hadoop-aws:2.7.1,com.datastax.spark:spark-cassandra-connector_2.11:2.3.0,mysql:mysql-connector-java:8.0.15 pyspark-shell' spark = SparkSession.builder\ .appName('AWS external sources spark')\ .getOrCreate() spark ``` # Generic Format - Dont need drivers * csv * json * parquet * libsvm * text #### Read ``` spark.read.<format>("<file name>") ``` #### Write ``` spark.write.<format>("<file name>") ``` # Special Formats - Need Drivers You can include the following packages using **--packages** |Source| Driver Package| |-----------|----------------| |S3 |org.apache.hadoop:hadoop-aws:2.7.1| |Avro |org.apache.spark:spark-avro_2.11:2.4.0| # S3 #### Read ``` df = spark.read.<format>("s3a://<bucket name>/<file name>") ``` #### Write ``` df.write.<format>("s3a://<bucket name>/<file name>", mode="overwrite") ``` # Relational Databases |Source| Driver Package|Driver Name|Standard Port| |-----------|----------------|---------|----| |Postgres |org.postgresql:postgresql:42.1.1|org.postgresql.Driver|5432 |MySQL |mysql:mysql-connector-java:8.0.13|com.mysql.jdbc.Driver|3306 #### Generic Read ``` spark.read\ .format("jdbc")\ .option("driver", "<driver name>")\ .option("url", "jdbc:<database type>://<ip>:<port>/<dbname>")\ .option("dbtable", "<table>")\ .option("user", "<username>")\ .option("password","<password>")\ .load() ``` #### Generic Write ``` df.write\ .format("jdbc")\ .option("driver", "<driver name>")\ .option("url", "jdbc:<databse 
type>://<ip>:<port>/<db name>")\ .option("dbtable", "<table name>")\ .option("user", "<username>")\ .option("password","<password>")\ .mode("overwrite")\ .save() ``` ## Postgres #### Read ``` spark.read\ .format("jdbc")\ .option("driver", "org.postgresql.Driver")\ .option("url", "jdbc:postgresql://<ip>:5432/<dbname>")\ .option("dbtable", "<table>")\ .option("user", "<username>")\ .option("password","<password>")\ .load() ``` #### Write ``` df.write\ .format("jdbc")\ .option("driver", "org.postgresql.Driver")\ .option("url", "jdbc:postgresql://localhost:5432/spark_demo_db")\ .option("dbtable", "my_table")\ .option("user", "sahil")\ .option("password","12345")\ .mode("overwrite")\ .save() ``` ## MySQL #### Read ``` spark.read\ .format("jdbc")\ .option("driver", "com.mysql.jdbc.Driver")\ .option("url", "jdbc:mysql://<ip>:3306/<dbname>")\ .option("dbtable", "<table name>")\ .option("user", "<username>")\ .option("password","<password>")\ .load() ``` #### Write ``` df.write\ .format("jdbc")\ .option("driver", "com.mysql.jdbc.Driver")\ .option("url", "jdbc:mysql://<ip>:3306/<dbname>")\ .option("dbtable", "<table name>")\ .option("user", "<username>")\ .option("password","<password>")\ .mode("overwrite")\ .save() ``` ## SQL Server Install SQL Server from [here](https://docs.microsoft.com/en-us/sql/linux/quickstart-install-connect-ubuntu?view=sql-server-2017) Download latest driver package from [here](https://docs.microsoft.com/en-us/sql/connect/jdbc/download-microsoft-jdbc-driver-for-sql-server?view=sql-server-2017) ``` # include the jar from the above downloaded packages as --jars option import os os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars /home/sahil/Desktop/sqljdbc_7.2/enu/mssql-jdbc-7.2.1.jre8.jar' ``` #### Read ``` df = spark.read.format("jdbc") \ .option("url", "jdbc:sqlserver://<ip>:1433;databaseName=<dbname>") \ .option("dbtable", "<table>") \ .option("user", "<user>") \ .option("password", "<password>")\ .load() ``` #### Write ``` df.write.format("jdbc") \ 
.option("url", "jdbc:sqlserver://<ip>:1433;databaseName=<dbname>") \ .option("dbtable", "<table>") \ .option("user", "<user>") \ .option("password", "<password>")\ .mode("overwrite")\ .save() ``` # NoSQL Databases |Source| Driver Package|Format Name|Standard Port| |-----------|----------------|---------|----| |Cassandra |com.datastax.spark:spark-cassandra-connector_2.11:2.3.0|org.apache.spark.sql.cassandra|9042 |DynamoDB |com.amazon.emr:emr-dynamodb-hadoop:4.2.0| ## Cassandra #### Read ``` spark.read.format("org.apache.spark.sql.cassandra")\ .option("spark.cassandra.connection.host","<ip>")\ .option("spark.cassandra.connection.port","<port>")\ .option("keyspace","<keyspace name>")\ .option("table","<table name>")\ .load() ``` #### Write ``` df.write.format("org.apache.spark.sql.cassandra")\ .option("spark.cassandra.connection.host","<ip>")\ .option("spark.cassandra.connection.port","<port>")\ .option("keyspace","<keyspace name>")\ .option("table","<table name>")\ .save() ``` ## Dynamo DB (TODO) #### Read #### Write
github_jupyter
``` ## import required packages for a parameter estimation technique import numpy as np import matplotlib.pyplot as plt from scipy.optimize import curve_fit import pandas as pd import math ##Import Experimental Reversible Data: rev_exp_data = pd.read_csv("data/10mVs_Reversible.csv") current_exp=rev_exp_data['current(A)'].values voltage_exp=rev_exp_data['voltage(mV)'].values time_exp=rev_exp_data['time(s)'].values ## all appropriate packages and the singular experimental data file is imported now # Import the dimensionless voltammagram (V I) for reversible reactions rev_dim_values = pd.read_csv("data/dimensionless_values_rev.csv") rev_dim_current=rev_dim_values['dimensionless_current'].values rev_dim_voltage=rev_dim_values['dimensionless_Voltage'].values ##We will now prompt the user to submit known parameters (THESE CAN BE CHANGED OR MADE MORE CONVENIENT) sweep_rate= float(input("What is the Voltage sweep rate in mV/s?(10)")) electrode_surface_area= float(input("What is the electrode surface area in cm^2?(.2)")) concentration_initial= float(input("What is the initial concentration in mol/cm^3?(.00001)")) Temp= float(input("What is the temperature in K?(298)")) eq_pot= float(input("What is the equilibrium potential in V?(.10)")) ##we are inserting a diffusion coefficient to check math here, we will estimate this later: Diff_coeff=0.00001 ## Here we define constant variables, these can be made to user inputs if needed. 
n=1 Faradays_const=96485 R_const=8.314 sigma=(n*Faradays_const*sweep_rate)/(R_const*Temp) Pre=electrode_surface_area*concentration_initial*n*Faradays_const*math.sqrt(Diff_coeff*sigma) output_voltage=(eq_pot+rev_dim_voltage/n) output_current=Pre*rev_dim_current plt.plot(output_voltage,output_current) # Fitting Diff_Coeff from scipy import optimize def test_func(rev_dim_current, D): return electrode_surface_area*concentration_initial*n*Faradays_const*math.sqrt(D*sigma)*rev_dim_current params, params_covariance = optimize.curve_fit(test_func, rev_dim_current, output_current,p0=None,bounds = (0,[1])) print(params) ##Import Experimental Irreversible Data: irrev_exp_data = pd.read_csv("data/10mVs_Irreversible.csv") current_exp=irrev_exp_data['current(A)'].values voltage_exp=irrev_exp_data['voltage(mV)'].values time_exp=irrev_exp_data['time(s)'].values print(irrev_exp_data) ## all appropriate packages and the singular experimental data file is imported now # Import the dimensionless voltammagram (V I) for irreversible reactions irrev_dim_values = pd.read_csv("data/dimensionless_values_irrev.csv") irrev_dim_current=irrev_dim_values['dimensionless_current'].values irrev_dim_voltage=irrev_dim_values['dimensionless_Voltage'].values print(irrev_dim_values) ##We will now prompt the user to submit known parameters (THESE CAN BE CHANGED OR MADE MORE CONVENIENT) sweep_rate= float(input("What is the Voltage sweep rate in mV/s?(10)")) electrode_surface_area= float(input("What is the electrode surface area in cm^2?(.2)")) concentration_initial= float(input("What is the initial concentration in mol/cm^3?(.00001)")) Temp= float(input("What is the temperature in K?(298)")) eq_pot= float(input("What is the equilibrium potential in mV?(100)")) ##we are inserting a diffusion coefficient to check math here, we will estimate this later: Diff_coeff=0.00001 ## Here we define constant variables, these can be made to user inputs if needed. 
n=1 Faradays_const=96485 R_const=8.314 exchange_current_density=0.0002 kinetic_coefficient=exchange_current_density/n/Faradays_const/electrode_surface_area/concentration_initial transfer_coefficient=.6 eV_const=59.1 beta=transfer_coefficient*n*Faradays_const*sweep_rate/R_const/Temp/1000 Pre=(concentration_initial*n*Faradays_const* math.sqrt(Diff_coeff*sweep_rate*transfer_coefficient *Faradays_const/(R_const*Temp*1000))) output_voltage=eq_pot+irrev_dim_voltage/transfer_coefficient-eV_const/transfer_coefficient*math.log(math.sqrt(math.pi*Diff_coeff*beta)/kinetic_coefficient) output_current=Pre*irrev_dim_current plt.plot(output_voltage,output_current) print(beta) print(Pre) print(kinetic_coefficient) print(eq_pot) print(output_current[19]) print(output_voltage[19]) # Fitting exchange_current_density: test_func models voltage as a function of the irreversible dimensionless voltage, so fit it against (irrev_dim_voltage, output_voltage) from scipy import optimize def test_func(irrev_dim_voltage, exchange_current_density): return eq_pot+irrev_dim_voltage/transfer_coefficient-eV_const/transfer_coefficient*math.log(math.sqrt(math.pi*Diff_coeff*beta)/(exchange_current_density/n/Faradays_const/electrode_surface_area/concentration_initial)) params, params_covariance = optimize.curve_fit(test_func, irrev_dim_voltage, output_voltage,p0=None,bounds = (0,[1])) print(params) ```
github_jupyter
# Image Registration ## Overview ### Learning Objectives * Understand how ITK does computations in physical space * Understand why ITK does registration in physical space instead of pixel space * Become familiar with the components of the ITK Registration Framework, and survey their possible values The content for this section is provided as [a separate presentation](https://data.kitware.com/api/v1/file/5b1000148d777f15ebe1fca3/download?contentDisposition=inline). ## Exercises ### Exercise 1: Transforms and Pixels ![Transforms and pixels](data/transforms-and-pixels.png) ### Exercise 2: Components of the registration framework Study the image registration pipeline below, and replace the `fixMe` the components with their correct values. ``` import numpy as np import itk from itkwidgets import view from matplotlib import pyplot as plt %matplotlib inline from ipywidgets import interact PixelType = itk.ctype('float') fixedImage = itk.imread('data/BrainProtonDensitySliceBorder20.png', PixelType) plt.imshow(itk.GetArrayViewFromImage(fixedImage)) movingImage = itk.imread('data/BrainProtonDensitySliceShifted13x17y.png', PixelType) plt.imshow(itk.GetArrayViewFromImage(movingImage)) Dimension = fixedImage.GetImageDimension() FixedImageType = type(fixedImage) MovingImageType = type(movingImage) TransformType = itk.TranslationTransform[itk.D, Dimension] initialTransform = TransformType.New() optimizer = itk.RegularStepGradientDescentOptimizerv4.New( LearningRate=4, MinimumStepLength=0.001, RelaxationFactor=0.5, NumberOfIterations=200) metric = itk.MeanSquaresImageToImageMetricv4[ FixedImageType, MovingImageType].New() registration = itk.ImageRegistrationMethodv4.New(FixedImage=fixMe, MovingImage=fixMe, Metric=fixMe, Optimizer=fixMe, InitialTransform=fixMe) movingInitialTransform = TransformType.New() initialParameters = movingInitialTransform.GetParameters() initialParameters[0] = 0 initialParameters[1] = 0 movingInitialTransform.SetParameters(initialParameters) 
registration.SetMovingInitialTransform(movingInitialTransform) identityTransform = TransformType.New() identityTransform.SetIdentity() registration.SetFixedInitialTransform(identityTransform) registration.SetNumberOfLevels(1) registration.SetSmoothingSigmasPerLevel([0]) registration.SetShrinkFactorsPerLevel([1]) registration.Update() transform = registration.GetTransform() finalParameters = transform.GetParameters() translationAlongX = finalParameters.GetElement(0) translationAlongY = finalParameters.GetElement(1) numberOfIterations = optimizer.GetCurrentIteration() bestValue = optimizer.GetValue() print("Result = ") print(" Translation X = " + str(translationAlongX)) print(" Translation Y = " + str(translationAlongY)) print(" Iterations = " + str(numberOfIterations)) print(" Metric value = " + str(bestValue)) CompositeTransformType = itk.CompositeTransform[itk.D, Dimension] outputCompositeTransform = CompositeTransformType.New() outputCompositeTransform.AddTransform(movingInitialTransform) outputCompositeTransform.AddTransform(registration.GetModifiableTransform()) resampler = itk.ResampleImageFilter.New(Input=fixMe, Transform=fixMe, UseReferenceImage=True, ReferenceImage=fixMe) resampler.SetDefaultPixelValue(100) OutputPixelType = itk.ctype('unsigned char') OutputImageType = itk.Image[OutputPixelType, Dimension] resampler.Update() plt.imshow(itk.GetArrayViewFromImage(movingImage)) difference = itk.SubtractImageFilter.New(Input1=fixedImage, Input2=resampler) resampler.SetDefaultPixelValue(1) resampler.SetTransform(identityTransform) difference.Update() view(difference.GetOutput()) ``` ### Exercise 3: Image similarity matching metrics Examine the available [registration framework image similarity matching metrics](https://itk.org/Doxygen/html/classitk_1_1ImageToImageMetricv4.html). - Can an alternative metric be used in the registration pipeline above? - Which metrics should be used for multi-modality registration? ### Enjoy ITK!
github_jupyter
# Simulating Language 5, Simple Innate Signalling (walkthrough) This is a line-by-line walkthrough of the code for lab on simple signalling. ### Data Structures: a signalling matrix represented as a list of lists A production system can be thought of as a matrix which maps meanings to signals. We are representing this as a list. Each member of the list is itself a list containing the association strengths for *one particular meaning*. Look at the example below: ```python psys = [[1, 0, 0], [1, 2, 1], [3, 4, 4]] ``` Here, a production system called `psys` is defined: it has three members, representing three meanings. The length of the system `len(psys)` is equivalent to the number of meanings in the system. `psys[0]` is `[1, 0, 0]`, which are the association strengths for the first meaning (remember python indexes start from 0!). Each of these sub-lists has three members, representing three possible signals. So `psys[0][0]` is the strength of association between the first meaning and the first signal. We sometimes refer to these association strengths as "weights". We can do the same thing to model a reception system, but in this case we are dealing with a system which maps from signals to meanings: so, if `rsys` is a reception system then each member of `rsys` is itself a list that contains the association strengths between a signal and several meanings. - Create a variable containing the following production matrix: |. | s1 | s2 | s3 | |----|----|----|----| | m1 | 1 | 0 | 2 | | m2 | 2 | 2 | 0 | | m3 | 0 | 1 | 3 | - Print the weights for meaning m1 - Print the weight for the connection between meaning m2 and signal s3 - Create a variable containing the following reception matrix: |. 
| m1 | m2 | m3 | |----|----|----|----| | s1 | 1 | 2 | 0 | | s2 | 0 | 2 | 1 | | s3 | 2 | 0 | 3 | - Print the weights for signal s3 - Print the weight of the connection between signal s1 and meaning m2 ## The code proper The code begins by importing various random number and plotting modules: ``` import random %matplotlib inline import matplotlib.pyplot as plt from IPython.display import set_matplotlib_formats set_matplotlib_formats('svg', 'pdf') ``` ### Function wta The function `wta` ("winner takes all") takes a list of numbers (`items`) as its argument. This represents a row of a production or reception matrix. The function returns the index of the largest number in the list `items`. If there are multiple equally large numbers, then one of them is chosen at random. ``` def wta(items): maxweight = max(items) candidates = [] for i in range(len(items)): if items[i] == maxweight: candidates.append(i) return random.choice(candidates) ``` `maxweight = max(items)` uses the built-in function `max` to calculate the maximum value of `items` and allocates this value to the variable `maxweight`. `candidates = []` creates an empty list. `for i in range(len(items)):` lets us look at each item in the list in turn, keeping track of where it is in the list. `range(len(items))` creates a sequence of numbers from 0 up to (but not including) the length of the list `items`. These represent each possible index of `items`, and in the for loop, we go through each of these in turn, allocating it to the variable, `i`, and then carrying out everything in the next code block for each value of `i`: ```python if items[i] == maxweight: candidates.append(i) ``` This block of code checks each member of `items` in turn; if its value is equal to `maxweight`, then the index `i` is appended to (added to) the list of `candidates`. After this loop has been completed, `candidates` will contain the indices of all the largest numbers. 
`return random.choice(candidates)` returns a random choice from the numbers in the list, `candidates`. If there is only one number in `candidates`, then this is returned. - Using the `wta` function and the variables you created above to store the production and reception matrices: - find the preferred signal for each meaning in turn - find the preferred meaning for each signal in turn For example, if you called your production system `my_psys`, you could find the preferred signal for meaning 1 like this: ```python wta(my_psys[0]) ``` This takes the first row of the production system we defined earlier (`my_psys[0]`), then uses `wta` to find the index of the preferred signal for that row. Note that the `wta` function will only work if you pressed SHIFT+ENTER on the cell in the notebook above, otherwise the computer doesn't know what `wta` means. ### Function communicate The function `communicate` plays a communication episode; it takes three arguments: - `speaker_system`, the production matrix of the speaker; - `hearer_system`, the reception matrix of the hearer; and - `meaning`, the index of the meanign which is to be communicated. ``` def communicate(speaker_system, hearer_system, meaning): speaker_signal = wta(speaker_system[meaning]) hearer_meaning = wta(hearer_system[speaker_signal]) if meaning == hearer_meaning: return 1 else: return 0 ``` In a communication episode, the speaker chooses a signal it uses to communicate `meaning`, and expresses this signal to the hearer; the hearer then chooses the meaning it understands by the speaker's signal. If the hearer's meaning is the same as the speaker's meaning, then the communication episode succeeds, otherwise it fails. `speaker_signal = wta(speaker_system[meaning])` uses `speaker_system[meaning]` to extract a list of association strengths from the speaker's production matrix (`speaker_system`) for `meaning`, and then uses `wta` (see above) to find the index corresponding to the largest of these weights. 
This value is then stored in the variable `speaker_signal`. `hearer_meaning = wta(hearer_system[speaker_signal])` uses `hearer_system[speaker_signal]` to extract a list of association strengths from the hearer's reception matrix (`hearer_system`) for `speaker_signal`, and then uses `wta` (see above) to find the index corresponding to the largest of these weights. This value is then stored in the variable `hearer_meaning`. ```python if meaning == hearer_meaning: return 1 else: return 0 ``` If the hearer's interpretation of the speaker's signal (`hearer_meaning`) equals the original value of `meaning` (i.e. the meaning the speaker was trying to convey) and thus the communication episode succeeds, then the function returns 1 (indicating success), otherwise (`else`) it returns 0 (indicating failure). - Using the same matrices you created earlier, find out which of the meanings can be successfully communicated using these production and reception matrices. ### Function ca_monte The function `ca_monte` (standing for "Communicative Accuracy Monte Carlo") is the main function in this program. It performs a Monte Carlo simulation, which runs a set number of communication episodes between a production system and a reception system, calculates how many of them were communicatively successful, and returns a trial-by-trial list of results. It takes three arguments: - `speaker_system`, the production matrix of the speaker; - `hearer_system`, the reception matrix of the hearer; and - `trials`, the number of trials of the simulation, or the number of communicative episodes over which communicative accuracy should be calculated. ``` def ca_monte(speaker_system, hearer_system, trials): total = 0. 
accumulator = [] for n in range(trials): total += communicate(speaker_system, hearer_system, random.randrange(len(speaker_system))) accumulator.append(total / (n + 1)) return accumulator ``` `total = 0.` creates a variable called total, which will store the number of successful communicative episodes. We use `0.` rather than `0` as a shorthand for `0.0`, which indicates that the eventual result isn't going to be a round number. In fact, this isn't strictly necessary for the version of Python we're using, but you're likely to see something like this in a lot of code you read. `accumulator = []` creates a variable called `accumulator`, which will be used to build up a list of trial-by-trial success rates. We initialise the accumulator with an empty list: before we have conducted any trials, we don't have any results for success or failure. `for n in range(trials):` sets up a loop to allow us to test communicative accuracy over and over again. `range(trials)` creates a sequence of numbers from 0 up to (but not including) `trials`, which is then traversed by the for loop. `total += communicate(speaker_system, hearer_system, random.randrange(len(speaker_system)))` updates a running total of the number of communicative episodes that were successful. On each communicative episode, we choose a random meaning (that's what `random.randrange(len(speaker_system))` does - the length of `speaker_system` is the number of rows in their production matrix, which is the same as the number of meanings). Then we use the function `communicate` to see whether the speaker can successfully communicate this meaning to the hearer (`hearer_system`). We add the value returned by `communicate` (i.e. 0 or 1) to the existing value in `total`, which therefore contains the number of successful communicative episodes. `accumulator.append(total / (n + 1))` builds up a list of exposure-by-exposure proportions of communicative episodes so far which have been successful. 
`total / (n + 1)` gives the total number of events so far that have been successful (stored in `total`) divided by the number of times we've been round the loop at this point. Note that the number of trials conducted so far is `n + 1`, not just `n`, because of the way `range` works. The first trial has `n` equal to 0, the second 1 and so on, so we have to add 1 to this number to get the number of trials completed. We then use `append` to add this value to `accumulator`, which is our building list of trial-by-trial success proportions. `return accumulator` simply returns this list. Note that this line of code is outside of the for loop. `accumulator` is only returned once the loop has run the necessary number of trials. - What is the overall communicative accuracy for the matrices you defined earlier? - create another matrix (maybe with more meanings and/or signals). What is its communicative accuracy?
github_jupyter
# Natural Language Processing with `nltk` `nltk` is the most popular Python package for Natural Language processing, it provides algorithms for importing, cleaning, pre-processing text data in human language and then apply computational linguistics algorithms like sentiment analysis. ## Inspect the Movie Reviews Dataset It also includes many easy-to-use datasets in the `nltk.corpus` package, we can download for example the `movie_reviews` package using the `nltk.download` function: ``` import nltk nltk.download("movie_reviews") ``` You can also list and download other datasets interactively just typing: nltk.download() in the Jupyter Notebook. Once the data have been downloaded, we can import them from `nltk.corpus` ``` from nltk.corpus import movie_reviews ``` The `fileids` method provided by all the datasets in `nltk.corpus` gives access to a list of all the files available. In particular in the movie_reviews dataset we have 2000 text files, each of them is a review of a movie, and they are already split in a `neg` folder for the negative reviews and a `pos` folder for the positive reviews: ``` len(movie_reviews.fileids()) movie_reviews.fileids()[:5] movie_reviews.fileids()[-5:] ``` `fileids` can also filter the available files based on their category, which is the name of the subfolders they are located in. Therefore we can have lists of positive and negative reviews separately. ``` negative_fileids = movie_reviews.fileids('neg') positive_fileids = movie_reviews.fileids('pos') len(negative_fileids), len(positive_fileids) ``` We can inspect one of the reviews using the `raw` method of `movie_reviews`, each file is split into sentences, the curators of this dataset also removed from each review from any direct mention of the rating of the movie. ``` print(movie_reviews.raw(fileids=positive_fileids[0])) ``` ## Tokenize Text in Words ``` romeo_text = """Why then, O brawling love! O loving hate! O any thing, of nothing first create! 
O heavy lightness, serious vanity, Misshapen chaos of well-seeming forms, Feather of lead, bright smoke, cold fire, sick health, Still-waking sleep, that is not what it is! This love feel I, that feel no love in this.""" ``` The first step in Natural Language processing is generally to split the text into words, this process might appear simple but it is very tedious to handle all corner cases, see for example all the issues with punctuation we have to solve if we just start with a split on whitespace: ``` romeo_text.split() ``` `nltk` has a sophisticated word tokenizer trained on English named `punkt`, we first have to download its parameters: ``` nltk.download("punkt") ``` Then we can use the `word_tokenize` function to properly tokenize this text, compare to the whitespace splitting we used above: ``` romeo_words = nltk.word_tokenize(romeo_text) romeo_words ``` Good news is that the `movie_reviews` corpus already has direct access to tokenized text with the `words` method: ``` movie_reviews.words(fileids=positive_fileids[0]) ``` ## Build a bag-of-words model The simplest model for analyzing text is just to think about text as an unordered collection of words (bag-of-words). This can generally allow to infer from the text the category, the topic or the sentiment. From the bag-of-words model we can build features to be used by a classifier, here we assume that each word is a feature that can either be `True` or `False`. We implement this in Python as a dictionary where for each word in a sentence we associate `True`, if a word is missing, that would be the same as assigning `False`. ``` {word:True for word in romeo_words} type(_) def build_bag_of_words_features(words): return {word:True for word in words} build_bag_of_words_features(romeo_words) ``` This is what we wanted, but we notice that also punctuation like "!" and words useless for classification purposes like "of" or "that" are also included. 
Those words are named "stopwords" and `nltk` has a convenient corpus we can download: ``` nltk.download("stopwords") import string string.punctuation ``` Using the Python `string.punctuation` list and the English stopwords we can build better features by filtering out those words that would not help in the classification: ``` useless_words = nltk.corpus.stopwords.words("english") + list(string.punctuation) #useless_words #type(useless_words) def build_bag_of_words_features_filtered(words): return { word:1 for word in words \ if not word in useless_words} build_bag_of_words_features_filtered(romeo_words) ``` ## Plotting Frequencies of Words It is common to explore a dataset before starting the analysis, in this section we will find the most common words and plot their frequency. Using the `.words()` function with no argument we can extract the words from the entire dataset and check that it is about 1.6 millions. ``` all_words = movie_reviews.words() len(all_words)/1e6 ``` First we want to filter out `useless_words` as defined in the previous section, this will reduce the length of the dataset by more than a factor of 2: ``` filtered_words = [word for word in movie_reviews.words() if not word in useless_words] type(filtered_words) len(filtered_words)/1e6 ``` The `collection` package of the standard library contains a `Counter` class that is handy for counting frequencies of words in our list: ``` from collections import Counter word_counter = Counter(filtered_words) ``` It also has a `most_common()` method to access the words with the higher count: ``` most_common_words = word_counter.most_common()[:10] most_common_words ``` Then we would like to have a visualization of this using `matplotlib`. 
First we want to use the Jupyter magic function %matplotlib inline to set up the Notebook to show the plot embedded into the Jupyter Notebook page, you can also test: %matplotlib notebook for a more interactive plotting interface which however is not as well supported on all platforms and browsers. ``` %matplotlib inline import matplotlib.pyplot as plt ``` We can sort the word counts and plot their values on Logarithmic axes to check the shape of the distribution. This visualization is particularly useful when comparing 2 or more datasets: a flatter distribution indicates a large vocabulary, while a peaked distribution indicates a restricted vocabulary, often due to a focused topic or specialized language. ``` sorted_word_counts = sorted(list(word_counter.values()), reverse=True) plt.loglog(sorted_word_counts) plt.ylabel("Freq") plt.xlabel("Word Rank"); ``` Another related plot is the histogram of `sorted_word_counts`, which displays how many words have a count in a specific range. Of course the distribution is highly peaked at low counts, i.e. most of the words appear with a low count, so we had better display it on semilogarithmic axes to inspect the tail of the distribution. ``` plt.hist(sorted_word_counts, bins=50); plt.hist(sorted_word_counts, bins=50, log=True); ``` ## Train a Classifier for Sentiment Analysis Using our `build_bag_of_words_features` function we can build separately the negative and positive features. Basically for each of the 1000 negative and 1000 positive reviews, we create one dictionary of the words and we associate the label "neg" or "pos" to it. 
``` negative_features = [ (build_bag_of_words_features_filtered(movie_reviews.words(fileids=[f])), 'neg') \ for f in negative_fileids ] print(negative_features[3]) positive_features = [ (build_bag_of_words_features_filtered(movie_reviews.words(fileids=[f])), 'pos') \ for f in positive_fileids ] print(positive_features[6]) from nltk.classify import NaiveBayesClassifier ``` One of the simplest supervised machine learning classifiers is the Naive Bayes Classifier, it can be trained on 80% of the data to learn what words are generally associated with positive or with negative reviews. ``` split = 800 sentiment_classifier = NaiveBayesClassifier.train(positive_features[:split]+negative_features[:split]) ``` We can check after training what is the accuracy on the training set, i.e. the same data used for training, we expect this to be a very high number because the algorithm already "saw" those data. Accuracy is the fraction of the data that is classified correctly, we can turn it into percent: ``` nltk.classify.util.accuracy(sentiment_classifier, positive_features[:split]+negative_features[:split])*100 ``` The accuracy above is mostly a check that nothing went very wrong in the training, the real measure of accuracy is on the remaining 20% of the data that wasn't used in training, the test data: ``` nltk.classify.util.accuracy(sentiment_classifier, positive_features[split:]+negative_features[split:])*100 ``` Accuracy here is around 70% which is pretty good for such a simple model if we consider that the estimated accuracy for a person is about 80%. We can finally print the most informative features, i.e. the words that mostly identify a positive or a negative review: ``` sentiment_classifier.show_most_informative_features() ```
github_jupyter
<a href="https://colab.research.google.com/github/Mukilan-Krishnakumar/NLP_With_Disaster_Tweets/blob/main/NLP_with_Disaster_Tweets_Part_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> This is part 2 of the **NLP with Disaster Tweets Series**. The previous [Part](https://github.com/Mukilan-Krishnakumar/NLP_With_Disaster_Tweets/blob/main/NLP_with_Disaster_Tweets_Part_1.ipynb) along with the blogpost can be found in [this link](https://medium.com/@mukilankrishnakumar2002/natural-language-processing-with-disaster-tweets-part-1-db31c9ad07). In the previous part, we created a baseline NLP model: we covered the basics of building an NLP model and ended up with an accuracy which is far from good. In this part, we are going to implement an NLP model with an LSTM architecture. We will see in detail how an LSTM model works and how to implement it in our code. Let's get started. ``` # Prerequisite Block 1 ! pip install kaggle ! mkdir ~/.kaggle ! cp kaggle.json ~/.kaggle/ ! chmod 600 ~/.kaggle/kaggle.json ! kaggle competitions download nlp-getting-started # ! 
unzip nlp-getting-started.zip import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import re import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences def cleaningText(df): df['text'] = [re.sub(r'http\S+', '', x, flags=re.MULTILINE) for x in df['text']] df['text'] = df['text'].str.lower() df = pd.read_csv('/content/train.csv') df_test = pd.read_csv('/content/train.csv') df.head() cleaningText(df) df.head() sentences = [x for x in df['text']] labels = [x for x in df['target']] print(sentences) labels = np.array(labels) training_sentences = sentences[:6090] training_labels = labels[:6090] testing_sentences = sentences[6090:] testing_labels = labels[6090:] ``` Now that we have taken the data from the table and converted it into sequences and labels. We will now convert the labels into numpy arrays. Then we will tokenize the words in sequences. The max length of a tweet is 280 characters. We will fix `max_length` to be **280**. The we will pad all the sentence to length. We will do `post_trunc`. ``` vocab_size = 10000 embedding_dim = 16 max_length = 280 trunc_type='post' oov_tok = "<OOV>" tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok) tokenizer.fit_on_texts(training_sentences) word_index = tokenizer.word_index sequences = tokenizer.texts_to_sequences(training_sentences) padded = pad_sequences(sequences,maxlen=max_length, truncating=trunc_type) testing_sequences = tokenizer.texts_to_sequences(testing_sentences) testing_padded = pad_sequences(testing_sequences,maxlen=max_length) reverse_word_index = dict([(v, k) for (k, v) in word_index.items()]) def decode_tweet(text): return ' '.join([reverse_word_index.get(i, '?') for i in text]) print(decode_tweet(padded[3])) print(training_sentences[3]) ``` Before we get to building our model, we need to understand what is an LSTM. LSTM is a type of Recurrent Neural Network (RNN). 
Traditional neural networks are called Feed-Forward Networks, information only flows in one direction. Let us consider an example to see, why feed forward networks are not useful for NLP. Consider the sentence: "Hello, I am Gakuto Kajiwara, I am from Japan... I speak" When we read this sentence, we can correctly predict that the next word is **japanese**. This is possible because the word **japan** has importance on the prediction even though it is not the last word before prediction. It might even be a few sentences ahead. The ability of a network to remember information learned in previous words to use it in future predictions is embedded in something called a **cell state**. Cell state is implemented in RNN where the hidden layers are connected and information flows between them. LSTMs are much better modifications on RNN, they leverage few activation functions and **gates**. Gates have the ability to add or remove information from cell state. For a much better deep dive read colah's blog on [LSTM](https://colah.github.io/posts/2015-08-Understanding-LSTMs/). ``` model = tf.keras.Sequential([ tf.keras.layers.Embedding(vocab_size,embedding_dim,input_length = max_length), tf.keras.layers.Dropout(rate = 0.25), tf.keras.layers.LSTM(16, activation = 'tanh', recurrent_activation = 'sigmoid', recurrent_dropout = 0.0), tf.keras.layers.Dropout(rate = 0.25), tf.keras.layers.Dense(1, activation = "sigmoid") ]) model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy']) model.summary() num_epochs = 10 model.fit( padded, training_labels,epochs = num_epochs, validation_data = (testing_padded, testing_labels)) ``` We were able to get 75% validation accuracy, which is really good compared to our previous model.
github_jupyter
## 线性回归 ``` import numpy as np import pandas as pd ### 初始化模型参数 def initialize_params(dims): ''' 输入: dims:训练数据变量维度 输出: w:初始化权重参数值 b:初始化偏差参数值 ''' # 初始化权重参数为零矩阵 w = np.zeros((dims, 1)) # 初始化偏差参数为零 b = 0 return w, b ### 定义模型主体部分 ### 包括线性回归公式、均方损失和参数偏导三部分 def linear_loss(X, y, w, b): ''' 输入: X:输入变量矩阵 y:输出标签向量 w:变量参数权重矩阵 b:偏差项 输出: y_hat:线性模型预测输出 loss:均方损失值 dw:权重参数一阶偏导 db:偏差项一阶偏导 ''' # 训练样本数量 num_train = X.shape[0] # 训练特征数量 num_feature = X.shape[1] # 线性回归预测输出 y_hat = np.dot(X, w) + b # 计算预测输出与实际标签之间的均方损失 loss = np.sum((y_hat-y)**2)/num_train # 基于均方损失对权重参数的一阶偏导数 dw = np.dot(X.T, (y_hat-y)) /num_train # 基于均方损失对偏差项的一阶偏导数 db = np.sum((y_hat-y)) /num_train return y_hat, loss, dw, db ### 定义线性回归模型训练过程 def linear_train(X, y, learning_rate=0.01, epochs=10000): ''' 输入: X:输入变量矩阵 y:输出标签向量 learning_rate:学习率 epochs:训练迭代次数 输出: loss_his:每次迭代的均方损失 params:优化后的参数字典 grads:优化后的参数梯度字典 ''' # 记录训练损失的空列表 loss_his = [] # 初始化模型参数 w, b = initialize_params(X.shape[1]) # 迭代训练 for i in range(1, epochs): # 计算当前迭代的预测值、损失和梯度 y_hat, loss, dw, db = linear_loss(X, y, w, b) # 基于梯度下降的参数更新 w += -learning_rate * dw b += -learning_rate * db # 记录当前迭代的损失 loss_his.append(loss) # 每1000次迭代打印当前损失信息 if i % 10000 == 0: print('epoch %d loss %f' % (i, loss)) # 将当前迭代步优化后的参数保存到字典 params = { 'w': w, 'b': b } # 将当前迭代步的梯度保存到字典 grads = { 'dw': dw, 'db': db } return loss_his, params, grads from sklearn.datasets import load_diabetes diabetes = load_diabetes() data = diabetes.data target = diabetes.target print(data.shape) print(target.shape) print(data[:5]) print(target[:5]) # 导入sklearn diabetes数据接口 from sklearn.datasets import load_diabetes # 导入sklearn打乱数据函数 from sklearn.utils import shuffle # 获取diabetes数据集 diabetes = load_diabetes() # 获取输入和标签 data, target = diabetes.data, diabetes.target # 打乱数据集 X, y = shuffle(data, target, random_state=13) # 按照8/2划分训练集和测试集 offset = int(X.shape[0] * 0.8) # 训练集 X_train, y_train = X[:offset], y[:offset] # 测试集 X_test, y_test = X[offset:], y[offset:] # 将训练集改为列向量的形式 y_train = y_train.reshape((-1,1)) 
# 将验证集改为列向量的形式 y_test = y_test.reshape((-1,1)) # 打印训练集和测试集维度 print("X_train's shape: ", X_train.shape) print("X_test's shape: ", X_test.shape) print("y_train's shape: ", y_train.shape) print("y_test's shape: ", y_test.shape) # 线性回归模型训练 loss_his, params, grads = linear_train(X_train, y_train, 0.01, 200000) # 打印训练后得到模型参数 print(params) ### 定义线性回归预测函数 def predict(X, params): ''' 输入: X:测试数据集 params:模型训练参数 输出: y_pred:模型预测结果 ''' # 获取模型参数 w = params['w'] b = params['b'] # 预测 y_pred = np.dot(X, w) + b return y_pred # 基于测试集的预测 y_pred = predict(X_test, params) # 打印前五个预测值 y_pred[:5] print(y_test[:5]) ### 定义R2系数函数 def r2_score(y_test, y_pred): ''' 输入: y_test:测试集标签值 y_pred:测试集预测值 输出: r2:R2系数 ''' # 测试标签均值 y_avg = np.mean(y_test) # 总离差平方和 ss_tot = np.sum((y_test - y_avg)**2) # 残差平方和 ss_res = np.sum((y_test - y_pred)**2) # R2计算 r2 = 1 - (ss_res/ss_tot) return r2 print(r2_score(y_test, y_pred)) import matplotlib.pyplot as plt f = X_test.dot(params['w']) + params['b'] plt.scatter(range(X_test.shape[0]), y_test) plt.plot(f, color = 'darkorange') plt.xlabel('X_test') plt.ylabel('y_test') plt.show(); plt.plot(loss_his, color = 'blue') plt.xlabel('epochs') plt.ylabel('loss') plt.show() from sklearn.utils import shuffle X, y = shuffle(data, target, random_state=13) X = X.astype(np.float32) data = np.concatenate((X, y.reshape((-1,1))), axis=1) data.shape from random import shuffle def k_fold_cross_validation(items, k, randomize=True): if randomize: items = list(items) shuffle(items) slices = [items[i::k] for i in range(k)] for i in range(k): validation = slices[i] training = [item for s in slices if s is not validation for item in s] training = np.array(training) validation = np.array(validation) yield training, validation for training, validation in k_fold_cross_validation(data, 5): X_train = training[:, :10] y_train = training[:, -1].reshape((-1,1)) X_valid = validation[:, :10] y_valid = validation[:, -1].reshape((-1,1)) loss5 = [] #print(X_train.shape, y_train.shape, X_valid.shape, 
y_valid.shape) loss, params, grads = linar_train(X_train, y_train, 0.001, 100000) loss5.append(loss) score = np.mean(loss5) print('five kold cross validation score is', score) y_pred = predict(X_valid, params) valid_score = np.sum(((y_pred-y_valid)**2))/len(X_valid) print('valid score is', valid_score) from sklearn.datasets import load_diabetes from sklearn.utils import shuffle from sklearn.model_selection import train_test_split diabetes = load_diabetes() data = diabetes.data target = diabetes.target X, y = shuffle(data, target, random_state=13) X = X.astype(np.float32) y = y.reshape((-1, 1)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) print(X_train.shape, y_train.shape, X_test.shape, y_test.shape) import matplotlib.pyplot as plt import numpy as np from sklearn import linear_model from sklearn.metrics import mean_squared_error, r2_score regr = linear_model.LinearRegression() regr.fit(X_train, y_train) y_pred = regr.predict(X_test) # The coefficients print('Coefficients: \n', regr.coef_) # The mean squared error print("Mean squared error: %.2f" % mean_squared_error(y_test, y_pred)) # Explained variance score: 1 is perfect prediction print('Variance score: %.2f' % r2_score(y_test, y_pred)) print(r2_score(y_test, y_pred)) # Plot outputs plt.scatter(range(X_test.shape[0]), y_test, color='red') plt.plot(range(X_test.shape[0]), y_pred, color='blue', linewidth=3) plt.xticks(()) plt.yticks(()) plt.show(); import numpy as np import pandas as pd from sklearn.utils import shuffle from sklearn.model_selection import KFold from sklearn.linear_model import LinearRegression ### 交叉验证 def cross_validate(model, x, y, folds=5, repeats=5): ypred = np.zeros((len(y),repeats)) score = np.zeros(repeats) for r in range(repeats): i=0 print('Cross Validating - Run', str(r + 1), 'out of', str(repeats)) x,y = shuffle(x, y, random_state=r) #shuffle data before each repeat kf = KFold(n_splits=folds,random_state=i+1000) #random split, different each 
time for train_ind, test_ind in kf.split(x): print('Fold', i+1, 'out of', folds) xtrain,ytrain = x[train_ind,:],y[train_ind] xtest,ytest = x[test_ind,:],y[test_ind] model.fit(xtrain, ytrain) #print(xtrain.shape, ytrain.shape, xtest.shape, ytest.shape) ypred[test_ind]=model.predict(xtest) i+=1 score[r] = R2(ypred[:,r],y) print('\nOverall R2:',str(score)) print('Mean:',str(np.mean(score))) print('Deviation:',str(np.std(score))) pass cross_validate(regr, X, y, folds=5, repeats=5) ```
github_jupyter
# Transformer的Keras实现 参考tensorflow的官方教程:[transformer](https://www.tensorflow.org/alpha/tutorials/sequences/transformer) * tensorflow==2.0.0a ``` !pip install -q tensorflow==2.0.0a !pip install -q matplotlib import tensorflow as tf import numpy as np import matplotlib.pyplot as plt tf.__version__ ``` ## positional encoding $$PE_{pos, 2i} = sin(\frac{pos}{10000^{2i/d}})$$ $$PE_{pos, 2i+1} = cos(\frac{pos}{10000^{2i/d}})$$ ``` def get_angles(pos, i, d_model): angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model)) return pos * angle_rates def positional_encoding(position, d_model): angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model) # apply sin to even indices in the array; 2i sines = np.sin(angle_rads[:, 0::2]) # apply cos to odd indices in the array; 2i+1 cosines = np.cos(angle_rads[:, 1::2]) pos_encoding = np.concatenate([sines, cosines], axis=-1) pos_encoding = pos_encoding[np.newaxis, ...] return tf.cast(pos_encoding, dtype=tf.float32) pos_encoding = positional_encoding(50, 512) print (pos_encoding.shape) plt.pcolormesh(pos_encoding[0], cmap='RdBu') plt.xlabel('Depth') plt.xlim((0, 512)) plt.ylabel('Position') plt.colorbar() plt.show() ``` ## Masking maksing分为两种: * padding masking,为了屏蔽对齐的时候补全的位置的词语的影响 * look ahead masking,为了屏蔽未来时间步的词语的影响 ``` def create_padding_mask(seq): seq = tf.cast(tf.math.equal(seq, 0), tf.float32) # add extra dimensions so that we can add the padding # to the attention logits. 
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len) def create_look_ahead_mask(size): mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0) return mask # (seq_len, seq_len) print('padding mask test:') x = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]) padding_mask = create_padding_mask(x) print(padding_mask.shape) print(padding_mask) print('look ahead mask test:') x = tf.random.uniform((1, 3)) temp = create_look_ahead_mask(x.shape[1]) print(temp.shape) print(temp) ``` ## Scaled dot-product attention ``` def scaled_dot_product_attention(q, k, v, mask): """Calculate the attention weights. q, k, v must have matching leading dimensions. The mask has different shapes depending on its type(padding or look ahead) but it must be broadcastable for addition. Args: q: query shape == (..., seq_len_q, depth) k: key shape == (..., seq_len_k, depth) v: value shape == (..., seq_len_v, depth) mask: Float tensor with shape broadcastable to (..., seq_len_q, seq_len_k). Defaults to None. Returns: output, attention_weights """ matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k) # scale matmul_qk dk = tf.cast(tf.shape(k)[-1], tf.float32) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) # add the mask to the scaled tensor. if mask is not None: scaled_attention_logits += (mask * -1e9) # softmax is normalized on the last axis (seq_len_k) so that the scores # add up to 1. 
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) output = tf.matmul(attention_weights, v) # (..., seq_len_v, depth) return output, attention_weights def print_out(q, k, v): temp_out, temp_attn = scaled_dot_product_attention( q, k, v, None) print ('Attention weights are:') print (temp_attn) print ('Output is:') print (temp_out) np.set_printoptions(suppress=True) temp_k = tf.constant([[10,0,0], [0,10,0], [0,0,10], [0,0,10]], dtype=tf.float32) # (4, 3) temp_v = tf.constant([[ 1,0], [ 10,0], [ 100,5], [1000,6]], dtype=tf.float32) # (4, 3) # This `query` aligns with the second `key`, # so the second `value` is returned. temp_q = tf.constant([[0, 10, 0]], dtype=tf.float32) # (1, 3) print_out(temp_q, temp_k, temp_v) print() # This query aligns with a repeated key (third and fourth), # so all associated values get averaged. temp_q = tf.constant([[0, 0, 10]], dtype=tf.float32) # (1, 3) print_out(temp_q, temp_k, temp_v) print() # This query aligns equally with the first and second key, # so their values get averaged. temp_q = tf.constant([[10, 10, 0]], dtype=tf.float32) # (1, 3) print_out(temp_q, temp_k, temp_v) print() temp_q = tf.constant([[0, 0, 10], [0, 10, 0], [10, 10, 0]], dtype=tf.float32) # (3, 3) print_out(temp_q, temp_k, temp_v) ``` ## Multi-head attention ``` class MultiHeadAttention(tf.keras.layers.Layer): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model = d_model assert d_model % self.num_heads == 0 self.depth = d_model // self.num_heads self.wq = tf.keras.layers.Dense(d_model) self.wk = tf.keras.layers.Dense(d_model) self.wv = tf.keras.layers.Dense(d_model) self.dense = tf.keras.layers.Dense(d_model) def split_heads(self, x, batch_size): """Split the last dimension into (num_heads, depth). 
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth) """ x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, v, k, q, mask): batch_size = tf.shape(q)[0] q = self.wq(q) # (batch_size, seq_len, d_model) k = self.wk(k) # (batch_size, seq_len, d_model) v = self.wv(v) # (batch_size, seq_len, d_model) q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth) k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth) v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth) # scaled_attention.shape == (batch_size, num_heads, seq_len_v, depth) # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k) scaled_attention, attention_weights = scaled_dot_product_attention( q, k, v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_v, num_heads, depth) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_v, d_model) output = self.dense(concat_attention) # (batch_size, seq_len_v, d_model) return output, attention_weights temp_mha = MultiHeadAttention(d_model=512, num_heads=8) y = tf.random.uniform((1, 60, 512)) # (batch_size, encoder_sequence, d_model) out, attn = temp_mha(y, k=y, q=y, mask=None) out.shape, attn.shape ``` ## Feed forward network ``` def point_wise_feed_forward_network(d_model, dff): return tf.keras.Sequential([ tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff) tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model) ]) class PointWiseFeedForwardNetwork(tf.keras.Model): def __init__(self, d_model, dff): super(PointWiseFeedForwardNetwork, self).__init__(name='ffn') self.d_model = d_model self.dense_1 = tf.keras.layers.Dense(dff, activation='relu') self.dense_2 = tf.keras.layers.Dense(d_model) def call(self, x): x = self.dense_1(x) return self.dense_2(x) def 
compute_output_shape(self, input_shape): shapes = tf.shape(input_shape).as_list() shapes[-1] = self.d_model return tf.TensorShape(shapes) sample_ffn = point_wise_feed_forward_network(512, 2048) print(sample_ffn(tf.random.uniform((64, 50, 512))).shape) print() ffn = PointWiseFeedForwardNetwork(512, 2048) print(ffn(tf.random.uniform((64, 50, 256))).shape) ``` ## Encoder layer ``` class LayerNormalization(tf.keras.layers.Layer): def __init__(self, epsilon=1e-6, **kwargs): self.eps = epsilon super(LayerNormalization, self).__init__(**kwargs) def build(self, input_shape): self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:], initializer=tf.keras.initializers.Ones(), trainable=True) self.beta = self.add_weight(name='beta', shape=input_shape[-1:], initializer=tf.keras.initializers.Zeros(), trainable=True) super(LayerNormalization, self).build(input_shape) def call(self, x): mean = tf.keras.backend.mean(x, axis=-1, keepdims=True) std = tf.keras.backend.std(x, axis=-1, keepdims=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta def compute_output_shape(self, input_shape): return input_shape class EncoderLayer(tf.keras.layers.Layer): def __init__(self, d_model, num_heads, dff, rate=0.1): super(EncoderLayer, self).__init__() self.mha = MultiHeadAttention(d_model, num_heads) self.ffn = point_wise_feed_forward_network(d_model, dff) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.dropout1 = tf.keras.layers.Dropout(rate) self.dropout2 = tf.keras.layers.Dropout(rate) def call(self, x, training, mask): attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model) attn_output = self.dropout1(attn_output, training=training) out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model) ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model) ffn_output = self.dropout2(ffn_output, training=training) out2 = self.layernorm2(out1 + ffn_output) # 
(batch_size, input_seq_len, d_model) return out2 sample_encoder_layer = EncoderLayer(512, 8, 2048) sample_encoder_layer_output = sample_encoder_layer( tf.random.uniform((64, 43, 512)), False, None) sample_encoder_layer_output.shape # (batch_size, input_seq_len, d_model) class Encoder(tf.keras.layers.Layer): def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size, rate=0.1): super(Encoder, self).__init__() self.d_model = d_model self.num_layers = num_layers self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model) self.pos_encoding = positional_encoding(input_vocab_size, self.d_model) self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)] self.dropout = tf.keras.layers.Dropout(rate) def call(self, x, training, mask): seq_len = tf.shape(x)[1] # adding embedding and position encoding. x = self.embedding(x) # (batch_size, input_seq_len, d_model) x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32)) x += self.pos_encoding[:, :seq_len, :] x = self.dropout(x, training=training) for i in range(self.num_layers): x = self.enc_layers[i](x, training, mask) return x # (batch_size, input_seq_len, d_model) sample_encoder = Encoder(num_layers=2, d_model=512, num_heads=8, dff=2048, input_vocab_size=8500) sample_encoder_output = sample_encoder(tf.random.uniform((64, 62)), training=False, mask=None) print (sample_encoder_output.shape) # (batch_size, input_seq_len, d_model) ``` ## Decoder layer ``` class DecoderLayer(tf.keras.layers.Layer): def __init__(self, d_model, num_heads, dff, rate=0.1): super(DecoderLayer, self).__init__() self.mha1 = MultiHeadAttention(d_model, num_heads) self.mha2 = MultiHeadAttention(d_model, num_heads) self.ffn = point_wise_feed_forward_network(d_model, dff) self.layernorm1 = LayerNormalization(epsilon=1e-6) self.layernorm2 = LayerNormalization(epsilon=1e-6) self.layernorm3 = LayerNormalization(epsilon=1e-6) self.dropout1 = tf.keras.layers.Dropout(rate) self.dropout2 = 
tf.keras.layers.Dropout(rate) self.dropout3 = tf.keras.layers.Dropout(rate) def call(self, x, enc_output, training, look_ahead_mask, padding_mask): # enc_output.shape == (batch_size, input_seq_len, d_model) attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask) # (batch_size, target_seq_len, d_model) attn1 = self.dropout1(attn1, training=training) out1 = self.layernorm1(attn1 + x) attn2, attn_weights_block2 = self.mha2( enc_output, enc_output, out1, padding_mask) # (batch_size, target_seq_len, d_model) attn2 = self.dropout2(attn2, training=training) out2 = self.layernorm2(attn2 + out1) # (batch_size, target_seq_len, d_model) ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model) ffn_output = self.dropout3(ffn_output, training=training) out3 = self.layernorm3(ffn_output + out2) # (batch_size, target_seq_len, d_model) return out3, attn_weights_block1, attn_weights_block2 sample_decoder_layer = DecoderLayer(512, 8, 2048) sample_decoder_layer_output, _, _ = sample_decoder_layer( tf.random.uniform((64, 50, 512)), sample_encoder_layer_output, False, None, None) sample_decoder_layer_output.shape # (batch_size, target_seq_len, d_model) class Decoder(tf.keras.layers.Layer): def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size, rate=0.1): super(Decoder, self).__init__() self.d_model = d_model self.num_layers = num_layers self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model) self.pos_encoding = positional_encoding(target_vocab_size, self.d_model) self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)] self.dropout = tf.keras.layers.Dropout(rate) def call(self, x, enc_output, training, look_ahead_mask, padding_mask): seq_len = tf.shape(x)[1] attention_weights = {} x = self.embedding(x) # (batch_size, target_seq_len, d_model) x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32)) x += self.pos_encoding[:, :seq_len, :] x = self.dropout(x, training=training) for i in 
range(self.num_layers): x, block1, block2 = self.dec_layers[i](x, enc_output, training, look_ahead_mask, padding_mask) attention_weights['decoder_layer{}_block1'.format(i+1)] = block1 attention_weights['decoder_layer{}_block2'.format(i+1)] = block2 # x.shape == (batch_size, target_seq_len, d_model) return x, attention_weights sample_decoder = Decoder(num_layers=2, d_model=512, num_heads=8, dff=2048, target_vocab_size=8000) output, attn = sample_decoder(tf.random.uniform((64, 26)), enc_output=sample_encoder_output, training=False, look_ahead_mask=None, padding_mask=None) output.shape, attn['decoder_layer2_block2'].shape ``` ## Transformer ``` class Transformer(tf.keras.Model): def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size, target_vocab_size, rate=0.1): super(Transformer, self).__init__() self.encoder = Encoder(num_layers, d_model, num_heads, dff, input_vocab_size, rate) self.decoder = Decoder(num_layers, d_model, num_heads, dff, target_vocab_size, rate) self.final_layer = tf.keras.layers.Dense(target_vocab_size) def call(self, inp, tar, training, enc_padding_mask, look_ahead_mask, dec_padding_mask): enc_output = self.encoder(inp, training, enc_padding_mask) # (batch_size, inp_seq_len, d_model) # dec_output.shape == (batch_size, tar_seq_len, d_model) dec_output, attention_weights = self.decoder( tar, enc_output, training, look_ahead_mask, dec_padding_mask) final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size) return final_output, attention_weights sample_transformer = Transformer( num_layers=2, d_model=512, num_heads=8, dff=2048, input_vocab_size=8500, target_vocab_size=8000) temp_input = tf.random.uniform((64, 62)) temp_target = tf.random.uniform((64, 26)) fn_out, _ = sample_transformer(temp_input, temp_target, training=False, enc_padding_mask=None, look_ahead_mask=None, dec_padding_mask=None) fn_out.shape # (batch_size, tar_seq_len, target_vocab_size) ``` ## HParams ``` num_layers = 4 d_model = 128 dff 
= 512 num_heads = 8 input_vocab_size = 100 target_vocab_size = 100 dropout_rate = 0.1 ``` ## Optimizer ``` class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2 = step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) temp_learning_rate_schedule = CustomSchedule(d_model) plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) plt.ylabel("Learning Rate") plt.xlabel("Train Step") ```
github_jupyter
# RadarCOVID-Report ## Data Extraction ``` import datetime import json import logging import os import shutil import tempfile import textwrap import uuid import matplotlib.pyplot as plt import matplotlib.ticker import numpy as np import pandas as pd import pycountry import retry import seaborn as sns %matplotlib inline current_working_directory = os.environ.get("PWD") if current_working_directory: os.chdir(current_working_directory) sns.set() matplotlib.rcParams["figure.figsize"] = (15, 6) extraction_datetime = datetime.datetime.utcnow() extraction_date = extraction_datetime.strftime("%Y-%m-%d") extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1) extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d") extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H") current_hour = datetime.datetime.utcnow().hour are_today_results_partial = current_hour != 23 ``` ### Constants ``` from Modules.ExposureNotification import exposure_notification_io spain_region_country_code = "ES" germany_region_country_code = "DE" default_backend_identifier = spain_region_country_code backend_generation_days = 7 * 2 daily_summary_days = 7 * 4 * 3 daily_plot_days = 7 * 4 tek_dumps_load_limit = daily_summary_days + 1 ``` ### Parameters ``` environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER") if environment_backend_identifier: report_backend_identifier = environment_backend_identifier else: report_backend_identifier = default_backend_identifier report_backend_identifier environment_enable_multi_backend_download = \ os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD") if environment_enable_multi_backend_download: report_backend_identifiers = None else: report_backend_identifiers = [report_backend_identifier] report_backend_identifiers environment_invalid_shared_diagnoses_dates = \ os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES") if 
environment_invalid_shared_diagnoses_dates: invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",") else: invalid_shared_diagnoses_dates = [] invalid_shared_diagnoses_dates ``` ### COVID-19 Cases ``` report_backend_client = \ exposure_notification_io.get_backend_client_with_identifier( backend_identifier=report_backend_identifier) @retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10)) def download_cases_dataframe(): return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv") confirmed_df_ = download_cases_dataframe() confirmed_df_.iloc[0] confirmed_df = confirmed_df_.copy() confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]] confirmed_df.rename( columns={ "date": "sample_date", "iso_code": "country_code", }, inplace=True) def convert_iso_alpha_3_to_alpha_2(x): try: return pycountry.countries.get(alpha_3=x).alpha_2 except Exception as e: logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}") return None confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2) confirmed_df.dropna(inplace=True) confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True) confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_df.sort_values("sample_date", inplace=True) confirmed_df.tail() confirmed_days = pd.date_range( start=confirmed_df.iloc[0].sample_date, end=extraction_datetime) confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"]) confirmed_days_df["sample_date_string"] = \ confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_days_df.tail() def sort_source_regions_for_display(source_regions: list) -> list: if report_backend_identifier in source_regions: source_regions = [report_backend_identifier] + \ list(sorted(set(source_regions).difference([report_backend_identifier]))) else: source_regions = list(sorted(source_regions)) 
return source_regions report_source_regions = report_backend_client.source_regions_for_date( date=extraction_datetime.date()) report_source_regions = sort_source_regions_for_display( source_regions=report_source_regions) report_source_regions def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None): source_regions_at_date_df = confirmed_days_df.copy() source_regions_at_date_df["source_regions_at_date"] = \ source_regions_at_date_df.sample_date.apply( lambda x: source_regions_for_date_function(date=x)) source_regions_at_date_df.sort_values("sample_date", inplace=True) source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \ source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x))) source_regions_at_date_df.tail() #%% source_regions_for_summary_df_ = \ source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy() source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True) source_regions_for_summary_df_.tail() #%% confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"] confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns) for source_regions_group, source_regions_group_series in \ source_regions_at_date_df.groupby("_source_regions_group"): source_regions_set = set(source_regions_group.split(",")) confirmed_source_regions_set_df = \ confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy() confirmed_source_regions_group_df = \ confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \ .reset_index().sort_values("sample_date") confirmed_source_regions_group_df = \ confirmed_source_regions_group_df.merge( confirmed_days_df[["sample_date_string"]].rename( columns={"sample_date_string": "sample_date"}), how="right") confirmed_source_regions_group_df["new_cases"] = \ confirmed_source_regions_group_df["new_cases"].clip(lower=0) confirmed_source_regions_group_df["covid_cases"] = \ 
confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round() confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[confirmed_output_columns] confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan) confirmed_source_regions_group_df.fillna(method="ffill", inplace=True) confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[ confirmed_source_regions_group_df.sample_date.isin( source_regions_group_series.sample_date_string)] confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df) result_df = confirmed_output_df.copy() result_df.tail() #%% result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True) result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left") result_df.sort_values("sample_date_string", inplace=True) result_df.fillna(method="ffill", inplace=True) result_df.tail() #%% result_df[["new_cases", "covid_cases"]].plot() if columns_suffix: result_df.rename( columns={ "new_cases": "new_cases_" + columns_suffix, "covid_cases": "covid_cases_" + columns_suffix}, inplace=True) return result_df, source_regions_for_summary_df_ confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe( report_backend_client.source_regions_for_date) confirmed_es_df, _ = get_cases_dataframe( lambda date: [spain_region_country_code], columns_suffix=spain_region_country_code.lower()) ``` ### Extract API TEKs ``` raw_zip_path_prefix = "Data/TEKs/Raw/" base_backend_identifiers = [report_backend_identifier] multi_backend_exposure_keys_df = \ exposure_notification_io.download_exposure_keys_from_backends( backend_identifiers=report_backend_identifiers, generation_days=backend_generation_days, fail_on_error_backend_identifiers=base_backend_identifiers, save_raw_zip_path_prefix=raw_zip_path_prefix) multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"] multi_backend_exposure_keys_df.rename( 
columns={ "generation_datetime": "sample_datetime", "generation_date_string": "sample_date_string", }, inplace=True) multi_backend_exposure_keys_df.head() early_teks_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.rolling_period < 144].copy() early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6 early_teks_df[early_teks_df.sample_date_string != extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) early_teks_df[early_teks_df.sample_date_string == extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[ "sample_date_string", "region", "key_data"]] multi_backend_exposure_keys_df.head() active_regions = \ multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() active_regions multi_backend_summary_df = multi_backend_exposure_keys_df.groupby( ["sample_date_string", "region"]).key_data.nunique().reset_index() \ .pivot(index="sample_date_string", columns="region") \ .sort_index(ascending=False) multi_backend_summary_df.rename( columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) multi_backend_summary_df.rename_axis("sample_date", inplace=True) multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int) multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days) multi_backend_summary_df.head() def compute_keys_cross_sharing(x): teks_x = x.key_data_x.item() common_teks = set(teks_x).intersection(x.key_data_y.item()) common_teks_fraction = len(common_teks) / len(teks_x) return pd.Series(dict( common_teks=common_teks, common_teks_fraction=common_teks_fraction, )) multi_backend_exposure_keys_by_region_df = \ multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index() multi_backend_exposure_keys_by_region_df["_merge"] = True multi_backend_exposure_keys_by_region_combination_df = \ 
multi_backend_exposure_keys_by_region_df.merge( multi_backend_exposure_keys_by_region_df, on="_merge") multi_backend_exposure_keys_by_region_combination_df.drop( columns=["_merge"], inplace=True) if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1: multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_combination_df[ multi_backend_exposure_keys_by_region_combination_df.region_x != multi_backend_exposure_keys_by_region_combination_df.region_y] multi_backend_exposure_keys_cross_sharing_df = \ multi_backend_exposure_keys_by_region_combination_df \ .groupby(["region_x", "region_y"]) \ .apply(compute_keys_cross_sharing) \ .reset_index() multi_backend_cross_sharing_summary_df = \ multi_backend_exposure_keys_cross_sharing_df.pivot_table( values=["common_teks_fraction"], columns="region_x", index="region_y", aggfunc=lambda x: x.item()) multi_backend_cross_sharing_summary_df multi_backend_without_active_region_exposure_keys_df = \ multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier] multi_backend_without_active_region = \ multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() multi_backend_without_active_region exposure_keys_summary_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.region == report_backend_identifier] exposure_keys_summary_df.drop(columns=["region"], inplace=True) exposure_keys_summary_df = \ exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame() exposure_keys_summary_df = \ exposure_keys_summary_df.reset_index().set_index("sample_date_string") exposure_keys_summary_df.sort_index(ascending=False, inplace=True) exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) exposure_keys_summary_df.head() ``` ### Dump API TEKs ``` tek_list_df = multi_backend_exposure_keys_df[ 
["sample_date_string", "region", "key_data"]].copy() tek_list_df["key_data"] = tek_list_df["key_data"].apply(str) tek_list_df.rename(columns={ "sample_date_string": "sample_date", "key_data": "tek_list"}, inplace=True) tek_list_df = tek_list_df.groupby( ["sample_date", "region"]).tek_list.unique().reset_index() tek_list_df["extraction_date"] = extraction_date tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour tek_list_path_prefix = "Data/TEKs/" tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json" tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json" tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json" for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]: os.makedirs(os.path.dirname(path), exist_ok=True) tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier] tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json( tek_list_current_path, lines=True, orient="records") tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json( tek_list_daily_path, lines=True, orient="records") tek_list_base_df.to_json( tek_list_hourly_path, lines=True, orient="records") tek_list_base_df.head() ``` ### Load TEK Dumps ``` import glob def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame: extracted_teks_df = pd.DataFrame(columns=["region"]) file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json")))) if limit: file_paths = file_paths[:limit] for file_path in file_paths: logging.info(f"Loading TEKs from '{file_path}'...") iteration_extracted_teks_df = pd.read_json(file_path, lines=True) extracted_teks_df = extracted_teks_df.append( iteration_extracted_teks_df, sort=False) extracted_teks_df["region"] = \ extracted_teks_df.region.fillna(spain_region_country_code).copy() if region: extracted_teks_df = \ 
extracted_teks_df[extracted_teks_df.region == region] return extracted_teks_df daily_extracted_teks_df = load_extracted_teks( mode="Daily", region=report_backend_identifier, limit=tek_dumps_load_limit) daily_extracted_teks_df.head() exposure_keys_summary_df_ = daily_extracted_teks_df \ .sort_values("extraction_date", ascending=False) \ .groupby("sample_date").tek_list.first() \ .to_frame() exposure_keys_summary_df_.index.name = "sample_date_string" exposure_keys_summary_df_["tek_list"] = \ exposure_keys_summary_df_.tek_list.apply(len) exposure_keys_summary_df_ = exposure_keys_summary_df_ \ .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \ .sort_index(ascending=False) exposure_keys_summary_df = exposure_keys_summary_df_ exposure_keys_summary_df.head() ``` ### Daily New TEKs ``` tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply( lambda x: set(sum(x, []))).reset_index() tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True) tek_list_df.head() def compute_teks_by_generation_and_upload_date(date): day_new_teks_set_df = tek_list_df.copy().diff() try: day_new_teks_set = day_new_teks_set_df[ day_new_teks_set_df.index == date].tek_list.item() except ValueError: day_new_teks_set = None if pd.isna(day_new_teks_set): day_new_teks_set = set() day_new_teks_df = daily_extracted_teks_df[ daily_extracted_teks_df.extraction_date == date].copy() day_new_teks_df["shared_teks"] = \ day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set)) day_new_teks_df["shared_teks"] = \ day_new_teks_df.shared_teks.apply(len) day_new_teks_df["upload_date"] = date day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True) day_new_teks_df = day_new_teks_df[ ["upload_date", "generation_date", "shared_teks"]] day_new_teks_df["generation_to_upload_days"] = \ (pd.to_datetime(day_new_teks_df.upload_date) - pd.to_datetime(day_new_teks_df.generation_date)).dt.days day_new_teks_df = 
day_new_teks_df[day_new_teks_df.shared_teks > 0] return day_new_teks_df shared_teks_generation_to_upload_df = pd.DataFrame() for upload_date in daily_extracted_teks_df.extraction_date.unique(): shared_teks_generation_to_upload_df = \ shared_teks_generation_to_upload_df.append( compute_teks_by_generation_and_upload_date(date=upload_date)) shared_teks_generation_to_upload_df \ .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True) shared_teks_generation_to_upload_df.tail() today_new_teks_df = \ shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.upload_date == extraction_date].copy() today_new_teks_df.tail() if not today_new_teks_df.empty: today_new_teks_df.set_index("generation_to_upload_days") \ .sort_index().shared_teks.plot.bar() generation_to_upload_period_pivot_df = \ shared_teks_generation_to_upload_df[ ["upload_date", "generation_to_upload_days", "shared_teks"]] \ .pivot(index="upload_date", columns="generation_to_upload_days") \ .sort_index(ascending=False).fillna(0).astype(int) \ .droplevel(level=0, axis=1) generation_to_upload_period_pivot_df.head() new_tek_df = tek_list_df.diff().tek_list.apply( lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index() new_tek_df.rename(columns={ "tek_list": "shared_teks_by_upload_date", "extraction_date": "sample_date_string",}, inplace=True) new_tek_df.tail() shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \ [["upload_date", "shared_teks"]].rename( columns={ "upload_date": "sample_date_string", "shared_teks": "shared_teks_uploaded_on_generation_date", }) shared_teks_uploaded_on_generation_date_df.head() estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \ .groupby(["upload_date"]).shared_teks.max().reset_index() \ .sort_values(["upload_date"], ascending=False) \ .rename(columns={ "upload_date": "sample_date_string", "shared_teks": 
"shared_diagnoses", }) invalid_shared_diagnoses_dates_mask = \ estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates) estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0 estimated_shared_diagnoses_df.head() ``` ### Hourly New TEKs ``` hourly_extracted_teks_df = load_extracted_teks( mode="Hourly", region=report_backend_identifier, limit=25) hourly_extracted_teks_df.head() hourly_new_tek_count_df = hourly_extracted_teks_df \ .groupby("extraction_date_with_hour").tek_list. \ apply(lambda x: set(sum(x, []))).reset_index().copy() hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \ .sort_index(ascending=True) hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff() hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply( lambda x: len(x) if not pd.isna(x) else 0) hourly_new_tek_count_df.rename(columns={ "new_tek_count": "shared_teks_by_upload_date"}, inplace=True) hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[ "extraction_date_with_hour", "shared_teks_by_upload_date"]] hourly_new_tek_count_df.head() hourly_summary_df = hourly_new_tek_count_df.copy() hourly_summary_df.set_index("extraction_date_with_hour", inplace=True) hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index() hourly_summary_df["datetime_utc"] = pd.to_datetime( hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H") hourly_summary_df.set_index("datetime_utc", inplace=True) hourly_summary_df = hourly_summary_df.tail(-1) hourly_summary_df.head() ``` ### Official Statistics ``` import requests import pandas.io.json official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics") official_stats_response.raise_for_status() official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json()) official_stats_df = official_stats_df_.copy() official_stats_df["date"] = 
pd.to_datetime(official_stats_df["date"], dayfirst=True) official_stats_df.head() official_stats_column_map = { "date": "sample_date", "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated", "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated", } accumulated_suffix = "_accumulated" accumulated_values_columns = \ list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values())) interpolated_values_columns = \ list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns)) official_stats_df = \ official_stats_df[official_stats_column_map.keys()] \ .rename(columns=official_stats_column_map) official_stats_df["extraction_date"] = extraction_date official_stats_df.head() official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json" previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True) previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True) official_stats_df = official_stats_df.append(previous_official_stats_df) official_stats_df.head() official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)] official_stats_df.sort_values("extraction_date", ascending=False, inplace=True) official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True) official_stats_df.head() official_stats_stored_df = official_stats_df.copy() official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d") official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True) official_stats_df.drop(columns=["extraction_date"], inplace=True) official_stats_df = confirmed_days_df.merge(official_stats_df, how="left") official_stats_df.sort_values("sample_date", ascending=False, inplace=True) official_stats_df.head() official_stats_df[accumulated_values_columns] = \ 
official_stats_df[accumulated_values_columns] \ .astype(float).interpolate(limit_area="inside") official_stats_df[interpolated_values_columns] = \ official_stats_df[accumulated_values_columns].diff(periods=-1) official_stats_df.drop(columns="sample_date", inplace=True) official_stats_df.head() ``` ### Data Merge ``` result_summary_df = exposure_keys_summary_df.merge( new_tek_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( official_stats_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df = confirmed_es_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string) result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left") result_summary_df.set_index(["sample_date", "source_regions"], inplace=True) result_summary_df.drop(columns=["sample_date_string"], inplace=True) result_summary_df.sort_index(ascending=False, inplace=True) result_summary_df.head() with pd.option_context("mode.use_inf_as_na", True): result_summary_df = result_summary_df.fillna(0).astype(int) result_summary_df["teks_per_shared_diagnosis"] = \ (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0) result_summary_df["shared_diagnoses_per_covid_case"] = \ (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0) 
result_summary_df["shared_diagnoses_per_covid_case_es"] = \
    (result_summary_df.shared_diagnoses_es /
     result_summary_df.covid_cases_es).fillna(0)
result_summary_df.head(daily_plot_days)


def compute_aggregated_results_summary(days) -> pd.DataFrame:
    """Aggregate the daily summary over a rolling ``days``-day window.

    Returns one row per date (most recent first) holding windowed sums of the
    case/TEK/diagnosis counters plus the derived ratio columns.
    """
    aggregated_result_summary_df = result_summary_df.copy()
    # Zero the COVID case counts on days with no shared diagnoses, so the
    # usage-ratio denominators only include days that contributed diagnoses.
    aggregated_result_summary_df["covid_cases_for_ratio"] = \
        aggregated_result_summary_df.covid_cases.mask(
            aggregated_result_summary_df.shared_diagnoses == 0, 0)
    aggregated_result_summary_df["covid_cases_for_ratio_es"] = \
        aggregated_result_summary_df.covid_cases_es.mask(
            aggregated_result_summary_df.shared_diagnoses_es == 0, 0)
    # Rolling windows must run oldest-to-newest; flip back to newest-first after.
    aggregated_result_summary_df = aggregated_result_summary_df \
        .sort_index(ascending=True).fillna(0).rolling(days).agg({
            "covid_cases": "sum",
            "covid_cases_es": "sum",
            "covid_cases_for_ratio": "sum",
            "covid_cases_for_ratio_es": "sum",
            "shared_teks_by_generation_date": "sum",
            "shared_teks_by_upload_date": "sum",
            "shared_diagnoses": "sum",
            "shared_diagnoses_es": "sum",
        }).sort_index(ascending=False)
    with pd.option_context("mode.use_inf_as_na", True):
        aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int)
    # FIX: the daily metric of the same name divides TEKs by shared diagnoses;
    # this previously divided by covid_cases_for_ratio (apparent copy-paste
    # from the usage-ratio columns below), making the windowed value
    # inconsistent with the daily table.
    aggregated_result_summary_df["teks_per_shared_diagnosis"] = \
        (aggregated_result_summary_df.shared_teks_by_upload_date /
         aggregated_result_summary_df.shared_diagnoses).fillna(0)
    aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \
        (aggregated_result_summary_df.shared_diagnoses /
         aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
    aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
        (aggregated_result_summary_df.shared_diagnoses_es /
         aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0)
    return aggregated_result_summary_df

aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7)
aggregated_result_with_7_days_window_summary_df.head()

# Record [0] is the window ending today (a partial day); [1] is presumably the
# latest complete window -- confirm against are_today_results_partial.
last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1]
last_7_days_summary

# NOTE(review): window length 13 disagrees with the "_14_days_" name (the
# 7-day window above uses days=7) -- confirm whether 13 is deliberate
# (e.g. excluding the partial current day) or an off-by-one.
aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13)
last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1]
last_14_days_summary
```

## Report Results

```
# Internal column/index names -> human-readable labels for the published
# tables and plots (\u00A0 is a non-breaking space inside a label).
display_column_name_mapping = {
    "sample_date": "Sample\u00A0Date\u00A0(UTC)",
    "source_regions": "Source Countries",
    "datetime_utc": "Timestamp (UTC)",
    "upload_date": "Upload Date (UTC)",
    "generation_to_upload_days": "Generation to Upload Period in Days",
    "region": "Backend",
    "region_x": "Backend\u00A0(A)",
    "region_y": "Backend\u00A0(B)",
    "common_teks": "Common TEKs Shared Between Backends",
    "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
    "covid_cases": "COVID-19 Cases (Source Countries)",
    "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)",
    "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)",
    "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)",
    "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)",
    "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)",
    "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)",
    "covid_cases_es": "COVID-19 Cases (Spain)",
    "app_downloads_es": "App Downloads (Spain – Official)",
    "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)",
    "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)",
}

# Columns published in the daily summary table, in display order.
summary_columns = [
    "covid_cases",
    "shared_teks_by_generation_date",
    "shared_teks_by_upload_date",
    "shared_teks_uploaded_on_generation_date",
    "shared_diagnoses",
    "teks_per_shared_diagnosis",
    "shared_diagnoses_per_covid_case",
    "covid_cases_es",
    "app_downloads_es",
    "shared_diagnoses_es",
    "shared_diagnoses_per_covid_case_es",
]

# Ratio columns rendered with a percent axis in the summary plots.
summary_percentage_columns= [
    "shared_diagnoses_per_covid_case_es",
    "shared_diagnoses_per_covid_case",
]
```

### Daily Summary Table

```
# Keep a full backup before narrowing result_summary_df to the display columns.
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping)
result_summary_with_display_names_df
```

### Daily Summary Plots

```
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
    .droplevel(level=["source_regions"]) \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping)

# One bar subplot per summary column, oldest date first.
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
    title=f"Daily Summary",
    rot=45, subplots=True, figsize=(15, 30), legend=False)
ax_ = summary_ax_list[0]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))

# Format the ratio subplots as percentages (1.0 == 100%).
for percentage_column in summary_percentage_columns:
    percentage_column_index = summary_columns.index(percentage_column)
    summary_ax_list[percentage_column_index].yaxis \
        .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
```

### Daily Generation to Upload Period Table

```
display_generation_to_upload_period_pivot_df = \
    generation_to_upload_period_pivot_df \
        .head(backend_generation_days)
# Notebook display only: the renamed frame below is not assigned anywhere.
display_generation_to_upload_period_pivot_df \
    .head(backend_generation_days) \
    .rename_axis(columns=display_column_name_mapping) \
    .rename_axis(index=display_column_name_mapping)

fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
    figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
    "Shared TEKs Generation to Upload Period Table")
sns.heatmap(
    data=display_generation_to_upload_period_pivot_df
        .rename_axis(columns=display_column_name_mapping)
        .rename_axis(index=display_column_name_mapping),
    fmt=".0f",
    annot=True,
    ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()
``` ### Hourly Summary Plots ``` hourly_summary_ax_list = hourly_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .plot.bar( title=f"Last 24h Summary", rot=45, subplots=True, legend=False) ax_ = hourly_summary_ax_list[-1] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.9) _ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist())) ``` ### Publish Results ``` github_repository = os.environ.get("GITHUB_REPOSITORY") if github_repository is None: github_repository = "pvieito/Radar-STATS" github_project_base_url = "https://github.com/" + github_repository display_formatters = { display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "", display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "", } general_columns = \ list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values())) general_formatter = lambda x: f"{x}" if x != 0 else "" display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns))) daily_summary_table_html = result_summary_with_display_names_df \ .head(daily_plot_days) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .to_html(formatters=display_formatters) multi_backend_summary_table_html = multi_backend_summary_df \ .head(daily_plot_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html(formatters=display_formatters) def format_multi_backend_cross_sharing_fraction(x): if pd.isna(x): return "-" elif round(x * 100, 1) == 0: return "" else: return f"{x:.1%}" multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \ 
.rename_axis(columns=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) \ .to_html( classes="table-center", formatters=display_formatters, float_format=format_multi_backend_cross_sharing_fraction) multi_backend_cross_sharing_summary_table_html = \ multi_backend_cross_sharing_summary_table_html \ .replace("<tr>","<tr style=\"text-align: center;\">") extraction_date_result_summary_df = \ result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date] extraction_date_result_hourly_summary_df = \ hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour] covid_cases = \ extraction_date_result_summary_df.covid_cases.item() shared_teks_by_generation_date = \ extraction_date_result_summary_df.shared_teks_by_generation_date.item() shared_teks_by_upload_date = \ extraction_date_result_summary_df.shared_teks_by_upload_date.item() shared_diagnoses = \ extraction_date_result_summary_df.shared_diagnoses.item() teks_per_shared_diagnosis = \ extraction_date_result_summary_df.teks_per_shared_diagnosis.item() shared_diagnoses_per_covid_case = \ extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item() shared_teks_by_upload_date_last_hour = \ extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int) display_source_regions = ", ".join(report_source_regions) if len(report_source_regions) == 1: display_brief_source_regions = report_source_regions[0] else: display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺" def get_temporary_image_path() -> str: return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png") def save_temporary_plot_image(ax): if isinstance(ax, np.ndarray): ax = ax[0] media_path = get_temporary_image_path() ax.get_figure().savefig(media_path) return media_path def save_temporary_dataframe_image(df): import dataframe_image as dfi df = df.copy() df_styler = 
df.style.format(display_formatters) media_path = get_temporary_image_path() dfi.export(df_styler, media_path) return media_path summary_plots_image_path = save_temporary_plot_image( ax=summary_ax_list) summary_table_image_path = save_temporary_dataframe_image( df=result_summary_with_display_names_df) hourly_summary_plots_image_path = save_temporary_plot_image( ax=hourly_summary_ax_list) multi_backend_summary_table_image_path = save_temporary_dataframe_image( df=multi_backend_summary_df) generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image( ax=generation_to_upload_period_pivot_table_ax) ``` ### Save Results ``` report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-" result_summary_df.to_csv( report_resources_path_prefix + "Summary-Table.csv") result_summary_df.to_html( report_resources_path_prefix + "Summary-Table.html") hourly_summary_df.to_csv( report_resources_path_prefix + "Hourly-Summary-Table.csv") multi_backend_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Summary-Table.csv") multi_backend_cross_sharing_summary_df.to_csv( report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv") generation_to_upload_period_pivot_df.to_csv( report_resources_path_prefix + "Generation-Upload-Period-Table.csv") _ = shutil.copyfile( summary_plots_image_path, report_resources_path_prefix + "Summary-Plots.png") _ = shutil.copyfile( summary_table_image_path, report_resources_path_prefix + "Summary-Table.png") _ = shutil.copyfile( hourly_summary_plots_image_path, report_resources_path_prefix + "Hourly-Summary-Plots.png") _ = shutil.copyfile( multi_backend_summary_table_image_path, report_resources_path_prefix + "Multi-Backend-Summary-Table.png") _ = shutil.copyfile( generation_to_upload_period_pivot_table_image_path, report_resources_path_prefix + "Generation-Upload-Period-Table.png") ``` ### Publish Results as JSON ``` def generate_summary_api_results(df: pd.DataFrame) -> list: api_df = 
df.reset_index().copy() api_df["sample_date_string"] = \ api_df["sample_date"].dt.strftime("%Y-%m-%d") api_df["source_regions"] = \ api_df["source_regions"].apply(lambda x: x.split(",")) return api_df.to_dict(orient="records") summary_api_results = \ generate_summary_api_results(df=result_summary_df) today_summary_api_results = \ generate_summary_api_results(df=extraction_date_result_summary_df)[0] summary_results = dict( backend_identifier=report_backend_identifier, source_regions=report_source_regions, extraction_datetime=extraction_datetime, extraction_date=extraction_date, extraction_date_with_hour=extraction_date_with_hour, last_hour=dict( shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour, shared_diagnoses=0, ), today=today_summary_api_results, last_7_days=last_7_days_summary, last_14_days=last_14_days_summary, daily_results=summary_api_results) summary_results = \ json.loads(pd.Series([summary_results]).to_json(orient="records"))[0] with open(report_resources_path_prefix + "Summary-Results.json", "w") as f: json.dump(summary_results, f, indent=4) ``` ### Publish on README ``` with open("Data/Templates/README.md", "r") as f: readme_contents = f.read() readme_contents = readme_contents.format( extraction_date_with_hour=extraction_date_with_hour, github_project_base_url=github_project_base_url, daily_summary_table_html=daily_summary_table_html, multi_backend_summary_table_html=multi_backend_summary_table_html, multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html, display_source_regions=display_source_regions) with open("README.md", "w") as f: f.write(readme_contents) ``` ### Publish on Twitter ``` enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER") github_event_name = os.environ.get("GITHUB_EVENT_NAME") if enable_share_to_twitter and github_event_name == "schedule" and \ (shared_teks_by_upload_date_last_hour or not are_today_results_partial): import tweepy 
def format_shared_diagnoses_per_covid_case(value) -> str:
    """Format a usage ratio for the tweet: en dash for zero, else an upper bound.

    A zero ratio is shown as "–"; any other value is rendered as
    "≤" followed by the ratio as a two-decimal percentage.
    """
    return "–" if value == 0 else f"≤{value:.2%}"
github_jupyter
Building the dataset of numerical data ``` #### STOP - ONLY if needed # Allows printing full text import pandas as pd pd.set_option('display.max_colwidth', None) #mid_keywords = best_keywords(data, 1, 0.49, 0.51) # same as above, but for average papers #low_keywords = best_keywords(data, 1, 0.03, 0.05) # same as above, but for poor papers ### PUT MAIN HERE ### # Machine Learning Challenge # Course: Machine Learning (880083-M-6) # Group 58 ########################################## # Import packages # ########################################## import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy.stats import pearsonr import yake #NOTE: with Anaconda: conda install -c conda-forge yake ########################################## # Import self-made functions # ########################################## from CODE.data_preprocessing.split_val import split_val from CODE.data_preprocessing.find_outliers_tukey import find_outliers_tukey #feature based on the title of the paper from CODE.features.length_title import length_title # features based on 'field_of_study' column from CODE.features.field_variety import field_variety from CODE.features.field_popularity import field_popularity from CODE.features.field_citations_avarage import field_citations_avarage # features based on the topics of the paper from CODE.features.topic_citations_avarage import topic_citations_avarage from CODE.features.topic_variety import topics_variety from CODE.features.topic_popularity import topic_popularity from CODE.features.topic_citations_avarage import topic_citations_avarage # features based on the abstract of the paper from CODE.features.keywords import best_keywords from CODE.features.abst_words import abst_words from CODE.features.abst_words import abst_count # features based on the venue of the paper from CODE.features.venue_popularity import venue_popularity from CODE.features.venue_citations import venues_citations from CODE.features.age import age # features 
based on the authors of the paper from CODE.features.author_h_index import author_h_index from CODE.features.paper_h_index import paper_h_index from CODE.features.team_size import team_size from CODE.features.author_database import author_database ########################################## # Load datasets # ########################################## # Main datasets data = pd.read_json('DATA/train.json') # Training set test = pd.read_json('DATA/test.json') # Test set # Author-centric datasets # These datasets were made using our self-made functions 'citations_per_author' (for the author_citation_dic) # These functions took a long time to make (ballpark ~10 minutes on a laptop in 'silent mode'), so instead we # decided to run this function once, save the data, and reload the datasets instead of running the function again. import pickle with open('my_dataset1.pickle', 'rb') as dataset: author_citation_dic = pickle.load(dataset) with open('my_dataset2.pickle', 'rb') as dataset2: author_db = pickle.load(dataset2) ########################################## # Missing values handling # ########################################## # Missing values for feature 'fields_of_study' data.loc[data['fields_of_study'].isnull(), 'fields_of_study'] = "" # Missing values for feature 'title' data.loc[data['title'].isnull(), 'title'] = "" # Missing values for feature 'abstract' data.loc[data['abstract'].isnull(), 'abstract'] = "" # Missing values for features 'authors' data.loc[data['authors'].isnull(), 'authors'] = "" # Missing values for feature 'venue' data.loc[data['venue'].isnull(), 'venue'] = "" # Missing values for feature 'year' # data.loc[data['fields_of_study'].isnull(), 'fields_of_study'] = mean(year) # Take mean by venue instead # If venue not known, take something else? 
# Missing values for feature 'references' data.loc[data['references'].isnull(), 'references'] = "" # Missing values for feature 'topics' data.loc[data['topics'].isnull(), 'topics'] = "" # Missing values for feature 'is_open_access' #data.loc[data['is_open_access'].isnull(), 'is_open_access'] = "" # Take most frequent occurrence for venue # If venue not known, do something else? ########################################## # Create basic numeric df # ########################################## end = len(data) num_X = data.loc[ 0:end+1 , ('doi', 'citations', 'year', 'references') ] ##REMOVE DOI ########################################## # Feature creation # ########################################## """ FEATURE DATAFRAME: num_X ALL: After writing a funtion to create a feature, please incorporate your new feature as a column on the dataframe below. This is the dataframe we will use to train the models. DO NOT change the order in this section if at all possible """ num_X['title_length'] = length_title(data) # returns a numbered series num_X['field_variety'] = field_variety(data) # returns a numbered series num_X['field_popularity'] = field_popularity(data) # returns a numbered series # num_X['field_citations_avarage'] = field_citations_avarage(data) # returns a numbered series num_X['team_sz'] = team_size(data) # returns a numbered series num_X['topic_var'] = topics_variety(data) # returns a numbered series num_X['topic_popularity'] = topic_popularity(data) # returns a numbered series num_X['topic_citations_avarage'] = topic_citations_avarage(data) # returns a numbered series num_X['venue_popularity'], num_X['venue'] = venue_popularity(data) # returns a numbered series and a pandas.Series of the 'venues' column reformatted num_X['open_access'] = pd.get_dummies(data["is_open_access"], drop_first = True) # returns pd.df (True = 1) num_X['age'] = age(data) # returns a numbered series. 
Needs to be called upon AFTER the venues have been reformed (from venue_frequency) num_X['venPresL'] = venues_citations(data) # returns a numbered series. Needs to be called upon AFTER the venues have been reformed (from venue_frequency) keywords = best_keywords(data, 1, 0.954, 0.955) # from [data set] get [integer] keywords from papers btw [lower bound] and [upper bound] quantiles; returns list num_X['has_keyword'] = abst_words(data, keywords)#returns a numbered series: 1 if any of the words is present in the abstract, else 0 num_X['keyword_count'] = abst_count(data, keywords) # same as above, only a count (noot bool) # Author H-index author_db, reformatted_authors = author_database(data) data['authors'] = reformatted_authors num_X['h_index'] = paper_h_index(data, author_citation_dic) # Returns a numbered series. Must come after author names have been reformatted. field_avg_cit = num_X.groupby('field_variety').citations.mean() for field, field_avg in zip(field_avg_cit.index, field_avg_cit): num_X.loc[num_X['field_variety'] == field, 'field_cit'] = field_avg """ END do not reorder """ ########################################## # Deal with specific missing values # ########################################## # Open_access, thanks to jreback (27th of July 2016) https://github.com/pandas-dev/pandas/issues/13809 OpAc_by_venue = num_X.groupby('venue').open_access.apply(lambda x: x.mode()) # Take mode for each venue OpAc_by_venue = OpAc_by_venue.to_dict() missing_OpAc = num_X.loc[num_X['open_access'].isnull(),] for i, i_paper in missing_OpAc.iterrows(): venue = i_paper['venue'] doi = i_paper['doi'] index = num_X[num_X['doi'] == doi].index[0] if venue in OpAc_by_venue.keys(): # If a known venue, append the most frequent value for that venue num_X[num_X['doi'] == doi]['open_access'] = OpAc_by_venue[venue] # Set most frequent occurrence else: # Else take most occurring value in entire dataset num_X.loc[index,'open_access'] = num_X.open_access.mode()[0] # Thanks to BENY (2nd 
of February, 2018) https://stackoverflow.com/questions/48590268/pandas-get-the-most-frequent-values-of-a-column ### Drop columns containing just strings num_X = num_X.drop(['venue', 'doi', 'field_variety'], axis = 1) num_X = num_X.dropna() ########################################## # Train/val split # ########################################## ## train/val split X_train, X_val, y_train, y_val = split_val(num_X, target_variable = 'citations') """ INSERT outlier detection on X_train here - ALBERT """ ########################################## # Outlier detection # ########################################## ### MODEL code for outlier detection ### names: X_train, X_val, y_train, y_val # print(list(X_train.columns)) out_y = (find_outliers_tukey(x = y_train['citations'], top = 93, bottom = 0))[0] out_rows = out_y # out_X = (find_outliers_tukey(x = X_train['team_sz'], top = 99, bottom = 0))[0] # out_rows = out_y + out_X out_rows = sorted(list(set(out_rows))) # print("X_train:") # print(X_train.shape) X_train = X_train.drop(labels = out_rows) # print(X_train.shape) # print() # print("y_train:") # print(y_train.shape) y_train = y_train.drop(labels = out_rows) # print(y_train.shape) # Potential features to get rid of: team_sz ########################################## # Model implementations # ########################################## """ IMPLEMENT models here NOTE: Please do not write over X_train, X_val, y_train, y_val in your model - make new variables if needed """ #-----------simple regression, all columns """ MODEL RESULTS: R2: 0.03724 MSE: 33.38996 """ #-----------logistic regression, all columns """ MODEL RESULTS: R2: 0.006551953988217396 MSE: 34.07342328208346 """ #-----------SGD regression, all columns """ # MODEL RESULTS: # Best outcome: ('constant', 0.01, 'squared_error', 35.74249957361433, 0.04476790061780822) """ #-----------polynomial regression, all columns """ """ #model.fit(X_train, y_train) #print('Best score: ', model.best_score_) #print('Best 
parameters: ', model.best_params_) #y_pred = model.predict(X_val) #from sklearn.metrics import r2_score #print(r2_score(y_val,y_pred)) # import json #with open("sample.json", "w") as outfile: #json.dump(dictionary, outfile) """ ----------------------------------------------------------------------------------------------------------- ------------------------------ LETS EXPLORE!!! ------------------------------------------------------------ ----------------------------------------------------------------------------------------------------------- """ """ """ ### FOR: exploring the new dataframe with numerical columns # --> NOTE: it would be more efficient to combine these first and only expand the df once (per addition type) num_X ### FOR: explore data train/val split (should be 6470 train rows and 3188 validation rows) # names: X_train, X_val, y_train, y_val print("number of keywords:", len(keywords)) print("total train rows:", X_train.shape) print("numer w keyword:", sum(X_train['has_keyword'])) print() print(keywords) #X_val #y_train #y_val #6210 of 6313 #6136 (of 6313) for 1 keyword from the top 1% of papers #4787 for 2 keywords from top .01% of papers (correlation: 0.036) #2917 for 1 keyword from top .01% of papers (correlation: 0.049) """ Look at some correlations - full num_X """ # names: X_train, X_val, y_train, y_val # From: https://www.kaggle.com/ankitjha/comparing-regression-models import seaborn as sns corr_mat = num_X.corr(method='pearson') plt.figure(figsize=(20,10)) sns.heatmap(corr_mat,vmax=1,square=True,annot=True,cmap='cubehelix') """ Look at some correlations - X_train NOTE: there is no y here """ # names: X_train, X_val, y_train, y_val #temp = y_train hstack X_train # From: https://www.kaggle.com/ankitjha/comparing-regression-models corr_mat = X_train.corr(method='pearson') plt.figure(figsize=(20,10)) sns.heatmap(corr_mat,vmax=1,square=True,annot=True,cmap='cubehelix') """ 
----------------------------------------------------------------------------------------------------------- ------------------------- LETS CODE!!! -------------------------------------------------------------------- ----------------------------------------------------------------------------------------------------------- """ """ """ print(list(X_train.columns)) """ Choose your columns """ #X_train_small = X_train.loc[ : , 'topic_var':'h_index'].copy() #X_val_small = X_val.loc[ : , 'topic_var':'h_index'].copy() drops = ['year', 'team_sz', 'has_keyword'] X_train_small = X_train.copy() X_train_small.drop(drops, inplace = True, axis=1) X_val_small = X_val.copy() X_val_small.drop(drops, inplace = True, axis=1) from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from sklearn.metrics import r2_score, mean_absolute_error from CODE.models.regression import simple_linear from CODE.models.regression import log_reg summaries = list(X_train.columns) print(summaries) for i in range(len(summaries)): # fs = summaries[:i] + summaries[i+1:] X_train_small = X_train.copy() X_val_small = X_val.copy() drops = summaries[i] X_train_small.drop(drops, inplace = True, axis=1) X_val_small.drop(drops, inplace = True, axis=1) print("dropped:", summaries[i]) simple_linear(X_train_small, y_train, X_val_small, y_val) #dropping venue_popularity helps a tiny bit # log_reg(X_train_small, y_train, X_val_small, y_val) # log_reg # helps to drop: year, field_popularity, team_size, topic_var, age, has_keyword, keyword_count # hurts to drop: references, title length, topic_popularity, opic_citations_avarage, venue_popularity(!), # venPresL(!), h_index(!), field_cit playX = num_X.copy() playX = playX[playX['field_cit'] < 30] num_X.plot.scatter(x="field_cit", y="citations", alpha=0.5) playX.plot.scatter(x="field_cit", y="citations", alpha=0.5) print(list(num_X.columns)) import matplotlib as plt #num_X.plot.scatter(x="year", y="citations", alpha=0.5) 
def abst_categories(the_data, keywords, mid_keywords, low_keywords):
    """Categorize each abstract by which keyword tier it matches most.

    For every row in ``the_data['abstract']``, count how many of the
    high / mid / low tier keywords occur in the lower-cased abstract and
    record the index of the winning tier (0 = high, 1 = mid, 2 = low).
    Missing abstracts (``None``) are recorded as 0.

    NOTE(review): the original loop computed the per-tier counts but never
    appended anything for non-None abstracts, so the returned Series was
    shorter than the input and all-zero; the commented-out ``np.argmax``
    lines suggest the winning tier index was the intended output — confirm
    against how the caller uses the result.

    Parameters
    ----------
    the_data : pandas.DataFrame with an 'abstract' column (str or None).
    keywords, mid_keywords, low_keywords : iterables of tier keywords.

    Returns
    -------
    pandas.Series of int, one entry per input row.
    """
    abst_key = []
    for abstract in the_data['abstract']:
        if abstract is None:
            abst_key.append(0)
            continue
        text = abstract.lower()
        # Count keyword hits per tier, then keep the dominant tier's index.
        tier_counts = [
            sum(word in text for word in tier)
            for tier in (keywords, mid_keywords, low_keywords)
        ]
        abst_key.append(int(np.argmax(tier_counts)))
    return pd.Series(abst_key)
find_outliers_tukey(X_train['references'], top = 90, bottom = 10)) # seems unnecessary # print("team_size:", find_outliers_tukey(X_train['team_size'], top = 99, bottom = 0)) # Meh # print("topic_variety:", find_outliers_tukey(X_train['topic_variety'], top = 75, bottom = 10)) # not much diff btw top and normal # print("age:", find_outliers_tukey(X_train['age'], top = 90, bottom = 10)) # Meh # print("open_access:", find_outliers_tukey(X_train['open_access'], top = 100, bottom = 0)) # Not necessary: boolean # print("has_keyword:", find_outliers_tukey(X_train['has_keyword'], top = 100, bottom = 0)) # Not necessary: boolean # print("title_length:", find_outliers_tukey(X_train['title_length'], top = 90, bottom = 10)) # Meh # print("field_variety:", find_outliers_tukey(X_train['field_variety'], top = 90, bottom = 10)) # seems unnecessary # print("venue_freq:", find_outliers_tukey(X_train['venue_freq'], top = 90, bottom = 10)) # seems unnecessary out_y = (find_outliers_tukey(x = y_train['citations'], top = 95, bottom = 0))[0] #out_X = (find_outliers_tukey(x = X_train['team_size'], top = 99, bottom = 0))[0] out_rows = out_y #out_rows = out_y + out_X out_rows = sorted(list(set(out_rows))) print("X_train:") print(X_train.shape) X_train = X_train.drop(labels = out_rows) print(X_train.shape) print() print("y_train:") print(y_train.shape) y_train = y_train.drop(labels = out_rows) print(y_train.shape) X_train # Create a mini version of the main 'data' dataframe import pandas as pd import numpy as np # %pwd # %cd C:\Users\r_noc\Desktop\Python\GIT\machinelearning play = data.sample(100, replace = False, axis = 0, random_state = 123) print(play.shape) # print(play['abstract']) print(list(play.columns)) # play['has_keyword'] = np.nan # print(play.shape) # play from sklearn.linear_model import PoissonRegressor from sklearn.preprocessing import PolynomialFeatures from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_z = scaler.fit_transform(X_train_small) 
X_val_z =scaler.transform(X_val_small) polynomial_features = PolynomialFeatures(degree = 2) x_train_poly = polynomial_features.fit_transform(X_train_z) x_val_poly = polynomial_features.transform(X_val_z) model = LinearRegression() model.fit(x_train_poly, y_train) y_poly_pred = model.predict(x_val_poly) print(r2_score(y_val, y_poly_pred)) # -0.04350391168707901 print(mean_absolute_error(y_val, y_poly_pred)) # 32.65668266590838 from sklearn.preprocessing import PolynomialFeatures from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_z = scaler.fit_transform(X_train_small) X_val_z =scaler.transform(X_val_small) model = PolynomialFeatures(degree = 2) X_poly = model.fit_transform(X_train_z) model.fit(X_poly, y_train) model2 = LinearRegression() model2.fit(X_poly, y_train) y_pred_val = model2.predict(model.fit_transform(X_val_z)) print(r2_score(y_val, y_pred_val)) #0.03724015197555319 print(mean_absolute_error(y_val, y_pred_val)) #33.38996938585591 #names: X_train, X_val, y_train, y_val from sklearn.preprocessing import StandardScaler from sklearn.linear_model import SGDRegressor scaler = StandardScaler() X_train_z = scaler.fit_transform(X_train_small) X_val_z =scaler.transform(X_val_small) y_ravel = np.ravel(y_train) lr = [ 1.1, 1, .1, .01, .001, .0001] settings = [] for learning_rate in ['constant', 'optimal', 'invscaling']: for loss in ['squared_error', 'huber']: for eta0 in lr: model = SGDRegressor(learning_rate=learning_rate, eta0=eta0, loss=loss,random_state=666, max_iter=5000) model.fit(X_train_z, y_ravel) y_pred = model.predict(X_val_z) mae = mean_absolute_error(y_val, y_pred) r2 = r2_score(y_val, y_pred) settings.append((learning_rate, eta0, loss, mae, r2)) print(settings[-1]) # Best outcome: ('constant', 0.01, 'squared_error', 35.74249957361433, 0.04476790061780822) # With small: ('invscaling', 1, 'squared_error', 48.92137807970932, 0.05128477811871335) X_train ```
github_jupyter
def load_json(file_name):
    """Read *file_name* and return its parsed JSON content."""
    with open(file_name, 'r') as handle:
        return json.load(handle)
def mel_normalization(mel):
    """Return *mel* standardized with the dataset statistics (mean 0.20, std 0.25).

    The original implementation mutated its argument in place via
    ``-=``/``/=``, silently altering the caller's array. Every caller in
    this notebook reassigns the result (``mel = mel_normalization(mel)``),
    so returning a new array preserves all observed behavior while
    removing the side effect.
    """
    return (mel - 0.20) / 0.25
def triplet_hinge_loss(y_true, y_pred):
    """Hinge loss over stacked (positive, negative) similarity scores.

    Column 0 of ``y_pred`` holds the anchor-positive similarity and
    column 1 the anchor-negative similarity; the margin is fixed at 0.1.
    ``y_true`` is unused — the column ordering itself encodes the
    ground truth.
    """
    positive_score = y_pred[:, 0]
    negative_score = y_pred[:, 1]
    return K.mean(K.maximum(0., 0.1 + negative_score - positive_score))
def calculate_accuracy(prediction, groundtruth):
    """Fraction of triplets whose smallest predicted distance matches the true column.

    ``groundtruth`` marks the correct column with the largest value
    (one-hot rows); ``prediction`` holds distances, so the model's guess
    for each row is the argmin.
    """
    true_cols = np.argmax(groundtruth, axis=-1)
    predicted_cols = np.argmin(prediction, axis=-1)
    n_correct = int((true_cols == predicted_cols).sum())
    return n_correct / len(groundtruth)
'l2')) ) prediction[i, 1] = euclidean_distance( np.squeeze(normalize(track_id_to_features[anchor].reshape(1, -1), 'l2')), np.squeeze(normalize(track_id_to_features[negative].reshape(1, -1), 'l2')) ) # mel similarity mel_prediction[i, 0] = euclidean_distance( np.squeeze(normalize(track_id_to_mel[anchor].flatten().reshape(1, -1), 'l2')), np.squeeze(normalize(track_id_to_mel[positive].flatten().reshape(1, -1), 'l2')) ) mel_prediction[i, 1] = euclidean_distance( np.squeeze(normalize(track_id_to_mel[anchor].flatten().reshape(1, -1), 'l2')), np.squeeze(normalize(track_id_to_mel[negative].flatten().reshape(1, -1), 'l2')) ) accuracy = calculate_accuracy(prediction, groundtruth) mel_accuracy = calculate_accuracy(mel_prediction, groundtruth) print(f'Triplet model accuracy: {accuracy:.2f}') print(f'Baseline accuracy : {mel_accuracy:.2f}') ```
github_jupyter
# ANALYSIS OF VARIANCE (ANOVA) ## What is one-way ANOVA? The one-way analysis of variance (ANOVA) is used to determine whether there are any statistically significant differences between the means of three or more independent (unrelated) groups. ### Assumptions of ANOVA The assumptions of the ANOVA test are the same as the general assumptions for any parametric test: - __Independence of observations:__ the data were collected using statistically-valid methods, and there are no hidden relationships among observations. - __Normally-distributed response variable:__ Your dependent variable should be approximately normally distributed for each category of the independent variable. We talk about the one-way ANOVA only requiring approximately normal data because it is quite "robust" to violations of normality, meaning that assumption can be a little violated and still provide valid results. You can test for normality using the Shapiro-Wilk test of normality. - __Homogeneity of variance:__ The variation within each group being compared is similar for every group. If the variances are different among the groups, then ANOVA probably isn’t the right fit for the data. ## Explaination by Implementation Throughout this notebook, We will follow an example on price across different bookstores for used harry potter book. Imagine that we have purchased books many times for our friends at different stores — A, B, C, D and E — and we have calculated prices. 
### Data Generation ``` #importing libraries import pandas as pd from numpy import random #making a list of 5 stores and randomly assigning values L = ['A','B','C','D','E'] random.seed(30) L = [random.uniform(5,6, size=(7)),random.uniform(8,15, size=(7)),random.uniform(12,15, size=(7)),random.uniform(9,12, size=(7)),random.uniform(6,8, size=(7))] #making two lists one with all the store names and other containing the prices corresponding to each stores all_price = [] store = [] for i in range(len(L)): all_price.extend(L[i]) store.extend(chr(65+i)*len(L[i])) #making a dataframe with the above lists as columns namely 'store' and 'prices' data = pd.DataFrame({'store': store, 'price': all_price}) data.head() #using groupby finding the mean price value for each store df = data.groupby('store').mean().reset_index() df ``` The average price per store give a first insight in which store is cheaper — in this case, store A. ### How does ANOVA work? - The ANOVA model starts by estimating the total amount of variation that exists in the price of book across various stores (this is why it is called Analysis of Variance). - If we ignore the information about the store, the best estimation of price(just looking by the avgs only for now) we could give for new book is apx between 5.59 and 13.42 dollars. Let’s for now call this total variation. ### Data Visualisation ``` import seaborn as sns import numpy as np import matplotlib.pyplot as plt fig, (ax1,ax2) = plt.subplots(1,2,figsize=(10,5)) sns.boxplot(y=data["price"],ax=ax1) sns.boxplot(x=data['store'],y=data["price"],ax=ax2) ax2.axes.yaxis.set_ticks([]) ax2.set_ylabel('') ax1.set_xlabel('Total Average Price') plt.show() ``` Above is the comparision of the total and group wise prices boxplots. As we can see in the right plot group A, C and E seems like are heavily skewed which might influence the normality of residuals(one of the assumptions) but we'll check for that later. 
``` fig, (ax1,ax2) = plt.subplots(1,2,figsize=(10,5)) sns.violinplot(y=data["price"],ax=ax1) sns.violinplot(x=data['store'],y=data["price"],ax=ax2) ax2.axes.yaxis.set_ticks([]) ax2.set_ylabel('') ax1.set_xlabel('Total Average Price') plt.show() ``` Violin plots have many of the same summary statistics: - the white dot represents the median - the thick gray bar in the center represents the interquartile range - the thin gray line represents the rest of the distribution, except for points that are determined to be “outliers” using a method that is a function of the interquartile range. - On each side of the gray line is a kernel density estimation to show the distribution shape of the data. Wider sections of the - violin plot represent a higher probability that members of the population will take on the given value; the skinnier sections represent a lower probability ``` x = ['A','B','C','D','E'] comp_dict = {} #Assigns each score series to a dictionary key for comp in x: comp_dict[comp] = data[data['store'] == comp]['price'] y = [] yerror = [] #Assigns the mean score and 95% confidence limit to each store for comp in x: y.append(comp_dict[comp].mean()) yerror.append(1.96*comp_dict[comp].std()/np.sqrt(comp_dict[comp].shape[0])) print(comp + '_std : {}'.format(comp_dict[comp].std())) sns.set(font_scale=1.5) fig = plt.figure(figsize=(10,5)) ax = sns.barplot(df['store'], df['price'], yerr=yerror) ax.set_ylabel('Average Price') plt.show() ``` From our data exploration, we can see that the average Price are different for each stores except B and D which are comparable. We are interested in knowing if this is caused by random variation in data, or if there is an underlying cause. we are going to use the 1-way ANOVA test anyway just to understand the concepts. #### Between-Group Variation is Explained by our Variable Store If we add the variable store in the graph, we see that if we know which store we purchase book from, we can give a more precise range of prices. 
``` #finding the range for each group def minmax(val_list): """ Function takes a list as input and returns the maximum and minimum value of the list Input: val_list: list(int or float) """ min_val = min(val_list) max_val = max(val_list) return (min_val, max_val) # Range for stores A,B,C,D, and E for i in df.store: print("Range for store "+i+":",minmax(data[data.store==i].price)) ``` This phenomenon is due to the Between-Group variation: a quantification of the variation explained by our variable. #### Within-Group Variation is not Explained by our Variable Store However, there is also some part of variation that cannot be explained by our variable ‘store’. for eg. we still don’t know why there is a difference between 8.95 and 13.3 in store B’s price and we would need more variables to explain this. Since we do not have those new variables, the variation remains unexplained and is called the within-group variation. ### The Null and Alternative Hypothesis - There are no significant differences between the store's mean Prices. $$ H_{0}: \mu_{1}=\mu_{2}=\mu_{3}=\mu_{4}=\mu_{5}$$ - There is a significant difference between the store's mean Prices. $$ H_{a}: \mu_{i} \neq \mu_{j} $$ Where $\mu_{i}$ and $\mu_{j}$ can be the mean of any store. If there is at least one store with a significant difference with another store, the null hypothesis will be rejected. ### ANOVA: hypothesis test for group differences When the total variation is split in two, a hypothesis test is applied to find out whether the observed differences in our sample of 35 is significant: - Is one store giving book at lesser price, or is this random noise due to the sampling effect? We need a statistical test to give us this answer: the ANOVA F-test. ### Why ANOVA uses F-test and what F-test does? ANOVA uses the F-test for statistical significance. 
This allows for comparison of multiple means at once, because the error is calculated for the whole set of comparisons rather than for each individual two-way comparison The F-test compares the variance in each group mean from the overall group variance. If the variance within groups is smaller than the variance between groups, the F-test will find a higher F-value, and therefore a higher likelihood that the difference observed is real and not due to chance. ### Formula involved in ANOVA We can calculate a score for the ANOVA. Then we can look up the score in the F-distribution and obtain a p-value. - The F-statistic is defined as follows: $$F = \frac{MS_{b}}{MS_{w}}$$ $$F = \frac{SS_{b}}{K-1}$$ $$SS_{b} = \frac{SS_{w}}{N-K}$$ $$SS_{b} = n_{k}\sum (\bar{x_{k}}-\bar{x_{G}})$$ $$SS_{w} = sum (x_{i}-\bar{x_{G}})^{2}$$ Where $MS_{b}$ is the estimated variance between groups and $MS_{w}$ is the estimated variance within groups, $\bar{x_{k}}$ is the mean within each group, $n_{k}$ is the sample size for each group, $x_{i}$ is the individual data point, and $\bar{x_{G}}$ is the total mean, $SS_{b}$ is sum of squares between the groups and $SS_{w}$ is sum of squares within the groups This is quite a lot of math, fortunately scipy has a function that plugs in all the values for us. ## 1-Way ANOVA Using StatsModels This method conducts a one-way ANOVA in two steps: - Fit the model using an estimation method - The default estimation method in most statistical software packages is ordinary least squares - If you are not familiar with it and don't care to really dive into it, then just know it's one of many types of estimation methods that aim to provide estimates of the parameter (mean, propertion, etc.) 
being tested - Pass fitted model into ANOVA method to produce ANOVA table ``` #importing stats library import statsmodels.api as sm from statsmodels.formula.api import ols import scipy.stats as stats def Anova_table(x,y,df): """ This function takes variables(independent and dependent) along with dataframe they were derived from and returns anova table and model Inputs: x: Explanatory variable (column name) y: Response variable/Dependent variable (column name) df: The dataframe in which x and y belong This hypothesis is tested by calculating sums of squares and looking for a variation in y between levels of x that exceeds the variation within levels. """ lm = ols('{} ~ {}'.format(x,y),data=df).fit() table = sm.stats.anova_lm(lm) return lm,table lm, anova_table = Anova_table('price','store', data) anova_table ``` The ANOVA output provides an estimate of how much variation in the dependent variable that can be explained by the independent variable. - The first column lists the independent variable along with the model residuals (aka the model error). - The __Df__ column displays the degrees of freedom for the independent variable (calculated by taking the number of levels within the variable and subtracting 1), and the degrees of freedom for the residuals (calculated by taking the total number of observations minus 1, then subtracting the number of levels in each of the independent variables). - The __Sum Sq__ column displays the sum of squares (a.k.a. the total variation) between the group means and the overall mean explained by that variable. - The __Mean Sq__ column is the mean of the sum of squares, which is calculated by dividing the sum of squares by the degrees of freedom. - The __F-value__ column is the test statistic from the F test: the mean square of each independent variable divided by the mean square of the residuals. The larger the F value, the more likely it is that the variation associated with the independent variable is real and not due to chance. 
- The __Pr(>F)__ column is the p-value of the F-statistic. This shows how likely it is that the F-value calculated from the test would have occurred if the null hypothesis of no difference among group means were true. __We can treat table as a normal dataframe to extract individual values from it__ ``` anova_table['PR(>F)'] ``` Since our p-value of 2.226964e-15 is smaller than 0.05, so we can reject our null hypothesis and accept alternate hypothesis i.e There are significant differences between the store's mean Prices. ### ASSUMPTION CHECK Initially we looked at some assumptions that we did not check for our data, we are going to visit them now. __NORMALITY__ - The assumption of normality is tested on the residuals of the model when coming from an ANOVA or regression framework. One method for testing the assumption of normality is the __Shapiro-Wilk test__. This can be completed using the shapiro() method from scipy.stats. The output is (W-test statistic, p-value). - __What is the Shapiro-Wilk Test?__ - The Shapiro-Wilk test is a way to tell if a random sample comes from a normal distribution. The test gives you a W value; small values indicate your sample is not normally distributed (you can reject the null hypothesis that your population is normally distributed if your values are under a certain threshold). $$ W = \frac{(\sum_{i=1}^{n} {a_{i}x_{i}})^2}{\sum_{i=1}^{n} {(x_{i}-\bar{x})^{2}}}$$ where: $x_{i}$ are the ordered random sample values $a_{i}$ are constants generated from the covariances, variances and means of the sample (size n) from a normally distributed sample. ``` import scipy.stats as stats stats.shapiro(lm.resid) ``` - The test is non-significant, W= 0.99187, p= 0.99513(>0.05), which indicates that the residuals are normally distributed. __HOMOGENEITY OF VARIANCE__ - The final assumption is that all groups have equal variances. One method for testing this assumption is the __Levene's test__ of homogeneity of variances. 
This can be completed using the levene() method from Scipy.stats. - __What is the Levene Test?__ - Levene’s test is used to check that variances are equal for all samples when your data comes from a non normal distribution. - The null hypothesis for Levene's test: - the groups we're comparing all have equal population variances. ``` stats.levene(data['price'][data['store'] == 'A'], data['price'][data['store'] == 'B'], data['price'][data['store'] == 'C'], data['price'][data['store'] == 'D'], data['price'][data['store'] == 'E']) ``` The Levene's test of homogeneity of variances is not significant as pvalue>0.05 which indicates that the groups have non-statistically significant difference in their varability. ### POST-HOC TESTING A post hoc test is used only after we find a statistically significant result and need to determine where our differences truly came from. __Tukey Honestly Significant Difference (HSD)__ Tukey’s test calculates a new critical value that can be used to evaluate whether differences between any two pairs of means are significant. The critical value is a little different because it involves the mean difference that has to be exceeded to achieve significance. So one simply calculates one critcal value and then the difference between all possible pairs of means. Each difference is then compared to the Tukey critical value. If the difference is larger than the Tukey value, the comparison is significant. The formula for the critical value is as follows $$\bar {d_{t}} = q_t \sqrt {\frac{MS_{s/A}}{n}}$$ qT is the studentized range statistic, $MS_{s/A}$ is the mean square error from the overall F-test, and n is the sample size for each group. 
- Tests all pairwise group comparisons while controlling for the multiple comparisons which protects the familywise error rate and from making a Type I error - Not technically a "post-hoc" test since this test can be used as a test independently of the ANOVA and can be planned before hand - Tukey-Kramer Test When sample sizes are unequal, the Tukey HSD test can be modified by replacing $\frac{2}{n}$ with $\frac{1}{n_i} + \frac{1}{n_j}$ in the above formulas. Although the stats function does that by itself. ``` #For the Tukey test, we assign a variable to the MultiComparison() function,where we provide our response variable('price'), #the group we are testing (store, in this case),and our alpha value (0.05). Then, we simply print the result. import statsmodels.stats.multicomp as mc def Tukey_Table(x,y,data): """ The function returns the tukey results as well as a dataframe which contains all the components of results which can be used later to examine each value Inputs: x: Explanatory variable (column name) y: Response variable/Dependent variable (column name) data: The dataframe in which x and y belong """ comp = mc.MultiComparison(data[x],data[y]) post_hoc_res = comp.tukeyhsd() df = pd.DataFrame(data=post_hoc_res._results_table.data[1:], columns=post_hoc_res._results_table.data[0]) return post_hoc_res,df tukey_res,tukey_table = Tukey_Table('price','store',data) print("mean diffs:", tukey_res.meandiffs) print("----------------------------------------------------------") print("std pairs:",tukey_res.std_pairs) print("----------------------------------------------------------") print("groups unique: ", tukey_res.groupsunique) print("----------------------------------------------------------") print("df total:", tukey_res.df_total) print("----------------------------------------------------------") print("df total:", tukey_res) ``` Now to make sense of the table. At the top the table testing information is provided - __FWER__ is the familywise error rate, i.e. 
what $\alpha$ is being set to and controlled at - __group1__ and __group2__ columns are the groups being compared - __meandiff__ is the difference between the group means - __p-adj__ is the corrected p-value which takes into account the multiple comparisons being conducted - __lower__ is the lower band of the confidence interval. - __upper__ is the upper band of the confidence interval. - __reject__ is the decision rule based on the corrected p-value. ``` tukey_table[['group1','group2','p-adj','reject']] ``` Above results from Tukey’s HSD suggests that pairwise comparisions for A-E and B-D, rejects null hypothesis (p < 0.05) and indicates statistical significant differences. ## 1-Way ANOVA (from scratch) ### 1. Sum of Squares Total Let’s get to the action! What I described before as variation is mathematically measured by the Sum of Squares, $$\sum_{i=1}^{n} {(y_{i}-\bar{y})^{2}}$$ ``` # compute overall mean overall_mean = data['price'].mean() overall_mean # compute Sum of Squares Total data['overall_mean'] = overall_mean ss_total = sum((data['price'] - data['overall_mean'])**2) ss_total ``` This value can be found in the ANOVA table of statsmodels by taking the sum of the sum_sq column. ### 2. Sum of Squares Residual The computation for residual Sum of Squares is slightly different because it takes not the overall average, but the three group averages. We need to subtract each value from the mean of its group (the mean of its own store) and then square those differences and sum them. 
Here’s how it’s done in Python: ``` # compute group means group_means = data.groupby('store').mean() group_means = group_means.rename(columns = {'price': 'group_mean'}) group_means # add group means and overall mean to the original data frame data = data.merge(group_means, left_on = 'store', right_index = True) # compute Sum of Squares Residual ss_residual = sum((data['price'] - data['group_mean'])**2) ss_residual ``` We can find this value in the ANOVA table of statsmodels under sum_sq at the line Residual. ### 3. Sum of Squares Explained Having computed the total sum of squares and the residual sum of squares, we can now compute the Explained Sum of Squares using: $$SS_{total} = SS_{explained} + SS_{residual}$$ Since we already have the SS-Residual and SS-Total, we could do a simple subtraction to get SS-Explained. To get there the hard way, we take the weighted sum of the squared differences between each group means and the overall mean, as follows: ``` # compute Sum of Squares Model ss_explained = sum((data['overall_mean_x'] - data['group_mean'])**2) ss_explained ``` The value can be found in the statsmodels table under sum_sq at the line company. ### 4. Degrees of freedom The term degrees of freedom (df) refers to the number of independent sample points used to compute a statistic minus the number of parameters estimated from the sample points. For example, consider the sample estimate of the population variance ( $s^2$ ): $$s^2 = \frac{\sum_{i=1}^{n} {(x_{i}-x)^{2}}}{n-1}$$ where $x_{i}$ is the score for observation i in the sample, x is the sample estimate of the population mean, and n is the number of observations in the sample. The formula is based on n independent sample points and one estimated population parameter (X). Therefore, the number of degrees of freedom is n minus one. 
For this example, df = n - 1 Here, - df1 = df of the explained part = number of groups — 1 - df2 = df of the residual = number of observations — number of groups In our example, df1 = 4 and df2 = 30. ### 5. Mean Squares The statistical test that is central in ANOVA is the F-test. The null hypothesis states that the mean of all groups is equal, which implies that our model has no explanatory value and that we don’t have proof for choosing one store over another to buy the book. The alternative hypothesis states that at least one of the means is different, which would be a reason to go more in-depth and find out which store or stores are cheaper. We compute the Mean Squares as follows: $$MS_{explained} = \frac{SS_{explained}}{df_{explained}}$$ $$MS_{residual} = \frac{SS_{residual}}{df_{residual}}$$ Here’s how it’s done in Python: ``` # compute Mean Square Residual n_groups = len(set(data['store'])) n_obs = data.shape[0] df_residual = n_obs - n_groups ms_residual = ss_residual / df_residual ms_residual ``` ### 6. F Statistic The F-test arises in tests of hypotheses concerning whether or not two population variances are equal and concerning whether or not three or more population means are equal. We use the Mean Squares to compute the F statistic as the ratio between explained vs unexplained variation: $$f = \frac{MS_{explained}}{MS_{residual}}$$ Here is the implementation in Python: ``` # compute Mean Square Explained df_explained = n_groups - 1 ms_explained = ss_explained / df_explained ms_explained # compute F-Value f = ms_explained / ms_residual f ``` ### 7. P-value In hypothesis testing, the p-value is used to decide whether an alternative hypothesis can be accepted or not (read more on p-values here if necessary). If the p-value is below 0.05, we reject the null hypothesis in favor of the alternative: this means that at least one group mean is __significantly different__. 
We compute the P-Value using the F distribution with df1 and df2 degrees of freedom, in our case an F(4, 30) distribution [If the K populations are normally distributed with a common variance and if H0 : μ1=μ2=⋯=μK is true then under independent random sampling F approximately follows an F -distribution with degrees of freedom df1=K−1 and df2=n−K ]. Using probability calculation, we find the p-value as follows: ``` # compute p-value import scipy.stats p_value = 1 - scipy.stats.f.cdf(f, df_explained, df_residual) p_value ``` ### 8. Interpret p-value We have to compare the p-value to our chosen alpha, in this case, 0,05. Our p-value of 2.220446049250313e-15 is significantly lesser than 0,05, so we can reject our null hypothesis and can accept our alternative. __we do have a statistically significant difference between are 5 samples.__ This means that with the observed data, there is enough evidence to assume a general difference in price of book across the given 5 bookstores . ## Conclusion In this Notebook, ANOVA has allowed us to statistically test whether sample differences can be generalized as population differences. This perfectly illustrates the goal of statistical inference: telling whether an observed difference is significant or not.
github_jupyter
Basic Terminal Apps === By this point, you have learned enough Python to start building interactive apps. If you are set on the larger projects such as building a video game, making a visualization, or making a web app, you can skip this section. But if you would like to start building some simpler apps that run directly in your terminal, check out this notebook. Terminal apps are a great way to build the core functionality of a program you are interested in, which can then be extended into a more accessible format. [Previous: While Loops and Input](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/while_input.ipynb) | [Home](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/index.ipynb) | [Next: Dictionaries](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/dictionaries.ipynb) Contents === - [What are terminal apps?](#What-are-terminal-apps?) - [Why write terminal apps?](#Why-write-terminal-apps?) - [Greeter - A simple terminal app](#Greeter-A-simple-terminal-app) - [Clearing the screen](#Clearing-the-screen) - [Exercises](#Exercises_clearing) - [A Persistent Title Bar](#A-Persistent-Title-Bar) - [The `sleep()` function](#The-sleep%28%29-function) - [A slowly disappearing title bar](#A-slowly-disappearing-title-bar) - [Reducing repeated code](#Reducing-repeated-code) - [Building Greeter](#Building-greeter) - [Basic Pickling](#Basic-Pickling) - [Exercises](#Exercises-pickling) - [Pickling in Greeter](#Pickling-in-Greeter) - [Overall Exercises](#Overall-Exercises) - [Overall Challenges](#Overall-Challenges) [top](#) What are terminal apps? === A terminal application is simply an application that runs inside the terminal. By now, you have seen that most of the output from your programs is printed to the terminal. 
By learning a few more techniques such as clearing the terminal screen and "pickling" data, you can create full-fledged standalone applications that run in the terminal. A terminal app starts just like any other program, but it finishes when the user selects an action that causes the program to quit. In terminal applications, this often means something like entering 'q' or 'quit'. If you are on a linux system, you can run some terminal apps right now. Many of these are not written in Python, but that doesn't matter. They are applications that run in the terminal, and you can write your own apps in Python. If you are on Linux, try the following: - Open a terminal and enter `top`. - You will see a bunch of processes that are running on your system, sorted by which ones are using the most resources. - You can press 'q' to quit this application. - Open a terminal and enter `nano test_file.txt`. - This is a text editor that runs in the terminal. It is useful for quick edits. - If you are editing files on a remote server, you need a terminal-based editor. Nano is one of the simplest. - Press Ctrl-x to exit. If you have typed anything, you will be given the option to save your file. - Open a terminal and enter `vi`. - This is another text editor. It is much less intuitive than nano at first, but once you learn how to use it you can edit files extremely quickly. - Enter `:q` to exit. There are quite a few other terminal apps. Let's figure out how to write some of our own. Why write terminal apps? --- Unless you see people working in technical fields on a regular basis, it's quite possible you have not seen many people using terminal applications. Even so, there are a number of reasons to consider writing a few terminal apps of your own. - **They can be much more fun and satisfying to write than simpler programs.** - Applications, by their nature, solve interesting problems. When you write an application, you are creating an environment in which people work and play. 
That's pretty satisfying. - **Terminal apps let you play with complex code, without layers of abstraction between you and the user.** - When you write more complex graphical applications, there are layers of abstraction between you and the user. To get input in a terminal, you issue the command `input("Please tell me some information: ")`. When you want some information in a graphical program, you have to build text boxes and buttons for submitting information. That has gotten pretty simple these days, but it is still more complicated than what you will do when writing terminal applications. Terminal applications let you focus on getting the logic right, rather than building a graphical interface. - **You will learn about user interaction issues.** - Terminal apps do have users, even if that is just you, and maybe your friends or family. If you see people using your applications at an early stage, you will write better code. There is nothing like watching people use your programs to make you code more defensively. You know to enter a string in some places, but what keeps your users from entering numbers? Influencing your users to give you the right information the first time, and being prepared to deal with the wrong kind of data are good skills as a programmer. Having real users at an early stage in your programming career is a good thing. [top](#) Greeter - A simple terminal app === Let's define a simple terminal app that we can make in this notebook. Greeter will: - Always display a title bar at the top of the screen, showing the name of the app that is running. - Offer you three choices: - Enter a name. - If the program knows that name, it will print a message saying something like "Hello, old friend." - If the program does not know that name, it will print a message saying something like, "It's nice to meet you. I will remember you." - The next time the program hears this name, it will greet the person as an old friend. - See a list of everyone that is known. 
- Quit. - The program will remember people, even after it closes. - The program may list the number of known people in the title bar. Now we will go over a few things we need to know in order to build this app, and then we will build it. [top](#) Clearing the screen === You may have noticed that your programs which produce a lot of output scroll down the terminal. This keeps your program from looking like a running application. It's fairly easy to clear the terminal screen any time you want to, though. Let's make a really simple program to show this: ``` # Show a simple message. print("I like climbing mountains.") ``` Now we will modify the program so that the screen is cleared right after the message is displayed: ``` ###highlight=[2,7,8] import os # Show a simple message. print("I like climbing mountains.") # Clear the screen. os.system('clear') ``` There is no output to show here, because as soon as the message is displayed the screen is cleared. Run the program on your computer to see this. The first line *imports* the **module** "os". This is a set of functions that let you interact with your operating system's commands. The line `os.system('clear')` tells Python to talk to the operating system, and ask the system to run the `clear` command. You can see this same thing by typing `clear` in any open terminal window. On a technical note, the screen is not actually erased when you enter the `clear` command. Instead, the terminal is scrolled down one vertical window length. If you scroll the terminal window up, you can see the old output. This is fine, and it can actually be good to be able to scroll back up and look at some output you might have missed. **On Windows:** The command to clear the terminal screen is different on Windows. The command `os.system('cls')` should work. <a id="Exercises-clearing"></a> Exercises --- #### Simple Clear - Print one line to the screen. - Run your program to make sure the line actually prints. 
- Add a call to your system's "clear" command. - Run your program and make sure the line disappears. - If it didn't work, make sure you have the line `import os` at the top of your file. [top](#) A Persistent Title Bar === Let's consider how to make a title bar that stays at the top of the screen. We can't make this yet, because as our program creates more and more output, any title bar we print will disappear up the top of the screen. Now that we know how to clear the screen, we can rebuild the screen that our user sees any time we want. Let's start out by printing a title bar for Greeter. If you want to follow along, type out the following code and save it as `greeter.py`: ``` # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. # Display a title bar. print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! ***") print("\t**********************************************") ``` Everything is fine so far. Let's look at what happens when we have a bunch of output below the title bar. Copy the following code onto your machine and run it. ``` ###highlight=[10,11,12] # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. # Display a title bar. print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! ***") print("\t**********************************************") # Display a bunch of output, representing a long-running program. for x in range(0,51): print("\nWe have done %d fun and interesting things together!" % x) ``` I won't include the output here. All you should see are a number of lines about doing fun and interesting things together, with the title bar far up your terminal window, well above the visible portion. The `sleep()` function --- Sometimes it is helpful to be able to pause a program for a moment. This can slow things down, and it can let us show output in intervals. 
then you should have seen the message "I woke up!"
``` ###highlight=[3,18,19,20,21,22,23,24,25,26,27,28,29,30,31] from time import sleep import os # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. # Display a title bar. print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! ***") print("\t**********************************************") # Print a bunch of information, in short intervals names = ['aaron', 'brenda', 'cyrene', 'david', 'eric'] # Print each name 5 times. for name in names: # Clear the screen before listing names. os.system('clear') # Display the title bar. print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! ***") print("\t**********************************************") print("\n\n") for x in range(0,5): print(name.title()) # Pause for 1 second between batches. sleep(1) ``` This time you should see the same output, but you should see a steady title bar, and the new information takes the place of the old information instead of being listed below the old output. We have a "running" application! Reducing repeated code --- If you understood the section [Introducing Functions](introducing_functions), you may have noticed that we have some repeated code in the last program listing. Any time you see a significant amount of repetition, you can probably introduce a function. The repeated code in this example involves displaying the title bar. Let's write a function to show the title bar. ``` ###highlight=[8,9,10,11,12,13,14,22] from time import sleep import os # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. def display_title_bar(): # Clears the terminal screen, and displays a title bar. os.system('clear') print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! 
***") print("\t**********************************************") # Print a bunch of information, in short intervals names = ['aaron', 'brenda', 'cyrene', 'david', 'eric'] # Print each name 5 times. for name in names: display_title_bar() print("\n\n") for x in range(0,5): print(name.title()) # Pause for 1 second between batches. sleep(1) ``` Again, you really need to copy this code onto your own machine and run it locally to see this in action. You should see the same behavior as before. It's interesting to note that having good comments makes writing functions easier. The comment just before the code for displaying the title bar was "Display the title bar." This practically gives us a name for the function that will take over this job: `display_title_bar`. If we use short, descriptive function names that tell us exactly what the function does, it becomes much easier to follow what is happening in the program. We can see that we don't even need a comment when the function is called, because the function name itself is so informative. The name `display_title_bar` is much better than something like `title`, although you don't want to go overboard with a name such as `clear_screen_and_display_title_bar`. This code gets a bit hard to read overall though, because we have several sections of code. You can use comments to visually break up your programs. Here's what that can look like in our current program: ``` ###highlight=[9,20] from time import sleep import os # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. ### FUNCTIONS ### def display_title_bar(): # Clears the terminal screen, and displays a title bar. os.system('clear') print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! 
If this is more appealing visually to you, feel free to use these kinds of "section header" comments in your longer programs.
There are no Python errors, but the logic is a little off. We can enter choices, but the call to `display_title_bar()` at the beginning of the loop clears the screen as soon as the output is printed. We can fix this by moving the call to `display_title_bar()`, and putting it in two places. We call `display_title_bar()` once just before we enter the loop, when the program starts running. But we also call it right after the user makes a choice, and before we respond to that choice: ``` ###highlight=[23,34] import os # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. ### FUNCTIONS ### def display_title_bar(): # Clears the terminal screen, and displays a title bar. os.system('clear') print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! ***") print("\t**********************************************") ### MAIN PROGRAM ### # Set up a loop where users can choose what they'd like to do. choice = '' display_title_bar() while choice != 'q': # Let users know what they can do. print("\n[1] See a list of friends.") print("[2] Tell me about someone new.") print("[q] Quit.") choice = input("What would you like to do? ") # Respond to the user's choice. display_title_bar() if choice == '1': print("\nHere are the people I know.\n") elif choice == '2': print("\nI can't wait to meet this person!\n") elif choice == 'q': print("\nThanks for playing. Bye.") else: print("\nI didn't understand that choice.\n") ``` This works, so let's put the menu into a function: ``` ###highlight=[18,19,20,21,22,23,24,34] import os # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. ### FUNCTIONS ### def display_title_bar(): # Clears the terminal screen, and displays a title bar. os.system('clear') print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! 
***") print("\t**********************************************") def get_user_choice(): # Let users know what they can do. print("\n[1] See a list of friends.") print("[2] Tell me about someone new.") print("[q] Quit.") return input("What would you like to do? ") ### MAIN PROGRAM ### # Set up a loop where users can choose what they'd like to do. choice = '' display_title_bar() while choice != 'q': choice = get_user_choice() # Respond to the user's choice. display_title_bar() if choice == '1': print("\nHere are the people I know.\n") elif choice == '2': print("\nI can't wait to meet this person!\n") elif choice == 'q': print("\nThanks for playing. Bye.") else: print("\nI didn't understand that choice.\n") ``` Now, let's make it so that the program actually does something. - We will make an empty list to store names. - We will print names from this list in choice 1. - We will get a new name in choice 2. - We will store that new name in the list of names. ``` ###highlight=[28,29,42,43,45,46,47] import os # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. ### FUNCTIONS ### def display_title_bar(): # Clears the terminal screen, and displays a title bar. os.system('clear') print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! ***") print("\t**********************************************") def get_user_choice(): # Let users know what they can do. print("\n[1] See a list of friends.") print("[2] Tell me about someone new.") print("[q] Quit.") return input("What would you like to do? ") ### MAIN PROGRAM ### # Set up a loop where users can choose what they'd like to do. names = [] choice = '' display_title_bar() while choice != 'q': choice = get_user_choice() # Respond to the user's choice. 
If you run this program, you will see that it works mostly as it's supposed to.
display_title_bar() if choice == '1': show_names() elif choice == '2': get_new_name() elif choice == 'q': print("\nThanks for playing. Bye.") else: print("\nI didn't understand that choice.\n") ``` This code doesn't behave any differently, but the main program itself is much easier to read. Lines 48-57 are a clean series of if-elif-else statements, each of which has a clear action. Putting each action into its own function also lets us focus on improving that action. If you look at the function `get_new_name()`, you might notice that we are storing the name without checking anything about it. Let's put in a simple check to make sure we don't already know about this person. ``` ###highlight=[36,37,38,39,40] import os # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. ### FUNCTIONS ### def display_title_bar(): # Clears the terminal screen, and displays a title bar. os.system('clear') print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! ***") print("\t**********************************************") def get_user_choice(): # Let users know what they can do. print("\n[1] See a list of friends.") print("[2] Tell me about someone new.") print("[q] Quit.") return input("What would you like to do? ") def show_names(): # Shows the names of everyone who is already in the list. print("\nHere are the people I know.\n") for name in names: print(name.title()) def get_new_name(): # Asks the user for a new name, and stores the name if we don't already # know about this person. new_name = input("\nPlease tell me this person's name: ") if new_name in names: print("\n%s is an old friend! Thank you, though." % new_name.title()) else: names.append(new_name) print("\nI'm so happy to know %s!\n" % new_name.title()) ### MAIN PROGRAM ### # Set up a loop where users can choose what they'd like to do. 
You don't want to use `pickle` for important data, but it's a good way to get started with storing data after your program closes.
animals = [] # Create a loop that lets users store new animals. new_animal = '' while new_animal != 'quit': print("\nPlease tell me a new animal to remember.") new_animal = input("Enter 'quit' to quit: ") if new_animal != 'quit': animals.append(new_animal) # Try to save the animals to the file 'animals.pydata'. try: file_object = open('animals.pydata', 'wb') pickle.dump(animals, file_object) file_object.close() print("\nI will remember the following animals: ") for animal in animals: print(animal) except Exception as e: print(e) print("\nI couldn't figure out how to store the animals. Sorry.") ``` This program uses three new things: - **A `try-except` block.** - A try-except block is used when you think a section of code might create an error. If an error occurs in a try block, the program does not end. Instead, program execution drops into the except block. - In this case, we try to open a file to write out a list of animals. - If the file can not be opened for some reason, for example because the program doesn't have permission to create a new file, then the program drops to the `except` block. - In this case, we print the actual error message and a friendlier message of our own. - **Opening and closing files.** - Line 33 tries to open the file 'animals.pydata'. - Line 33 tells Python to open the file for writing. The 'b' argument tells Python to write the file in bytes. - If successful, the open file can be used through the *file_object* variable. - If the file does not yet exist, this line creates the file, in the same directory as the program. - Line 35 closes the file once we are finished working with it. - **A call to `pickle.dump()`.** - Line 34 'dumps' the list animals into the file that was opened. (It is not dumped in a format that we can read.) **Note:** This may work slightly differently in Python 2, and on Windows or Mac. This notebook will be updated to include those specific differences. 
When you run this program on your computer, look for the file *animals.pydata* in the same directory as the program file. If that worked, we need to modify the program so that the next time it runs it loads the data from *animals.pydata* back in. We do that with a try-except block at the beginning of the program, which tries to open the file and read the data back into the `animals` list. If that doesn't work, which will happen when the program is run for the first time or if you delete *animals.pydata*, we make the same empty list we had before. ``` ###highlight=[9,10,11,12,13,14] import pickle # This program asks the user for some animals, and stores them. # It loads animals if they exist. # Try to load animals. If they don't exist, make an empty list # to store new animals in. try: file_object = open('animals.pydata', 'rb') animals = pickle.load(file_object) file_object.close() except: animals = [] # Show the animals that are stored so far. if len(animals) > 0: print("I know the following animals: ") for animal in animals: print(animal) else: print("I don't know any animals yet.") # Create a loop that lets users store new animals. new_animal = '' while new_animal != 'quit': print("\nPlease tell me a new animal to remember.") new_animal = input("Enter 'quit' to quit: ") if new_animal != 'quit': animals.append(new_animal) # Try to save the animals to the file 'animals.pydata'. try: file_object = open('animals.pydata', 'wb') pickle.dump(animals, file_object) file_object.close() print("\nI will remember the following animals: ") for animal in animals: print(animal) except Exception as e: print(e) print("\nI couldn't figure out how to store the animals. Sorry.") ``` The new try-except block at the beginning of the file works like this: - It tries to open a file to read in a list of animals. - If the file exists, animals will contain the previously stored list. - If the file does not exist, line 9 will create an error, which would normally end the program. 
- Instead, the program drops to the except block, where an empty list is created. Now the program should just build a cumulative list of animals every time it is run. This is our first example of **persistent** data, data that lasts even after our program stops running. Now let's use these concepts to finish Greeter. [top](#) <a id="Exercises-pickling"></a> Exercises --- #### Pickling Games - Write a program that lets users enter a number of different games. - Save the games to disk, using pickle, before the program closes. - Load the games from the saved file at the beginning of your program. [top](#) Pickling in Greeter --- We really just have a little left to do in order to finish Greeter. We need to dump the list of names before the program ends, and we need to load that list of names when the program starts. Let's do this by creating two new functions: - The function `load_names()` will load the names from a file when the program first starts. This function will be called before the main loop begins. - The function `quit()` will dump the names into a file just before the program ends. ``` ###highlight=[43,44,45,46,47,48,49,50,51,52,53,55,56,57,58,59,60,61,62,63,64,69,84] import os import pickle # Greeter is a terminal application that greets old friends warmly, # and remembers new friends. ### FUNCTIONS ### def display_title_bar(): # Clears the terminal screen, and displays a title bar. os.system('clear') print("\t**********************************************") print("\t*** Greeter - Hello old and new friends! ***") print("\t**********************************************") def get_user_choice(): # Let users know what they can do. print("\n[1] See a list of friends.") print("[2] Tell me about someone new.") print("[q] Quit.") return input("What would you like to do? ") def show_names(): # Shows the names of everyone who is already in the list. 
print("\nHere are the people I know.\n") for name in names: print(name.title()) def get_new_name(): # Asks the user for a new name, and stores the name if we don't already # know about this person. new_name = input("\nPlease tell me this person's name: ") if new_name in names: print("\n%s is an old friend! Thank you, though." % new_name.title()) else: names.append(new_name) print("\nI'm so happy to know %s!\n" % new_name.title()) def load_names(): # This function loads names from a file, and puts them in the list 'names'. # If the file doesn't exist, it creates an empty list. try: file_object = open('names.pydata', 'rb') names = pickle.load(file_object) file_object.close() return names except Exception as e: print(e) return [] def quit(): # This function dumps the names into a file, and prints a quit message. try: file_object = open('names.pydata', 'wb') pickle.dump(names, file_object) file_object.close() print("\nThanks for playing. I will remember these good friends.") except Exception as e: print("\nThanks for playing. I won't be able to remember these names.") print(e) ### MAIN PROGRAM ### # Set up a loop where users can choose what they'd like to do. names = load_names() choice = '' display_title_bar() while choice != 'q': choice = get_user_choice() # Respond to the user's choice. display_title_bar() if choice == '1': show_names() elif choice == '2': get_new_name() elif choice == 'q': quit() print("\nThanks for playing. Bye.") else: print("\nI didn't understand that choice.\n") ``` We now have a long-running, standalone terminal application. It doesn't do a whole lot, but it shows the basic structure for creating terminal apps of your own. If you expand the list of choices and keep your code organized into clean, simple functions, you now know enough to make some pretty interesting programs. [top](#) Overall Exercises === #### Favorite Games - Write a terminal app that asks people for their favorite games. 
Hints === #### Hangman
#### Abacus - Print a line that identifies the application that is running. - Ask the user for a number between 1 and 9999. - There are some mathematical ways to get each digit in the number. If you know how to use any of these methods on paper, feel free to try to code them.
[top](#) - - - [Previous: While Loops and Input](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/while_input.ipynb) | [Home](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/index.ipynb) | [Next: Dictionaries](http://nbviewer.ipython.org/urls/raw.github.com/ehmatthes/intro_programming/master/notebooks/dictionaries.ipynb)
github_jupyter
# Simple Line Plots Perhaps the simplest of all plots is the visualization of a single function $y = f(x)$. Here we will take a first look at creating a simple plot of this type. As with all the following sections, we'll start by setting up the notebook for plotting and importing the packages we will use: ``` %matplotlib inline #magic Funcation import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') import numpy as np plt.style.available li = ['Solarize_Light2', '_classic_test_patch', 'bmh', 'classic', 'dark_background', 'fast', 'fivethirtyeight', 'ggplot', 'grayscale', 'seaborn', 'seaborn-bright', 'seaborn-colorblind', 'seaborn-dark', 'seaborn-dark-palette', 'seaborn-darkgrid', 'seaborn-deep', 'seaborn-muted', 'seaborn-notebook', 'seaborn-paper', 'seaborn-pastel', 'seaborn-poster', 'seaborn-talk', 'seaborn-ticks', 'seaborn-white', 'seaborn-whitegrid', 'tableau-colorblind10'] for i in li: fig = plt.figure() plt.style.use(i) ax=plt.axes() ``` For all Matplotlib plots, we start by creating a figure and an axes. In their simplest form, a figure and axes can be created as follows: ``` fig = plt.figure() ax = plt.axes() ``` In Matplotlib, the *figure* (an instance of the class ``plt.Figure``) can be thought of as a single container that contains all the objects representing axes, graphics, text, and labels. The *axes* (an instance of the class ``plt.Axes``) is what we see above: a bounding box with ticks and labels, which will eventually contain the plot elements that make up our visualization. Throughout this book, we'll commonly use the variable name ``fig`` to refer to a figure instance, and ``ax`` to refer to an axes instance or group of axes instances. Once we have created an axes, we can use the ``ax.plot`` function to plot some data. 
Let's start with a simple sinusoid: ``` fig = plt.figure() ax = plt.axes() x = np.linspace(0, 10, 1000) ax.plot(x, np.sin(x)); ``` Alternatively, we can use the pylab interface and let the figure and axes be created for us in the background (see [Two Interfaces for the Price of One](04.00-Introduction-To-Matplotlib.ipynb#Two-Interfaces-for-the-Price-of-One) for a discussion of these two interfaces): ``` plt.plot(x, np.sin(x)); ``` If we want to create a single figure with multiple lines, we can simply call the ``plot`` function multiple times: ``` plt.plot(x, np.sin(x)) plt.plot(x, np.cos(x)); ``` That's all there is to plotting simple functions in Matplotlib! We'll now dive into some more details about how to control the appearance of the axes and lines. ## Adjusting the Plot: Line Colors and Styles The first adjustment you might wish to make to a plot is to control the line colors and styles. The ``plt.plot()`` function takes additional arguments that can be used to specify these. To adjust the color, you can use the ``color`` keyword, which accepts a string argument representing virtually any imaginable color. The color can be specified in a variety of ways: ``` plt.plot(x, np.sin(x - 0), color='blue') # specify color by name plt.plot(x, np.sin(x - 1), color='g') # short color code (rgbcmyk) plt.plot(x, np.sin(x - 2), color='0.75') # Grayscale between 0 and 1 plt.plot(x, np.sin(x - 3), color='#FFDD44') # Hex code (RRGGBB from 00 to FF) plt.plot(x, np.sin(x - 4), color=(1.0,0.2,0.3)) # RGB tuple, values 0 to 1 plt.plot(x, np.sin(x - 5), color='chartreuse'); # all HTML color names supported ``` If no color is specified, Matplotlib will automatically cycle through a set of default colors for multiple lines. 
Similarly, the line style can be adjusted using the ``linestyle`` or ``ls`` keyword: ``` plt.plot(x, x + 0, linestyle='solid') plt.plot(x, x + 1, linestyle='dashed') plt.plot(x, x + 2, linestyle='dashdot') plt.plot(x, x + 3, linestyle='dotted'); # For short, you can use the following codes: plt.plot(x, x + 4, linestyle='-') # solid plt.plot(x, x + 5, linestyle='--') # dashed plt.plot(x, x + 6, linestyle='-.') # dashdot plt.plot(x, x + 7, linestyle=':'); # dotted ``` If you would like to be extremely terse, these ``linestyle`` and ``color`` codes can be combined into a single non-keyword argument to the ``plt.plot()`` function: ``` plt.plot(x, x + 0, '-g') # solid green plt.plot(x, x + 1, '--c') # dashed cyan plt.plot(x, x + 2, '-.k') # dashdot black plt.plot(x, x + 3, ':r'); # dotted red ``` These single-character color codes reflect the standard abbreviations in the RGB (Red/Green/Blue) and CMYK (Cyan/Magenta/Yellow/blacK) color systems, commonly used for digital color graphics. There are many other keyword arguments that can be used to fine-tune the appearance of the plot; for more details, I'd suggest viewing the docstring of the ``plt.plot()`` function using IPython's help tools (See [Help and Documentation in IPython](01.01-Help-And-Documentation.ipynb)). ## Adjusting the Plot: Axes Limits Matplotlib does a decent job of choosing default axes limits for your plot, but sometimes it's nice to have finer control. The most basic way to adjust axis limits is to use the ``plt.xlim()`` and ``plt.ylim()`` methods: ``` plt.plot(x, np.sin(x)) plt.xlim(-1, 11) plt.ylim(-1.5, 1.5); ``` If for some reason you'd like either axis to be displayed in reverse, you can simply reverse the order of the arguments: ``` plt.plot(x, np.sin(x)) plt.xlim(10, 0) plt.ylim(1.2, -1.2); ``` A useful related method is ``plt.axis()`` (note here the potential confusion between *axes* with an *e*, and *axis* with an *i*). 
The ``plt.axis()`` method allows you to set the ``x`` and ``y`` limits with a single call, by passing a list which specifies ``[xmin, xmax, ymin, ymax]``: ``` plt.plot(x, np.sin(x)) plt.axis([-1, 11, -1.5, 1.5]); ``` The ``plt.axis()`` method goes even beyond this, allowing you to do things like automatically tighten the bounds around the current plot: ``` plt.plot(x, np.sin(x)) plt.axis('tight'); ``` It allows even higher-level specifications, such as ensuring an equal aspect ratio so that on your screen, one unit in ``x`` is equal to one unit in ``y``: ``` plt.plot(x, np.sin(x)) plt.axis('equal'); ``` For more information on axis limits and the other capabilities of the ``plt.axis`` method, refer to the ``plt.axis`` docstring. ## Labeling Plots As the last piece of this section, we'll briefly look at the labeling of plots: titles, axis labels, and simple legends. Titles and axis labels are the simplest such labels—there are methods that can be used to quickly set them: ``` plt.plot(x, np.sin(x)) plt.title("A Sine Curve") plt.xlabel("x") plt.ylabel("sin(x)"); ``` The position, size, and style of these labels can be adjusted using optional arguments to the function. For more information, see the Matplotlib documentation and the docstrings of each of these functions. When multiple lines are being shown within a single axes, it can be useful to create a plot legend that labels each line type. Again, Matplotlib has a built-in way of quickly creating such a legend. It is done via the (you guessed it) ``plt.legend()`` method. Though there are several valid ways of using this, I find it easiest to specify the label of each line using the ``label`` keyword of the plot function: ``` plt.plot(x, np.sin(x), '-g', label='sin(x)') plt.plot(x, np.cos(x), ':b', label='cos(x)') plt.axis('equal') plt.legend(); ``` As you can see, the ``plt.legend()`` function keeps track of the line style and color, and matches these with the correct label. 
More information on specifying and formatting plot legends can be found in the ``plt.legend`` docstring; additionally, we will cover some more advanced legend options in [Customizing Plot Legends](04.06-Customizing-Legends.ipynb). ## Aside: Matplotlib Gotchas While most ``plt`` functions translate directly to ``ax`` methods (such as ``plt.plot()`` → ``ax.plot()``, ``plt.legend()`` → ``ax.legend()``, etc.), this is not the case for all commands. In particular, functions to set limits, labels, and titles are slightly modified. For transitioning between MATLAB-style functions and object-oriented methods, make the following changes: - ``plt.xlabel()`` → ``ax.set_xlabel()`` - ``plt.ylabel()`` → ``ax.set_ylabel()`` - ``plt.xlim()`` → ``ax.set_xlim()`` - ``plt.ylim()`` → ``ax.set_ylim()`` - ``plt.title()`` → ``ax.set_title()`` In the object-oriented interface to plotting, rather than calling these functions individually, it is often more convenient to use the ``ax.set()`` method to set all these properties at once: ``` ax = plt.axes() ax.plot(x, np.sin(x)) ax.set(xlim=(0, 10), ylim=(-2, 2), xlabel='x', ylabel='sin(x)', title='A Simple Plot'); ```
github_jupyter
# 3 - An Introduction to Working with Annuli This Notebook works through how we can work with annuli of line emission to infer their velocity structure. Using the full line emission can be beneficial compared to using just the collapsed rotation map as you have more information to work with. ## `linecube` For this Tutorial, we will use the TW Hya data that we used in the [first tutorial](https://eddy.readthedocs.io/en/latest/tutorials/tutorial_1.html) which is from [Huang et al. (2018)](https://ui.adsabs.harvard.edu/#abs/2018ApJ...852..122H), and downloadable from [here](https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/PXDKBC). This time we do not want to collapse the data to a rotation map, but keep it as a full line cube. As such, we use the `linecube` class from `eddy`, rather than the `rotationmap`. ``` import matplotlib.pyplot as plt from eddy import linecube import numpy as np ``` Let's load up the data. Again, we can use the same field of view argument, `FOV`, to cut down the field of view. ``` cube = linecube('../../cubes/TWHya_CO_cube.fits', FOV=8.0) ``` ### Inspecting the Data Unlike for `rotationmap`, a `linecube` instance will be 3D, with the third dimension representing the spectral dimension. This can be seen by plotting the integrated spectrum with the `plot_spectrum` function, which basically integrates the flux in each channel. ``` cube.plot_spectrum() ``` You can clearly see the spectrum is centred on a velocity of $\sim 2.84~{\rm km\,s^{-1}}$, the systemic velocity of TW Hya. Another way to inspect the data is to plot the peak intensity along every line of sight. This can be achieved with the `plot_maximum` function. ``` cube.plot_maximum() ``` Here it's obvious to see that the line emission has a ring-like morphology and extends out to about $2.5^{\prime\prime}$. This is always a good check to make as you can see whether the data is well centered or not. 
## `annulus` The main focus of this tutorial, however, is working with the `annulus` class. This contains an ensemble of spectra extracted from the `linecube` based on some geometrical cuts (usually just a small radial range). This is useful because if we expect the disk to be azimuthally symmetric, then these spectra should have the same form (i.e., peak and width), but have their line centers shifted due to the projected velocity structure of the disk. Leveraging this assumption that the line profiles should _look_ the same, we can use this to infer the underlying velocity structure. ### Extracting an `annulus` To extract an annulus, we simply use the `get_annulus` function, specifying the disk properties and the region we're interested in. By default, this will select a random sample of _spatially independent_ pixels from the cube. Remember, if we want to assume that all the spectra look the same, then we only want a small radial range. ``` annulus = cube.get_annulus(r_min=1.0, r_max=1.1, inc=6.5, PA=151.0) ``` ### Inspecting an `annulus` We can quickly see spectra that we've selected through `plot_spectra`: ``` annulus.plot_spectra() ``` This figure shows that all the lines look similar, but are spread out along the velocity axis due to the Doppler shift of the lines. Another useful function is `plot_river`: ``` annulus.plot_river() ``` What this figure shows with the colored panel are each of the spectra stacked on top of one another; each row represents a spectrum. So you can see at $\phi = 0^{\circ}$, the red-shifted axis (remember in `eddy` the ${\rm PA}$ is always measured to the red-shifted axis, and the deprojected polar coordinate $\phi$ is measured from this axis in an east-of-north direction) has the peak at around $3.2~{\rm km\,s^{-1}}$, while the blue-shifted axis, $\phi = \pm 180^{\circ}$, has the peak around $2.4~{\rm km\,s^{-1}}$. The top panel shows the azimuthally averaged spectrum. 
This is exceptionally broad as we're averaging over spectra with a large range of line centroids. ### Inferring Rotation Velocity The most basic approach to accounting for this velocity shift is to model the line centroid as a very simple harmonic oscillator: $$ v_0(\phi) = v_{\phi} \cos(\phi) + v_{\rm LSR} $$ This can be easily done with the `get_vlos_SHO` function. In short, this determines the line centroid for each spectrum in the `annulus`, then fits $v_{\phi}$ and $v_{\rm LSR}$ to best recover the observation. By default, the function will use the `quadratic` method described in [Teague & Foreman-Mackey (2018)](https://ui.adsabs.harvard.edu/#abs/2018RNAAS...2c.173T) to fit the line centroids, and will return both $v_{\phi}$ and $v_{\rm LSR}$ and their associated uncertainties. ``` annulus.get_vlos_SHO() ``` There are four methods implemented in `eddy` to determine the line centroid: * `'quadratic'` - The method described in [Teague & Foreman-Mackey (2018)](https://ui.adsabs.harvard.edu/#abs/2018RNAAS...2c.173T) * `'max'` - Assumes the line center is the velocity of the channel with the peak intensity. This is the fastest, but is limited by the spectral resolution of the data and the noise. * `'gaussian'` - Finds the line center by fitting a Gaussian profile to it. * `'gaussthick'` - Finds the line center by fitting an optically thick Gaussian profile to it. To understand the quality of the fit, we can use the convenience function, `plot_centroids`, to show our data, and overplot the fit. Note this function also takes the `centroid_method` argument to change between the different methods described above. ``` annulus.plot_centroids(plot_fit=True) ``` As another check that this is the correct velocity, we can use this to 'straighten out' the river plot from above by providing it the projected rotational velocity. 
``` annulus.plot_river(vrot=annulus.get_vlos_SHO()[0][0]) ``` We can easily see that this correction has straightened out the river and tightened up the azimuthally averaged spectrum. It is this approach that we use with [GoFish](https://github.com/richteague/gofish) to tease out weak emission lines. ### Radial Velocities Radial velocities may also be present. We can extend the simple SHO model above to account for this: $$ v_0(\phi) = v_{\phi} \cos(\phi) + \omega \, v_{\rm r} \sin(\phi) + v_{\rm LSR} $$ where $\omega$ describes the rotation of the disk on the sky: $\omega = 1$ if the disk is rotating anti-clockwise on the sky and $\omega = -1$ if the disk is rotating clockwise. Remember in `eddy` this is described by the sign of the disk inclination used, with positive inclinations describing clockwise rotation, and negative inclinations described anti-clockwise rotation. This rotation direction is inherited when using `get_annulus`. In this form, positive $v_{\rm r}$ values are moving _away_ from the star. To include this component, most functions allow for a `fit_vrad` argument. ``` annulus.plot_centroids(plot_fit=True, fit_vrad=True, centroid_method='quadratic') ``` ## Velocity Profiles The `linecube` class provides a wrapper for splitting the disk into concentric annuli assuming a source geometry, and then calculating the rotational and, if requested, radial velocity profiles. --- **NOTE**: This approach is different to the one implemented in [ConeRot](https://github.com/simoncasassus/ConeRot) which allows each annulus to be described by a different set of geometrical parameters. --- This is the `get_velocity_profile` function, as demonstrated below. By default it will calculate the profile for the whole image with bin annuli of 1/4 the beam major axis size, however for this we trim down the region to speed things up. For this Tutorial, we will stick with the `fit_method='SHO'`. 
Other fit methods are discussed in [a second Tutorial](https://eddy.readthedocs.io/en/latest/tutorials/tutorial_4.html). ``` r, v, dv = cube.get_velocity_profile(x0=0.0, y0=0.0, inc=6.0, PA=151.0, fit_vrad=True, fit_method='SHO', get_vlos_kwargs=dict(centroid_method='quadratic'), rbins=np.arange(0.3, 3.0, 0.25 * cube.bmaj)) ``` This function will return three arrays: the bin centers, the velocity profiles and the uncertainties on the velocity profiles. These profiles can then easily be plotted up. Note that by default, this function returns the _projected_ velocities, so they do not take into account the inclination of the disk. This can be changed with `deproject=True`. ``` fig, axs = plt.subplots(figsize=(6.75, 4.17), nrows=2) axs[0].grid(ls=':', color='0.9') axs[0].errorbar(r, v[0], dv[0]) axs[0].set_xticklabels([]) axs[0].set_ylabel(r'$v_{\rm \phi,\, proj}$' + ' (m/s)') axs[1].grid(ls=':', color='0.9') axs[1].errorbar(r, v[1], dv[1]) axs[1].set_xlabel('Radius (arcsec)') axs[1].set_ylabel(r'$v_{\rm r,\, proj}$' + ' (m/s)') axs[1].set_ylim(-20, 20) for ax in axs: ax.set_xlim(r[0], r[-1]) fig.align_labels(axs) ``` ### Multiple Iterations This approach often yields uncertainties (if it yields any at all!) that are implausibly small and more than likely reflect the inflexibility in the model. One approach to circumvent this is to use the `niter` argument to calculate several different velocity profiles, each using annuli with different pixels (at least statistically, each annulus is taken with a random draw of pixels), and then taking a weighted average over the samples. 
``` r, v, dv = cube.get_velocity_profile(x0=0.0, y0=0.0, inc=6.0, PA=151.0, fit_vrad=True, fit_method='SHO', get_vlos_kwargs=dict(centroid_method='gaussian'), rbins=np.arange(0.3, 3.0, 0.25 * cube.bmaj), niter=5) fig, axs = plt.subplots(figsize=(6.75, 4.17), nrows=2) axs[0].grid(ls=':', color='0.9') axs[0].errorbar(r, v[0], dv[0]) axs[0].set_xticklabels([]) axs[0].set_ylabel(r'$v_{\rm \phi,\, proj}$' + ' (m/s)') axs[1].grid(ls=':', color='0.9') axs[1].errorbar(r, v[1], dv[1]) axs[1].set_xlabel('Radius (arcsec)') axs[1].set_ylabel(r'$v_{\rm r,\, proj}$' + ' (m/s)') axs[1].set_ylim(-20, 20) for ax in axs: ax.set_xlim(r[0], r[-1]) fig.align_labels(axs) ``` ### A `rotationmap` Wrapper You may have noticed that this approach of splitting the data into annuli, calculating the centroids of each spectrum within each annulus and then fitting a SHO model can be accelerated if we already have a map of the line centroids, as we worked with in the [previous tutorial](https://eddy.readthedocs.io/en/latest/tutorials/tutorial_1.html). In fact, `rotationmap` has a similar functionailty, `fit_annuli`, which performs the same process, but without having to calculate the line centroids each time. This also has the option to return the linearly interpolated model and residuals using the same `returns` argument as found for `fit_map`. There are many more options for this function, and we encourage the reader to read the documentation for find more. ``` from eddy import rotationmap cube = rotationmap('../../cubes/TWHya_CO_cube_v0.fits', FOV=8.0) cube.plot_data() r, v, dv = cube.fit_annuli(x0=0.0, y0=0.0, inc=6.0, PA=151.0, fit_vrad=True, rbins=np.arange(0.3, 3.5, 0.25 * cube.bmaj)) ```
github_jupyter
``` %matplotlib inline import pyNN.nest as p from pyNN.random import NumpyRNG, RandomDistribution from pyNN.utility import Timer import matplotlib.pyplot as plt import pylab import numpy as np timer = Timer() p.setup(timestep=0.1) # 0.1ms rngseed = 98766987 parallel_safe = True rng = NumpyRNG(seed=rngseed, parallel_safe=parallel_safe) n = 150 # number of cells exc_ratio = 0.8 # ratio of excitatory neurons n_exc = int(round(n*0.8)) n_inh = n-n_exc print n_exc, n_inh celltype = p.Izhikevich() # default_parameters = {'a': 0.02, 'c': -65.0, 'd': 2.0, 'b': 0.2, 'i_offset': 0.0}¶ # default_initial_values = {'v': -70.0, 'u': -14.0}¶ exc_cells = p.Population(n_exc, celltype, label="Excitatory_Cells") inh_cells = p.Population(n_inh, celltype, label="Inhibitory_Cells") poisson_input = p.SpikeSourcePoisson(rate = 10, start = 2.) input_neurons = p.Population(10, poisson_input, label='input') exc_cells.celltype.recordable pconn = 0.1 # sparse connection probability w_exc = 7.6 # later add unit w_inh = -28. 
# later add unit delay_exc = 1 # defines how long (ms) the synapse takes for transmission delay_inh = 1 stat_syn_exc = p.StaticSynapse(weight =w_exc, delay=delay_exc) stat_syn_inh = p.StaticSynapse(weight =w_inh, delay=delay_inh) exc_conn = p.FixedProbabilityConnector(pconn, rng=rng) inh_conn = p.FixedProbabilityConnector(pconn, rng=rng) inp_conn = p.FixedProbabilityConnector(0.2, rng=rng) connections = {} connections['e2e'] = p.Projection(exc_cells, exc_cells, exc_conn, synapse_type=stat_syn_exc, receptor_type='excitatory') connections['e2i'] = p.Projection(exc_cells, inh_cells, exc_conn, synapse_type=stat_syn_exc,receptor_type='excitatory') connections['i2e'] = p.Projection(inh_cells, exc_cells, inh_conn, synapse_type=stat_syn_inh,receptor_type='inhibitory') connections['i2i'] = p.Projection(inh_cells, inh_cells, inh_conn, synapse_type=stat_syn_inh,receptor_type='inhibitory') connections['inp2e'] = p.Projection(input_neurons, exc_cells, inp_conn, synapse_type=stat_syn_exc,receptor_type='excitatory') connections['inp2i'] = p.Projection(input_neurons, inh_cells, inp_conn, synapse_type=stat_syn_exc,receptor_type='excitatory') exc_cells[2:13].record(['spikes']) exc_cells[2:3].record(['v']) inh_cells.record(['v','spikes']) global sim_time sim_time=100 p.run(sim_time) spikes_exc = exc_cells[2:13].get_data() inh_cells_data = inh_cells.get_data() fig_settings = { 'lines.linewidth': 0.5, 'axes.linewidth': 0.5, 'axes.labelsize': 'small', 'legend.fontsize': 'small', 'font.size': 8 } plt.rcParams.update(fig_settings) plt.figure(1, figsize=(6,8)) def plot_spiketrains(segment,y_label): i=0 for spiketrain in segment.spiketrains: neuron_id =spiketrain.annotations['source_id'] if i==0: first_neuron_id=neuron_id i+=1 y = np.ones_like(spiketrain) * neuron_id plt.plot(spiketrain, y, '.') #print spiketrain plt.ylabel(y_label) plt.setp(plt.gca().get_xticklabels(), visible=False) plt.axis([0, sim_time, first_neuron_id-1, neuron_id+1]) def plot_signal(signal, index, colour='b'): label = 
"Neuron %d" % signal.annotations['source_ids'][index] plt.plot(signal.times, signal[:, index], colour, label=label) plt.ylabel("%s (%s)" % (signal.name, signal.units._dimensionality.string)) plt.setp(plt.gca().get_xticklabels(), visible=False) plt.legend() print np.shape(spikes_exc.segments[0].analogsignals) print np.shape(inh_cells_data.segments[0].analogsignals) n_panels = sum(a.shape[1] for a in spikes_exc.segments[0].analogsignals) + 1 print n_panels plt.subplot(n_panels, 1, 1) plot_spiketrains(spikes_exc.segments[0],'exc_neurons') panel = 2 for array in spikes_exc.segments[0].analogsignals: for i in range(array.shape[1]): plt.subplot(n_panels, 1, panel) plot_signal(array, i, colour='bg'[panel % 2]) panel += 1 plt.xlabel("time (ms)") plt.setp(plt.gca().get_xticklabels(), visible=True) plt.show() ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. # Tutorial #3: Deploy an image classification model for encrypted inferencing in Azure Container Instance (ACI) This tutorial is **a new addition to the two-part series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud. Now, you're ready to deploy the model as a encrypted inferencing web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself. In this part of the tutorial, you use Azure Machine Learning service (Preview) to: > * Set up your testing environment > * Retrieve the model from your workspace > * Test the model locally > * Deploy the model to ACI > * Test the deployed model ACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where). ## Prerequisites Complete the model training in the [Tutorial #1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook. 
``` # If you did NOT complete the tutorial, you can instead run this cell # This will register a model and download the data needed for this tutorial # These prerequisites are created in the training tutorial # Feel free to skip this cell if you completed the training tutorial # register a model from azureml.core import Workspace ws = Workspace.from_config() from azureml.core.model import Model model_name = "sklearn_mnist" model = Model.register(model_path="sklearn_mnist_model.pkl", model_name=model_name, tags={"data": "mnist", "model": "classification"}, description="Mnist handwriting recognition", workspace=ws) ``` ### Setup the Environment Add `encrypted-inference` package as a conda dependency ``` from azureml.core.environment import Environment from azureml.core.conda_dependencies import CondaDependencies # to install required packages env = Environment('tutorial-encryption-env') cd = CondaDependencies.create(pip_packages=['azureml-dataset-runtime[pandas,fuse]', 'azureml-defaults', 'azure-storage-blob', 'encrypted-inference==0.9'], conda_packages = ['scikit-learn==0.22.1']) env.python.conda_dependencies = cd # Register environment to re-use later env.register(workspace = ws) ``` ## Set up the environment Start by setting up a testing environment. ### Import packages Import the Python packages needed for this tutorial. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import azureml.core # display the core SDK version number print("Azure ML SDK Version: ", azureml.core.VERSION) ``` #### Install Homomorphic Encryption based library for Secure Inferencing Our library is based on [Microsoft SEAL](https://github.com/Microsoft/SEAL) and pubished to [PyPi.org](https://pypi.org/project/encrypted-inference) as an easy to use package ``` !pip install encrypted-inference==0.9 ``` ## Deploy as web service Deploy the model as a web service hosted in ACI. 
To build the correct environment for ACI, provide the following: * A scoring script to show how to use the model * A configuration file to build the ACI * The model you trained before ### Create scoring script Create the scoring script, called score_encrypted.py, used by the web service call to show how to use the model. You must include two required functions into the scoring script: * The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started. * The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to the run typically use JSON for serialization and de-serialization, but other formats are supported. The function fetches homomorphic encryption based public keys that are uploaded by the service caller. ``` %%writefile score_encrypted.py import json import os import pickle import joblib from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, PublicAccess from encrypted.inference.eiserver import EIServer def init(): global model # AZUREML_MODEL_DIR is an environment variable created during deployment. 
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION) # For multiple models, it points to the folder containing all deployed models (./azureml-models) model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl') model = joblib.load(model_path) global server server = EIServer(model.coef_, model.intercept_, verbose=True) def run(raw_data): json_properties = json.loads(raw_data) key_id = json_properties['key_id'] conn_str = json_properties['conn_str'] container = json_properties['container'] data = json_properties['data'] # download the Galois keys from blob storage #TODO optimize by caching the keys locally blob_service_client = BlobServiceClient.from_connection_string(conn_str=conn_str) blob_client = blob_service_client.get_blob_client(container=container, blob=key_id) public_keys = blob_client.download_blob().readall() result = {} # make prediction result = server.predict(data, public_keys) # you can return any data type as long as it is JSON-serializable return result ``` ### Create configuration file Create a deployment configuration file and specify the number of CPUs and gigabyte of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you feel you need more later, you would have to recreate the image and redeploy the service. ``` from azureml.core.webservice import AciWebservice aciconfig = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1, tags={"data": "MNIST", "method" : "sklearn"}, description='Encrypted Predict MNIST with sklearn + SEAL') ``` ### Deploy in ACI Estimated time to complete: **about 2-5 minutes** Configure the image and deploy. The following code goes through these steps: 1. Create environment object containing dependencies needed by the model using the environment file (`myenv.yml`) 1. 
Create inference configuration necessary to deploy the model as a web service using: * The scoring file (`score_encrypted.py`) * envrionment object created in previous step 1. Deploy the model to the ACI container. 1. Get the web service HTTP endpoint. ``` %%time import uuid from azureml.core.webservice import Webservice from azureml.core.model import InferenceConfig from azureml.core.environment import Environment from azureml.core import Workspace from azureml.core.model import Model ws = Workspace.from_config() model = Model(ws, 'sklearn_mnist') myenv = Environment.get(workspace=ws, name="tutorial-encryption-env") inference_config = InferenceConfig(entry_script="score_encrypted.py", environment=myenv) service_name = 'sklearn-mnist-svc-' + str(uuid.uuid4())[:4] service = Model.deploy(workspace=ws, name=service_name, models=[model], inference_config=inference_config, deployment_config=aciconfig) service.wait_for_deployment(show_output=True) ``` Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application. ``` print(service.scoring_uri) ``` ## Test the model ### Download test data Download the test data to the **./data/** directory ``` import os from azureml.core import Dataset from azureml.opendatasets import MNIST data_folder = os.path.join(os.getcwd(), 'data') os.makedirs(data_folder, exist_ok=True) mnist_file_dataset = MNIST.get_file_dataset() mnist_file_dataset.download(data_folder, overwrite=True) ``` ### Load test data Load the test data from the **./data/** directory created during the training tutorial. ``` from utils import load_data import os import glob data_folder = os.path.join(os.getcwd(), 'data') # note we also shrink the intensity values (X) from 0-255 to 0-1. 
This helps the neural network converge faster X_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0 y_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1) ``` ### Predict test data Feed the test dataset to the model to get predictions. The following code goes through these steps: 1. Create our Homomorphic Encryption based client 1. Upload HE generated public keys 1. Encrypt the data 1. Send the data as JSON to the web service hosted in ACI. 1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl. #### Create our Homomorphic Encryption based client Create a new EILinearRegressionClient and setup the public keys ``` from encrypted.inference.eiclient import EILinearRegressionClient # Create a new Encrypted inference client and a new secret key. edp = EILinearRegressionClient(verbose=True) public_keys_blob, public_keys_data = edp.get_public_keys() ``` #### Upload HE generated public keys Upload the public keys to the workspace default blob store. This will allow us to share the keys with the inference server ``` import azureml.core from azureml.core import Workspace, Datastore import os ws = Workspace.from_config() datastore = ws.get_default_datastore() container_name=datastore.container_name # Create a local file and write the keys to it public_keys = open(public_keys_blob, "wb") public_keys.write(public_keys_data) public_keys.close() # Upload the file to blob store datastore.upload_files([public_keys_blob]) # Delete the local file os.remove(public_keys_blob) ``` #### Encrypt the data ``` #choose any one sample from the test data sample_index = 1 #encrypt the data raw_data = edp.encrypt(X_test[sample_index]) ``` #### Send the test data to the webservice hosted in ACI Feed the test dataset to the model to get predictions. 
We will need to send the connection string to the blob storage where the public keys were uploaded ``` import json from azureml.core import Webservice service = Webservice(ws, service_name) #pass the connection string for blob storage to give the server access to the uploaded public keys conn_str_template = 'DefaultEndpointsProtocol={};AccountName={};AccountKey={};EndpointSuffix=core.windows.net' conn_str = conn_str_template.format(datastore.protocol, datastore.account_name, datastore.account_key) #build the json data = json.dumps({"data": raw_data, "key_id" : public_keys_blob, "conn_str" : conn_str, "container" : container_name }) data = bytes(data, encoding='ASCII') print ('Making an encrypted inference web service call ') eresult = service.run(input_data=data) print ('Received encrypted inference results') ``` #### Decrypt the data Use the client to decrypt the results ``` import numpy as np results = edp.decrypt(eresult) print ('Decrypted the results ', results) #Apply argmax to identify the prediction result prediction = np.argmax(results) print ( ' Prediction : ', prediction) print ( ' Actual Label : ', y_test[sample_index]) ``` ## Clean up resources To keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call: ``` service.delete() ``` If you're not going to use what you've created here, delete the resources you just created with this quickstart so you don't incur any charges. In the Azure portal, select and delete your resource group. You can also keep the resource group, but delete a single workspace by displaying the workspace properties and selecting the Delete button. 
## Next steps In this Azure Machine Learning tutorial, you used Python to: > * Set up your testing environment > * Retrieve the model from your workspace > * Test the model locally > * Deploy the model to ACI > * Test the deployed model You can also try out the [regression tutorial](regression-part1-data-prep.ipynb). ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/tutorials/img-classification-part2-deploy.png)
github_jupyter
#### importing libraries ``` import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import matplotlib.dates as mdates import seaborn as sns ``` #### Importing and understanding the dataset ``` #reading the dataset : "Dataframe.csv" df = pd.read_csv('./Datasets/DataFrame.csv') df.head(10) #let's get some information about the data features df.info() ``` ##### Insight : We can see that the Unnamed column is useless so we better drop it ``` df.drop(['Unnamed: 7'],axis=1,inplace=True) #inplace = true as we don't want to store it in another dataset df.head() #to check whether changes are reflected to the original dataset or not ``` #### Data Description : Dataset features - Type : NIFTY - Date : Specific trading Date - Time : Time of the trade - Open : Opening price of the trade - High : maximum price at particular time - Low : minimum price at particular time - Close : Closing price - Total change - { Close - Open } ##### Type of the data ``` df.info() ``` ##### Insights : Date and Time are listed as int64 and object respectively, so we need to convert them into a more readable type, but before we do that let's check for missing values and duplicates in the dataset ##### Finding the missing values ``` df.isnull().values.any() ``` ##### Insight : False means there are no null values in the dataset ##### Finding the duplicates ``` df[df.duplicated()] ``` ##### Insight : So by looking at the output we can say there are no duplicates in our dataset. 
##### Working with the date and time columns: it is very important for datetime data that we add month, year, day, hour and other additional information to get better insights ``` df['Date'] = dt = pd.to_datetime(df["Date"],format="%Y%m%d") df.head() df["Year"] = df.Date.dt.year df["month"] = df.Date.dt.month df["day"] = df.Date.dt.day df["day_of_week"] = df.Date.dt.dayofweek df["week"] = df.Date.dt.isocalendar().week df.head(10) # df.head(10000) # Important Note : The ISO definition for week 1 is the week with the first Thursday in it. Using the ISO week number may give unexpected results near the start and end of a year. # For instance, in above output 01/01/2021 falls in week 53 of year 2020. # df.drop(["Date"], axis = 1, inplace = True) #now we don't need the date column df.head() df["hour"] = pd.to_datetime(df["Time"]).dt.hour df["min"] = pd.to_datetime(df["Time"]).dt.minute df.head() df.drop(["Time"], axis = 1, inplace = True) df.head() #df.head(1000) ``` ##### Calculating the change in percentage that occurred since the opening ``` df['change'] = df['close'] - df['open'] df['change_in_percentage'] = round( (( (df['close'] - df['open'] ) / df['open'] ) * 100 ), 3) df.head() # df.head(1000) ``` ##### Now that we have all the features in our data we need, we can describe our data ``` df.describe() ``` ##### Question : For a particular week and month, find out the changes in opening, closing, high and low ``` df.week.unique() df.Year.unique() df.month.unique() df.day.unique() df.day_of_week.unique() # code for : int(datetime.datetime.today().strftime('%w')) to change the start of the week ``` ##### Technical Note: here Python numbers Monday as 0, so if you want to change that you can refer to the [Documentation](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) ##### Insights from above : In our data, - For year, We have only year 2021 data - For month, We have only 3 months data which consists of January, February & March - For week, We have 
only limited weeks from year 2021 - For day of week, We have only data of Monday to Thursday - In given data, Data consists of all the days in the particular month. ``` short_period = 12 long_period = 26 signal_period = 9 df['ema_12'] = df['close'].ewm(span=short_period, adjust=False).mean() df['ema_26'] = df['close'].ewm(span=long_period, adjust=False).mean() df['ema_50'] = df['close'].ewm(span=50, adjust=False).mean() df['ema_100'] = df['close'].ewm(span=100, adjust=False).mean() df['ema_200'] = df['close'].ewm(span=200, adjust=False).mean() ewm_short = df['ema_12'] ewm_long = df['ema_26'] # MACD line is the short-period EMA minus the long-period EMA (12-EMA - 26-EMA) df['MACD'] = ewm_short - ewm_long # Signal line is the EMA of the MACD line itself, not of the close price df['Signal'] = df['MACD'].ewm(span=signal_period, adjust=False).mean() df.head() df['temp'] = df['close'].shift(1) # df.head() # Returns = close relative to the previous close, minus 1 df['Returns'] = (df['close']/df['temp']) -1 df.drop(['temp'],axis=1,inplace=True) df.head() # Plot to view the same df.plot(x = "Date", y = ['close', 'ema_12', 'ema_26', 'ema_50', 'ema_100', 'ema_200'] , subplots = True, layout = (3, 3), figsize = (15, 15), title = "Close vs EMA", rot = 90) ``` ##### Correlation Matrix ``` # correlation matrix plt.figure(figsize=(12,8)) sns.heatmap(df.corr(),cmap='Blues',annot=True) k = 13 #number of variables for heatmap cols = df.corr().nlargest(k, 'change')['change'].index cm = df[cols].corr() plt.figure(figsize=(14,10)) sns.heatmap(cm, annot=True, cmap = 'viridis') plt.plot(df.index, df['open']) plt.grid(True) plt.xticks(rotation=90) plt.figure(figsize=(10,10)) plt.show() plt.plot(df.index, df['close']) plt.grid(True) plt.xticks(rotation=90) plt.figure(figsize=(10,10)) plt.show() ``` ##### Insights : - Open and close both variables are highly correlated to each other as well as to high and low - By looking at the visualization we can say that there is not much difference in the trends of the plots #### Visualizing the skewness ``` Column_List = ['open', 'high', 'low', 'close'] #We don't want to check other columns fig, ax = plt.subplots(len(Column_List), figsize = (20, 20)) for i, 
col_list in enumerate(Column_List): sns.distplot(df[col_list], hist = True, ax = ax[i]) ax[i].set_title ("Frequency Distribution of " + col_list, fontsize = 14) ax[i].set_xlabel (col_list, fontsize = 14) ax[i].set_ylabel ('Distribution Value', fontsize = 14) ax[i].grid('on') fig.tight_layout (pad = 5) ``` ##### Insights : The frequency distributions of open, close, high & low are pretty similar ##### Outlier ``` for i, col_list in enumerate(Column_List): fig = plt.figure() fig.suptitle('BoxPlot for '+col_list, fontsize=14, fontweight='bold') ax = fig.add_subplot(111) ax.boxplot(x=df[col_list]) ax.set_ylabel('Values') ax.grid('on') plt.show() ``` ##### Insights : Looking at the boxplots, they are all very similar, with little to no difference between them. ``` import pickle pickle_out = open("niftydf.pickle","wb") pickle.dump(df, pickle_out) pickle_out.close() ```
github_jupyter
# Neo4j command note **Outline** * [Introduction](#intro) * [Cypher Note](#cypher) * [Reference](#refer) --- # <a id='intro'>Introduction</a> A graph database is an online database management system with Create, Read, Update and Delete (CRUD) operations working on a graph data model. **Why do we need Graph database**: The relationships between data points matter more than the individual points themselves. Generate deep insights over the data. **Graph Databases are composed of two elements: ** * Nodes * Relationships (a.k.a. links/edges) **Note** * Both nodes and links can have attributes. # <a id='cypher'>Cypher</a> In all the following example, any thing start with $ is a variable, which I can change it into something I want. Here is a screenshot after running the following creating related commands <img src="pic/neo4j.png" style="width: 400px;height: 400px;"/> > **show all nodes** ``` MATCH (n) RETURN (n) ``` > **create node** ``` # format create ($variable: $NodeName{name: $value} ) # example create (masud:User{name:"masud"}) create (goce:User{name:"goce"}) create (diego:User{name:"diego"}) create (sarah:User{name:"sarah"}) create (gamestream:Channel{name:"Masud’s online gaming"}) create (lab430:Channel{name:"Masud’s 430 lab"}) create (bigdataclass:Channel{name:"Diego’s big data class"}) create (sarahCalendar:Channel{name:"Sarah’s calendar"}) create (diegoCalendar:Channel{name:"Diego’s calendar"}) ``` > **delete node** ``` ### format MATCH ($variable: User { name: 'johnny' }) DELETE $variable ### example # create the node to be deleted create (jchiu:User{name:"johnny"}) # delete a specific node using match MATCH (n:User { name: 'johnny' }) DELETE n # delete everything MATCH (n) DETACH DELETE n ``` > **Create links/relationships** ``` ### format version1, Preferable match ($variable1: $node_name1 {name:"$target_name1"}), ($variable2: $node_name2 {name:"$target_name2"}) CREATE ($variable1)-[:$link_name]->($variable2) ### example create in separate command MATCH 
(u:User {name:"masud"}), (c:Channel {name:"Masud’s online gaming"}) CREATE (u)-[:OWNS]->(c) MATCH (u:User {name:"masud"}), (c:Channel {name:"Masud’s 430 lab"}) CREATE (u)-[:OWNS]->(c) MATCH (u:User {name:"diego"}), (c:Channel {name:"Diego’s big data class"}) CREATE (u)-[:OWNS]->(c) MATCH (u:User {name:"sarah"}), (c:Channel {name:"Sarah’s calendar"}) CREATE (u)-[:OWNS]->(c) MATCH (u:User {name:"diego"}), (c:Channel {name:"Diego’s calendar"}) CREATE (u)-[:OWNS]->(c) ### format version2 # noted that both $variable1 and $variable2 in this case are defined previously # this only works creating for altogheter, i.e., creating node and link in a single query create ($variable1)-[:$link_name]->($variable2) ### example (masud)-[:OWNS]->(gamestream), (masud)-[:OWNS]->(lab430), (diego)-[:OWNS]->(bigdataclass), (sarah)-[:OWNS]->(sarahCalendar), (diego)-[:OWNS]->(diegoCalendar) # or (masud)-[:OWNS]->(gamestream) (masud)-[:OWNS]->(lab430) (diego)-[:OWNS]->(bigdataclass) (sarah)-[:OWNS]->(sarahCalendar) (diego)-[:OWNS]->(diegoCalendar) ``` > **match** ``` # Find anything named “goce” match (n {name:"goce"}) return n # Find users named “goce” match (n:User {name:"goce"}) return n # Find all users match (user:User) return user limit 3 # Find users, return the names of them limiting output to be 3 match (user:User) return user.name limit 3 # List all class owned by masud MATCH (masud:User {name:"masud"})-[:OWNS]->(masudChannels) return masud,masudChannels # Who owned “Diego’s bigdataclass”? # * the output is a table MATCH (c:Channel {name: "Diego’s big data class"})<-[:OWNS]-(owner) return owner.name # Masud’sco-subscribers # * the output is a table match (masud:User {name:"masud"})-[:SUB_TO]->(c)<-[:SUB_TO]-(coSubs) return coSubs.name # Lets recommend new channels for masud to sub_to. # Extend masud’s co-subs, to find co-sub-channels that masud has not sub_to. 
# m collects all the channels that masud has subscribed to # coSubs collects all the Users that have subscribed to the channels subscribed to by masud # m2 records all the channels that these coSubs have subscribed to # RETURN the names of the channels that all the coSubs have subscribed to # WHERE keeps only the channels that masud has not yet subscribed to # * the output is a table match (masud:User {name:"masud"})-[:SUB_TO]->(m)<-[:SUB_TO]- (coSubs)-[:SUB_TO]->(m2) WHERE NOT (masud)-[:SUB_TO]->(m2) RETURN m2.name AS Recommended, count(*) AS Strength ORDER BY Strength DESC ``` # <a id='refer'>Reference</a> **Reference** * [Neo4j Download Links](https://neo4j.com/download/) * [Install Neo4j: Youtube tutorial](https://www.youtube.com/watch?v=0FO81O-nTrc) * [Cypher Query Language](https://neo4j.com/developer/cypher/) * [Neo4j Sandbox](https://www.youtube.com/watch?v=7aON114bXxA) * [Cypher Documentation](https://neo4j.com/docs/developer-manual/current/cypher/) * Titan DB + Gephi * [titan](http://titan.thinkaurelius.com/) * [gephi](https://gephi.org/)
github_jupyter
1) Given the array, split the array at the middle (if it is odd length consider the next higher integer) and swap the two halves of the array. Ex: if input is [12,10,5,6,52,36] output should be [6,52,36,12,10,5] if input is [12,10,5,6,52,36,34] output should be [6,52,36,34,12,10,5] ``` import math # Complete this function to get the desired result def reverseatCenter(arr): n = len(arr) #length of the array m = n//2 #Middle index arr2 = arr[m:]+arr[:m] #Swapping the two sub-lists around the middle return arr2 # Print working examples print("Example 1 - [14,9,5,6,52,36,34]") print(reverseatCenter([14,9,5,6,52,36,34,23,111])) print("Example 2 - [14,9,5,6,52,36]") print(reverseatCenter([14,9,5,6,52,36])) ``` 2) Given a list of numbers, return a list where all adjacent duplicate elements have been removed. Ex: 2, 2, 2, 3, 2 returns 2, 3, 2. ``` # Complete the function to get the desired result def remove_adjacent(b): b2 = b[:1] #Seed with the first element; an empty input safely yields [] for i in range(1,len(b)): #Checking consecutive repetitions if b2[-1]!=b[i]: b2.append(b[i]) return b2 # Print working examples print("Example 1 - [2, 2, 2, 3, 2]") print(remove_adjacent([2, 2, 2, 3, 2])) print("\n") print("Example 2 - [1,2,3,3,2,3,1,1,1,2,1,2,2]") print(remove_adjacent([1,2,3,3,2,3,1,1,1,2,1,2,2])) ``` 3) given matrix of 7x7 full of ones, create a square with given side length (center same as original square(7x7)) replace ones with zeros at the edges for example, $$ \begin{align} A = \begin{bmatrix} 1 & 1 & 1 & 1 & 1 & 1 & 1\\ 1 & 1 & 1 & 1 & 1 & 1 & 1\\ 1 & 1 & 1 & 1 & 1 & 1 & 1\\ 1 & 1 & 1 & 1 & 1 & 1 & 1\\ 1 & 1 & 1 & 1 & 1 & 1 & 1\\ 1 & 1 & 1 & 1 & 1 & 1 & 1\\ 1 & 1 & 1 & 1 & 1 & 1 & 1\\ \end{bmatrix} \end{align} $$ After modification $$ \begin{align} A = \begin{bmatrix} 1 & 1 & 1 & 1 & 1 & 1 & 1\\ 1 & 1 & 1 & 1 & 1 & 1 & 1\\ 1 & 1 & 0 & 0 & 0 & 1 & 1\\ 1 & 1 & 0 & 1 & 0 & 1 & 1\\ 1 & 1 & 0 & 0 & 0 & 1 & 1\\ 1 & 1 & 1 & 1 & 1 & 1 & 1\\ 1 & 1 & 1 & 1 & 1 & 1 & 1\\ \end{bmatrix} \end{align} $$ ``` import numpy as np # write a program to get the desired result def 
matrix_square(A,l): n = A.shape[0] #Size of the matrix #the length of the smaller square should be lesser than the size of the matrix, both of which #are odd assert l<=n and n%2==1 and l%2==1 #start and end of inner square start = (n-l)//2 end = (n+l)//2 for i in range(start,end): for j in range(start,end): #Making the elements in the edges of the smaller square 0 if (i==start or i==end-1) or (j==start or j==end-1): A[i][j] = 0 return A.astype(int) #Print working examples #Example 1 - side length 3 print("Example 1 - side length 3") A = np.ones((7,7)) #7x7 matrix of ones l = 3 #Side length = 3 print(matrix_square(A,l)) print("\n") #Example 2 - side length 5 print("Example 2 - side length 5") A = np.ones((7,7)) #7x7 matrix of ones l = 5 #Side length = 5 print(matrix_square(A,l)) print("\n") #Example 3 - side length 7 print("Example 3 - side length 7") A = np.ones((7,7)) #7x7 matrix of ones l = 7 #Side length = 7 print(matrix_square(A,l)) ``` 4) Paragraph present in data.txt is encoded such that each alphabet in a word is incremented to the next ascii value. Decode the paragraph present in data_encoded.txt (Hint: decrease the ascii value of each character in the word) ``` def sentence_decode(text): #Decode by shifting every letter back one ascii value. #Only letters are shifted; spaces, punctuation and newlines are copied unchanged, since only alphabets were shifted during encoding. decoded = "" #initializing empty string for character in text: if character.isalpha(): #Decoding - identifying the previous ascii value decoded += chr(ord(character)-1) else: #Copying non-letter characters as they are decoded += character return decoded with open ("data_encoded.txt", "r") as myfile: data=myfile.read() print(data) print(" ") print(sentence_decode(data)) # Has to print the decoded paragraph. ```
github_jupyter